Merge pull request #1004 from TheBlueMatt/2021-07-forward-event
diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs
index cec068a10429b0c5ee94e51fa4e7d919fa949950..afbfd0ee970da43bc050c488281cf16da4c4e8c7 100644
@@ -44,7 +44,7 @@ use chain::transaction::{OutPoint, TransactionData};
 // construct one themselves.
 use ln::{PaymentHash, PaymentPreimage, PaymentSecret};
 pub use ln::channel::CounterpartyForwardingInfo;
-use ln::channel::{Channel, ChannelError, ChannelUpdateStatus};
+use ln::channel::{Channel, ChannelError, ChannelUpdateStatus, UpdateFulfillCommitFetch};
 use ln::features::{InitFeatures, NodeFeatures};
 use routing::router::{Route, RouteHop};
 use ln::msgs;
@@ -57,13 +57,14 @@ use util::events::{EventHandler, EventsProvider, MessageSendEvent, MessageSendEv
 use util::{byte_utils, events};
 use util::ser::{Readable, ReadableArgs, MaybeReadable, Writeable, Writer};
 use util::chacha20::{ChaCha20, ChaChaReader};
-use util::logger::Logger;
+use util::logger::{Logger, Level};
 use util::errors::APIError;
 
+use io;
 use prelude::*;
 use core::{cmp, mem};
 use core::cell::RefCell;
-use std::io::{Cursor, Read};
+use io::{Cursor, Read};
 use sync::{Arc, Condvar, Mutex, MutexGuard, RwLock, RwLockReadGuard};
 use core::sync::atomic::{AtomicUsize, Ordering};
 use core::time::Duration;
@@ -207,6 +208,14 @@ pub(super) enum HTLCFailReason {
        }
 }
 
+/// Return value for claim_funds_from_hop
+enum ClaimFundsFromHop {
+       PrevHopForceClosed,
+       MonitorUpdateFail(PublicKey, MsgHandleErrInternal, Option<u64>),
+       Success(u64),
+       DuplicateClaim,
+}
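// [Editor's sketch] A minimal, self-contained illustration (toy types, not LDK's
// real ones) of why a dedicated return enum replaces the previous
// Result<(), Option<(PublicKey, MsgHandleErrInternal)>>: each outcome gets an
// explicit name, and the claimed HTLC value can ride along so the caller can
// surface it in the new PaymentForwarded event.
enum ClaimOutcomeSketch {
    PrevHopForceClosed,
    MonitorUpdateFail { counterparty: [u8; 33], err: String, htlc_value_msat: Option<u64> },
    Success { htlc_value_msat: u64 },
    DuplicateClaim,
}

fn claimed_value_msat(res: &ClaimOutcomeSketch) -> Option<u64> {
    match res {
        ClaimOutcomeSketch::Success { htlc_value_msat } => Some(*htlc_value_msat),
        ClaimOutcomeSketch::MonitorUpdateFail { htlc_value_msat, .. } => *htlc_value_msat,
        _ => None,
    }
}

fn main() {
    let res = ClaimOutcomeSketch::Success { htlc_value_msat: 1_000_000 };
    assert_eq!(claimed_value_msat(&res), Some(1_000_000));
}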
+
 type ShutdownResult = (Option<(OutPoint, ChannelMonitorUpdate)>, Vec<(HTLCSource, PaymentHash)>);
 
 /// Error type returned across the channel_state mutex boundary. When an Err is generated for a
@@ -1465,10 +1474,11 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
 
                        // OUR PAYMENT!
                        // final_expiry_too_soon
-                       // We have to have some headroom to broadcast on chain if we have the preimage, so make sure we have at least
-                       // HTLC_FAIL_BACK_BUFFER blocks to go.
-                       // Also, ensure that, in the case of an unknown payment hash, our payment logic has enough time to fail the HTLC backward
-                       // before our onchain logic triggers a channel closure (see HTLC_FAIL_BACK_BUFFER rational).
+                       // We have to have some headroom to broadcast on chain if we have the preimage, so make sure
+                       // we have at least HTLC_FAIL_BACK_BUFFER blocks to go.
+                       // Also, ensure that, in the case of an unknown preimage for the received payment hash, our
+                       // payment logic has enough time to fail the HTLC backward before our onchain logic triggers a
+                       // channel closure (see HTLC_FAIL_BACK_BUFFER rationale).
                        if (msg.cltv_expiry as u64) <= self.best_block.read().unwrap().height() as u64 + HTLC_FAIL_BACK_BUFFER as u64 + 1 {
                                return_err!("The final CLTV expiry is too soon to handle", 17, &[0;0]);
                        }
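// [Editor's sketch] The final-CLTV headroom check above, restated standalone with
// an illustrative buffer value (the real HTLC_FAIL_BACK_BUFFER constant is defined
// elsewhere in this file and may differ). The HTLC is only accepted if enough
// blocks remain to either claim on chain with the preimage or fail it backward.
const ILLUSTRATIVE_FAIL_BACK_BUFFER: u64 = 24; // placeholder, not LDK's actual constant

fn final_cltv_acceptable(cltv_expiry: u64, best_block_height: u64) -> bool {
    cltv_expiry > best_block_height + ILLUSTRATIVE_FAIL_BACK_BUFFER + 1
}

fn main() {
    // With the current tip at 700_000, an expiry of 700_010 leaves too little headroom...
    assert!(!final_cltv_acceptable(700_010, 700_000));
    // ...while 700_100 comfortably clears the buffer.
    assert!(final_cltv_acceptable(700_100, 700_000));
}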
@@ -2786,16 +2796,22 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                                                         HTLCFailReason::Reason { failure_code: 0x4000|15, data: htlc_msat_height_data });
                                } else {
                                        match self.claim_funds_from_hop(channel_state.as_mut().unwrap(), htlc.prev_hop, payment_preimage) {
-                                               Err(Some(e)) => {
-                                                       if let msgs::ErrorAction::IgnoreError = e.1.err.action {
+                                               ClaimFundsFromHop::MonitorUpdateFail(pk, err, _) => {
+                                                       if let msgs::ErrorAction::IgnoreError = err.err.action {
                                                                // We got a temporary failure updating monitor, but will claim the
                                                                // HTLC when the monitor updating is restored (or on chain).
-                                                               log_error!(self.logger, "Temporary failure claiming HTLC, treating as success: {}", e.1.err.err);
+                                                               log_error!(self.logger, "Temporary failure claiming HTLC, treating as success: {}", err.err.err);
                                                                claimed_any_htlcs = true;
-                                                       } else { errs.push(e); }
+                                                       } else { errs.push((pk, err)); }
+                                               },
+                                               ClaimFundsFromHop::PrevHopForceClosed => unreachable!("We already checked for channel existence, we can't fail here!"),
+                                               ClaimFundsFromHop::DuplicateClaim => {
+                                               ClaimFundsFromHop::DuplicateClaim => {
+                                                       // While we shouldn't reach this case in practice, if we do, it likely
+                                                       // indicates that the HTLC timed out some time ago and is no longer
+                                                       // available to be claimed. Thus, it does not make sense to set
+                                                       // `claimed_any_htlcs`.
                                                },
-                                               Err(None) => unreachable!("We already checked for channel existence, we can't fail here!"),
-                                               Ok(()) => claimed_any_htlcs = true,
+                                               ClaimFundsFromHop::Success(_) => claimed_any_htlcs = true,
                                        }
                                }
                        }
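// [Editor's sketch] The claimed_any_htlcs decision above in standalone form (toy
// enum; the force-closed case is omitted because claim_funds has already checked
// that the channel exists). A temporary monitor failure still counts as claimed,
// since the preimage will be replayed once the monitor is restored or on chain;
// a DuplicateClaim does not, as the HTLC was most likely already settled or timed out.
enum ClaimResSketch { Success, DuplicateClaim, TemporaryMonitorFailure, OtherMonitorFailure }

fn counts_as_claimed(res: &ClaimResSketch) -> bool {
    match res {
        ClaimResSketch::Success => true,
        ClaimResSketch::TemporaryMonitorFailure => true,  // treated as success, claimed later
        ClaimResSketch::DuplicateClaim => false,          // nothing left to claim
        ClaimResSketch::OtherMonitorFailure => false,     // surfaced to the caller as an error
    }
}

fn main() {
    assert!(counts_as_claimed(&ClaimResSketch::TemporaryMonitorFailure));
    assert!(!counts_as_claimed(&ClaimResSketch::DuplicateClaim));
}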
@@ -2813,62 +2829,68 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                } else { false }
        }
 
-       fn claim_funds_from_hop(&self, channel_state_lock: &mut MutexGuard<ChannelHolder<Signer>>, prev_hop: HTLCPreviousHopData, payment_preimage: PaymentPreimage) -> Result<(), Option<(PublicKey, MsgHandleErrInternal)>> {
+       fn claim_funds_from_hop(&self, channel_state_lock: &mut MutexGuard<ChannelHolder<Signer>>, prev_hop: HTLCPreviousHopData, payment_preimage: PaymentPreimage) -> ClaimFundsFromHop {
                //TODO: Delay the claimed_funds relaying just like we do outbound relay!
                let channel_state = &mut **channel_state_lock;
                let chan_id = match channel_state.short_to_id.get(&prev_hop.short_channel_id) {
                        Some(chan_id) => chan_id.clone(),
                        None => {
-                               return Err(None)
+                               return ClaimFundsFromHop::PrevHopForceClosed
                        }
                };
 
                if let hash_map::Entry::Occupied(mut chan) = channel_state.by_id.entry(chan_id) {
-                       let was_frozen_for_monitor = chan.get().is_awaiting_monitor_update();
                        match chan.get_mut().get_update_fulfill_htlc_and_commit(prev_hop.htlc_id, payment_preimage, &self.logger) {
-                               Ok((msgs, monitor_option)) => {
-                                       if let Some(monitor_update) = monitor_option {
+                               Ok(msgs_monitor_option) => {
+                                       if let UpdateFulfillCommitFetch::NewClaim { msgs, htlc_value_msat, monitor_update } = msgs_monitor_option {
                                                if let Err(e) = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), monitor_update) {
-                                                       if was_frozen_for_monitor {
-                                                               assert!(msgs.is_none());
-                                                       } else {
-                                                               return Err(Some((chan.get().get_counterparty_node_id(), handle_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::CommitmentFirst, false, msgs.is_some()).unwrap_err())));
-                                                       }
+                                                       log_given_level!(self.logger, if e == ChannelMonitorUpdateErr::PermanentFailure { Level::Error } else { Level::Debug },
+                                                               "Failed to update channel monitor with preimage {:?}: {:?}",
+                                                               payment_preimage, e);
+                                                       return ClaimFundsFromHop::MonitorUpdateFail(
+                                                               chan.get().get_counterparty_node_id(),
+                                                               handle_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::CommitmentFirst, false, msgs.is_some()).unwrap_err(),
+                                                               Some(htlc_value_msat)
+                                                       );
                                                }
+                                               if let Some((msg, commitment_signed)) = msgs {
+                                                       log_debug!(self.logger, "Claiming funds for HTLC with preimage {} resulted in a commitment_signed for channel {}",
+                                                               log_bytes!(payment_preimage.0), log_bytes!(chan.get().channel_id()));
+                                                       channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
+                                                               node_id: chan.get().get_counterparty_node_id(),
+                                                               updates: msgs::CommitmentUpdate {
+                                                                       update_add_htlcs: Vec::new(),
+                                                                       update_fulfill_htlcs: vec![msg],
+                                                                       update_fail_htlcs: Vec::new(),
+                                                                       update_fail_malformed_htlcs: Vec::new(),
+                                                                       update_fee: None,
+                                                                       commitment_signed,
+                                                               }
+                                                       });
+                                               }
+                                               return ClaimFundsFromHop::Success(htlc_value_msat);
+                                       } else {
+                                               return ClaimFundsFromHop::DuplicateClaim;
                                        }
-                                       if let Some((msg, commitment_signed)) = msgs {
-                                               log_debug!(self.logger, "Claiming funds for HTLC with preimage {} resulted in a commitment_signed for channel {}",
-                                                       log_bytes!(payment_preimage.0), log_bytes!(chan.get().channel_id()));
-                                               channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
-                                                       node_id: chan.get().get_counterparty_node_id(),
-                                                       updates: msgs::CommitmentUpdate {
-                                                               update_add_htlcs: Vec::new(),
-                                                               update_fulfill_htlcs: vec![msg],
-                                                               update_fail_htlcs: Vec::new(),
-                                                               update_fail_malformed_htlcs: Vec::new(),
-                                                               update_fee: None,
-                                                               commitment_signed,
-                                                       }
-                                               });
-                                       }
-                                       return Ok(())
                                },
-                               Err(e) => {
-                                       // TODO: Do something with e?
-                                       // This should only occur if we are claiming an HTLC at the same time as the
-                                       // HTLC is being failed (eg because a block is being connected and this caused
-                                       // an HTLC to time out). This should, of course, only occur if the user is the
-                                       // one doing the claiming (as it being a part of a peer claim would imply we're
-                                       // about to lose funds) and only if the lock in claim_funds was dropped as a
-                                       // previous HTLC was failed (thus not for an MPP payment).
-                                       debug_assert!(false, "This shouldn't be reachable except in absurdly rare cases between monitor updates and HTLC timeouts: {:?}", e);
-                                       return Err(None)
+                               Err((e, monitor_update)) => {
+                                       if let Err(e) = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), monitor_update) {
+                                               log_given_level!(self.logger, if e == ChannelMonitorUpdateErr::PermanentFailure { Level::Error } else { Level::Info },
+                                                       "Failed to update channel monitor with preimage {:?} immediately prior to force-close: {:?}",
+                                                       payment_preimage, e);
+                                       }
+                                       let counterparty_node_id = chan.get().get_counterparty_node_id();
+                                       let (drop, res) = convert_chan_err!(self, e, channel_state.short_to_id, chan.get_mut(), &chan_id);
+                                       if drop {
+                                               chan.remove_entry();
+                                       }
+                                       return ClaimFundsFromHop::MonitorUpdateFail(counterparty_node_id, res, None);
                                },
                        }
                } else { unreachable!(); }
        }
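// [Editor's sketch] The severity policy behind the two log_given_level! calls in
// claim_funds_from_hop, restated with toy types (LDK's ChannelMonitorUpdateErr and
// logger::Level are the real ones). Only a permanent monitor failure is an Error;
// a temporary failure is expected to resolve once monitor updating resumes, so it
// is logged at Debug on the claim path and Info on the pre-force-close path.
#[derive(PartialEq)]
enum MonitorErrSketch { TemporaryFailure, PermanentFailure }

#[derive(Debug, PartialEq)]
enum LevelSketch { Debug, Info, Error }

fn level_for_claim_path(e: &MonitorErrSketch) -> LevelSketch {
    if *e == MonitorErrSketch::PermanentFailure { LevelSketch::Error } else { LevelSketch::Debug }
}

fn level_for_force_close_path(e: &MonitorErrSketch) -> LevelSketch {
    if *e == MonitorErrSketch::PermanentFailure { LevelSketch::Error } else { LevelSketch::Info }
}

fn main() {
    assert_eq!(level_for_claim_path(&MonitorErrSketch::TemporaryFailure), LevelSketch::Debug);
    assert_eq!(level_for_force_close_path(&MonitorErrSketch::TemporaryFailure), LevelSketch::Info);
}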
 
-       fn claim_funds_internal(&self, mut channel_state_lock: MutexGuard<ChannelHolder<Signer>>, source: HTLCSource, payment_preimage: PaymentPreimage) {
+       fn claim_funds_internal(&self, mut channel_state_lock: MutexGuard<ChannelHolder<Signer>>, source: HTLCSource, payment_preimage: PaymentPreimage, forwarded_htlc_value_msat: Option<u64>, from_onchain: bool) {
                match source {
                        HTLCSource::OutboundRoute { session_priv, .. } => {
                                mem::drop(channel_state_lock);
@@ -2887,29 +2909,51 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                        },
                        HTLCSource::PreviousHopData(hop_data) => {
                                let prev_outpoint = hop_data.outpoint;
-                               if let Err((counterparty_node_id, err)) = match self.claim_funds_from_hop(&mut channel_state_lock, hop_data, payment_preimage) {
-                                       Ok(()) => Ok(()),
-                                       Err(None) => {
-                                               let preimage_update = ChannelMonitorUpdate {
-                                                       update_id: CLOSED_CHANNEL_UPDATE_ID,
-                                                       updates: vec![ChannelMonitorUpdateStep::PaymentPreimage {
-                                                               payment_preimage: payment_preimage.clone(),
-                                                       }],
-                                               };
-                                               // We update the ChannelMonitor on the backward link, after
-                                               // receiving an offchain preimage event from the forward link (the
-                                               // event being update_fulfill_htlc).
-                                               if let Err(e) = self.chain_monitor.update_channel(prev_outpoint, preimage_update) {
-                                                       log_error!(self.logger, "Critical error: failed to update channel monitor with preimage {:?}: {:?}",
-                                                                  payment_preimage, e);
-                                               }
-                                               Ok(())
-                                       },
-                                       Err(Some(res)) => Err(res),
-                               } {
-                                       mem::drop(channel_state_lock);
-                                       let res: Result<(), _> = Err(err);
-                                       let _ = handle_error!(self, res, counterparty_node_id);
+                               let res = self.claim_funds_from_hop(&mut channel_state_lock, hop_data, payment_preimage);
+                               let claimed_htlc = if let ClaimFundsFromHop::DuplicateClaim = res { false } else { true };
+                               let htlc_claim_value_msat = match res {
+                                       ClaimFundsFromHop::MonitorUpdateFail(_, _, amt_opt) => amt_opt,
+                                       ClaimFundsFromHop::Success(amt) => Some(amt),
+                                       _ => None,
+                               };
+                               if let ClaimFundsFromHop::PrevHopForceClosed = res {
+                                       let preimage_update = ChannelMonitorUpdate {
+                                               update_id: CLOSED_CHANNEL_UPDATE_ID,
+                                               updates: vec![ChannelMonitorUpdateStep::PaymentPreimage {
+                                                       payment_preimage: payment_preimage.clone(),
+                                               }],
+                                       };
+                                       // We update the ChannelMonitor on the backward link, after
+                                       // receiving an offchain preimage event from the forward link (the
+                                       // event being update_fulfill_htlc).
+                                       if let Err(e) = self.chain_monitor.update_channel(prev_outpoint, preimage_update) {
+                                               log_error!(self.logger, "Critical error: failed to update channel monitor with preimage {:?}: {:?}",
+                                                                  payment_preimage, e);
+                                       }
+                                       // Note that we do *not* set `claimed_htlc` to false here. In fact, this
+                                       // totally could be a duplicate claim, but we have no way of knowing
+                                       // without interrogating the `ChannelMonitor` we've provided the above
+                                       // update to. Instead, we simply document in `PaymentForwarded` that this
+                                       // can happen.
+                               }
+                               mem::drop(channel_state_lock);
+                               if let ClaimFundsFromHop::MonitorUpdateFail(pk, err, _) = res {
+                                       let result: Result<(), _> = Err(err);
+                                       let _ = handle_error!(self, result, pk);
+                               }
+
+                               if claimed_htlc {
+                                       if let Some(forwarded_htlc_value) = forwarded_htlc_value_msat {
+                                               let fee_earned_msat = if let Some(claimed_htlc_value) = htlc_claim_value_msat {
+                                                       Some(claimed_htlc_value - forwarded_htlc_value)
+                                               } else { None };
+
+                                               let mut pending_events = self.pending_events.lock().unwrap();
+                                               pending_events.push(events::Event::PaymentForwarded {
+                                                       fee_earned_msat,
+                                                       claim_from_onchain_tx: from_onchain,
+                                               });
+                                       }
                                }
                        },
                }
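// [Editor's sketch] The fee math behind the new PaymentForwarded event, standalone.
// The fee earned is simply the inbound (claimed) HTLC value minus the value we
// forwarded on the outbound edge; it is None when the claimed amount is unknown
// (e.g. a monitor-update failure that did not return the HTLC value).
fn fee_earned_msat(htlc_claim_value_msat: Option<u64>, forwarded_htlc_value_msat: u64) -> Option<u64> {
    htlc_claim_value_msat.map(|claimed| claimed - forwarded_htlc_value_msat)
}

fn main() {
    // Inbound HTLC of 1_000_050 msat claimed, 1_000_000 msat forwarded onward: 50 msat earned.
    assert_eq!(fee_earned_msat(Some(1_000_050), 1_000_000), Some(50));
    assert_eq!(fee_earned_msat(None, 1_000_000), None);
}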
@@ -3305,7 +3349,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
 
        fn internal_update_fulfill_htlc(&self, counterparty_node_id: &PublicKey, msg: &msgs::UpdateFulfillHTLC) -> Result<(), MsgHandleErrInternal> {
                let mut channel_lock = self.channel_state.lock().unwrap();
-               let htlc_source = {
+               let (htlc_source, forwarded_htlc_value) = {
                        let channel_state = &mut *channel_lock;
                        match channel_state.by_id.entry(msg.channel_id) {
                                hash_map::Entry::Occupied(mut chan) => {
@@ -3317,7 +3361,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id))
                        }
                };
-               self.claim_funds_internal(channel_lock, htlc_source, msg.payment_preimage.clone());
+               self.claim_funds_internal(channel_lock, htlc_source, msg.payment_preimage.clone(), Some(forwarded_htlc_value), false);
                Ok(())
        }
 
@@ -3684,14 +3728,14 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
        /// Process pending events from the `chain::Watch`, returning whether any events were processed.
        fn process_pending_monitor_events(&self) -> bool {
                let mut failed_channels = Vec::new();
-               let pending_monitor_events = self.chain_monitor.release_pending_monitor_events();
+               let mut pending_monitor_events = self.chain_monitor.release_pending_monitor_events();
                let has_pending_monitor_events = !pending_monitor_events.is_empty();
-               for monitor_event in pending_monitor_events {
+               for monitor_event in pending_monitor_events.drain(..) {
                        match monitor_event {
                                MonitorEvent::HTLCEvent(htlc_update) => {
                                        if let Some(preimage) = htlc_update.payment_preimage {
                                                log_trace!(self.logger, "Claiming HTLC with preimage {} from our monitor", log_bytes!(preimage.0));
-                                               self.claim_funds_internal(self.channel_state.lock().unwrap(), htlc_update.source, preimage);
+                                               self.claim_funds_internal(self.channel_state.lock().unwrap(), htlc_update.source, preimage, htlc_update.onchain_value_satoshis.map(|v| v * 1000), true);
                                        } else {
                                                log_trace!(self.logger, "Failing HTLC with hash {} from our monitor", log_bytes!(htlc_update.payment_hash.0));
                                                self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_update.source, &htlc_update.payment_hash, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() });
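// [Editor's sketch] The unit conversion used just above when claiming from a
// monitor event: an on-chain HTLC value is only known in whole satoshis (if at
// all), while forwarding bookkeeping is in millisatoshis, hence `.map(|v| v * 1000)`
// and `from_onchain = true` on that call.
fn onchain_sats_to_forwarded_msat(onchain_value_satoshis: Option<u64>) -> Option<u64> {
    onchain_value_satoshis.map(|sats| sats * 1_000)
}

fn main() {
    assert_eq!(onchain_sats_to_forwarded_msat(Some(2_500)), Some(2_500_000));
    assert_eq!(onchain_sats_to_forwarded_msat(None), None);
}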
@@ -3986,7 +4030,7 @@ where
                                result = NotifyOption::DoPersist;
                        }
 
-                       let mut pending_events = std::mem::replace(&mut *self.pending_events.lock().unwrap(), vec![]);
+                       let mut pending_events = mem::replace(&mut *self.pending_events.lock().unwrap(), vec![]);
                        if !pending_events.is_empty() {
                                result = NotifyOption::DoPersist;
                        }
@@ -4606,7 +4650,7 @@ impl_writeable_tlv_based!(HTLCPreviousHopData, {
 });
 
 impl Writeable for ClaimableHTLC {
-       fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ::std::io::Error> {
+       fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
                let payment_data = match &self.onion_payload {
                        OnionPayload::Invoice(data) => Some(data.clone()),
                        _ => None,
@@ -4710,7 +4754,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> Writeable f
         F::Target: FeeEstimator,
         L::Target: Logger,
 {
-       fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ::std::io::Error> {
+       fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
                let _consistency_lock = self.total_consistency_lock.write().unwrap();
 
                write_ver_prefix!(writer, SERIALIZATION_VERSION, MIN_SERIALIZATION_VERSION);
@@ -4906,7 +4950,7 @@ impl<'a, Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
         F::Target: FeeEstimator,
         L::Target: Logger,
 {
-       fn read<R: ::std::io::Read>(reader: &mut R, args: ChannelManagerReadArgs<'a, Signer, M, T, K, F, L>) -> Result<Self, DecodeError> {
+       fn read<R: io::Read>(reader: &mut R, args: ChannelManagerReadArgs<'a, Signer, M, T, K, F, L>) -> Result<Self, DecodeError> {
                let (blockhash, chan_manager) = <(BlockHash, ChannelManager<Signer, M, T, K, F, L>)>::read(reader, args)?;
                Ok((blockhash, Arc::new(chan_manager)))
        }
@@ -4920,7 +4964,7 @@ impl<'a, Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
         F::Target: FeeEstimator,
         L::Target: Logger,
 {
-       fn read<R: ::std::io::Read>(reader: &mut R, mut args: ChannelManagerReadArgs<'a, Signer, M, T, K, F, L>) -> Result<Self, DecodeError> {
+       fn read<R: io::Read>(reader: &mut R, mut args: ChannelManagerReadArgs<'a, Signer, M, T, K, F, L>) -> Result<Self, DecodeError> {
                let _ver = read_ver_prefix!(reader, SERIALIZATION_VERSION);
 
                let genesis_hash: BlockHash = Readable::read(reader)?;
@@ -5108,18 +5152,26 @@ impl<'a, Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
 
 #[cfg(test)]
 mod tests {
-       use ln::channelmanager::PersistenceNotifier;
-       use sync::Arc;
-       use core::sync::atomic::{AtomicBool, Ordering};
-       use std::thread;
+       use bitcoin::hashes::Hash;
+       use bitcoin::hashes::sha256::Hash as Sha256;
        use core::time::Duration;
+       use ln::{PaymentPreimage, PaymentHash, PaymentSecret};
+       use ln::features::{InitFeatures, InvoiceFeatures};
        use ln::functional_test_utils::*;
-       use ln::features::InitFeatures;
+       use ln::msgs;
        use ln::msgs::ChannelMessageHandler;
+       use routing::router::{get_keysend_route, get_route};
+       use util::events::{Event, MessageSendEvent, MessageSendEventsProvider};
+       use util::test_utils;
 
        #[cfg(feature = "std")]
        #[test]
        fn test_wait_timeout() {
+               use ln::channelmanager::PersistenceNotifier;
+               use sync::Arc;
+               use core::sync::atomic::{AtomicBool, Ordering};
+               use std::thread;
+
                let persistence_notifier = Arc::new(PersistenceNotifier::new());
                let thread_notifier = Arc::clone(&persistence_notifier);
 
@@ -5169,6 +5221,12 @@ mod tests {
                let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
                let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
 
+               // All nodes start with a persistable update pending as `create_network` connects each node
+               // with all other nodes to make most tests simpler.
+               assert!(nodes[0].node.await_persistable_update_timeout(Duration::from_millis(1)));
+               assert!(nodes[1].node.await_persistable_update_timeout(Duration::from_millis(1)));
+               assert!(nodes[2].node.await_persistable_update_timeout(Duration::from_millis(1)));
+
                let mut chan = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
 
                // We check that the channel info nodes have doesn't change too early, even though we try
@@ -5231,6 +5289,265 @@ mod tests {
                assert_ne!(nodes[0].node.list_channels()[0], node_a_chan_info);
                assert_ne!(nodes[1].node.list_channels()[0], node_b_chan_info);
        }
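// [Editor's sketch] A minimal, self-contained model of the kind of wait-with-timeout
// the await_persistable_update_timeout assertions above rely on. This is not LDK's
// PersistenceNotifier, just a Mutex<bool> + Condvar illustration of "return true if
// a persistable update was signalled before the timeout, then reset the flag".
use std::sync::{Arc, Condvar, Mutex};
use std::thread;
use std::time::Duration;

struct NotifierSketch {
    flag: Mutex<bool>,
    cv: Condvar,
}

impl NotifierSketch {
    fn new() -> Self { NotifierSketch { flag: Mutex::new(false), cv: Condvar::new() } }

    fn notify(&self) {
        let mut guard = self.flag.lock().unwrap();
        *guard = true;
        self.cv.notify_all();
    }

    /// Returns true if notified before `dur` elapsed, consuming the notification.
    fn wait_timeout(&self, dur: Duration) -> bool {
        let guard = self.flag.lock().unwrap();
        let (mut guard, _timed_out) = self.cv
            .wait_timeout_while(guard, dur, |notified| !*notified)
            .unwrap();
        let was_notified = *guard;
        *guard = false;
        was_notified
    }
}

fn main() {
    let notifier = Arc::new(NotifierSketch::new());
    let background = Arc::clone(&notifier);
    thread::spawn(move || {
        thread::sleep(Duration::from_millis(10));
        background.notify();
    });
    assert!(notifier.wait_timeout(Duration::from_millis(500)));
    assert!(!notifier.wait_timeout(Duration::from_millis(10))); // no pending notification left
}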
+
+       #[test]
+       fn test_keysend_dup_hash_partial_mpp() {
+               // Test that a keysend payment with a duplicate hash to an existing partial MPP payment fails as
+               // expected.
+               let chanmon_cfgs = create_chanmon_cfgs(2);
+               let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+               let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+               let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+               create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
+               let logger = test_utils::TestLogger::new();
+
+               // First, send a partial MPP payment.
+               let net_graph_msg_handler = &nodes[0].net_graph_msg_handler;
+               let route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[1].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), 100_000, TEST_FINAL_CLTV, &logger).unwrap();
+               let (payment_preimage, our_payment_hash, payment_secret) = get_payment_preimage_hash!(&nodes[1]);
+               // Use the utility function send_payment_along_path to send the payment with MPP data which
+               // indicates there are more HTLCs coming.
+               let cur_height = CHAN_CONFIRM_DEPTH + 1; // route_payment calls send_payment, which adds 1 to the current height. So we do the same here to match.
+               nodes[0].node.send_payment_along_path(&route.paths[0], &our_payment_hash, &Some(payment_secret), 200_000, cur_height, &None).unwrap();
+               check_added_monitors!(nodes[0], 1);
+               let mut events = nodes[0].node.get_and_clear_pending_msg_events();
+               assert_eq!(events.len(), 1);
+               pass_along_path(&nodes[0], &[&nodes[1]], 200_000, our_payment_hash, Some(payment_secret), events.drain(..).next().unwrap(), false, None);
+
+               // Next, send a keysend payment with the same payment_hash and make sure it fails.
+               nodes[0].node.send_spontaneous_payment(&route, Some(payment_preimage)).unwrap();
+               check_added_monitors!(nodes[0], 1);
+               let mut events = nodes[0].node.get_and_clear_pending_msg_events();
+               assert_eq!(events.len(), 1);
+               let ev = events.drain(..).next().unwrap();
+               let payment_event = SendEvent::from_event(ev);
+               nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
+               check_added_monitors!(nodes[1], 0);
+               commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
+               expect_pending_htlcs_forwardable!(nodes[1]);
+               expect_pending_htlcs_forwardable!(nodes[1]);
+               check_added_monitors!(nodes[1], 1);
+               let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
+               assert!(updates.update_add_htlcs.is_empty());
+               assert!(updates.update_fulfill_htlcs.is_empty());
+               assert_eq!(updates.update_fail_htlcs.len(), 1);
+               assert!(updates.update_fail_malformed_htlcs.is_empty());
+               assert!(updates.update_fee.is_none());
+               nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fail_htlcs[0]);
+               commitment_signed_dance!(nodes[0], nodes[1], updates.commitment_signed, true, true);
+               expect_payment_failed!(nodes[0], our_payment_hash, true);
+
+               // Send the second half of the original MPP payment.
+               nodes[0].node.send_payment_along_path(&route.paths[0], &our_payment_hash, &Some(payment_secret), 200_000, cur_height, &None).unwrap();
+               check_added_monitors!(nodes[0], 1);
+               let mut events = nodes[0].node.get_and_clear_pending_msg_events();
+               assert_eq!(events.len(), 1);
+               pass_along_path(&nodes[0], &[&nodes[1]], 200_000, our_payment_hash, Some(payment_secret), events.drain(..).next().unwrap(), true, None);
+
+               // Claim the full MPP payment. Note that we can't use a test utility like
+               // claim_funds_along_route because the ordering of the messages causes the second half of the
+               // payment to be put in the holding cell, which confuses the test utilities. So we exchange the
+               // lightning messages manually.
+               assert!(nodes[1].node.claim_funds(payment_preimage));
+               check_added_monitors!(nodes[1], 2);
+               let bs_first_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
+               nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_first_updates.update_fulfill_htlcs[0]);
+               nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_first_updates.commitment_signed);
+               check_added_monitors!(nodes[0], 1);
+               let (as_first_raa, as_first_cs) = get_revoke_commit_msgs!(nodes[0], nodes[1].node.get_our_node_id());
+               nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_first_raa);
+               check_added_monitors!(nodes[1], 1);
+               let bs_second_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
+               nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_first_cs);
+               check_added_monitors!(nodes[1], 1);
+               let bs_first_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
+               nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_second_updates.update_fulfill_htlcs[0]);
+               nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_second_updates.commitment_signed);
+               check_added_monitors!(nodes[0], 1);
+               let as_second_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
+               nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_first_raa);
+               let as_second_updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
+               check_added_monitors!(nodes[0], 1);
+               nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_second_raa);
+               check_added_monitors!(nodes[1], 1);
+               nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_second_updates.commitment_signed);
+               check_added_monitors!(nodes[1], 1);
+               let bs_third_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
+               nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_third_raa);
+               check_added_monitors!(nodes[0], 1);
+
+               // There's an existing bug that generates a PaymentSent event for each MPP path, so handle that here.
+               let events = nodes[0].node.get_and_clear_pending_events();
+               match events[0] {
+                       Event::PaymentSent { payment_preimage: ref preimage } => {
+                               assert_eq!(payment_preimage, *preimage);
+                       },
+                       _ => panic!("Unexpected event"),
+               }
+               match events[1] {
+                       Event::PaymentSent { payment_preimage: ref preimage } => {
+                               assert_eq!(payment_preimage, *preimage);
+                       },
+                       _ => panic!("Unexpected event"),
+               }
+       }
+
+       #[test]
+       fn test_keysend_dup_payment_hash() {
+               // (1): Test that a keysend payment with a duplicate payment hash to an existing pending
+               //      outbound regular payment fails as expected.
+               // (2): Test that a regular payment with a duplicate payment hash to an existing keysend payment
+               //      fails as expected.
+               let chanmon_cfgs = create_chanmon_cfgs(2);
+               let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+               let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+               let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+               create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
+               let logger = test_utils::TestLogger::new();
+
+               // To start (1), send a regular payment but don't claim it.
+               let expected_route = [&nodes[1]];
+               let (payment_preimage, payment_hash, _) = route_payment(&nodes[0], &expected_route, 100_000);
+
+               // Next, attempt a keysend payment and make sure it fails.
+               let route = get_route(&nodes[0].node.get_our_node_id(), &nodes[0].net_graph_msg_handler.network_graph.read().unwrap(), &expected_route.last().unwrap().node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), 100_000, TEST_FINAL_CLTV, &logger).unwrap();
+               nodes[0].node.send_spontaneous_payment(&route, Some(payment_preimage)).unwrap();
+               check_added_monitors!(nodes[0], 1);
+               let mut events = nodes[0].node.get_and_clear_pending_msg_events();
+               assert_eq!(events.len(), 1);
+               let ev = events.drain(..).next().unwrap();
+               let payment_event = SendEvent::from_event(ev);
+               nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
+               check_added_monitors!(nodes[1], 0);
+               commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
+               expect_pending_htlcs_forwardable!(nodes[1]);
+               expect_pending_htlcs_forwardable!(nodes[1]);
+               check_added_monitors!(nodes[1], 1);
+               let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
+               assert!(updates.update_add_htlcs.is_empty());
+               assert!(updates.update_fulfill_htlcs.is_empty());
+               assert_eq!(updates.update_fail_htlcs.len(), 1);
+               assert!(updates.update_fail_malformed_htlcs.is_empty());
+               assert!(updates.update_fee.is_none());
+               nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fail_htlcs[0]);
+               commitment_signed_dance!(nodes[0], nodes[1], updates.commitment_signed, true, true);
+               expect_payment_failed!(nodes[0], payment_hash, true);
+
+               // Finally, claim the original payment.
+               claim_payment(&nodes[0], &expected_route, payment_preimage);
+
+               // To start (2), send a keysend payment but don't claim it.
+               let payment_preimage = PaymentPreimage([42; 32]);
+               let route = get_route(&nodes[0].node.get_our_node_id(), &nodes[0].net_graph_msg_handler.network_graph.read().unwrap(), &expected_route.last().unwrap().node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), 100_000, TEST_FINAL_CLTV, &logger).unwrap();
+               let payment_hash = nodes[0].node.send_spontaneous_payment(&route, Some(payment_preimage)).unwrap();
+               check_added_monitors!(nodes[0], 1);
+               let mut events = nodes[0].node.get_and_clear_pending_msg_events();
+               assert_eq!(events.len(), 1);
+               let event = events.pop().unwrap();
+               let path = vec![&nodes[1]];
+               pass_along_path(&nodes[0], &path, 100_000, payment_hash, None, event, true, Some(payment_preimage));
+
+               // Next, attempt a regular payment and make sure it fails.
+               let payment_secret = PaymentSecret([43; 32]);
+               nodes[0].node.send_payment(&route, payment_hash, &Some(payment_secret)).unwrap();
+               check_added_monitors!(nodes[0], 1);
+               let mut events = nodes[0].node.get_and_clear_pending_msg_events();
+               assert_eq!(events.len(), 1);
+               let ev = events.drain(..).next().unwrap();
+               let payment_event = SendEvent::from_event(ev);
+               nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
+               check_added_monitors!(nodes[1], 0);
+               commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
+               expect_pending_htlcs_forwardable!(nodes[1]);
+               expect_pending_htlcs_forwardable!(nodes[1]);
+               check_added_monitors!(nodes[1], 1);
+               let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
+               assert!(updates.update_add_htlcs.is_empty());
+               assert!(updates.update_fulfill_htlcs.is_empty());
+               assert_eq!(updates.update_fail_htlcs.len(), 1);
+               assert!(updates.update_fail_malformed_htlcs.is_empty());
+               assert!(updates.update_fee.is_none());
+               nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fail_htlcs[0]);
+               commitment_signed_dance!(nodes[0], nodes[1], updates.commitment_signed, true, true);
+               expect_payment_failed!(nodes[0], payment_hash, true);
+
+               // Finally, succeed the keysend payment.
+               claim_payment(&nodes[0], &expected_route, payment_preimage);
+       }
+
+       #[test]
+       fn test_keysend_hash_mismatch() {
+               // Test that if we receive a keysend `update_add_htlc` msg, we fail as expected if the keysend
+               // preimage doesn't match the msg's payment hash.
+               let chanmon_cfgs = create_chanmon_cfgs(2);
+               let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+               let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+               let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+               let payer_pubkey = nodes[0].node.get_our_node_id();
+               let payee_pubkey = nodes[1].node.get_our_node_id();
+               nodes[0].node.peer_connected(&payee_pubkey, &msgs::Init { features: InitFeatures::known() });
+               nodes[1].node.peer_connected(&payer_pubkey, &msgs::Init { features: InitFeatures::known() });
+
+               let _chan = create_chan_between_nodes(&nodes[0], &nodes[1], InitFeatures::known(), InitFeatures::known());
+               let network_graph = nodes[0].net_graph_msg_handler.network_graph.read().unwrap();
+               let first_hops = nodes[0].node.list_usable_channels();
+               let route = get_keysend_route(&payer_pubkey, &network_graph, &payee_pubkey,
+                                  Some(&first_hops.iter().collect::<Vec<_>>()), &vec![], 10000, 40,
+                                  nodes[0].logger).unwrap();
+
+               let test_preimage = PaymentPreimage([42; 32]);
+               let mismatch_payment_hash = PaymentHash([43; 32]);
+               let _ = nodes[0].node.send_payment_internal(&route, mismatch_payment_hash, &None, Some(test_preimage)).unwrap();
+               check_added_monitors!(nodes[0], 1);
+
+               let updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
+               assert_eq!(updates.update_add_htlcs.len(), 1);
+               assert!(updates.update_fulfill_htlcs.is_empty());
+               assert!(updates.update_fail_htlcs.is_empty());
+               assert!(updates.update_fail_malformed_htlcs.is_empty());
+               assert!(updates.update_fee.is_none());
+               nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);
+
+               nodes[1].logger.assert_log_contains("lightning::ln::channelmanager".to_string(), "Payment preimage didn't match payment hash".to_string(), 1);
+       }
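// [Editor's sketch] The invariant this test exercises, standalone: a keysend HTLC
// is only acceptable if the claimed payment hash equals SHA256(preimage). It uses
// the same bitcoin::hashes imports as the test module above and is illustrative
// only; the real check lives inside ChannelManager's receive path.
use bitcoin::hashes::Hash;
use bitcoin::hashes::sha256::Hash as Sha256;

fn keysend_preimage_matches(preimage: &[u8; 32], claimed_payment_hash: &[u8; 32]) -> bool {
    Sha256::hash(preimage).into_inner() == *claimed_payment_hash
}

fn main() {
    let preimage = [42u8; 32];
    let mismatch_hash = [43u8; 32]; // as constructed in the test above
    assert!(!keysend_preimage_matches(&preimage, &mismatch_hash));
    let real_hash = Sha256::hash(&preimage).into_inner();
    assert!(keysend_preimage_matches(&preimage, &real_hash));
}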
+
+       #[test]
+       fn test_keysend_msg_with_secret_err() {
+               // Test that we error as expected if we receive a keysend payment that includes a payment secret.
+               let chanmon_cfgs = create_chanmon_cfgs(2);
+               let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+               let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+               let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+               let payer_pubkey = nodes[0].node.get_our_node_id();
+               let payee_pubkey = nodes[1].node.get_our_node_id();
+               nodes[0].node.peer_connected(&payee_pubkey, &msgs::Init { features: InitFeatures::known() });
+               nodes[1].node.peer_connected(&payer_pubkey, &msgs::Init { features: InitFeatures::known() });
+
+               let _chan = create_chan_between_nodes(&nodes[0], &nodes[1], InitFeatures::known(), InitFeatures::known());
+               let network_graph = nodes[0].net_graph_msg_handler.network_graph.read().unwrap();
+               let first_hops = nodes[0].node.list_usable_channels();
+               let route = get_keysend_route(&payer_pubkey, &network_graph, &payee_pubkey,
+                                  Some(&first_hops.iter().collect::<Vec<_>>()), &vec![], 10000, 40,
+                                  nodes[0].logger).unwrap();
+
+               let test_preimage = PaymentPreimage([42; 32]);
+               let test_secret = PaymentSecret([43; 32]);
+               let payment_hash = PaymentHash(Sha256::hash(&test_preimage.0).into_inner());
+               let _ = nodes[0].node.send_payment_internal(&route, payment_hash, &Some(test_secret), Some(test_preimage)).unwrap();
+               check_added_monitors!(nodes[0], 1);
+
+               let updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
+               assert_eq!(updates.update_add_htlcs.len(), 1);
+               assert!(updates.update_fulfill_htlcs.is_empty());
+               assert!(updates.update_fail_htlcs.is_empty());
+               assert!(updates.update_fail_malformed_htlcs.is_empty());
+               assert!(updates.update_fee.is_none());
+               nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);
+
+               nodes[1].logger.assert_log_contains("lightning::ln::channelmanager".to_string(), "We don't support MPP keysend payments".to_string(), 1);
+       }
 }
 
 #[cfg(all(any(test, feature = "_test_utils"), feature = "unstable"))]