Explicitly support counterparty setting 0 channel reserve
[rust-lightning] / lightning / src / ln / channelmanager.rs
index a0ffe263c84e492773bc8d79b2b9ccfe90910095..0eaf244d0e47653cc04b2cd0c508f519820f5c78 100644
@@ -36,16 +36,16 @@ use bitcoin::secp256k1::ecdh::SharedSecret;
 use bitcoin::secp256k1;
 
 use chain;
-use chain::{Confirm, Watch, BestBlock};
+use chain::{Confirm, ChannelMonitorUpdateErr, Watch, BestBlock};
 use chain::chaininterface::{BroadcasterInterface, ConfirmationTarget, FeeEstimator};
-use chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, ChannelMonitorUpdateStep, ChannelMonitorUpdateErr, HTLC_FAIL_BACK_BUFFER, CLTV_CLAIM_BUFFER, LATENCY_GRACE_PERIOD_BLOCKS, ANTI_REORG_DELAY, MonitorEvent, CLOSED_CHANNEL_UPDATE_ID};
+use chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, ChannelMonitorUpdateStep, HTLC_FAIL_BACK_BUFFER, CLTV_CLAIM_BUFFER, LATENCY_GRACE_PERIOD_BLOCKS, ANTI_REORG_DELAY, MonitorEvent, CLOSED_CHANNEL_UPDATE_ID};
 use chain::transaction::{OutPoint, TransactionData};
 // Since this struct is returned in `list_channels` methods, expose it here in case users want to
 // construct one themselves.
 use ln::{PaymentHash, PaymentPreimage, PaymentSecret};
 use ln::channel::{Channel, ChannelError, ChannelUpdateStatus, UpdateFulfillCommitFetch};
 use ln::features::{InitFeatures, NodeFeatures};
-use routing::router::{Route, RouteHop};
+use routing::router::{Payee, Route, RouteHop, RoutePath, RouteParameters};
 use ln::msgs;
 use ln::msgs::NetAddress;
 use ln::onion_utils;
@@ -145,7 +145,7 @@ pub(super) enum HTLCForwardInfo {
 }
 
 /// Tracks the inbound corresponding to an outbound HTLC
-#[derive(Clone, PartialEq)]
+#[derive(Clone, Hash, PartialEq, Eq)]
 pub(crate) struct HTLCPreviousHopData {
        short_channel_id: u64,
        htlc_id: u64,
@@ -172,24 +172,26 @@ struct ClaimableHTLC {
        onion_payload: OnionPayload,
 }
 
-/// A payment identifier used to correlate an MPP payment's per-path HTLC sources internally.
+/// A payment identifier used to uniquely identify a payment to LDK.
+/// (C-not exported) as we just use [u8; 32] directly
 #[derive(Hash, Copy, Clone, PartialEq, Eq, Debug)]
-pub(crate) struct MppId(pub [u8; 32]);
+pub struct PaymentId(pub [u8; 32]);
 
-impl Writeable for MppId {
+impl Writeable for PaymentId {
        fn write<W: Writer>(&self, w: &mut W) -> Result<(), io::Error> {
                self.0.write(w)
        }
 }
 
-impl Readable for MppId {
+impl Readable for PaymentId {
        fn read<R: Read>(r: &mut R) -> Result<Self, DecodeError> {
                let buf: [u8; 32] = Readable::read(r)?;
-               Ok(MppId(buf))
+               Ok(PaymentId(buf))
        }
 }
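
As context for the new type: `PaymentId` is a plain 32-byte newtype, so the `Writeable`/`Readable` impls above reduce to the inner array, and the `Hash`/`Eq` derives let it key the reworked `pending_outbound_payments` map later in this diff. A minimal toy sketch (not part of the patch):

```rust
use std::collections::HashMap;

#[derive(Hash, Copy, Clone, PartialEq, Eq, Debug)]
pub struct PaymentId(pub [u8; 32]);

fn main() {
    let id = PaymentId([7; 32]);

    // Serialization round-trip: the impls above write/read the raw bytes.
    let encoded: Vec<u8> = id.0.to_vec();
    let mut decoded = [0u8; 32];
    decoded.copy_from_slice(&encoded);
    assert_eq!(PaymentId(decoded), id);

    // Hash + Eq make PaymentId usable as a map key, which is exactly how
    // pending_outbound_payments is keyed below.
    let mut pending: HashMap<PaymentId, u64> = HashMap::new();
    pending.insert(id, 0);
    assert!(pending.contains_key(&id));
}
```
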
 /// Tracks the inbound corresponding to an outbound HTLC
-#[derive(Clone, PartialEq)]
+#[allow(clippy::derive_hash_xor_eq)] // Our Hash is faithful to the data, we just don't have SecretKey::hash
+#[derive(Clone, PartialEq, Eq)]
 pub(crate) enum HTLCSource {
        PreviousHopData(HTLCPreviousHopData),
        OutboundRoute {
@@ -198,9 +200,31 @@ pub(crate) enum HTLCSource {
                /// Technically we can recalculate this from the route, but we cache it here to avoid
                /// doing a double pass over the route when we get a failure back
                first_hop_htlc_msat: u64,
-               mpp_id: MppId,
+               payment_id: PaymentId,
+               payment_secret: Option<PaymentSecret>,
+               payee: Option<Payee>,
        },
 }
+#[allow(clippy::derive_hash_xor_eq)] // Our Hash is faithful to the data, we just don't have SecretKey::hash
+impl core::hash::Hash for HTLCSource {
+       fn hash<H: core::hash::Hasher>(&self, hasher: &mut H) {
+               match self {
+                       HTLCSource::PreviousHopData(prev_hop_data) => {
+                               0u8.hash(hasher);
+                               prev_hop_data.hash(hasher);
+                       },
+                       HTLCSource::OutboundRoute { path, session_priv, payment_id, payment_secret, first_hop_htlc_msat, payee } => {
+                               1u8.hash(hasher);
+                               path.hash(hasher);
+                               session_priv[..].hash(hasher);
+                               payment_id.hash(hasher);
+                               payment_secret.hash(hasher);
+                               first_hop_htlc_msat.hash(hasher);
+                               payee.hash(hasher);
+                       },
+               }
+       }
+}
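
The manual `Hash` impl above exists because `secp256k1::SecretKey` does not implement `Hash`, so the key's byte slice is hashed instead, and each variant is prefixed with a discriminant byte so distinct variants with otherwise-equal fields hash distinctly. The same pattern in miniature (stand-in types, not LDK's):

```rust
use core::hash::{Hash, Hasher};
use std::collections::hash_map::DefaultHasher;

struct Secret([u8; 32]); // stand-in for secp256k1::SecretKey (no Hash impl)

enum Source {
    Inbound(u64),
    Outbound { secret: Secret, amt_msat: u64 },
}

impl Hash for Source {
    fn hash<H: Hasher>(&self, hasher: &mut H) {
        match self {
            Source::Inbound(id) => {
                0u8.hash(hasher); // variant tag
                id.hash(hasher);
            },
            Source::Outbound { secret, amt_msat } => {
                1u8.hash(hasher); // variant tag
                secret.0[..].hash(hasher); // hash the raw bytes instead
                amt_msat.hash(hasher);
            },
        }
    }
}

fn main() {
    let mut h = DefaultHasher::new();
    Source::Outbound { secret: Secret([1; 32]), amt_msat: 42 }.hash(&mut h);
    println!("{:x}", h.finish());
}
```
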
 #[cfg(test)]
 impl HTLCSource {
        pub fn dummy() -> Self {
@@ -208,7 +232,9 @@ impl HTLCSource {
                        path: Vec::new(),
                        session_priv: SecretKey::from_slice(&[1; 32]).unwrap(),
                        first_hop_htlc_msat: 0,
-                       mpp_id: MppId([2; 32]),
+                       payment_id: PaymentId([2; 32]),
+                       payment_secret: None,
+                       payee: None,
                }
        }
 }
@@ -242,7 +268,7 @@ type ShutdownResult = (Option<(OutPoint, ChannelMonitorUpdate)>, Vec<(HTLCSource
 
 struct MsgHandleErrInternal {
        err: msgs::LightningError,
-       chan_id: Option<[u8; 32]>, // If Some a channel of ours has been closed
+       chan_id: Option<([u8; 32], u64)>, // If Some, a channel of ours has been closed
        shutdown_finish: Option<(ShutdownResult, Option<msgs::ChannelUpdate>)>,
 }
 impl MsgHandleErrInternal {
@@ -278,7 +304,7 @@ impl MsgHandleErrInternal {
                Self { err, chan_id: None, shutdown_finish: None }
        }
        #[inline]
-       fn from_finish_shutdown(err: String, channel_id: [u8; 32], shutdown_res: ShutdownResult, channel_update: Option<msgs::ChannelUpdate>) -> Self {
+       fn from_finish_shutdown(err: String, channel_id: [u8; 32], user_channel_id: u64, shutdown_res: ShutdownResult, channel_update: Option<msgs::ChannelUpdate>) -> Self {
                Self {
                        err: LightningError {
                                err: err.clone(),
@@ -289,7 +315,7 @@ impl MsgHandleErrInternal {
                                        },
                                },
                        },
-                       chan_id: Some(channel_id),
+                       chan_id: Some((channel_id, user_channel_id)),
                        shutdown_finish: Some((shutdown_res, channel_update)),
                }
        }
@@ -387,6 +413,9 @@ struct PeerState {
 ///
 /// For users who don't want to bother doing their own payment preimage storage, we also store that
 /// here.
+///
+/// Note that this struct will be removed entirely soon, in favor of storing no inbound payment data
+/// and instead encoding it in the payment secret.
 struct PendingInboundPayment {
        /// The payment secret that the sender must use for us to accept this payment
        payment_secret: PaymentSecret,
@@ -400,6 +429,126 @@ struct PendingInboundPayment {
        min_value_msat: Option<u64>,
 }
 
+/// Stores the session_priv for each part of a payment that is still pending. For versions 0.0.102
+/// and later, also stores information for retrying the payment.
+pub(crate) enum PendingOutboundPayment {
+       Legacy {
+               session_privs: HashSet<[u8; 32]>,
+       },
+       Retryable {
+               session_privs: HashSet<[u8; 32]>,
+               payment_hash: PaymentHash,
+               payment_secret: Option<PaymentSecret>,
+               pending_amt_msat: u64,
+               /// Used to track the fee paid. Only present if the payment was serialized on 0.0.103+.
+               pending_fee_msat: Option<u64>,
+               /// The total payment amount across all paths, used to verify that a retry is not overpaying.
+               total_msat: u64,
+               /// Our best known block height at the time this payment was initiated.
+               starting_block_height: u32,
+       },
+       /// When a pending payment is fulfilled, we continue tracking it until all pending HTLCs have
+       /// been resolved. This ensures we don't look up pending payments in ChannelMonitors on restart
+       /// and add a pending payment that was already fulfilled.
+       Fulfilled {
+               session_privs: HashSet<[u8; 32]>,
+               payment_hash: Option<PaymentHash>,
+       },
+}
+
+impl PendingOutboundPayment {
+       fn is_retryable(&self) -> bool {
+               match self {
+                       PendingOutboundPayment::Retryable { .. } => true,
+                       _ => false,
+               }
+       }
+       fn is_fulfilled(&self) -> bool {
+               match self {
+                       PendingOutboundPayment::Fulfilled { .. } => true,
+                       _ => false,
+               }
+       }
+       fn get_pending_fee_msat(&self) -> Option<u64> {
+               match self {
+                       PendingOutboundPayment::Retryable { pending_fee_msat, .. } => pending_fee_msat.clone(),
+                       _ => None,
+               }
+       }
+
+       fn payment_hash(&self) -> Option<PaymentHash> {
+               match self {
+                       PendingOutboundPayment::Legacy { .. } => None,
+                       PendingOutboundPayment::Retryable { payment_hash, .. } => Some(*payment_hash),
+                       PendingOutboundPayment::Fulfilled { payment_hash, .. } => *payment_hash,
+               }
+       }
+
+       fn mark_fulfilled(&mut self) {
+               let mut session_privs = HashSet::new();
+               core::mem::swap(&mut session_privs, match self {
+                       PendingOutboundPayment::Legacy { session_privs } |
+                       PendingOutboundPayment::Retryable { session_privs, .. } |
+                       PendingOutboundPayment::Fulfilled { session_privs, .. }
+                               => session_privs
+               });
+               let payment_hash = self.payment_hash();
+               *self = PendingOutboundPayment::Fulfilled { session_privs, payment_hash };
+       }
+
+       /// Panics if `path` is `None` and the payment is not yet fulfilled.
+       fn remove(&mut self, session_priv: &[u8; 32], path: Option<&Vec<RouteHop>>) -> bool {
+               let remove_res = match self {
+                       PendingOutboundPayment::Legacy { session_privs } |
+                       PendingOutboundPayment::Retryable { session_privs, .. } |
+                       PendingOutboundPayment::Fulfilled { session_privs, .. } => {
+                               session_privs.remove(session_priv)
+                       }
+               };
+               if remove_res {
+                       if let PendingOutboundPayment::Retryable { ref mut pending_amt_msat, ref mut pending_fee_msat, .. } = self {
+                               let path = path.expect("Fulfilling a payment should always come with a path");
+                               let path_last_hop = path.last().expect("Outbound payments must have had a valid path");
+                               *pending_amt_msat -= path_last_hop.fee_msat;
+                               if let Some(fee_msat) = pending_fee_msat.as_mut() {
+                                       *fee_msat -= path.get_path_fees();
+                               }
+                       }
+               }
+               remove_res
+       }
+
+       fn insert(&mut self, session_priv: [u8; 32], path: &Vec<RouteHop>) -> bool {
+               let insert_res = match self {
+                       PendingOutboundPayment::Legacy { session_privs } |
+                       PendingOutboundPayment::Retryable { session_privs, .. } => {
+                               session_privs.insert(session_priv)
+                       }
+                       PendingOutboundPayment::Fulfilled { .. } => false
+               };
+               if insert_res {
+                       if let PendingOutboundPayment::Retryable { ref mut pending_amt_msat, ref mut pending_fee_msat, .. } = self {
+                               let path_last_hop = path.last().expect("Outbound payments must have had a valid path");
+                               *pending_amt_msat += path_last_hop.fee_msat;
+                               if let Some(fee_msat) = pending_fee_msat.as_mut() {
+                                       *fee_msat += path.get_path_fees();
+                               }
+                       }
+               }
+               insert_res
+       }
+
+       fn remaining_parts(&self) -> usize {
+               match self {
+                       PendingOutboundPayment::Legacy { session_privs } |
+                       PendingOutboundPayment::Retryable { session_privs, .. } |
+                       PendingOutboundPayment::Fulfilled { session_privs, .. } => {
+                               session_privs.len()
+                       }
+               }
+       }
+}
+
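The accounting invariant of the new state machine, as a simplified, hedged sketch (toy types, not the internals verbatim): `insert`/`remove` keep `pending_amt_msat` in lock-step with the set of in-flight parts, and duplicate parts are rejected.

```rust
use std::collections::HashSet;

struct Retryable {
    session_privs: HashSet<[u8; 32]>,
    pending_amt_msat: u64,
}

impl Retryable {
    fn insert(&mut self, session_priv: [u8; 32], part_amt_msat: u64) -> bool {
        let fresh = self.session_privs.insert(session_priv);
        if fresh { self.pending_amt_msat += part_amt_msat; } // count new parts only
        fresh
    }
    fn remove(&mut self, session_priv: &[u8; 32], part_amt_msat: u64) -> bool {
        let removed = self.session_privs.remove(session_priv);
        if removed { self.pending_amt_msat -= part_amt_msat; } // part resolved
        removed
    }
}

fn main() {
    let mut payment = Retryable { session_privs: HashSet::new(), pending_amt_msat: 0 };
    assert!(payment.insert([1; 32], 1_000));
    assert!(!payment.insert([1; 32], 1_000)); // duplicate part: no double-count
    assert!(payment.remove(&[1; 32], 1_000));
    assert_eq!(payment.pending_amt_msat, 0);
}
```
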
 /// SimpleArcChannelManager is useful when you need a ChannelManager with a static lifetime, e.g.
 /// when you're using lightning-net-tokio (since tokio::spawn requires parameters with static
 /// lifetimes). Other times you can afford a reference, which is more efficient, in which case
@@ -486,7 +635,7 @@ pub struct ChannelManager<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref,
        /// Locked *after* channel_state.
        pending_inbound_payments: Mutex<HashMap<PaymentHash, PendingInboundPayment>>,
 
-       /// The session_priv bytes of outbound payments which are pending resolution.
+       /// The session_priv bytes and retry metadata of outbound payments which are pending resolution.
        /// The authoritative state of these HTLCs resides either within Channels or ChannelMonitors
        /// (if the channel has been force-closed), however we track them here to prevent duplicative
        /// PaymentSent/PaymentPathFailed events. Specifically, in the case of a duplicative
@@ -495,11 +644,10 @@ pub struct ChannelManager<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref,
        /// which may generate a claim event, we may receive similar duplicate claim/fail MonitorEvents
        /// after reloading from disk while replaying blocks against ChannelMonitors.
        ///
-       /// Each payment has each of its MPP part's session_priv bytes in the HashSet of the map (even
-       /// payments over a single path).
+       /// See `PendingOutboundPayment` documentation for more info.
        ///
        /// Locked *after* channel_state.
-       pending_outbound_payments: Mutex<HashMap<MppId, HashSet<[u8; 32]>>>,
+       pending_outbound_payments: Mutex<HashMap<PaymentId, PendingOutboundPayment>>,
 
        our_network_key: SecretKey,
        our_network_pubkey: PublicKey,
@@ -718,8 +866,8 @@ pub struct ChannelDetails {
        ///
        /// [`outbound_capacity_msat`]: ChannelDetails::outbound_capacity_msat
        pub unspendable_punishment_reserve: Option<u64>,
-       /// The user_id passed in to create_channel, or 0 if the channel was inbound.
-       pub user_id: u64,
+       /// The `user_channel_id` passed in to create_channel, or 0 if the channel was inbound.
+       pub user_channel_id: u64,
        /// The available outbound capacity for sending HTLCs to the remote peer. This does not include
        /// any pending HTLCs which are not yet fully resolved (and, thus, whose balance is not
        /// available for inclusion in new outbound HTLCs). This further does not include any pending
@@ -811,7 +959,16 @@ pub enum PaymentSendFailure {
        /// as they will result in over-/re-payment. These HTLCs all either successfully sent (in the
        /// case of Ok(())) or will send once channel_monitor_updated is called on the next-hop channel
        /// with the latest update_id.
-       PartialFailure(Vec<Result<(), APIError>>),
+       PartialFailure {
+               /// The errors themselves, one per path, in the same order as the route's paths.
+               results: Vec<Result<(), APIError>>,
+               /// If some paths failed without irrevocably committing to the new HTLC(s), this will
+               /// contain a [`RouteParameters`] object which can be used to calculate a new route that
+               /// will pay all remaining unpaid balance.
+               failed_paths_retry: Option<RouteParameters>,
+               /// The payment id for the payment, which is now at least partially pending.
+               payment_id: PaymentId,
+       },
 }
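
Since `results` is per-path, callers can tell which paths are pending and which are safe to re-route. A hedged sketch of consuming the new variant (types as introduced in this diff; imports and the surrounding send call are assumed):

```rust
fn log_partial_failure(send_result: Result<PaymentId, PaymentSendFailure>) {
    if let Err(PaymentSendFailure::PartialFailure { results, failed_paths_retry, payment_id }) = send_result {
        for (i, res) in results.iter().enumerate() {
            if let Err(e) = res {
                // This path never irrevocably committed an HTLC.
                println!("path {} failed: {:?}", i, e);
            }
        }
        if let Some(retry) = failed_paths_retry {
            // retry.final_value_msat is exactly the unpaid remainder; route it
            // and hand the new route to retry_payment() with this payment_id.
            let _ = (retry, payment_id);
        }
    }
}
```
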
 
 macro_rules! handle_error {
@@ -836,8 +993,11 @@ macro_rules! handle_error {
                                                        msg: update
                                                });
                                        }
-                                       if let Some(channel_id) = chan_id {
-                                               $self.pending_events.lock().unwrap().push(events::Event::ChannelClosed { channel_id,  reason: ClosureReason::ProcessingError { err: err.err.clone() } });
+                                       if let Some((channel_id, user_channel_id)) = chan_id {
+                                               $self.pending_events.lock().unwrap().push(events::Event::ChannelClosed {
+                                                       channel_id, user_channel_id,
+                                                       reason: ClosureReason::ProcessingError { err: err.err.clone() }
+                                               });
                                        }
                                }
 
@@ -879,7 +1039,8 @@ macro_rules! convert_chan_err {
                                        $short_to_id.remove(&short_id);
                                }
                                let shutdown_res = $channel.force_shutdown(true);
-                               (true, MsgHandleErrInternal::from_finish_shutdown(msg, *$channel_id, shutdown_res, $self.get_channel_update_for_broadcast(&$channel).ok()))
+                               (true, MsgHandleErrInternal::from_finish_shutdown(msg, *$channel_id, $channel.get_user_id(),
+                                       shutdown_res, $self.get_channel_update_for_broadcast(&$channel).ok()))
                        },
                        ChannelError::CloseDelayBroadcast(msg) => {
                                log_error!($self.logger, "Channel {} needs to be shut down, but its closing transactions were not broadcast due to {}", log_bytes!($channel_id[..]), msg);
@@ -887,7 +1048,8 @@ macro_rules! convert_chan_err {
                                        $short_to_id.remove(&short_id);
                                }
                                let shutdown_res = $channel.force_shutdown(false);
-                               (true, MsgHandleErrInternal::from_finish_shutdown(msg, *$channel_id, shutdown_res, $self.get_channel_update_for_broadcast(&$channel).ok()))
+                               (true, MsgHandleErrInternal::from_finish_shutdown(msg, *$channel_id, $channel.get_user_id(),
+                                       shutdown_res, $self.get_channel_update_for_broadcast(&$channel).ok()))
                        }
                }
        }
@@ -939,7 +1101,7 @@ macro_rules! handle_monitor_err {
        ($self: ident, $err: expr, $channel_state: expr, $entry: expr, $action_type: path, $resend_raa: expr, $resend_commitment: expr) => {
                handle_monitor_err!($self, $err, $channel_state, $entry, $action_type, $resend_raa, $resend_commitment, Vec::new(), Vec::new())
        };
-       ($self: ident, $err: expr, $short_to_id: expr, $chan: expr, $action_type: path, $resend_raa: expr, $resend_commitment: expr, $failed_forwards: expr, $failed_fails: expr, $chan_id: expr) => {
+       ($self: ident, $err: expr, $short_to_id: expr, $chan: expr, $action_type: path, $resend_raa: expr, $resend_commitment: expr, $failed_forwards: expr, $failed_fails: expr, $failed_finalized_fulfills: expr, $chan_id: expr) => {
                match $err {
                        ChannelMonitorUpdateErr::PermanentFailure => {
                                log_error!($self.logger, "Closing channel {} due to monitor update ChannelMonitorUpdateErr::PermanentFailure", log_bytes!($chan_id[..]));
@@ -955,12 +1117,12 @@ macro_rules! handle_monitor_err {
                                // splitting hairs we'd prefer to claim payments that were to us, but we haven't
                                // given up the preimage yet, so might as well just wait until the payment is
                                // retried, avoiding the on-chain fees.
-                               let res: Result<(), _> = Err(MsgHandleErrInternal::from_finish_shutdown("ChannelMonitor storage failure".to_owned(), *$chan_id,
+                               let res: Result<(), _> = Err(MsgHandleErrInternal::from_finish_shutdown("ChannelMonitor storage failure".to_owned(), *$chan_id, $chan.get_user_id(),
                                                $chan.force_shutdown(true), $self.get_channel_update_for_broadcast(&$chan).ok() ));
                                (res, true)
                        },
                        ChannelMonitorUpdateErr::TemporaryFailure => {
-                               log_info!($self.logger, "Disabling channel {} due to monitor update TemporaryFailure. On restore will send {} and process {} forwards and {} fails",
+                               log_info!($self.logger, "Disabling channel {} due to monitor update TemporaryFailure. On restore will send {} and process {} forwards, {} fails, and {} fulfill finalizations",
                                                log_bytes!($chan_id[..]),
                                                if $resend_commitment && $resend_raa {
                                                                match $action_type {
@@ -971,25 +1133,29 @@ macro_rules! handle_monitor_err {
                                                        else if $resend_raa { "RAA" }
                                                        else { "nothing" },
                                                (&$failed_forwards as &Vec<(PendingHTLCInfo, u64)>).len(),
-                                               (&$failed_fails as &Vec<(HTLCSource, PaymentHash, HTLCFailReason)>).len());
+                                               (&$failed_fails as &Vec<(HTLCSource, PaymentHash, HTLCFailReason)>).len(),
+                                               (&$failed_finalized_fulfills as &Vec<HTLCSource>).len());
                                if !$resend_commitment {
                                        debug_assert!($action_type == RAACommitmentOrder::RevokeAndACKFirst || !$resend_raa);
                                }
                                if !$resend_raa {
                                        debug_assert!($action_type == RAACommitmentOrder::CommitmentFirst || !$resend_commitment);
                                }
-                               $chan.monitor_update_failed($resend_raa, $resend_commitment, $failed_forwards, $failed_fails);
+                               $chan.monitor_update_failed($resend_raa, $resend_commitment, $failed_forwards, $failed_fails, $failed_finalized_fulfills);
                                (Err(MsgHandleErrInternal::from_chan_no_close(ChannelError::Ignore("Failed to update ChannelMonitor".to_owned()), *$chan_id)), false)
                        },
                }
        };
-       ($self: ident, $err: expr, $channel_state: expr, $entry: expr, $action_type: path, $resend_raa: expr, $resend_commitment: expr, $failed_forwards: expr, $failed_fails: expr) => { {
-               let (res, drop) = handle_monitor_err!($self, $err, $channel_state.short_to_id, $entry.get_mut(), $action_type, $resend_raa, $resend_commitment, $failed_forwards, $failed_fails, $entry.key());
+       ($self: ident, $err: expr, $channel_state: expr, $entry: expr, $action_type: path, $resend_raa: expr, $resend_commitment: expr, $failed_forwards: expr, $failed_fails: expr, $failed_finalized_fulfills: expr) => { {
+               let (res, drop) = handle_monitor_err!($self, $err, $channel_state.short_to_id, $entry.get_mut(), $action_type, $resend_raa, $resend_commitment, $failed_forwards, $failed_fails, $failed_finalized_fulfills, $entry.key());
                if drop {
                        $entry.remove_entry();
                }
                res
        } };
+       ($self: ident, $err: expr, $channel_state: expr, $entry: expr, $action_type: path, $resend_raa: expr, $resend_commitment: expr, $failed_forwards: expr, $failed_fails: expr) => {
+               handle_monitor_err!($self, $err, $channel_state, $entry, $action_type, $resend_raa, $resend_commitment, $failed_forwards, $failed_fails, Vec::new())
+       }
 }
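
The added short arm is the usual default-argument idiom for `macro_rules!`: the lower-arity arm forwards to the full one with an empty `Vec`, so existing call sites keep compiling. In miniature (toy macro, not LDK's):

```rust
macro_rules! report {
    ($fails: expr, $fulfills: expr) => {
        println!("{} fails, {} fulfill finalizations", $fails.len(), $fulfills.len())
    };
    ($fails: expr) => {
        // Call sites that predate the new argument are unchanged:
        report!($fails, Vec::<u8>::new())
    };
}

fn main() {
    report!(vec![1, 2]);         // expands with the Vec::new() default
    report!(vec![1], vec![9u8]); // passes the fulfill list explicitly
}
```
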
 
 macro_rules! return_monitor_err {
@@ -1209,22 +1375,31 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
 
        /// Creates a new outbound channel to the given remote node and with the given value.
        ///
-       /// user_id will be provided back as user_channel_id in FundingGenerationReady events to allow
-       /// tracking of which events correspond with which create_channel call. Note that the
-       /// user_channel_id defaults to 0 for inbound channels, so you may wish to avoid using 0 for
-       /// user_id here. user_id has no meaning inside of LDK, it is simply copied to events and
-       /// otherwise ignored.
-       ///
-       /// If successful, will generate a SendOpenChannel message event, so you should probably poll
-       /// PeerManager::process_events afterwards.
+       /// `user_channel_id` will be provided back as in
+       /// [`Event::FundingGenerationReady::user_channel_id`] to allow tracking of which events
+       /// correspond with which `create_channel` call. Note that the `user_channel_id` defaults to 0
+       /// for inbound channels, so you may wish to avoid using 0 for `user_channel_id` here.
+       /// `user_channel_id` has no meaning inside of LDK, it is simply copied to events and otherwise
+       /// ignored.
        ///
-       /// Raises APIError::APIMisuseError when channel_value_satoshis > 2**24 or push_msat is
-       /// greater than channel_value_satoshis * 1k or channel_value_satoshis is < 1000.
+       /// Raises [`APIError::APIMisuseError`] when `channel_value_satoshis` > 2**24 or `push_msat` is
+       /// greater than `channel_value_satoshis * 1k` or `channel_value_satoshis < 1000`.
        ///
        /// Note that we do not check if you are currently connected to the given peer. If no
        /// connection is available, the outbound `open_channel` message may fail to send, resulting in
-       /// the channel eventually being silently forgotten.
-       pub fn create_channel(&self, their_network_key: PublicKey, channel_value_satoshis: u64, push_msat: u64, user_id: u64, override_config: Option<UserConfig>) -> Result<(), APIError> {
+       /// the channel eventually being silently forgotten (dropped on reload).
+       ///
+       /// Returns the new Channel's temporary `channel_id`. This ID will appear as
+       /// [`Event::FundingGenerationReady::temporary_channel_id`] and in
+       /// [`ChannelDetails::channel_id`] until after
+       /// [`ChannelManager::funding_transaction_generated`] is called, swapping the Channel's ID for
+       /// one derived from the funding transaction's TXID. If the counterparty rejects the channel
+       /// immediately, this temporary ID will appear in [`Event::ChannelClosed::channel_id`].
+       ///
+       /// [`Event::FundingGenerationReady::user_channel_id`]: events::Event::FundingGenerationReady::user_channel_id
+       /// [`Event::FundingGenerationReady::temporary_channel_id`]: events::Event::FundingGenerationReady::temporary_channel_id
+       /// [`Event::ChannelClosed::channel_id`]: events::Event::ChannelClosed::channel_id
+       pub fn create_channel(&self, their_network_key: PublicKey, channel_value_satoshis: u64, push_msat: u64, user_channel_id: u64, override_config: Option<UserConfig>) -> Result<[u8; 32], APIError> {
                if channel_value_satoshis < 1000 {
                        return Err(APIError::APIMisuseError { err: format!("Channel value must be at least 1000 satoshis. It was {}", channel_value_satoshis) });
                }
@@ -1236,7 +1411,8 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                        let peer_state = peer_state.lock().unwrap();
                                        let their_features = &peer_state.latest_features;
                                        let config = if override_config.is_some() { override_config.as_ref().unwrap() } else { &self.default_configuration };
-                                       Channel::new_outbound(&self.fee_estimator, &self.keys_manager, their_network_key, their_features, channel_value_satoshis, push_msat, user_id, config)?
+                                       Channel::new_outbound(&self.fee_estimator, &self.keys_manager, their_network_key, their_features,
+                                               channel_value_satoshis, push_msat, user_channel_id, config, self.best_block.read().unwrap().height())?
                                },
                                None => return Err(APIError::ChannelUnavailable { err: format!("Not connected to node: {}", their_network_key) }),
                        }
@@ -1247,8 +1423,9 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                // We want to make sure the lock is actually acquired by PersistenceNotifierGuard.
                debug_assert!(&self.total_consistency_lock.try_write().is_err());
 
+               let temporary_channel_id = channel.channel_id();
                let mut channel_state = self.channel_state.lock().unwrap();
-               match channel_state.by_id.entry(channel.channel_id()) {
+               match channel_state.by_id.entry(temporary_channel_id) {
                        hash_map::Entry::Occupied(_) => {
                                if cfg!(feature = "fuzztarget") {
                                        return Err(APIError::APIMisuseError { err: "Fuzzy bad RNG".to_owned() });
@@ -1262,7 +1439,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                        node_id: their_network_key,
                        msg: res,
                });
-               Ok(())
+               Ok(temporary_channel_id)
        }
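
A hedged usage sketch of the new return value (the `channel_manager` handle and `counterparty_pubkey` are assumptions, and the fragment is presumed to sit in a function that can `?` an `APIError`):

```rust
// create_channel now hands back the temporary channel id immediately:
let temporary_channel_id = channel_manager.create_channel(
    counterparty_pubkey, // their_network_key
    100_000,             // channel_value_satoshis
    0,                   // push_msat
    42,                  // user_channel_id, echoed back in events
    None,                // override_config
)?;
// Until funding_transaction_generated() swaps in an id derived from the
// funding TXID, this id appears in FundingGenerationReady, in
// list_channels(), and, on immediate rejection, in ChannelClosed.
```
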
 
        fn list_channels_with_filter<Fn: FnMut(&(&[u8; 32], &Channel<Signer>)) -> bool>(&self, f: Fn) -> Vec<ChannelDetails> {
@@ -1288,7 +1465,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                        unspendable_punishment_reserve: to_self_reserve_satoshis,
                                        inbound_capacity_msat,
                                        outbound_capacity_msat,
-                                       user_id: channel.get_user_id(),
+                                       user_channel_id: channel.get_user_id(),
                                        confirmations_required: channel.minimum_depth(),
                                        force_close_spend_delay: channel.get_counterparty_selected_contest_delay(),
                                        is_outbound: channel.is_outbound(),
@@ -1326,6 +1503,22 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                self.list_channels_with_filter(|&(_, ref channel)| channel.is_live())
        }
 
+       /// Helper function that issues the channel close events
+       fn issue_channel_close_events(&self, channel: &Channel<Signer>, closure_reason: ClosureReason) {
+               let mut pending_events_lock = self.pending_events.lock().unwrap();
+               match channel.unbroadcasted_funding() {
+                       Some(transaction) => {
+                               pending_events_lock.push(events::Event::DiscardFunding { channel_id: channel.channel_id(), transaction })
+                       },
+                       None => {},
+               }
+               pending_events_lock.push(events::Event::ChannelClosed {
+                       channel_id: channel.channel_id(),
+                       user_channel_id: channel.get_user_id(),
+                       reason: closure_reason
+               });
+       }
+
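On the consuming side, the two events this helper can emit might be handled like so (a sketch; `event` comes from the user's event loop, and both variants' fields are as introduced in this changeset):

```rust
match event {
    Event::ChannelClosed { channel_id, user_channel_id, reason } => {
        // user_channel_id is whatever was passed to create_channel
        // (0 for inbound channels).
        println!("channel {:?} (user id {}) closed: {:?}", channel_id, user_channel_id, reason);
    },
    Event::DiscardFunding { channel_id, transaction } => {
        // The funding transaction was never broadcast, so its inputs are
        // safe to respend.
        let _ = (channel_id, transaction);
    },
    _ => {},
}
```
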
        fn close_channel_internal(&self, channel_id: &[u8; 32], target_feerate_sats_per_1000_weight: Option<u32>) -> Result<(), APIError> {
                let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
 
@@ -1352,7 +1545,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                        if let Some(monitor_update) = monitor_update {
                                                if let Err(e) = self.chain_monitor.update_channel(chan_entry.get().get_funding_txo().unwrap(), monitor_update) {
                                                        let (result, is_permanent) =
-                                                               handle_monitor_err!(self, e, channel_state.short_to_id, chan_entry.get_mut(), RAACommitmentOrder::CommitmentFirst, false, false, Vec::new(), Vec::new(), chan_entry.key());
+                                                               handle_monitor_err!(self, e, channel_state.short_to_id, chan_entry.get_mut(), RAACommitmentOrder::CommitmentFirst, false, false, Vec::new(), Vec::new(), Vec::new(), chan_entry.key());
                                                        if is_permanent {
                                                                remove_channel!(channel_state, chan_entry);
                                                                break result;
@@ -1372,12 +1565,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                                                msg: channel_update
                                                        });
                                                }
-                                               if let Ok(mut pending_events_lock) = self.pending_events.lock() {
-                                                       pending_events_lock.push(events::Event::ChannelClosed {
-                                                               channel_id: *channel_id,
-                                                               reason: ClosureReason::HolderForceClosed
-                                                       });
-                                               }
+                                               self.issue_channel_close_events(&channel, ClosureReason::HolderForceClosed);
                                        }
                                        break Ok(());
                                },
@@ -1468,13 +1656,12 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                if let Some(short_id) = chan.get().get_short_channel_id() {
                                        channel_state.short_to_id.remove(&short_id);
                                }
-                               let mut pending_events_lock = self.pending_events.lock().unwrap();
                                if peer_node_id.is_some() {
                                        if let Some(peer_msg) = peer_msg {
-                                               pending_events_lock.push(events::Event::ChannelClosed { channel_id: *channel_id, reason: ClosureReason::CounterpartyForceClosed { peer_msg: peer_msg.to_string() } });
+                                               self.issue_channel_close_events(chan.get(), ClosureReason::CounterpartyForceClosed { peer_msg: peer_msg.to_string() });
                                        }
                                } else {
-                                       pending_events_lock.push(events::Event::ChannelClosed { channel_id: *channel_id, reason: ClosureReason::HolderForceClosed });
+                                       self.issue_channel_close_events(chan.get(), ClosureReason::HolderForceClosed);
                                }
                                chan.remove_entry().1
                        } else {
@@ -1786,17 +1973,24 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                                break Some(("Forwarding node has tampered with the intended HTLC values or origin node has an obsolete cltv_expiry_delta", 0x1000 | 13, Some(self.get_channel_update_for_unicast(chan).unwrap())));
                                        }
                                        let cur_height = self.best_block.read().unwrap().height() + 1;
-                                       // Theoretically, channel counterparty shouldn't send us a HTLC expiring now, but we want to be robust wrt to counterparty
-                                       // packet sanitization (see HTLC_FAIL_BACK_BUFFER rational)
+                                       // Theoretically, channel counterparty shouldn't send us a HTLC expiring now,
+                                       // but we want to be robust wrt counterparty packet sanitization (see
+                                       // HTLC_FAIL_BACK_BUFFER rationale).
                                        if msg.cltv_expiry <= cur_height + HTLC_FAIL_BACK_BUFFER as u32 { // expiry_too_soon
                                                break Some(("CLTV expiry is too close", 0x1000 | 14, Some(self.get_channel_update_for_unicast(chan).unwrap())));
                                        }
                                        if msg.cltv_expiry > cur_height + CLTV_FAR_FAR_AWAY as u32 { // expiry_too_far
                                                break Some(("CLTV expiry is too far in the future", 21, None));
                                        }
-                                       // In theory, we would be safe against unintentional channel-closure, if we only required a margin of LATENCY_GRACE_PERIOD_BLOCKS.
-                                       // But, to be safe against policy reception, we use a longer delay.
-                                       if (*outgoing_cltv_value) as u64 <= (cur_height + HTLC_FAIL_BACK_BUFFER) as u64 {
+                                       // If the HTLC expires ~now, don't bother trying to forward it to our
+                                       // counterparty. They should fail it anyway, but we don't want to bother with
+                                       // the round-trips or risk them deciding they definitely want the HTLC and
+                                       // force-closing to ensure they get it if we're offline.
+                                       // We previously had a much more aggressive check here which tried to ensure
+                                       // our counterparty receives an HTLC which has *our* risk threshold met on it,
+                                       // but there is no need to do that, and since we're a bit conservative with our
+                                       // risk threshold it just results in failing to forward payments.
+                                       if (*outgoing_cltv_value) as u64 <= (cur_height + LATENCY_GRACE_PERIOD_BLOCKS) as u64 {
                                                break Some(("Outgoing CLTV value is too soon", 0x1000 | 14, Some(self.get_channel_update_for_unicast(chan).unwrap())));
                                        }
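
Numerically, assuming the constants' values at the time of writing (`LATENCY_GRACE_PERIOD_BLOCKS = 6` and `HTLC_FAIL_BACK_BUFFER = 24`; both stated here as assumptions, not quoted from this file), the relaxed check forwards HTLCs the old one refused:

```rust
const LATENCY_GRACE_PERIOD_BLOCKS: u32 = 6; // assumed value
const HTLC_FAIL_BACK_BUFFER: u32 = 24;      // assumed: claim buffer + grace period

fn main() {
    let cur_height = 700_000u32;
    let outgoing_cltv_value = 700_010u32; // expires ten blocks out

    // Old rule: refuse anything expiring within HTLC_FAIL_BACK_BUFFER blocks.
    assert!(outgoing_cltv_value <= cur_height + HTLC_FAIL_BACK_BUFFER); // refused
    // New rule: only refuse HTLCs expiring essentially immediately.
    assert!(outgoing_cltv_value > cur_height + LATENCY_GRACE_PERIOD_BLOCKS); // forwarded
}
```
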
 
@@ -1878,7 +2072,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
        }
 
        // Only public for testing, this should otherwise never be called directly
-       pub(crate) fn send_payment_along_path(&self, path: &Vec<RouteHop>, payment_hash: &PaymentHash, payment_secret: &Option<PaymentSecret>, total_value: u64, cur_height: u32, mpp_id: MppId, keysend_preimage: &Option<PaymentPreimage>) -> Result<(), APIError> {
+       pub(crate) fn send_payment_along_path(&self, path: &Vec<RouteHop>, payee: &Option<Payee>, payment_hash: &PaymentHash, payment_secret: &Option<PaymentSecret>, total_value: u64, cur_height: u32, payment_id: PaymentId, keysend_preimage: &Option<PaymentPreimage>) -> Result<(), APIError> {
                log_trace!(self.logger, "Attempting to send payment for path with next hop {}", path.first().unwrap().short_channel_id);
                let prng_seed = self.keys_manager.get_secure_random_bytes();
                let session_priv_bytes = self.keys_manager.get_secure_random_bytes();
@@ -1893,17 +2087,40 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                let onion_packet = onion_utils::construct_onion_packet(onion_payloads, onion_keys, prng_seed, payment_hash);
 
                let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
-               let mut pending_outbounds = self.pending_outbound_payments.lock().unwrap();
-               let sessions = pending_outbounds.entry(mpp_id).or_insert(HashSet::new());
-               assert!(sessions.insert(session_priv_bytes));
 
                let err: Result<(), _> = loop {
                        let mut channel_lock = self.channel_state.lock().unwrap();
+
+                       let mut pending_outbounds = self.pending_outbound_payments.lock().unwrap();
+                       let payment_entry = pending_outbounds.entry(payment_id);
+                       if let hash_map::Entry::Occupied(payment) = &payment_entry {
+                               if !payment.get().is_retryable() {
+                                       return Err(APIError::RouteError {
+                                               err: "Payment already completed"
+                                       });
+                               }
+                       }
+
                        let id = match channel_lock.short_to_id.get(&path.first().unwrap().short_channel_id) {
                                None => return Err(APIError::ChannelUnavailable{err: "No channel available with first hop!".to_owned()}),
                                Some(id) => id.clone(),
                        };
 
+                       macro_rules! insert_outbound_payment {
+                               () => {
+                                       let payment = payment_entry.or_insert_with(|| PendingOutboundPayment::Retryable {
+                                               session_privs: HashSet::new(),
+                                               pending_amt_msat: 0,
+                                               pending_fee_msat: Some(0),
+                                               payment_hash: *payment_hash,
+                                               payment_secret: *payment_secret,
+                                               starting_block_height: self.best_block.read().unwrap().height(),
+                                               total_msat: total_value,
+                                       });
+                                       assert!(payment.insert(session_priv_bytes, path));
+                               }
+                       }
+
                        let channel_state = &mut *channel_lock;
                        if let hash_map::Entry::Occupied(mut chan) = channel_state.by_id.entry(id) {
                                match {
@@ -1913,12 +2130,16 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                        if !chan.get().is_live() {
                                                return Err(APIError::ChannelUnavailable{err: "Peer for first hop currently disconnected/pending monitor update!".to_owned()});
                                        }
-                                       break_chan_entry!(self, chan.get_mut().send_htlc_and_commit(htlc_msat, payment_hash.clone(), htlc_cltv, HTLCSource::OutboundRoute {
-                                               path: path.clone(),
-                                               session_priv: session_priv.clone(),
-                                               first_hop_htlc_msat: htlc_msat,
-                                               mpp_id,
-                                       }, onion_packet, &self.logger), channel_state, chan)
+                                       break_chan_entry!(self, chan.get_mut().send_htlc_and_commit(
+                                               htlc_msat, payment_hash.clone(), htlc_cltv, HTLCSource::OutboundRoute {
+                                                       path: path.clone(),
+                                                       session_priv: session_priv.clone(),
+                                                       first_hop_htlc_msat: htlc_msat,
+                                                       payment_id,
+                                                       payment_secret: payment_secret.clone(),
+                                                       payee: payee.clone(),
+                                               }, onion_packet, &self.logger),
+                                       channel_state, chan)
                                } {
                                        Some((update_add, commitment_signed, monitor_update)) => {
                                                if let Err(e) = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), monitor_update) {
@@ -1928,8 +2149,10 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                                        // is restored. Therefore, we must return an error indicating that
                                                        // it is unsafe to retry the payment wholesale, which we do in the
                                                        // send_payment check for MonitorUpdateFailed, below.
+                                                       insert_outbound_payment!(); // Only do this after possibly break'ing on Perm failure above.
                                                        return Err(APIError::MonitorUpdateFailed);
                                                }
+                                               insert_outbound_payment!();
 
                                                log_debug!(self.logger, "Sending payment along path resulted in a commitment_signed for channel {}", log_bytes!(chan.get().channel_id()));
                                                channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
@@ -1944,7 +2167,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                                        },
                                                });
                                        },
-                                       None => {},
+                                       None => { insert_outbound_payment!(); },
                                }
                        } else { unreachable!(); }
                        return Ok(());
@@ -1997,11 +2220,11 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
        /// If a payment_secret *is* provided, we assume that the invoice had the payment_secret feature
        /// bit set (either as required or as available). If multiple paths are present in the Route,
        /// we assume the invoice had the basic_mpp feature set.
-       pub fn send_payment(&self, route: &Route, payment_hash: PaymentHash, payment_secret: &Option<PaymentSecret>) -> Result<(), PaymentSendFailure> {
-               self.send_payment_internal(route, payment_hash, payment_secret, None)
+       pub fn send_payment(&self, route: &Route, payment_hash: PaymentHash, payment_secret: &Option<PaymentSecret>) -> Result<PaymentId, PaymentSendFailure> {
+               self.send_payment_internal(route, payment_hash, payment_secret, None, None, None)
        }
 
-       fn send_payment_internal(&self, route: &Route, payment_hash: PaymentHash, payment_secret: &Option<PaymentSecret>, keysend_preimage: Option<PaymentPreimage>) -> Result<(), PaymentSendFailure> {
+       fn send_payment_internal(&self, route: &Route, payment_hash: PaymentHash, payment_secret: &Option<PaymentSecret>, keysend_preimage: Option<PaymentPreimage>, payment_id: Option<PaymentId>, recv_value_msat: Option<u64>) -> Result<PaymentId, PaymentSendFailure> {
                if route.paths.len() < 1 {
                        return Err(PaymentSendFailure::ParameterError(APIError::RouteError{err: "There must be at least one path to send over"}));
                }
@@ -2017,7 +2240,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                let mut total_value = 0;
                let our_node_id = self.get_our_node_id();
                let mut path_errs = Vec::with_capacity(route.paths.len());
-               let mpp_id = MppId(self.keys_manager.get_secure_random_bytes());
+               let payment_id = if let Some(id) = payment_id { id } else { PaymentId(self.keys_manager.get_secure_random_bytes()) };
                'path_check: for path in route.paths.iter() {
                        if path.len() < 1 || path.len() > 20 {
                                path_errs.push(Err(APIError::RouteError{err: "Path didn't go anywhere/had bogus size"}));
@@ -2035,15 +2258,21 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                if path_errs.iter().any(|e| e.is_err()) {
                        return Err(PaymentSendFailure::PathParameterError(path_errs));
                }
+               if let Some(amt_msat) = recv_value_msat {
+                       debug_assert!(amt_msat >= total_value);
+                       total_value = amt_msat;
+               }
 
                let cur_height = self.best_block.read().unwrap().height() + 1;
                let mut results = Vec::new();
                for path in route.paths.iter() {
-                       results.push(self.send_payment_along_path(&path, &payment_hash, payment_secret, total_value, cur_height, mpp_id, &keysend_preimage));
+                       results.push(self.send_payment_along_path(&path, &route.payee, &payment_hash, payment_secret, total_value, cur_height, payment_id, &keysend_preimage));
                }
                let mut has_ok = false;
                let mut has_err = false;
-               for res in results.iter() {
+               let mut pending_amt_unsent = 0;
+               let mut max_unsent_cltv_delta = 0;
+               for (res, path) in results.iter().zip(route.paths.iter()) {
                        if res.is_ok() { has_ok = true; }
                        if res.is_err() { has_err = true; }
                        if let &Err(APIError::MonitorUpdateFailed) = res {
@@ -2051,18 +2280,88 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                // PartialFailure.
                                has_err = true;
                                has_ok = true;
-                               break;
+                       } else if res.is_err() {
+                               pending_amt_unsent += path.last().unwrap().fee_msat;
+                               max_unsent_cltv_delta = cmp::max(max_unsent_cltv_delta, path.last().unwrap().cltv_expiry_delta);
                        }
                }
                if has_err && has_ok {
-                       Err(PaymentSendFailure::PartialFailure(results))
+                       Err(PaymentSendFailure::PartialFailure {
+                               results,
+                               payment_id,
+                               failed_paths_retry: if pending_amt_unsent != 0 {
+                                       if let Some(payee) = &route.payee {
+                                               Some(RouteParameters {
+                                                       payee: payee.clone(),
+                                                       final_value_msat: pending_amt_unsent,
+                                                       final_cltv_expiry_delta: max_unsent_cltv_delta,
+                                               })
+                                       } else { None }
+                               } else { None },
+                       })
                } else if has_err {
+                       // If we failed to send any paths, we shouldn't have inserted the new PaymentId into
+                       // our `pending_outbound_payments` map at all.
+                       debug_assert!(self.pending_outbound_payments.lock().unwrap().get(&payment_id).is_none());
                        Err(PaymentSendFailure::AllFailedRetrySafe(results.drain(..).map(|r| r.unwrap_err()).collect()))
                } else {
-                       Ok(())
+                       Ok(payment_id)
                }
        }
 
+       /// Retries a payment along the given [`Route`].
+       ///
+       /// Errors returned are a superset of those returned from [`send_payment`], so see
+       /// [`send_payment`] documentation for more details on errors. This method will also error if the
+       /// retry amount puts the payment more than 10% over the payment's total amount, or if the payment
+       /// for the given `payment_id` cannot be found (likely due to timeout or success).
+       ///
+       /// [`send_payment`]: Self::send_payment
+       pub fn retry_payment(&self, route: &Route, payment_id: PaymentId) -> Result<(), PaymentSendFailure> {
+               const RETRY_OVERFLOW_PERCENTAGE: u64 = 10;
+               for path in route.paths.iter() {
+                       if path.len() == 0 {
+                               return Err(PaymentSendFailure::ParameterError(APIError::APIMisuseError {
+                                       err: "length-0 path in route".to_string()
+                               }))
+                       }
+               }
+
+               let (total_msat, payment_hash, payment_secret) = {
+                       let outbounds = self.pending_outbound_payments.lock().unwrap();
+                       if let Some(payment) = outbounds.get(&payment_id) {
+                               match payment {
+                                       PendingOutboundPayment::Retryable {
+                                               total_msat, payment_hash, payment_secret, pending_amt_msat, ..
+                                       } => {
+                                               let retry_amt_msat: u64 = route.paths.iter().map(|path| path.last().unwrap().fee_msat).sum();
+                                               if retry_amt_msat + *pending_amt_msat > *total_msat * (100 + RETRY_OVERFLOW_PERCENTAGE) / 100 {
+                                                       return Err(PaymentSendFailure::ParameterError(APIError::APIMisuseError {
+                                                               err: format!("retry_amt_msat of {} will put pending_amt_msat (currently: {}) more than 10% over total_payment_amt_msat of {}", retry_amt_msat, pending_amt_msat, total_msat)
+                                                       }))
+                                               }
+                                               (*total_msat, *payment_hash, *payment_secret)
+                                       },
+                                       PendingOutboundPayment::Legacy { .. } => {
+                                               return Err(PaymentSendFailure::ParameterError(APIError::APIMisuseError {
+                                                       err: "Unable to retry payments that were initially sent on LDK versions prior to 0.0.102".to_string()
+                                               }))
+                                       },
+                                       PendingOutboundPayment::Fulfilled { .. } => {
+                                               return Err(PaymentSendFailure::ParameterError(APIError::RouteError {
+                                                       err: "Payment already completed"
+                                               }));
+                                       },
+                               }
+                       } else {
+                               return Err(PaymentSendFailure::ParameterError(APIError::APIMisuseError {
+                                       err: format!("Payment with ID {} not found", log_bytes!(payment_id.0)),
+                               }))
+                       }
+               };
+               return self.send_payment_internal(route, payment_hash, &payment_secret, None, Some(payment_id), Some(total_msat)).map(|_| ())
+       }
+
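A minimal sketch of the intended consumer loop for the new retry API, assuming an `Event`-handling context with `channel_manager` in scope and a hypothetical `find_route` helper that accepts the event's `RouteParameters`; only `retry_payment`, `PaymentId`, and the `PaymentPathFailed` fields come from this patch:

    match event {
        Event::PaymentPathFailed {
            payment_id: Some(payment_id), rejected_by_dest: false, retry: Some(route_params), ..
        } => {
            // `retry` carries the payee and the final_value_msat still owed on
            // this path, so only the failed portion is re-routed.
            if let Ok(route) = find_route(&route_params) {
                let _ = channel_manager.retry_payment(&route, payment_id);
            }
        },
        _ => {},
    }
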
        /// Send a spontaneous payment, which is a payment that does not require the recipient to have
        /// generated an invoice. Optionally, you may specify the preimage. If you do choose to specify
        /// the preimage, it must be a cryptographically secure random value that no intermediate node
@@ -2077,14 +2376,14 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
        /// Note that `route` must have exactly one path.
        ///
        /// [`send_payment`]: Self::send_payment
-       pub fn send_spontaneous_payment(&self, route: &Route, payment_preimage: Option<PaymentPreimage>) -> Result<PaymentHash, PaymentSendFailure> {
+       pub fn send_spontaneous_payment(&self, route: &Route, payment_preimage: Option<PaymentPreimage>) -> Result<(PaymentHash, PaymentId), PaymentSendFailure> {
                let preimage = match payment_preimage {
                        Some(p) => p,
                        None => PaymentPreimage(self.keys_manager.get_secure_random_bytes()),
                };
                let payment_hash = PaymentHash(Sha256::hash(&preimage.0).into_inner());
-               match self.send_payment_internal(route, payment_hash, &None, Some(preimage)) {
-                       Ok(()) => Ok(payment_hash),
+               match self.send_payment_internal(route, payment_hash, &None, Some(preimage), None, None) {
+                       Ok(payment_id) => Ok((payment_hash, payment_id)),
                        Err(e) => Err(e)
                }
        }
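A short usage sketch of the new return shape, assuming the surrounding function propagates `PaymentSendFailure` and `route` came from the caller's router:

    // Passing None lets LDK draw a secure random preimage, as above. The
    // returned PaymentId can later be handed to retry_payment.
    let (payment_hash, payment_id) =
        channel_manager.send_spontaneous_payment(&route, None)?;
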
@@ -2100,7 +2399,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
 
                                        (chan.get_outbound_funding_created(funding_transaction, funding_txo, &self.logger)
                                                .map_err(|e| if let ChannelError::Close(msg) = e {
-                                                       MsgHandleErrInternal::from_finish_shutdown(msg, chan.channel_id(), chan.force_shutdown(true), None)
+                                                       MsgHandleErrInternal::from_finish_shutdown(msg, chan.channel_id(), chan.get_user_id(), chan.force_shutdown(true), None)
                                                } else { unreachable!(); })
                                        , chan)
                                },
@@ -2362,7 +2661,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                                                                htlc_id: prev_htlc_id,
                                                                                incoming_packet_shared_secret: incoming_shared_secret,
                                                                        });
-                                                                       match chan.get_mut().send_htlc(amt_to_forward, payment_hash, outgoing_cltv_value, htlc_source.clone(), onion_packet) {
+                                                                       match chan.get_mut().send_htlc(amt_to_forward, payment_hash, outgoing_cltv_value, htlc_source.clone(), onion_packet, &self.logger) {
                                                                                Err(e) => {
                                                                                        if let ChannelError::Ignore(msg) = e {
                                                                                                log_trace!(self.logger, "Failed to forward HTLC with payment_hash {}: {}", log_bytes!(payment_hash.0), msg);
@@ -2442,7 +2741,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                                                                                channel_state.short_to_id.remove(&short_id);
                                                                                        }
                                                                                        // ChannelClosed event is generated by handle_error for us.
-                                                                                       Err(MsgHandleErrInternal::from_finish_shutdown(msg, channel_id, channel.force_shutdown(true), self.get_channel_update_for_broadcast(&channel).ok()))
+                                                                                       Err(MsgHandleErrInternal::from_finish_shutdown(msg, channel_id, channel.get_user_id(), channel.force_shutdown(true), self.get_channel_update_for_broadcast(&channel).ok()))
                                                                                },
                                                                                ChannelError::CloseDelayBroadcast(_) => { panic!("Wait is only generated on receipt of channel_reestablish, which is handled by try_chan_entry, we don't bother to support it here"); }
                                                                        };
@@ -2601,7 +2900,6 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                                                                                        purpose: events::PaymentPurpose::InvoicePayment {
                                                                                                                payment_preimage: inbound_payment.get().payment_preimage,
                                                                                                                payment_secret: payment_data.payment_secret,
-                                                                                                               user_payment_id: inbound_payment.get().user_payment_id,
                                                                                                        },
                                                                                                        amt: total_value,
                                                                                                });
@@ -2699,7 +2997,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                let ret_err = match res {
                        Ok(Some((update_fee, commitment_signed, monitor_update))) => {
                                if let Err(e) = self.chain_monitor.update_channel(chan.get_funding_txo().unwrap(), monitor_update) {
-                                       let (res, drop) = handle_monitor_err!(self, e, short_to_id, chan, RAACommitmentOrder::CommitmentFirst, false, true, Vec::new(), Vec::new(), chan_id);
+                                       let (res, drop) = handle_monitor_err!(self, e, short_to_id, chan, RAACommitmentOrder::CommitmentFirst, false, true, Vec::new(), Vec::new(), Vec::new(), chan_id);
                                        if drop { retain_channel = false; }
                                        res
                                } else {
@@ -2875,28 +3173,36 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                        self.fail_htlc_backwards_internal(channel_state,
                                                htlc_src, &payment_hash, HTLCFailReason::Reason { failure_code, data: onion_failure_data});
                                },
-                               HTLCSource::OutboundRoute { session_priv, mpp_id, path, .. } => {
+                               HTLCSource::OutboundRoute { session_priv, payment_id, path, payee, .. } => {
                                        let mut session_priv_bytes = [0; 32];
                                        session_priv_bytes.copy_from_slice(&session_priv[..]);
                                        let mut outbounds = self.pending_outbound_payments.lock().unwrap();
-                                       if let hash_map::Entry::Occupied(mut sessions) = outbounds.entry(mpp_id) {
-                                               if sessions.get_mut().remove(&session_priv_bytes) {
+                                       if let hash_map::Entry::Occupied(mut payment) = outbounds.entry(payment_id) {
+                                               if payment.get_mut().remove(&session_priv_bytes, Some(&path)) && !payment.get().is_fulfilled() {
+                                                       let retry = if let Some(payee_data) = payee {
+                                                               let path_last_hop = path.last().expect("Outbound payments must have had a valid path");
+                                                               Some(RouteParameters {
+                                                                       payee: payee_data,
+                                                                       final_value_msat: path_last_hop.fee_msat,
+                                                                       final_cltv_expiry_delta: path_last_hop.cltv_expiry_delta,
+                                                               })
+                                                       } else { None };
                                                        self.pending_events.lock().unwrap().push(
                                                                events::Event::PaymentPathFailed {
+                                                                       payment_id: Some(payment_id),
                                                                        payment_hash,
                                                                        rejected_by_dest: false,
                                                                        network_update: None,
-                                                                       all_paths_failed: sessions.get().len() == 0,
+                                                                       all_paths_failed: payment.get().remaining_parts() == 0,
                                                                        path: path.clone(),
+                                                                       short_channel_id: None,
+                                                                       retry,
                                                                        #[cfg(test)]
                                                                        error_code: None,
                                                                        #[cfg(test)]
                                                                        error_data: None,
                                                                }
                                                        );
-                                                       if sessions.get().len() == 0 {
-                                                               sessions.remove();
-                                                       }
                                                }
                                        } else {
                                                log_trace!(self.logger, "Received duplicative fail for HTLC with payment_hash {}", log_bytes!(payment_hash.0));
@@ -2922,42 +3228,56 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                // from block_connected which may run during initialization prior to the chain_monitor
                // being fully configured. See the docs for `ChannelManagerReadArgs` for more.
                match source {
-                       HTLCSource::OutboundRoute { ref path, session_priv, mpp_id, .. } => {
+                       HTLCSource::OutboundRoute { ref path, session_priv, payment_id, ref payee, .. } => {
                                let mut session_priv_bytes = [0; 32];
                                session_priv_bytes.copy_from_slice(&session_priv[..]);
                                let mut outbounds = self.pending_outbound_payments.lock().unwrap();
                                let mut all_paths_failed = false;
-                               if let hash_map::Entry::Occupied(mut sessions) = outbounds.entry(mpp_id) {
-                                       if !sessions.get_mut().remove(&session_priv_bytes) {
+                               if let hash_map::Entry::Occupied(mut payment) = outbounds.entry(payment_id) {
+                                       if !payment.get_mut().remove(&session_priv_bytes, Some(&path)) {
                                                log_trace!(self.logger, "Received duplicative fail for HTLC with payment_hash {}", log_bytes!(payment_hash.0));
                                                return;
                                        }
-                                       if sessions.get().len() == 0 {
+                                       if payment.get().is_fulfilled() {
+                                               log_trace!(self.logger, "Received failure of HTLC with payment_hash {} after payment completion", log_bytes!(payment_hash.0));
+                                               return;
+                                       }
+                                       if payment.get().remaining_parts() == 0 {
                                                all_paths_failed = true;
-                                               sessions.remove();
                                        }
                                } else {
                                        log_trace!(self.logger, "Received duplicative fail for HTLC with payment_hash {}", log_bytes!(payment_hash.0));
                                        return;
                                }
-                               log_trace!(self.logger, "Failing outbound payment HTLC with payment_hash {}", log_bytes!(payment_hash.0));
                                mem::drop(channel_state_lock);
+                               let retry = if let Some(payee_data) = payee {
+                                       let path_last_hop = path.last().expect("Outbound payments must have had a valid path");
+                                       Some(RouteParameters {
+                                               payee: payee_data.clone(),
+                                               final_value_msat: path_last_hop.fee_msat,
+                                               final_cltv_expiry_delta: path_last_hop.cltv_expiry_delta,
+                                       })
+                               } else { None };
+                               log_trace!(self.logger, "Failing outbound payment HTLC with payment_hash {}", log_bytes!(payment_hash.0));
                                match &onion_error {
                                        &HTLCFailReason::LightningError { ref err } => {
 #[cfg(test)]
-                                               let (network_update, payment_retryable, onion_error_code, onion_error_data) = onion_utils::process_onion_failure(&self.secp_ctx, &self.logger, &source, err.data.clone());
+                                               let (network_update, short_channel_id, payment_retryable, onion_error_code, onion_error_data) = onion_utils::process_onion_failure(&self.secp_ctx, &self.logger, &source, err.data.clone());
 #[cfg(not(test))]
-                                               let (network_update, payment_retryable, _, _) = onion_utils::process_onion_failure(&self.secp_ctx, &self.logger, &source, err.data.clone());
+                                               let (network_update, short_channel_id, payment_retryable, _, _) = onion_utils::process_onion_failure(&self.secp_ctx, &self.logger, &source, err.data.clone());
                                                // TODO: If we decided to blame ourselves (or one of our channels) in
                                                // process_onion_failure we should close that channel as it implies our
                                                // next-hop is needlessly blaming us!
                                                self.pending_events.lock().unwrap().push(
                                                        events::Event::PaymentPathFailed {
+                                                               payment_id: Some(payment_id),
                                                                payment_hash: payment_hash.clone(),
                                                                rejected_by_dest: !payment_retryable,
                                                                network_update,
                                                                all_paths_failed,
                                                                path: path.clone(),
+                                                               short_channel_id,
+                                                               retry,
 #[cfg(test)]
                                                                error_code: onion_error_code,
 #[cfg(test)]
@@ -2980,11 +3300,14 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                                // channel here as we apparently can't relay through them anyway.
                                                self.pending_events.lock().unwrap().push(
                                                        events::Event::PaymentPathFailed {
+                                                               payment_id: Some(payment_id),
                                                                payment_hash: payment_hash.clone(),
                                                                rejected_by_dest: path.len() == 1,
                                                                network_update: None,
                                                                all_paths_failed,
                                                                path: path.clone(),
+                                                               short_channel_id: Some(path.first().unwrap().short_channel_id),
+                                                               retry,
 #[cfg(test)]
                                                                error_code: Some(*failure_code),
 #[cfg(test)]
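Both failure paths above now surface the failing hop via `short_channel_id`. A sketch of consuming it, where `avoid_channels` is an illustrative set kept by the caller, not an LDK API:

    if let Event::PaymentPathFailed { short_channel_id: Some(scid), .. } = event {
        // Steer the next route computation away from the hop that failed.
        avoid_channels.insert(scid);
    }
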
@@ -3179,20 +3502,77 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                } else { unreachable!(); }
        }
 
+       fn finalize_claims(&self, mut sources: Vec<HTLCSource>) {
+               let mut pending_events = self.pending_events.lock().unwrap();
+               for source in sources.drain(..) {
+                       if let HTLCSource::OutboundRoute { session_priv, payment_id, path, .. } = source {
+                               let mut session_priv_bytes = [0; 32];
+                               session_priv_bytes.copy_from_slice(&session_priv[..]);
+                               let mut outbounds = self.pending_outbound_payments.lock().unwrap();
+                               if let hash_map::Entry::Occupied(mut payment) = outbounds.entry(payment_id) {
+                                       assert!(payment.get().is_fulfilled());
+                                       if payment.get_mut().remove(&session_priv_bytes, None) {
+                                               pending_events.push(
+                                                       events::Event::PaymentPathSuccessful {
+                                                               payment_id,
+                                                               payment_hash: payment.get().payment_hash(),
+                                                               path,
+                                                       }
+                                               );
+                                       }
+                                       if payment.get().remaining_parts() == 0 {
+                                               payment.remove();
+                                       }
+                               }
+                       }
+               }
+       }
+
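A sketch of handling the per-path success event emitted by `finalize_claims`; note `payment_hash` is an `Option`, since `Legacy` payments deserialized from older versions may not have one recorded:

    if let Event::PaymentPathSuccessful { payment_id, payment_hash, path, .. } = event {
        // One event fires per fulfilled path of an MPP payment.
        println!("{} hop(s) succeeded for payment {:?} (hash {:?})",
            path.len(), payment_id, payment_hash);
    }
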
        fn claim_funds_internal(&self, mut channel_state_lock: MutexGuard<ChannelHolder<Signer>>, source: HTLCSource, payment_preimage: PaymentPreimage, forwarded_htlc_value_msat: Option<u64>, from_onchain: bool) {
                match source {
-                       HTLCSource::OutboundRoute { session_priv, mpp_id, .. } => {
+                       HTLCSource::OutboundRoute { session_priv, payment_id, path, .. } => {
                                mem::drop(channel_state_lock);
                                let mut session_priv_bytes = [0; 32];
                                session_priv_bytes.copy_from_slice(&session_priv[..]);
                                let mut outbounds = self.pending_outbound_payments.lock().unwrap();
-                               let found_payment = if let Some(mut sessions) = outbounds.remove(&mpp_id) {
-                                       sessions.remove(&session_priv_bytes)
-                               } else { false };
-                               if found_payment {
-                                       self.pending_events.lock().unwrap().push(
-                                               events::Event::PaymentSent { payment_preimage }
-                                       );
+                               if let hash_map::Entry::Occupied(mut payment) = outbounds.entry(payment_id) {
+                                       let mut pending_events = self.pending_events.lock().unwrap();
+                                       if !payment.get().is_fulfilled() {
+                                               let payment_hash = PaymentHash(Sha256::hash(&payment_preimage.0).into_inner());
+                                               let fee_paid_msat = payment.get().get_pending_fee_msat();
+                                               pending_events.push(
+                                                       events::Event::PaymentSent {
+                                                               payment_id: Some(payment_id),
+                                                               payment_preimage,
+                                                               payment_hash,
+                                                               fee_paid_msat,
+                                                       }
+                                               );
+                                               payment.get_mut().mark_fulfilled();
+                                       }
+
+                                       if from_onchain {
+                                               // We currently immediately remove HTLCs which were fulfilled on-chain.
+                                               // This could potentially lead to removing a pending payment too early,
+                                               // with a reorg of one block causing us to re-add the fulfilled payment on
+                                               // restart.
+                                               // TODO: We should have a second monitor event that informs us of payments
+                                               // irrevocably fulfilled.
+                                               if payment.get_mut().remove(&session_priv_bytes, Some(&path)) {
+                                                       let payment_hash = Some(PaymentHash(Sha256::hash(&payment_preimage.0).into_inner()));
+                                                       pending_events.push(
+                                                               events::Event::PaymentPathSuccessful {
+                                                                       payment_id,
+                                                                       payment_hash,
+                                                                       path,
+                                                               }
+                                                       );
+                                               }
+
+                                               if payment.get().remaining_parts() == 0 {
+                                                       payment.remove();
+                                               }
+                                       }
                                } else {
                                        log_trace!(self.logger, "Received duplicative fulfill for HTLC with payment_preimage {}", log_bytes!(payment_preimage.0));
                                }
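A sketch of the enriched `PaymentSent` event produced above; `fee_paid_msat` comes from `get_pending_fee_msat` and may be `None` for payments whose parts were sent by older versions that did not track per-path fees:

    if let Event::PaymentSent { payment_id, payment_hash, fee_paid_msat, .. } = event {
        println!("payment {:?} (id {:?}) succeeded, total fee: {:?} msat",
            payment_hash, payment_id, fee_paid_msat);
    }
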
@@ -3254,31 +3634,11 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                self.our_network_pubkey.clone()
        }
 
-       /// Restores a single, given channel to normal operation after a
-       /// ChannelMonitorUpdateErr::TemporaryFailure was returned from a channel monitor update
-       /// operation.
-       ///
-       /// All ChannelMonitor updates up to and including highest_applied_update_id must have been
-       /// fully committed in every copy of the given channels' ChannelMonitors.
-       ///
-       /// Note that there is no effect to calling with a highest_applied_update_id other than the
-       /// current latest ChannelMonitorUpdate and one call to this function after multiple
-       /// ChannelMonitorUpdateErr::TemporaryFailures is fine. The highest_applied_update_id field
-       /// exists largely only to prevent races between this and concurrent update_monitor calls.
-       ///
-       /// Thus, the anticipated use is, at a high level:
-       ///  1) You register a chain::Watch with this ChannelManager,
-       ///  2) it stores each update to disk, and begins updating any remote (eg watchtower) copies of
-       ///     said ChannelMonitors as it can, returning ChannelMonitorUpdateErr::TemporaryFailures
-       ///     any time it cannot do so instantly,
-       ///  3) update(s) are applied to each remote copy of a ChannelMonitor,
-       ///  4) once all remote copies are updated, you call this function with the update_id that
-       ///     completed, and once it is the latest the Channel will be re-enabled.
-       pub fn channel_monitor_updated(&self, funding_txo: &OutPoint, highest_applied_update_id: u64) {
+       fn channel_monitor_updated(&self, funding_txo: &OutPoint, highest_applied_update_id: u64) {
                let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
 
                let chan_restoration_res;
-               let mut pending_failures = {
+               let (mut pending_failures, finalized_claims) = {
                        let mut channel_lock = self.channel_state.lock().unwrap();
                        let channel_state = &mut *channel_lock;
                        let mut channel = match channel_state.by_id.entry(funding_txo.to_channel_id()) {
@@ -3289,8 +3649,8 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                return;
                        }
 
-                       let (raa, commitment_update, order, pending_forwards, pending_failures, funding_broadcastable, funding_locked) = channel.get_mut().monitor_updating_restored(&self.logger);
-                       let channel_update = if funding_locked.is_some() && channel.get().is_usable() && !channel.get().should_announce() {
+                       let updates = channel.get_mut().monitor_updating_restored(&self.logger);
+                       let channel_update = if updates.funding_locked.is_some() && channel.get().is_usable() && !channel.get().should_announce() {
                                // We only send a channel_update in the case where we are just now sending a
                                // funding_locked and the channel is in a usable state. Further, we rely on the
                                // normal announcement_signatures process to send a channel_update for public
@@ -3300,13 +3660,14 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                        msg: self.get_channel_update_for_unicast(channel.get()).unwrap(),
                                })
                        } else { None };
-                       chan_restoration_res = handle_chan_restoration_locked!(self, channel_lock, channel_state, channel, raa, commitment_update, order, None, pending_forwards, funding_broadcastable, funding_locked);
+                       chan_restoration_res = handle_chan_restoration_locked!(self, channel_lock, channel_state, channel, updates.raa, updates.commitment_update, updates.order, None, updates.accepted_htlcs, updates.funding_broadcastable, updates.funding_locked);
                        if let Some(upd) = channel_update {
                                channel_state.pending_msg_events.push(upd);
                        }
-                       pending_failures
+                       (updates.failed_htlcs, updates.finalized_claimed_htlcs)
                };
                post_handle_chan_restoration!(self, chan_restoration_res);
+               self.finalize_claims(finalized_claims);
                for failure in pending_failures.drain(..) {
                        self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), failure.0, &failure.1, failure.2);
                }
@@ -3317,7 +3678,12 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                        return Err(MsgHandleErrInternal::send_err_msg_no_close("Unknown genesis block hash".to_owned(), msg.temporary_channel_id.clone()));
                }
 
-               let channel = Channel::new_from_req(&self.fee_estimator, &self.keys_manager, counterparty_node_id.clone(), &their_features, msg, 0, &self.default_configuration)
+               if !self.default_configuration.accept_inbound_channels {
+                       return Err(MsgHandleErrInternal::send_err_msg_no_close("No inbound channels accepted".to_owned(), msg.temporary_channel_id.clone()));
+               }
+
+               let channel = Channel::new_from_req(&self.fee_estimator, &self.keys_manager, counterparty_node_id.clone(),
+                               &their_features, msg, 0, &self.default_configuration, self.best_block.read().unwrap().height(), &self.logger)
                        .map_err(|e| MsgHandleErrInternal::from_chan_no_close(e, msg.temporary_channel_id))?;
                let mut channel_state_lock = self.channel_state.lock().unwrap();
                let channel_state = &mut *channel_state_lock;
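The new early return is gated on a config flag. A minimal sketch of opting out of inbound channels entirely, assuming `UserConfig` is the type behind `default_configuration`:

    let mut config = UserConfig::default();
    // With this set, any received open_channel is answered with the
    // "No inbound channels accepted" error before any validation runs.
    config.accept_inbound_channels = false;
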
@@ -3395,7 +3761,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                        // hasn't persisted to disk yet - we can't lose money on a transaction that we haven't
                                        // accepted payment from yet. We do, however, need to wait to send our funding_locked
                                        // until we have persisted our monitor.
-                                       chan.monitor_update_failed(false, false, Vec::new(), Vec::new());
+                                       chan.monitor_update_failed(false, false, Vec::new(), Vec::new(), Vec::new());
                                },
                        }
                }
@@ -3431,7 +3797,16 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                                Err(e) => try_chan_entry!(self, Err(e), channel_state, chan),
                                        };
                                        if let Err(e) = self.chain_monitor.watch_channel(chan.get().get_funding_txo().unwrap(), monitor) {
-                                               return_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::RevokeAndACKFirst, false, false);
+                                               let mut res = handle_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::RevokeAndACKFirst, false, false);
+                                               if let Err(MsgHandleErrInternal { ref mut shutdown_finish, .. }) = res {
+                                                       // We weren't able to watch the channel to begin with, so no updates should be made on
+                                                       // it. Previously, full_stack_target found an (unreachable) panic when the
+                                                       // monitor update contained within `shutdown_finish` was applied.
+                                                       if let Some((ref mut shutdown_finish, _)) = shutdown_finish {
+                                                               shutdown_finish.0.take();
+                                                       }
+                                               }
+                                               return res
                                        }
                                        funding_tx
                                },
@@ -3504,7 +3879,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                        if let Some(monitor_update) = monitor_update {
                                                if let Err(e) = self.chain_monitor.update_channel(chan_entry.get().get_funding_txo().unwrap(), monitor_update) {
                                                        let (result, is_permanent) =
-                                                               handle_monitor_err!(self, e, channel_state.short_to_id, chan_entry.get_mut(), RAACommitmentOrder::CommitmentFirst, false, false, Vec::new(), Vec::new(), chan_entry.key());
+                                                               handle_monitor_err!(self, e, channel_state.short_to_id, chan_entry.get_mut(), RAACommitmentOrder::CommitmentFirst, false, false, Vec::new(), Vec::new(), Vec::new(), chan_entry.key());
                                                        if is_permanent {
                                                                remove_channel!(channel_state, chan_entry);
                                                                break result;
@@ -3574,7 +3949,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                        msg: update
                                });
                        }
-                       self.pending_events.lock().unwrap().push(events::Event::ChannelClosed { channel_id: msg.channel_id,  reason: ClosureReason::CooperativeClosure });
+                       self.issue_channel_close_events(&chan, ClosureReason::CooperativeClosure);
                }
                Ok(())
        }
@@ -3791,37 +4166,51 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                                break Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id));
                                        }
                                        let was_frozen_for_monitor = chan.get().is_awaiting_monitor_update();
-                                       let (commitment_update, pending_forwards, pending_failures, monitor_update, htlcs_to_fail_in) =
-                                               break_chan_entry!(self, chan.get_mut().revoke_and_ack(&msg, &self.logger), channel_state, chan);
-                                       htlcs_to_fail = htlcs_to_fail_in;
-                                       if let Err(e) = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), monitor_update) {
+                                       let raa_updates = break_chan_entry!(self,
+                                               chan.get_mut().revoke_and_ack(&msg, &self.logger), channel_state, chan);
+                                       htlcs_to_fail = raa_updates.holding_cell_failed_htlcs;
+                                       if let Err(e) = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), raa_updates.monitor_update) {
                                                if was_frozen_for_monitor {
-                                                       assert!(commitment_update.is_none() && pending_forwards.is_empty() && pending_failures.is_empty());
+                                                       assert!(raa_updates.commitment_update.is_none());
+                                                       assert!(raa_updates.accepted_htlcs.is_empty());
+                                                       assert!(raa_updates.failed_htlcs.is_empty());
+                                                       assert!(raa_updates.finalized_claimed_htlcs.is_empty());
                                                        break Err(MsgHandleErrInternal::ignore_no_close("Previous monitor update failure prevented responses to RAA".to_owned()));
                                                } else {
-                                                       if let Err(e) = handle_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::CommitmentFirst, false, commitment_update.is_some(), pending_forwards, pending_failures) {
+                                                       if let Err(e) = handle_monitor_err!(self, e, channel_state, chan,
+                                                                       RAACommitmentOrder::CommitmentFirst, false,
+                                                                       raa_updates.commitment_update.is_some(),
+                                                                       raa_updates.accepted_htlcs, raa_updates.failed_htlcs,
+                                                                       raa_updates.finalized_claimed_htlcs) {
                                                                break Err(e);
                                                        } else { unreachable!(); }
                                                }
                                        }
-                                       if let Some(updates) = commitment_update {
+                                       if let Some(updates) = raa_updates.commitment_update {
                                                channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
                                                        node_id: counterparty_node_id.clone(),
                                                        updates,
                                                });
                                        }
-                                       break Ok((pending_forwards, pending_failures, chan.get().get_short_channel_id().expect("RAA should only work on a short-id-available channel"), chan.get().get_funding_txo().unwrap()))
+                                       break Ok((raa_updates.accepted_htlcs, raa_updates.failed_htlcs,
+                                                       raa_updates.finalized_claimed_htlcs,
+                                                       chan.get().get_short_channel_id()
+                                                               .expect("RAA should only work on a short-id-available channel"),
+                                                       chan.get().get_funding_txo().unwrap()))
                                },
                                hash_map::Entry::Vacant(_) => break Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id))
                        }
                };
                self.fail_holding_cell_htlcs(htlcs_to_fail, msg.channel_id);
                match res {
-                       Ok((pending_forwards, mut pending_failures, short_channel_id, channel_outpoint)) => {
+                       Ok((pending_forwards, mut pending_failures, finalized_claim_htlcs,
+                               short_channel_id, channel_outpoint)) =>
+                       {
                                for failure in pending_failures.drain(..) {
                                        self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), failure.0, &failure.1, failure.2);
                                }
                                self.forward_htlcs(&mut [(short_channel_id, channel_outpoint, pending_forwards)]);
+                               self.finalize_claims(finalized_claim_htlcs);
                                Ok(())
                        },
                        Err(e) => Err(e)
@@ -3970,7 +4359,8 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                                self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_update.source, &htlc_update.payment_hash, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() });
                                        }
                                },
-                               MonitorEvent::CommitmentTxConfirmed(funding_outpoint) => {
+                               MonitorEvent::CommitmentTxConfirmed(funding_outpoint) |
+                               MonitorEvent::UpdateFailed(funding_outpoint) => {
                                        let mut channel_lock = self.channel_state.lock().unwrap();
                                        let channel_state = &mut *channel_lock;
                                        let by_id = &mut channel_state.by_id;
@@ -3986,7 +4376,12 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                                                msg: update
                                                        });
                                                }
-                                               self.pending_events.lock().unwrap().push(events::Event::ChannelClosed { channel_id: chan.channel_id(),  reason: ClosureReason::CommitmentTxConfirmed });
+                                               let reason = if let MonitorEvent::UpdateFailed(_) = monitor_event {
+                                                       ClosureReason::ProcessingError { err: "Failed to persist ChannelMonitor update during chain sync".to_string() }
+                                               } else {
+                                                       ClosureReason::CommitmentTxConfirmed
+                                               };
+                                               self.issue_channel_close_events(&chan, reason);
                                                pending_msg_events.push(events::MessageSendEvent::HandleError {
                                                        node_id: chan.get_counterparty_node_id(),
                                                        action: msgs::ErrorAction::SendErrorMessage {
@@ -3995,6 +4390,9 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                                });
                                        }
                                },
+                               MonitorEvent::UpdateCompleted { funding_txo, monitor_update_id } => {
+                                       self.channel_monitor_updated(&funding_txo, monitor_update_id);
+                               },
                        }
                }
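With `channel_monitor_updated` now private, an async persister signals completion through the new `MonitorEvent::UpdateCompleted` variant rather than calling it directly. A minimal sketch, assuming a custom `chain::Watch` implementation that queues monitor events for the `ChannelManager` to drain:

    // Called once every copy of the monitor has persisted `update_id`;
    // process_pending_monitor_events() then routes the event back into
    // channel_monitor_updated() via the match arm above.
    fn on_persist_complete(queue: &Mutex<Vec<MonitorEvent>>, funding_txo: OutPoint, update_id: u64) {
        queue.lock().unwrap().push(MonitorEvent::UpdateCompleted {
            funding_txo,
            monitor_update_id: update_id,
        });
    }
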
 
@@ -4005,6 +4403,14 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                has_pending_monitor_events
        }
 
+       /// In chanmon_consistency_target, we'd like to be able to restore monitor updating without
+       /// handling all pending events (specifically, without triggering PendingHTLCsForwardable
+       /// handling). Thus, we expose monitor update events as a separate process method here.
+       #[cfg(feature = "fuzztarget")]
+       pub fn process_monitor_events(&self) {
+               self.process_pending_monitor_events();
+       }
+
        /// Check the holding cell in each channel and free any pending HTLCs in them if possible.
        /// Returns whether there were any updates such as if pending HTLCs were freed or a monitor
        /// update was applied.
@@ -4033,7 +4439,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                                if let Some((commitment_update, monitor_update)) = commitment_opt {
                                                        if let Err(e) = self.chain_monitor.update_channel(chan.get_funding_txo().unwrap(), monitor_update) {
                                                                has_monitor_update = true;
-                                                               let (res, close_channel) = handle_monitor_err!(self, e, short_to_id, chan, RAACommitmentOrder::CommitmentFirst, false, true, Vec::new(), Vec::new(), channel_id);
+                                                               let (res, close_channel) = handle_monitor_err!(self, e, short_to_id, chan, RAACommitmentOrder::CommitmentFirst, false, true, Vec::new(), Vec::new(), Vec::new(), channel_id);
                                                                handle_errors.push((chan.get_counterparty_node_id(), res));
                                                                if close_channel { return false; }
                                                        } else {
@@ -4102,12 +4508,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                                                });
                                                        }
 
-                                                       if let Ok(mut pending_events_lock) = self.pending_events.lock() {
-                                                               pending_events_lock.push(events::Event::ChannelClosed {
-                                                                       channel_id: *channel_id,
-                                                                       reason: ClosureReason::CooperativeClosure
-                                                               });
-                                                       }
+                                                       self.issue_channel_close_events(chan, ClosureReason::CooperativeClosure);
 
                                                        log_info!(self.logger, "Broadcasting {}", log_tx!(tx));
                                                        self.tx_broadcaster.broadcast_transaction(&tx);
@@ -4154,7 +4555,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                }
        }
 
-       fn set_payment_hash_secret_map(&self, payment_hash: PaymentHash, payment_preimage: Option<PaymentPreimage>, min_value_msat: Option<u64>, invoice_expiry_delta_secs: u32, user_payment_id: u64) -> Result<PaymentSecret, APIError> {
+       fn set_payment_hash_secret_map(&self, payment_hash: PaymentHash, payment_preimage: Option<PaymentPreimage>, min_value_msat: Option<u64>, invoice_expiry_delta_secs: u32) -> Result<PaymentSecret, APIError> {
                assert!(invoice_expiry_delta_secs <= 60*60*24*365); // Sadly bitcoin timestamps are u32s, so panic before 2106
 
                let payment_secret = PaymentSecret(self.keys_manager.get_secure_random_bytes());
@@ -4164,7 +4565,8 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                match payment_secrets.entry(payment_hash) {
                        hash_map::Entry::Vacant(e) => {
                                e.insert(PendingInboundPayment {
-                                       payment_secret, min_value_msat, user_payment_id, payment_preimage,
+                                       payment_secret, min_value_msat, payment_preimage,
+                                       user_payment_id: 0, // For compatibility with version 0.0.103 and earlier
                                        // We assume that highest_seen_timestamp is pretty close to the current time -
                                        // it's updated when we receive a new block with the maximum time we've seen in
                                        // a header. It should never be more than two hours in the future.
@@ -4196,12 +4598,12 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
        /// [`PaymentReceived`]: events::Event::PaymentReceived
        /// [`PaymentReceived::payment_preimage`]: events::Event::PaymentReceived::payment_preimage
        /// [`create_inbound_payment_for_hash`]: Self::create_inbound_payment_for_hash
-       pub fn create_inbound_payment(&self, min_value_msat: Option<u64>, invoice_expiry_delta_secs: u32, user_payment_id: u64) -> (PaymentHash, PaymentSecret) {
+       pub fn create_inbound_payment(&self, min_value_msat: Option<u64>, invoice_expiry_delta_secs: u32) -> (PaymentHash, PaymentSecret) {
                let payment_preimage = PaymentPreimage(self.keys_manager.get_secure_random_bytes());
                let payment_hash = PaymentHash(Sha256::hash(&payment_preimage.0).into_inner());
 
                (payment_hash,
-                       self.set_payment_hash_secret_map(payment_hash, Some(payment_preimage), min_value_msat, invoice_expiry_delta_secs, user_payment_id)
+                       self.set_payment_hash_secret_map(payment_hash, Some(payment_preimage), min_value_msat, invoice_expiry_delta_secs)
                                .expect("RNG Generated Duplicate PaymentHash"))
        }
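With `user_payment_id` gone, callers correlate invoices by `PaymentHash` alone. A sketch of the simplified call (amounts illustrative):

    // Invoice payable with at least 10,000 msat, valid for one hour.
    let (payment_hash, payment_secret) =
        channel_manager.create_inbound_payment(Some(10_000), 3600);
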
 
@@ -4215,12 +4617,6 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
        /// The [`PaymentHash`] (and corresponding [`PaymentPreimage`]) must be globally unique. This
        /// method may return an Err if another payment with the same payment_hash is still pending.
        ///
-       /// `user_payment_id` will be provided back in [`PaymentPurpose::InvoicePayment::user_payment_id`] events to
-       /// allow tracking of which events correspond with which calls to this and
-       /// [`create_inbound_payment`]. `user_payment_id` has no meaning inside of LDK, it is simply
-       /// copied to events and otherwise ignored. It may be used to correlate PaymentReceived events
-       /// with invoice metadata stored elsewhere.
-       ///
        /// `min_value_msat` should be set if the invoice being generated contains a value. Any payment
        /// received for the returned [`PaymentHash`] will be required to be at least `min_value_msat`
        /// before a [`PaymentReceived`] event will be generated, ensuring that we do not provide the
@@ -4249,9 +4645,8 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
        ///
        /// [`create_inbound_payment`]: Self::create_inbound_payment
        /// [`PaymentReceived`]: events::Event::PaymentReceived
-       /// [`PaymentPurpose::InvoicePayment::user_payment_id`]: events::PaymentPurpose::InvoicePayment::user_payment_id
-       pub fn create_inbound_payment_for_hash(&self, payment_hash: PaymentHash, min_value_msat: Option<u64>, invoice_expiry_delta_secs: u32, user_payment_id: u64) -> Result<PaymentSecret, APIError> {
-               self.set_payment_hash_secret_map(payment_hash, None, min_value_msat, invoice_expiry_delta_secs, user_payment_id)
+       pub fn create_inbound_payment_for_hash(&self, payment_hash: PaymentHash, min_value_msat: Option<u64>, invoice_expiry_delta_secs: u32) -> Result<PaymentSecret, APIError> {
+               self.set_payment_hash_secret_map(payment_hash, None, min_value_msat, invoice_expiry_delta_secs)
        }
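And the hash-supplied variant, likewise shorn of `user_payment_id`; here `payment_hash` is assumed to commit to a preimage the caller stores externally, and the surrounding function is assumed to propagate `APIError`:

    // Errs if another pending payment already registered this hash.
    let payment_secret =
        channel_manager.create_inbound_payment_for_hash(payment_hash, None, 7200)?;
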
 
        #[cfg(any(test, feature = "fuzztarget", feature = "_test_utils"))]
@@ -4261,6 +4656,16 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                self.process_pending_events(&event_handler);
                events.into_inner()
        }
+
+       #[cfg(test)]
+       pub fn has_pending_payments(&self) -> bool {
+               !self.pending_outbound_payments.lock().unwrap().is_empty()
+       }
+
+       #[cfg(test)]
+       pub fn clear_pending_payments(&self) {
+               self.pending_outbound_payments.lock().unwrap().clear()
+       }
 }
 
 impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> MessageSendEventsProvider for ChannelManager<Signer, M, T, K, F, L>
@@ -4436,6 +4841,16 @@ where
                payment_secrets.retain(|_, inbound_payment| {
                        inbound_payment.expiry_time > header.time as u64
                });
+
+               let mut outbounds = self.pending_outbound_payments.lock().unwrap();
+               outbounds.retain(|_, payment| {
+                       const PAYMENT_EXPIRY_BLOCKS: u32 = 3;
+                       if payment.remaining_parts() != 0 { return true }
+                       if let PendingOutboundPayment::Retryable { starting_block_height, .. } = payment {
+                               return *starting_block_height + PAYMENT_EXPIRY_BLOCKS > height
+                       }
+                       true
+               });
        }
 
        fn get_relevant_txids(&self) -> Vec<Txid> {
@@ -4472,7 +4887,7 @@ where
        /// Calls a function which handles an on-chain event (blocks dis/connected, transactions
        /// un/confirmed, etc) on each channel, handling any resulting errors or messages generated by
        /// the function.
-       fn do_chain_event<FN: Fn(&mut Channel<Signer>) -> Result<(Option<msgs::FundingLocked>, Vec<(HTLCSource, PaymentHash)>), msgs::ErrorMessage>>
+       fn do_chain_event<FN: Fn(&mut Channel<Signer>) -> Result<(Option<msgs::FundingLocked>, Vec<(HTLCSource, PaymentHash)>), ClosureReason>>
                        (&self, height_opt: Option<u32>, f: FN) {
                // Note that we MUST NOT end up calling methods on self.chain_monitor here - we're called
                // during initialization prior to the chain_monitor being fully configured in some cases.
@@ -4517,7 +4932,7 @@ where
                                                }
                                                short_to_id.insert(channel.get_short_channel_id().unwrap(), channel.channel_id());
                                        }
-                               } else if let Err(e) = res {
+                               } else if let Err(reason) = res {
                                        if let Some(short_id) = channel.get_short_channel_id() {
                                                short_to_id.remove(&short_id);
                                        }
@@ -4529,10 +4944,14 @@ where
                                                        msg: update
                                                });
                                        }
-                                       self.pending_events.lock().unwrap().push(events::Event::ChannelClosed { channel_id: channel.channel_id(),  reason: ClosureReason::CommitmentTxConfirmed });
+                                       let reason_message = format!("{}", reason);
+                                       self.issue_channel_close_events(channel, reason);
                                        pending_msg_events.push(events::MessageSendEvent::HandleError {
                                                node_id: channel.get_counterparty_node_id(),
-                                               action: msgs::ErrorAction::SendErrorMessage { msg: e },
+                                               action: msgs::ErrorAction::SendErrorMessage { msg: msgs::ErrorMessage {
+                                                       channel_id: channel.channel_id(),
+                                                       data: reason_message,
+                                               } },
                                        });
                                        return false;
                                }
@@ -4720,7 +5139,7 @@ impl<Signer: Sign, M: Deref , T: Deref , K: Deref , F: Deref , L: Deref >
                                                                msg: update
                                                        });
                                                }
-                                               self.pending_events.lock().unwrap().push(events::Event::ChannelClosed { channel_id: chan.channel_id(),  reason: ClosureReason::DisconnectedPeer });
+                                               self.issue_channel_close_events(chan, ClosureReason::DisconnectedPeer);
                                                false
                                        } else {
                                                true
@@ -4735,7 +5154,7 @@ impl<Signer: Sign, M: Deref , T: Deref , K: Deref , F: Deref , L: Deref >
                                                        if let Some(short_id) = chan.get_short_channel_id() {
                                                                short_to_id.remove(&short_id);
                                                        }
-                                                       self.pending_events.lock().unwrap().push(events::Event::ChannelClosed { channel_id: chan.channel_id(),  reason: ClosureReason::DisconnectedPeer });
+                                                       self.issue_channel_close_events(chan, ClosureReason::DisconnectedPeer);
                                                        return false;
                                                } else {
                                                        no_channels_remain = false;
@@ -5079,23 +5498,29 @@ impl Readable for HTLCSource {
                                let mut session_priv: ::util::ser::OptionDeserWrapper<SecretKey> = ::util::ser::OptionDeserWrapper(None);
                                let mut first_hop_htlc_msat: u64 = 0;
                                let mut path = Some(Vec::new());
-                               let mut mpp_id = None;
+                               let mut payment_id = None;
+                               let mut payment_secret = None;
+                               let mut payee = None;
                                read_tlv_fields!(reader, {
                                        (0, session_priv, required),
-                                       (1, mpp_id, option),
+                                       (1, payment_id, option),
                                        (2, first_hop_htlc_msat, required),
+                                       (3, payment_secret, option),
                                        (4, path, vec_type),
+                                       (5, payee, option),
                                });
-                               if mpp_id.is_none() {
-                                       // For backwards compat, if there was no mpp_id written, use the session_priv bytes
+                               if payment_id.is_none() {
+                                       // For backwards compat, if there was no payment_id written, use the session_priv bytes
                                        // instead.
-                                       mpp_id = Some(MppId(*session_priv.0.unwrap().as_ref()));
+                                       payment_id = Some(PaymentId(*session_priv.0.unwrap().as_ref()));
                                }
                                Ok(HTLCSource::OutboundRoute {
                                        session_priv: session_priv.0.unwrap(),
                                        first_hop_htlc_msat: first_hop_htlc_msat,
                                        path: path.unwrap(),
-                                       mpp_id: mpp_id.unwrap(),
+                                       payment_id: payment_id.unwrap(),
+                                       payment_secret,
+                                       payee,
                                })
                        }
                        1 => Ok(HTLCSource::PreviousHopData(Readable::read(reader)?)),
@@ -5107,14 +5532,16 @@ impl Readable for HTLCSource {
 impl Writeable for HTLCSource {
        fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ::io::Error> {
                match self {
-                       HTLCSource::OutboundRoute { ref session_priv, ref first_hop_htlc_msat, ref path, mpp_id } => {
+                       HTLCSource::OutboundRoute { ref session_priv, ref first_hop_htlc_msat, ref path, payment_id, payment_secret, payee } => {
                                0u8.write(writer)?;
-                               let mpp_id_opt = Some(mpp_id);
+                               let payment_id_opt = Some(payment_id);
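+                               // Wrapping the id in an Option keeps it at the odd TLV type 1, so
+                               // readers predating the field skip it and fall back to the
+                               // session_priv bytes (see `Readable for HTLCSource` above).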
                                write_tlv_fields!(writer, {
                                        (0, session_priv, required),
-                                       (1, mpp_id_opt, option),
+                                       (1, payment_id_opt, option),
                                        (2, first_hop_htlc_msat, required),
+                                       (3, payment_secret, option),
                                        (4, path, vec_type),
+                                       (5, payee, option),
                                 });
                        }
                        HTLCSource::PreviousHopData(ref field) => {
@@ -5157,6 +5584,25 @@ impl_writeable_tlv_based!(PendingInboundPayment, {
        (8, min_value_msat, required),
 });
 
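+// `PendingOutboundPayment` is serialized via the upgradable-enum TLV macro: each variant is
+// tagged with the number listed below and carries its own TLV stream. Unknown odd variant
+// tags are intended to deserialize as `None` rather than an error, so state written by a
+// newer version can be skipped on downgrade.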
+impl_writeable_tlv_based_enum_upgradable!(PendingOutboundPayment,
+       (0, Legacy) => {
+               (0, session_privs, required),
+       },
+       (1, Fulfilled) => {
+               (0, session_privs, required),
+               (1, payment_hash, option),
+       },
+       (2, Retryable) => {
+               (0, session_privs, required),
+               (1, pending_fee_msat, option),
+               (2, payment_hash, required),
+               (4, payment_secret, option),
+               (6, total_msat, required),
+               (8, pending_amt_msat, required),
+               (10, starting_block_height, required),
+       },
+);
+
 impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> Writeable for ChannelManager<Signer, M, T, K, F, L>
        where M::Target: chain::Watch<Signer>,
         T::Target: BroadcasterInterface,
@@ -5247,18 +5693,38 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> Writeable f
                let pending_outbound_payments = self.pending_outbound_payments.lock().unwrap();
                // For backwards compat, write the session privs and their total length.
                let mut num_pending_outbounds_compat: u64 = 0;
-               for (_, outbounds) in pending_outbound_payments.iter() {
-                       num_pending_outbounds_compat += outbounds.len() as u64;
+               for (_, outbound) in pending_outbound_payments.iter() {
+                       if !outbound.is_fulfilled() {
+                               num_pending_outbounds_compat += outbound.remaining_parts() as u64;
+                       }
                }
                num_pending_outbounds_compat.write(writer)?;
-               for (_, outbounds) in pending_outbound_payments.iter() {
-                       for outbound in outbounds.iter() {
-                               outbound.write(writer)?;
+               for (_, outbound) in pending_outbound_payments.iter() {
+                       match outbound {
+                               PendingOutboundPayment::Legacy { session_privs } |
+                               PendingOutboundPayment::Retryable { session_privs, .. } => {
+                                       for session_priv in session_privs.iter() {
+                                               session_priv.write(writer)?;
+                                       }
+                               }
+                               PendingOutboundPayment::Fulfilled { .. } => {},
                        }
                }
 
+               // Encode without retry info for 0.0.101 compatibility.
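+               // Both fields below use odd TLV types, so a 0.0.101 reader takes the type-1
+               // session-priv sets it understands and skips the unknown type-3 record, while
+               // 0.0.102+ readers prefer the retry-capable type-3 map (see the read side).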
+               let mut pending_outbound_payments_no_retry: HashMap<PaymentId, HashSet<[u8; 32]>> = HashMap::new();
+               for (id, outbound) in pending_outbound_payments.iter() {
+                       match outbound {
+                               PendingOutboundPayment::Legacy { session_privs } |
+                               PendingOutboundPayment::Retryable { session_privs, .. } => {
+                                       pending_outbound_payments_no_retry.insert(*id, session_privs.clone());
+                               },
+                               _ => {},
+                       }
+               }
                write_tlv_fields!(writer, {
-                       (1, pending_outbound_payments, required),
+                       (1, pending_outbound_payments_no_retry, required),
+                       (3, pending_outbound_payments, required),
                });
 
                Ok(())
@@ -5269,20 +5735,25 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> Writeable f
 ///
 /// At a high-level, the process for deserializing a ChannelManager and resuming normal operation
 /// is:
-/// 1) Deserialize all stored ChannelMonitors.
-/// 2) Deserialize the ChannelManager by filling in this struct and calling:
-///    <(BlockHash, ChannelManager)>::read(reader, args)
-///    This may result in closing some Channels if the ChannelMonitor is newer than the stored
-///    ChannelManager state to ensure no loss of funds. Thus, transactions may be broadcasted.
-/// 3) If you are not fetching full blocks, register all relevant ChannelMonitor outpoints the same
-///    way you would handle a `chain::Filter` call using ChannelMonitor::get_outputs_to_watch() and
-///    ChannelMonitor::get_funding_txo().
-/// 4) Reconnect blocks on your ChannelMonitors.
-/// 5) Disconnect/connect blocks on the ChannelManager.
-/// 6) Move the ChannelMonitors into your local chain::Watch.
+/// 1) Deserialize all stored [`ChannelMonitor`]s.
+/// 2) Deserialize the [`ChannelManager`] by filling in this struct and calling:
+///    `<(BlockHash, ChannelManager)>::read(reader, args)`
+///    This may result in closing some channels if the [`ChannelMonitor`] is newer than the stored
+///    [`ChannelManager`] state to ensure no loss of funds. Thus, transactions may be broadcasted.
+/// 3) If you are not fetching full blocks, register all relevant [`ChannelMonitor`] outpoints the
+///    same way you would handle a [`chain::Filter`] call using
+///    [`ChannelMonitor::get_outputs_to_watch`] and [`ChannelMonitor::get_funding_txo`].
+/// 4) Reconnect blocks on your [`ChannelMonitor`]s.
+/// 5) Disconnect/connect blocks on the [`ChannelManager`].
+/// 6) Re-persist the [`ChannelMonitor`]s to ensure the latest state is on disk.
+///    Note that if you're using a [`ChainMonitor`] for your [`chain::Watch`] implementation, you
+///    will likely accomplish this as a side-effect of calling [`chain::Watch::watch_channel`] in
+///    the next step.
+/// 7) Move the [`ChannelMonitor`]s into your local [`chain::Watch`]. If you're using a
+///    [`ChainMonitor`], this is done by calling [`chain::Watch::watch_channel`].
 ///
-/// Note that the ordering of #4-6 is not of importance, however all three must occur before you
-/// call any other methods on the newly-deserialized ChannelManager.
+/// Note that the ordering of steps #4-7 is not important; however, all four must occur before you
+/// call any other methods on the newly-deserialized [`ChannelManager`].
 ///
 /// Note that because some channels may be closed during deserialization, it is critical that you
 /// always deserialize only the latest version of a ChannelManager and ChannelMonitors available to
@@ -5290,6 +5761,8 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> Writeable f
 /// broadcast), and then later deserialize a newer version of the same ChannelManager (which will
 /// not force-close the same channels but consider them live), you may end up revoking a state for
 /// which you've already broadcasted the transaction.
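+///
+/// A rough sketch of this flow, assuming a [`ChainMonitor`]-backed [`chain::Watch`]; the
+/// storage, reader, and block-replay helpers here are illustrative placeholders, not LDK APIs:
+///
+/// ```ignore
+/// // 1) Deserialize all stored ChannelMonitors (hypothetical storage helper).
+/// let mut monitors: Vec<ChannelMonitor<InMemorySigner>> = read_monitors_from_disk()?;
+///
+/// // 2) Deserialize the ChannelManager, lending it mutable references to the monitors.
+/// let read_args = ChannelManagerReadArgs::new(
+///     &keys_manager, &fee_estimator, &chain_monitor, &tx_broadcaster, &logger,
+///     UserConfig::default(), monitors.iter_mut().collect());
+/// let (best_block_hash, manager) =
+///     <(BlockHash, ChannelManager<InMemorySigner, _, _, _, _, _>)>::read(&mut reader, read_args)?;
+///
+/// // 3)-5) Register monitor outpoints with your chain::Filter, then replay any blocks the
+/// // monitors and the manager missed while offline (hypothetical helper).
+/// replay_missed_blocks(&mut monitors, &manager, best_block_hash)?;
+///
+/// // 6)-7) Persist each monitor and hand it to the chain::Watch; with a ChainMonitor,
+/// // watch_channel re-persists the monitor as a side-effect.
+/// for monitor in monitors.drain(..) {
+///     let funding_txo = monitor.get_funding_txo().0;
+///     chain_monitor.watch_channel(funding_txo, monitor).unwrap();
+/// }
+/// ```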
+///
+/// [`ChainMonitor`]: crate::chain::chainmonitor::ChainMonitor
 pub struct ChannelManagerReadArgs<'a, Signer: 'a + Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
        where M::Target: chain::Watch<Signer>,
         T::Target: BroadcasterInterface,
@@ -5398,7 +5871,7 @@ impl<'a, Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
                let mut short_to_id = HashMap::with_capacity(cmp::min(channel_count as usize, 128));
                let mut channel_closures = Vec::new();
                for _ in 0..channel_count {
-                       let mut channel: Channel<Signer> = Channel::read(reader, &args.keys_manager)?;
+                       let mut channel: Channel<Signer> = Channel::read(reader, (&args.keys_manager, best_block_height))?;
                        let funding_txo = channel.get_funding_txo().ok_or(DecodeError::InvalidValue)?;
                        funding_txo_set.insert(funding_txo.clone());
                        if let Some(ref mut monitor) = args.channel_monitors.get_mut(&funding_txo) {
@@ -5429,9 +5902,11 @@ impl<'a, Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
                                        monitor.broadcast_latest_holder_commitment_txn(&args.tx_broadcaster, &args.logger);
                                        channel_closures.push(events::Event::ChannelClosed {
                                                channel_id: channel.channel_id(),
+                                               user_channel_id: channel.get_user_id(),
                                                reason: ClosureReason::OutdatedChannelManager
                                        });
                                } else {
+                                       log_info!(args.logger, "Successfully loaded channel {}", log_bytes!(channel.channel_id()));
                                        if let Some(short_channel_id) = channel.get_short_channel_id() {
                                                short_to_id.insert(short_channel_id, channel.channel_id());
                                        }
@@ -5449,6 +5924,7 @@ impl<'a, Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
 
                for (ref funding_txo, ref mut monitor) in args.channel_monitors.iter_mut() {
                        if !funding_txo_set.contains(funding_txo) {
+                               log_info!(args.logger, "Broadcasting latest holder commitment transaction for closed channel {}", log_bytes!(funding_txo.to_channel_id()));
                                monitor.broadcast_latest_holder_commitment_txn(&args.tx_broadcaster, &args.logger);
                        }
                }
@@ -5528,21 +6004,78 @@ impl<'a, Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
                }
 
                let pending_outbound_payments_count_compat: u64 = Readable::read(reader)?;
-               let mut pending_outbound_payments_compat: HashMap<MppId, HashSet<[u8; 32]>> =
+               let mut pending_outbound_payments_compat: HashMap<PaymentId, PendingOutboundPayment> =
                        HashMap::with_capacity(cmp::min(pending_outbound_payments_count_compat as usize, MAX_ALLOC_SIZE/32));
                for _ in 0..pending_outbound_payments_count_compat {
                        let session_priv = Readable::read(reader)?;
-                       if pending_outbound_payments_compat.insert(MppId(session_priv), [session_priv].iter().cloned().collect()).is_some() {
+                       let payment = PendingOutboundPayment::Legacy {
+                               session_privs: [session_priv].iter().cloned().collect()
+                       };
+                       if pending_outbound_payments_compat.insert(PaymentId(session_priv), payment).is_some() {
                                return Err(DecodeError::InvalidValue)
                        };
                }
 
+               // pending_outbound_payments_no_retry is for compatibility with 0.0.101 clients.
+               let mut pending_outbound_payments_no_retry: Option<HashMap<PaymentId, HashSet<[u8; 32]>>> = None;
                let mut pending_outbound_payments = None;
                read_tlv_fields!(reader, {
-                       (1, pending_outbound_payments, option),
+                       (1, pending_outbound_payments_no_retry, option),
+                       (3, pending_outbound_payments, option),
                });
-               if pending_outbound_payments.is_none() {
+               if pending_outbound_payments.is_none() && pending_outbound_payments_no_retry.is_none() {
                        pending_outbound_payments = Some(pending_outbound_payments_compat);
+               } else if pending_outbound_payments.is_none() {
+                       let mut outbounds = HashMap::new();
+                       for (id, session_privs) in pending_outbound_payments_no_retry.unwrap().drain() {
+                               outbounds.insert(id, PendingOutboundPayment::Legacy { session_privs });
+                       }
+                       pending_outbound_payments = Some(outbounds);
+               } else {
+                       // If we're tracking pending payments, ensure we haven't lost any by looking at the
+                       // ChannelMonitor data for any channels for which we do not have authoritative state
+                       // (i.e. those for which we just force-closed above or we otherwise don't have a
+                       // corresponding `Channel` at all).
+                       // This avoids several edge-cases where we would otherwise "forget" about pending
+                       // payments which are still in-flight via their on-chain state.
+                       // We only rebuild the pending payments map if we were most recently serialized by
+                       // 0.0.102+.
+                       for (_, monitor) in args.channel_monitors.iter() {
+                               if by_id.get(&monitor.get_funding_txo().0.to_channel_id()).is_none() {
+                                       for (htlc_source, htlc) in monitor.get_pending_outbound_htlcs() {
+                                               if let HTLCSource::OutboundRoute { payment_id, session_priv, path, payment_secret, .. } = htlc_source {
+                                                       if path.is_empty() {
+                                                               log_error!(args.logger, "Got an empty path for a pending payment");
+                                                               return Err(DecodeError::InvalidValue);
+                                                       }
+                                                       let path_amt = path.last().unwrap().fee_msat;
+                                                       let mut session_priv_bytes = [0; 32];
+                                                       session_priv_bytes[..].copy_from_slice(&session_priv[..]);
+                                                       match pending_outbound_payments.as_mut().unwrap().entry(payment_id) {
+                                                               hash_map::Entry::Occupied(mut entry) => {
+                                                                       let newly_added = entry.get_mut().insert(session_priv_bytes, &path);
+                                                                       log_info!(args.logger, "{} a pending payment path for {} msat for session priv {} on an existing pending payment with payment hash {}",
+                                                                               if newly_added { "Added" } else { "Had" }, path_amt, log_bytes!(session_priv_bytes), log_bytes!(htlc.payment_hash.0));
+                                                               },
+                                                               hash_map::Entry::Vacant(entry) => {
+                                                                       let path_fee = path.get_path_fees();
+                                                                       entry.insert(PendingOutboundPayment::Retryable {
+                                                                               session_privs: [session_priv_bytes].iter().cloned().collect(),
+                                                                               payment_hash: htlc.payment_hash,
+                                                                               payment_secret,
+                                                                               pending_amt_msat: path_amt,
+                                                                               pending_fee_msat: Some(path_fee),
+                                                                               total_msat: path_amt,
+                                                                               starting_block_height: best_block_height,
+                                                                       });
+                                                                       log_info!(args.logger, "Added a pending payment for {} msat with payment hash {} for path with session priv {}",
+                                                                               path_amt, log_bytes!(htlc.payment_hash.0), log_bytes!(session_priv_bytes));
+                                                               }
+                                                       }
+                                               }
+                                       }
+                               }
+                       }
                }
 
                let mut secp_ctx = Secp256k1::new();
@@ -5606,12 +6139,12 @@ mod tests {
        use bitcoin::hashes::sha256::Hash as Sha256;
        use core::time::Duration;
        use ln::{PaymentPreimage, PaymentHash, PaymentSecret};
-       use ln::channelmanager::{MppId, PaymentSendFailure};
-       use ln::features::{InitFeatures, InvoiceFeatures};
+       use ln::channelmanager::{PaymentId, PaymentSendFailure};
+       use ln::features::InitFeatures;
        use ln::functional_test_utils::*;
        use ln::msgs;
        use ln::msgs::ChannelMessageHandler;
-       use routing::router::{get_keysend_route, get_route};
+       use routing::router::{Payee, RouteParameters, find_route};
        use util::errors::APIError;
        use util::events::{Event, MessageSendEvent, MessageSendEventsProvider};
        use util::test_utils;
@@ -5751,17 +6284,14 @@ mod tests {
                let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
                let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
                create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
-               let logger = test_utils::TestLogger::new();
 
                // First, send a partial MPP payment.
-               let net_graph_msg_handler = &nodes[0].net_graph_msg_handler;
-               let route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph, &nodes[1].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), 100_000, TEST_FINAL_CLTV, &logger).unwrap();
-               let (payment_preimage, our_payment_hash, payment_secret) = get_payment_preimage_hash!(&nodes[1]);
-               let mpp_id = MppId([42; 32]);
+               let (route, our_payment_hash, payment_preimage, payment_secret) = get_route_and_payment_hash!(&nodes[0], nodes[1], 100_000);
+               let payment_id = PaymentId([42; 32]);
                // Use the utility function send_payment_along_path to send the payment with MPP data which
                // indicates there are more HTLCs coming.
                let cur_height = CHAN_CONFIRM_DEPTH + 1; // route_payment calls send_payment, which adds 1 to the current height. So we do the same here to match.
-               nodes[0].node.send_payment_along_path(&route.paths[0], &our_payment_hash, &Some(payment_secret), 200_000, cur_height, mpp_id, &None).unwrap();
+               nodes[0].node.send_payment_along_path(&route.paths[0], &route.payee, &our_payment_hash, &Some(payment_secret), 200_000, cur_height, payment_id, &None).unwrap();
                check_added_monitors!(nodes[0], 1);
                let mut events = nodes[0].node.get_and_clear_pending_msg_events();
                assert_eq!(events.len(), 1);
@@ -5791,7 +6321,7 @@ mod tests {
                expect_payment_failed!(nodes[0], our_payment_hash, true);
 
                // Send the second half of the original MPP payment.
-               nodes[0].node.send_payment_along_path(&route.paths[0], &our_payment_hash, &Some(payment_secret), 200_000, cur_height, mpp_id, &None).unwrap();
+               nodes[0].node.send_payment_along_path(&route.paths[0], &route.payee, &our_payment_hash, &Some(payment_secret), 200_000, cur_height, payment_id, &None).unwrap();
                check_added_monitors!(nodes[0], 1);
                let mut events = nodes[0].node.get_and_clear_pending_msg_events();
                assert_eq!(events.len(), 1);
@@ -5829,12 +6359,31 @@ mod tests {
                nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_third_raa);
                check_added_monitors!(nodes[0], 1);
 
-               // Note that successful MPP payments will generate 1 event upon the first path's success. No
-               // further events will be generated for subsequence path successes.
+               // Note that successful MPP payments will generate a single PaymentSent event upon the first
+               // path's success and a PaymentPathSuccessful event for each path's success.
                let events = nodes[0].node.get_and_clear_pending_events();
+               assert_eq!(events.len(), 3);
                match events[0] {
-                       Event::PaymentSent { payment_preimage: ref preimage } => {
+                       Event::PaymentSent { payment_id: ref id, payment_preimage: ref preimage, payment_hash: ref hash, .. } => {
+                               assert_eq!(Some(payment_id), *id);
                                assert_eq!(payment_preimage, *preimage);
+                               assert_eq!(our_payment_hash, *hash);
+                       },
+                       _ => panic!("Unexpected event"),
+               }
+               match events[1] {
+                       Event::PaymentPathSuccessful { payment_id: ref actual_payment_id, ref payment_hash, ref path } => {
+                               assert_eq!(payment_id, *actual_payment_id);
+                               assert_eq!(our_payment_hash, *payment_hash.as_ref().unwrap());
+                               assert_eq!(route.paths[0], *path);
+                       },
+                       _ => panic!("Unexpected event"),
+               }
+               match events[2] {
+                       Event::PaymentPathSuccessful { payment_id: ref actual_payment_id, ref payment_hash, ref path } => {
+                               assert_eq!(payment_id, *actual_payment_id);
+                               assert_eq!(our_payment_hash, *payment_hash.as_ref().unwrap());
+                               assert_eq!(route.paths[0], *path);
                        },
                        _ => panic!("Unexpected event"),
                }
@@ -5851,14 +6400,22 @@ mod tests {
                let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
                let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
                create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
-               let logger = test_utils::TestLogger::new();
+               let scorer = test_utils::TestScorer::with_fixed_penalty(0);
 
                // To start (1), send a regular payment but don't claim it.
                let expected_route = [&nodes[1]];
                let (payment_preimage, payment_hash, _) = route_payment(&nodes[0], &expected_route, 100_000);
 
                // Next, attempt a keysend payment and make sure it fails.
-               let route = get_route(&nodes[0].node.get_our_node_id(), &nodes[0].net_graph_msg_handler.network_graph, &expected_route.last().unwrap().node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), 100_000, TEST_FINAL_CLTV, &logger).unwrap();
+               let params = RouteParameters {
+                       payee: Payee::for_keysend(expected_route.last().unwrap().node.get_our_node_id()),
+                       final_value_msat: 100_000,
+                       final_cltv_expiry_delta: TEST_FINAL_CLTV,
+               };
+               let route = find_route(
+                       &nodes[0].node.get_our_node_id(), &params, nodes[0].network_graph, None,
+                       nodes[0].logger, &scorer
+               ).unwrap();
                nodes[0].node.send_spontaneous_payment(&route, Some(payment_preimage)).unwrap();
                check_added_monitors!(nodes[0], 1);
                let mut events = nodes[0].node.get_and_clear_pending_msg_events();
@@ -5886,8 +6443,11 @@ mod tests {
 
                // To start (2), send a keysend payment but don't claim it.
                let payment_preimage = PaymentPreimage([42; 32]);
-               let route = get_route(&nodes[0].node.get_our_node_id(), &nodes[0].net_graph_msg_handler.network_graph, &expected_route.last().unwrap().node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), 100_000, TEST_FINAL_CLTV, &logger).unwrap();
-               let payment_hash = nodes[0].node.send_spontaneous_payment(&route, Some(payment_preimage)).unwrap();
+               let route = find_route(
+                       &nodes[0].node.get_our_node_id(), &params, nodes[0].network_graph, None,
+                       nodes[0].logger, &scorer
+               ).unwrap();
+               let (payment_hash, _) = nodes[0].node.send_spontaneous_payment(&route, Some(payment_preimage)).unwrap();
                check_added_monitors!(nodes[0], 1);
                let mut events = nodes[0].node.get_and_clear_pending_msg_events();
                assert_eq!(events.len(), 1);
@@ -5938,15 +6498,22 @@ mod tests {
                nodes[1].node.peer_connected(&payer_pubkey, &msgs::Init { features: InitFeatures::known() });
 
                let _chan = create_chan_between_nodes(&nodes[0], &nodes[1], InitFeatures::known(), InitFeatures::known());
-               let network_graph = &nodes[0].net_graph_msg_handler.network_graph;
+               let params = RouteParameters {
+                       payee: Payee::for_keysend(payee_pubkey),
+                       final_value_msat: 10000,
+                       final_cltv_expiry_delta: 40,
+               };
+               let network_graph = nodes[0].network_graph;
                let first_hops = nodes[0].node.list_usable_channels();
-               let route = get_keysend_route(&payer_pubkey, network_graph, &payee_pubkey,
-                                  Some(&first_hops.iter().collect::<Vec<_>>()), &vec![], 10000, 40,
-                                  nodes[0].logger).unwrap();
+               let scorer = test_utils::TestScorer::with_fixed_penalty(0);
+               let route = find_route(
+                       &payer_pubkey, &params, network_graph, Some(&first_hops.iter().collect::<Vec<_>>()),
+                       nodes[0].logger, &scorer
+               ).unwrap();
 
                let test_preimage = PaymentPreimage([42; 32]);
                let mismatch_payment_hash = PaymentHash([43; 32]);
-               let _ = nodes[0].node.send_payment_internal(&route, mismatch_payment_hash, &None, Some(test_preimage)).unwrap();
+               let _ = nodes[0].node.send_payment_internal(&route, mismatch_payment_hash, &None, Some(test_preimage), None, None).unwrap();
                check_added_monitors!(nodes[0], 1);
 
                let updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
@@ -5974,16 +6541,23 @@ mod tests {
                nodes[1].node.peer_connected(&payer_pubkey, &msgs::Init { features: InitFeatures::known() });
 
                let _chan = create_chan_between_nodes(&nodes[0], &nodes[1], InitFeatures::known(), InitFeatures::known());
-               let network_graph = &nodes[0].net_graph_msg_handler.network_graph;
+               let params = RouteParameters {
+                       payee: Payee::for_keysend(payee_pubkey),
+                       final_value_msat: 10000,
+                       final_cltv_expiry_delta: 40,
+               };
+               let network_graph = nodes[0].network_graph;
                let first_hops = nodes[0].node.list_usable_channels();
-               let route = get_keysend_route(&payer_pubkey, network_graph, &payee_pubkey,
-                                  Some(&first_hops.iter().collect::<Vec<_>>()), &vec![], 10000, 40,
-                                  nodes[0].logger).unwrap();
+               let scorer = test_utils::TestScorer::with_fixed_penalty(0);
+               let route = find_route(
+                       &payer_pubkey, &params, network_graph, Some(&first_hops.iter().collect::<Vec<_>>()),
+                       nodes[0].logger, &scorer
+               ).unwrap();
 
                let test_preimage = PaymentPreimage([42; 32]);
                let test_secret = PaymentSecret([43; 32]);
                let payment_hash = PaymentHash(Sha256::hash(&test_preimage.0).into_inner());
-               let _ = nodes[0].node.send_payment_internal(&route, payment_hash, &Some(test_secret), Some(test_preimage)).unwrap();
+               let _ = nodes[0].node.send_payment_internal(&route, payment_hash, &Some(test_secret), Some(test_preimage), None, None).unwrap();
                check_added_monitors!(nodes[0], 1);
 
                let updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
@@ -6008,12 +6582,9 @@ mod tests {
                let chan_2_id = create_announced_chan_between_nodes(&nodes, 0, 2, InitFeatures::known(), InitFeatures::known()).0.contents.short_channel_id;
                let chan_3_id = create_announced_chan_between_nodes(&nodes, 1, 3, InitFeatures::known(), InitFeatures::known()).0.contents.short_channel_id;
                let chan_4_id = create_announced_chan_between_nodes(&nodes, 2, 3, InitFeatures::known(), InitFeatures::known()).0.contents.short_channel_id;
-               let logger = test_utils::TestLogger::new();
 
                // Marshall an MPP route.
-               let (_, payment_hash, _) = get_payment_preimage_hash!(&nodes[3]);
-               let net_graph_msg_handler = &nodes[0].net_graph_msg_handler;
-               let mut route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph, &nodes[3].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &[], 100000, TEST_FINAL_CLTV, &logger).unwrap();
+               let (mut route, payment_hash, _, _) = get_route_and_payment_hash!(&nodes[0], nodes[3], 100000);
                let path = route.paths[0].clone();
                route.paths.push(path);
                route.paths[0][0].pubkey = nodes[1].node.get_our_node_id();
@@ -6034,15 +6605,15 @@ mod tests {
 #[cfg(all(any(test, feature = "_test_utils"), feature = "unstable"))]
 pub mod bench {
        use chain::Listen;
-       use chain::chainmonitor::ChainMonitor;
-       use chain::channelmonitor::Persist;
+       use chain::chainmonitor::{ChainMonitor, Persist};
        use chain::keysinterface::{KeysManager, InMemorySigner};
        use ln::channelmanager::{BestBlock, ChainParameters, ChannelManager, PaymentHash, PaymentPreimage};
        use ln::features::{InitFeatures, InvoiceFeatures};
        use ln::functional_test_utils::*;
        use ln::msgs::{ChannelMessageHandler, Init};
        use routing::network_graph::NetworkGraph;
-       use routing::router::get_route;
+       use routing::router::{Payee, get_route};
+       use routing::scoring::Scorer;
        use util::test_utils;
        use util::config::UserConfig;
        use util::events::{Event, MessageSendEvent, MessageSendEventsProvider, PaymentPurpose};
@@ -6150,14 +6721,17 @@ pub mod bench {
                macro_rules! send_payment {
                        ($node_a: expr, $node_b: expr) => {
                                let usable_channels = $node_a.list_usable_channels();
-                               let route = get_route(&$node_a.get_our_node_id(), &dummy_graph, &$node_b.get_our_node_id(), Some(InvoiceFeatures::known()),
-                                       Some(&usable_channels.iter().map(|r| r).collect::<Vec<_>>()), &[], 10_000, TEST_FINAL_CLTV, &logger_a).unwrap();
+                               let payee = Payee::from_node_id($node_b.get_our_node_id())
+                                       .with_features(InvoiceFeatures::known());
+                               let scorer = Scorer::with_fixed_penalty(0);
+                               let route = get_route(&$node_a.get_our_node_id(), &payee, &dummy_graph,
+                                       Some(&usable_channels.iter().map(|r| r).collect::<Vec<_>>()), 10_000, TEST_FINAL_CLTV, &logger_a, &scorer).unwrap();
 
                                let mut payment_preimage = PaymentPreimage([0; 32]);
                                payment_preimage.0[0..8].copy_from_slice(&payment_count.to_le_bytes());
                                payment_count += 1;
                                let payment_hash = PaymentHash(Sha256::hash(&payment_preimage.0[..]).into_inner());
-                               let payment_secret = $node_b.create_inbound_payment_for_hash(payment_hash, None, 7200, 0).unwrap();
+                               let payment_secret = $node_b.create_inbound_payment_for_hash(payment_hash, None, 7200).unwrap();
 
                                $node_a.send_payment(&route, payment_hash, &Some(payment_secret)).unwrap();
                                let payment_event = SendEvent::from_event($node_a.get_and_clear_pending_msg_events().pop().unwrap());