Merge pull request #1107 from dunxen/2021-10-swap-pubkey-for-bytearray
[rust-lightning] lightning/src/ln/channelmanager.rs
index 67975238050472790da5bbc344d1f0bb67fda2a4..1b5813a301851229fdd2967dc22e025042b2cab8 100644
@@ -37,13 +37,12 @@ use bitcoin::secp256k1;
 
 use chain;
 use chain::{Confirm, Watch, BestBlock};
-use chain::chaininterface::{BroadcasterInterface, FeeEstimator};
+use chain::chaininterface::{BroadcasterInterface, ConfirmationTarget, FeeEstimator};
 use chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, ChannelMonitorUpdateStep, ChannelMonitorUpdateErr, HTLC_FAIL_BACK_BUFFER, CLTV_CLAIM_BUFFER, LATENCY_GRACE_PERIOD_BLOCKS, ANTI_REORG_DELAY, MonitorEvent, CLOSED_CHANNEL_UPDATE_ID};
 use chain::transaction::{OutPoint, TransactionData};
 // Since this struct is returned in `list_channels` methods, expose it here in case users want to
 // construct one themselves.
 use ln::{PaymentHash, PaymentPreimage, PaymentSecret};
-pub use ln::channel::CounterpartyForwardingInfo;
 use ln::channel::{Channel, ChannelError, ChannelUpdateStatus, UpdateFulfillCommitFetch};
 use ln::features::{InitFeatures, NodeFeatures};
 use routing::router::{Route, RouteHop};
@@ -53,24 +52,24 @@ use ln::onion_utils;
 use ln::msgs::{ChannelMessageHandler, DecodeError, LightningError, OptionalField};
 use chain::keysinterface::{Sign, KeysInterface, KeysManager, InMemorySigner};
 use util::config::UserConfig;
-use util::events::{EventHandler, EventsProvider, MessageSendEvent, MessageSendEventsProvider};
+use util::events::{EventHandler, EventsProvider, MessageSendEvent, MessageSendEventsProvider, ClosureReason};
 use util::{byte_utils, events};
-use util::ser::{Readable, ReadableArgs, MaybeReadable, Writeable, Writer};
+use util::ser::{BigSize, FixedLengthReader, Readable, ReadableArgs, MaybeReadable, Writeable, Writer};
 use util::chacha20::{ChaCha20, ChaChaReader};
-use util::logger::Logger;
+use util::logger::{Logger, Level};
 use util::errors::APIError;
 
+use io;
 use prelude::*;
 use core::{cmp, mem};
 use core::cell::RefCell;
-use std::io::{Cursor, Read};
-use std::sync::{Arc, Condvar, Mutex, MutexGuard, RwLock, RwLockReadGuard};
+use io::{Cursor, Read};
+use sync::{Arc, Condvar, Mutex, MutexGuard, RwLock, RwLockReadGuard};
 use core::sync::atomic::{AtomicUsize, Ordering};
 use core::time::Duration;
 #[cfg(any(test, feature = "allow_wallclock_use"))]
 use std::time::Instant;
 use core::ops::Deref;
-use bitcoin::hashes::hex::ToHex;
 
 // We hold various information about HTLC relay in the HTLC objects in Channel itself:
 //
@@ -99,6 +98,10 @@ enum PendingHTLCRouting {
                payment_data: msgs::FinalOnionHopData,
                incoming_cltv_expiry: u32, // Used to track when we should expire pending HTLCs that go unclaimed
        },
+       ReceiveKeysend {
+               payment_preimage: PaymentPreimage,
+               incoming_cltv_expiry: u32, // Used to track when we should expire pending HTLCs that go unclaimed
+       },
 }
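
The new `ReceiveKeysend` variant handles spontaneous ("keysend") payments, where the sender embeds the payment preimage in the final onion hop rather than the recipient issuing an invoice. The invariant enforced further down in this diff is that the payment hash must be the SHA-256 of that preimage. A minimal standalone sketch of the check (the newtypes below are local stand-ins for the crate's `PaymentPreimage`/`PaymentHash`):

    use bitcoin::hashes::{Hash, sha256::Hash as Sha256};

    // Local stand-ins for the crate's 32-byte newtypes.
    pub struct PaymentPreimage(pub [u8; 32]);
    pub struct PaymentHash(pub [u8; 32]);

    // A keysend HTLC is claimable iff SHA256(preimage) == payment_hash.
    fn keysend_payment_hash(preimage: &PaymentPreimage) -> PaymentHash {
        PaymentHash(Sha256::hash(&preimage.0).into_inner())
    }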
 
 #[derive(Clone)] // See Channel::revoke_and_ack for why, tl;dr: Rust bug
@@ -153,16 +156,38 @@ pub(crate) struct HTLCPreviousHopData {
        outpoint: OutPoint,
 }
 
-struct ClaimableHTLC {
-       prev_hop: HTLCPreviousHopData,
-       value: u64,
+enum OnionPayload {
        /// Contains a total_msat (which may differ from value if this is a Multi-Path Payment) and a
        /// payment_secret which prevents path-probing attacks and can associate different HTLCs which
        /// are part of the same payment.
-       payment_data: msgs::FinalOnionHopData,
+       Invoice(msgs::FinalOnionHopData),
+       /// Contains the payer-provided preimage.
+       Spontaneous(PaymentPreimage),
+}
+
+struct ClaimableHTLC {
+       prev_hop: HTLCPreviousHopData,
        cltv_expiry: u32,
+       value: u64,
+       onion_payload: OnionPayload,
+}
+
+/// A payment identifier used to uniquely identify a payment to LDK.
+#[derive(Hash, Copy, Clone, PartialEq, Eq, Debug)]
+pub struct PaymentId(pub [u8; 32]);
+
+impl Writeable for PaymentId {
+       fn write<W: Writer>(&self, w: &mut W) -> Result<(), io::Error> {
+               self.0.write(w)
+       }
 }
 
+impl Readable for PaymentId {
+       fn read<R: Read>(r: &mut R) -> Result<Self, DecodeError> {
+               let buf: [u8; 32] = Readable::read(r)?;
+               Ok(PaymentId(buf))
+       }
+}
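
`PaymentId` is a plain 32-byte newtype, so its serialization is just the fixed-width array with no length prefix. A self-contained round-trip sketch using `std::io` stand-ins (the crate's real impls go through its own `Writer`/`Readable` traits, as above):

    use std::io::{self, Cursor, Read, Write};

    #[derive(Debug, PartialEq)]
    struct PaymentId([u8; 32]);

    fn write_payment_id<W: Write>(id: &PaymentId, w: &mut W) -> io::Result<()> {
        w.write_all(&id.0) // fixed 32 bytes, no length prefix
    }

    fn read_payment_id<R: Read>(r: &mut R) -> io::Result<PaymentId> {
        let mut buf = [0u8; 32];
        r.read_exact(&mut buf)?;
        Ok(PaymentId(buf))
    }

    fn main() -> io::Result<()> {
        let id = PaymentId([42; 32]);
        let mut bytes = Vec::new();
        write_payment_id(&id, &mut bytes)?;
        assert_eq!(read_payment_id(&mut Cursor::new(bytes))?, id);
        Ok(())
    }
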
 /// Tracks the inbound corresponding to an outbound HTLC
 #[derive(Clone, PartialEq)]
 pub(crate) enum HTLCSource {
@@ -173,6 +198,7 @@ pub(crate) enum HTLCSource {
                /// Technically we can recalculate this from the route, but we cache it here to avoid
                /// doing a double-pass on route when we get a failure back
                first_hop_htlc_msat: u64,
+               payment_id: PaymentId,
        },
 }
 #[cfg(test)]
@@ -182,6 +208,7 @@ impl HTLCSource {
                        path: Vec::new(),
                        session_priv: SecretKey::from_slice(&[1; 32]).unwrap(),
                        first_hop_htlc_msat: 0,
+                       payment_id: PaymentId([2; 32]),
                }
        }
 }
@@ -197,6 +224,14 @@ pub(super) enum HTLCFailReason {
        }
 }
 
+/// Return value for claim_funds_from_hop
+enum ClaimFundsFromHop {
+       PrevHopForceClosed,
+       MonitorUpdateFail(PublicKey, MsgHandleErrInternal, Option<u64>),
+       Success(u64),
+       DuplicateClaim,
+}
+
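For orientation, the four variants cover: the upstream channel was already force-closed (the preimage goes straight to the ChannelMonitor), a monitor update failed, a fresh claim of the given value, and a replayed claim. A hedged sketch of how a call site might branch on it (handler bodies here are illustrative only; the real call sites live later in this file):

    match claim_res {
        ClaimFundsFromHop::Success(_htlc_value_msat) => {
            // claim succeeded; the claimed value can feed fee accounting
        },
        ClaimFundsFromHop::DuplicateClaim => {
            // already claimed (e.g. a replayed update_fulfill_htlc); nothing to do
        },
        ClaimFundsFromHop::PrevHopForceClosed => {
            // preimage was handed to the ChannelMonitor for on-chain claiming
        },
        ClaimFundsFromHop::MonitorUpdateFail(_counterparty_node_id, _err, _htlc_msat) => {
            // surface the error against the counterparty via handle_error!
        },
    }
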
 type ShutdownResult = (Option<(OutPoint, ChannelMonitorUpdate)>, Vec<(HTLCSource, PaymentHash)>);
 
 /// Error type returned across the channel_state mutex boundary. When an Err is generated for a
@@ -207,6 +242,7 @@ type ShutdownResult = (Option<(OutPoint, ChannelMonitorUpdate)>, Vec<(HTLCSource
 
 struct MsgHandleErrInternal {
        err: msgs::LightningError,
+       chan_id: Option<[u8; 32]>, // If Some, a channel of ours has been closed
        shutdown_finish: Option<(ShutdownResult, Option<msgs::ChannelUpdate>)>,
 }
 impl MsgHandleErrInternal {
@@ -222,6 +258,7 @@ impl MsgHandleErrInternal {
                                        },
                                },
                        },
+                       chan_id: None,
                        shutdown_finish: None,
                }
        }
@@ -232,12 +269,13 @@ impl MsgHandleErrInternal {
                                err,
                                action: msgs::ErrorAction::IgnoreError,
                        },
+                       chan_id: None,
                        shutdown_finish: None,
                }
        }
        #[inline]
        fn from_no_close(err: msgs::LightningError) -> Self {
-               Self { err, shutdown_finish: None }
+               Self { err, chan_id: None, shutdown_finish: None }
        }
        #[inline]
        fn from_finish_shutdown(err: String, channel_id: [u8; 32], shutdown_res: ShutdownResult, channel_update: Option<msgs::ChannelUpdate>) -> Self {
@@ -251,6 +289,7 @@ impl MsgHandleErrInternal {
                                        },
                                },
                        },
+                       chan_id: Some(channel_id),
                        shutdown_finish: Some((shutdown_res, channel_update)),
                }
        }
@@ -258,6 +297,10 @@ impl MsgHandleErrInternal {
        fn from_chan_no_close(err: ChannelError, channel_id: [u8; 32]) -> Self {
                Self {
                        err: match err {
+                               ChannelError::Warn(msg) => LightningError {
+                                       err: msg,
+                                       action: msgs::ErrorAction::IgnoreError,
+                               },
                                ChannelError::Ignore(msg) => LightningError {
                                        err: msg,
                                        action: msgs::ErrorAction::IgnoreError,
@@ -281,6 +324,7 @@ impl MsgHandleErrInternal {
                                        },
                                },
                        },
+                       chan_id: None,
                        shutdown_finish: None,
                }
        }
@@ -356,6 +400,65 @@ struct PendingInboundPayment {
        min_value_msat: Option<u64>,
 }
 
+/// Stores the session_priv for each part of a payment that is still pending. For versions 0.0.102
+/// and later, also stores information for retrying the payment.
+pub(crate) enum PendingOutboundPayment {
+       Legacy {
+               session_privs: HashSet<[u8; 32]>,
+       },
+       Retryable {
+               session_privs: HashSet<[u8; 32]>,
+               payment_hash: PaymentHash,
+               payment_secret: Option<PaymentSecret>,
+               pending_amt_msat: u64,
+               /// The total payment amount across all paths, used to verify that a retry is not overpaying.
+               total_msat: u64,
+               /// Our best known block height at the time this payment was initiated.
+               starting_block_height: u32,
+       },
+}
+
+impl PendingOutboundPayment {
+       fn remove(&mut self, session_priv: &[u8; 32], part_amt_msat: u64) -> bool {
+               let remove_res = match self {
+                       PendingOutboundPayment::Legacy { session_privs } |
+                       PendingOutboundPayment::Retryable { session_privs, .. } => {
+                               session_privs.remove(session_priv)
+                       }
+               };
+               if remove_res {
+                       if let PendingOutboundPayment::Retryable { ref mut pending_amt_msat, .. } = self {
+                               *pending_amt_msat -= part_amt_msat;
+                       }
+               }
+               remove_res
+       }
+
+       fn insert(&mut self, session_priv: [u8; 32], part_amt_msat: u64) -> bool {
+               let insert_res = match self {
+                       PendingOutboundPayment::Legacy { session_privs } |
+                       PendingOutboundPayment::Retryable { session_privs, .. } => {
+                               session_privs.insert(session_priv)
+                       }
+               };
+               if insert_res {
+                       if let PendingOutboundPayment::Retryable { ref mut pending_amt_msat, .. } = self {
+                               *pending_amt_msat += part_amt_msat;
+                       }
+               }
+               insert_res
+       }
+
+       fn remaining_parts(&self) -> usize {
+               match self {
+                       PendingOutboundPayment::Legacy { session_privs } |
+                       PendingOutboundPayment::Retryable { session_privs, .. } => {
+                               session_privs.len()
+                       }
+               }
+       }
+}
+
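The `insert`/`remove` pair above keeps `pending_amt_msat` consistent with the set of in-flight parts: the amount is only added or subtracted when the session-priv set actually changed, so replays cannot double-count (and `Legacy` payments, which track no amount, are unaffected). A self-contained sketch of the same bookkeeping pattern:

    use std::collections::HashSet;

    // Stand-in mirroring the Retryable bookkeeping from the diff.
    struct Retryable { session_privs: HashSet<[u8; 32]>, pending_amt_msat: u64 }

    impl Retryable {
        fn insert_part(&mut self, session_priv: [u8; 32], part_amt_msat: u64) -> bool {
            let inserted = self.session_privs.insert(session_priv);
            if inserted { self.pending_amt_msat += part_amt_msat; } // only count new parts
            inserted
        }
        fn remove_part(&mut self, session_priv: &[u8; 32], part_amt_msat: u64) -> bool {
            let removed = self.session_privs.remove(session_priv);
            if removed { self.pending_amt_msat -= part_amt_msat; } // never double-subtract
            removed
        }
    }
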
 /// SimpleArcChannelManager is useful when you need a ChannelManager with a static lifetime, e.g.
 /// when you're using lightning-net-tokio (since tokio::spawn requires parameters with static
 /// lifetimes). Other times you can afford a reference, which is more efficient, in which case
@@ -442,17 +545,19 @@ pub struct ChannelManager<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref,
        /// Locked *after* channel_state.
        pending_inbound_payments: Mutex<HashMap<PaymentHash, PendingInboundPayment>>,
 
-       /// The session_priv bytes of outbound payments which are pending resolution.
+       /// The session_priv bytes and retry metadata of outbound payments which are pending resolution.
        /// The authoritative state of these HTLCs resides either within Channels or ChannelMonitors
        /// (if the channel has been force-closed), however we track them here to prevent duplicative
-       /// PaymentSent/PaymentFailed events. Specifically, in the case of a duplicative
+       /// PaymentSent/PaymentPathFailed events. Specifically, in the case of a duplicative
        /// update_fulfill_htlc message after a reconnect, we may "claim" a payment twice.
        /// Additionally, because ChannelMonitors are often not re-serialized after connecting block(s)
        /// which may generate a claim event, we may receive similar duplicate claim/fail MonitorEvents
        /// after reloading from disk while replaying blocks against ChannelMonitors.
        ///
+       /// See `PendingOutboundPayment` documentation for more info.
+       ///
        /// Locked *after* channel_state.
-       pending_outbound_payments: Mutex<HashSet<[u8; 32]>>,
+       pending_outbound_payments: Mutex<HashMap<PaymentId, PendingOutboundPayment>>,
 
        our_network_key: SecretKey,
        our_network_pubkey: PublicKey,
@@ -472,6 +577,8 @@ pub struct ChannelManager<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref,
        /// Because adding or removing an entry is rare, we usually take an outer read lock and then
        /// operate on the inner value freely. Sadly, this prevents parallel operation when opening a
        /// new channel.
+       ///
+       /// If also holding `channel_state` lock, must lock `channel_state` prior to `per_peer_state`.
        per_peer_state: RwLock<HashMap<PublicKey, Mutex<PeerState>>>,
 
        pending_events: Mutex<Vec<events::Event>>,
@@ -602,6 +709,42 @@ const CHECK_CLTV_EXPIRY_SANITY: u32 = MIN_CLTV_EXPIRY_DELTA as u32 - LATENCY_GRA
 #[allow(dead_code)]
 const CHECK_CLTV_EXPIRY_SANITY_2: u32 = MIN_CLTV_EXPIRY_DELTA as u32 - LATENCY_GRACE_PERIOD_BLOCKS - 2*CLTV_CLAIM_BUFFER;
 
+/// Information needed for constructing an invoice route hint for this channel.
+#[derive(Clone, Debug, PartialEq)]
+pub struct CounterpartyForwardingInfo {
+       /// Base routing fee in millisatoshis.
+       pub fee_base_msat: u32,
+       /// Amount in millionths of a satoshi the channel will charge per transferred satoshi.
+       pub fee_proportional_millionths: u32,
+       /// The minimum difference in cltv_expiry between an inbound HTLC and its outgoing counterpart,
+       /// such that the outgoing HTLC is forwardable to this counterparty. See `msgs::ChannelUpdate`'s
+       /// `cltv_expiry_delta` for more details.
+       pub cltv_expiry_delta: u16,
+}
+
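These two fee fields combine in the standard BOLT 7 fashion when computing the fee for forwarding an HTLC through the counterparty: a fixed base plus a proportional part in millionths of the forwarded amount. A hedged sketch (not a function in this crate; real code should guard against overflow):

    // fee_msat = fee_base_msat + amt_msat * fee_proportional_millionths / 1_000_000
    fn forwarding_fee_msat(fee_base_msat: u32, fee_proportional_millionths: u32, amt_msat: u64) -> u64 {
        fee_base_msat as u64 + amt_msat * fee_proportional_millionths as u64 / 1_000_000
    }
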
+/// Channel parameters which apply to our counterparty. These are split out from [`ChannelDetails`]
+/// to better separate parameters.
+#[derive(Clone, Debug, PartialEq)]
+pub struct ChannelCounterparty {
+       /// The node_id of our counterparty
+       pub node_id: PublicKey,
+       /// The Features the channel counterparty provided upon last connection.
+       /// Useful for routing as it is the most up-to-date copy of the counterparty's features and
+       /// many routing-relevant features are present in the init context.
+       pub features: InitFeatures,
+       /// The value, in satoshis, that must always be held in the channel for our counterparty. This
+       /// value ensures that if our counterparty broadcasts a revoked state, we can punish them by
+       /// claiming at least this value on chain.
+       ///
+       /// This value is not included in [`inbound_capacity_msat`] as it can never be spent.
+       ///
+       /// [`inbound_capacity_msat`]: ChannelDetails::inbound_capacity_msat
+       pub unspendable_punishment_reserve: u64,
+       /// Information on the fees and requirements that the counterparty requires when forwarding
+       /// payments to us through this channel.
+       pub forwarding_info: Option<CounterpartyForwardingInfo>,
+}
+
 /// Details of a channel, as returned by ChannelManager::list_channels and ChannelManager::list_usable_channels
 #[derive(Clone, Debug, PartialEq)]
 pub struct ChannelDetails {
@@ -610,6 +753,8 @@ pub struct ChannelDetails {
        /// Note that this means this value is *not* persistent - it can change once during the
        /// lifetime of the channel.
        pub channel_id: [u8; 32],
+       /// Parameters which apply to our counterparty. See individual fields for more information.
+       pub counterparty: ChannelCounterparty,
        /// The Channel's funding transaction output, if we've negotiated the funding transaction with
        /// our counterparty already.
        ///
@@ -619,12 +764,6 @@ pub struct ChannelDetails {
        /// The position of the funding transaction in the chain. None if the funding transaction has
        /// not yet been confirmed and the channel fully opened.
        pub short_channel_id: Option<u64>,
-       /// The node_id of our counterparty
-       pub remote_network_id: PublicKey,
-       /// The Features the channel counterparty provided upon last connection.
-       /// Useful for routing as it is the most up-to-date copy of the counterparty's features and
-       /// many routing-relevant features are present in the init context.
-       pub counterparty_features: InitFeatures,
        /// The value, in satoshis, of this channel as appears in the funding output
        pub channel_value_satoshis: u64,
        /// The value, in satoshis, that must always be held in the channel for us. This value ensures
@@ -636,15 +775,7 @@ pub struct ChannelDetails {
        /// This value will be `None` for outbound channels until the counterparty accepts the channel.
        ///
        /// [`outbound_capacity_msat`]: ChannelDetails::outbound_capacity_msat
-       pub to_self_reserve_satoshis: Option<u64>,
-       /// The value, in satoshis, that must always be held in the channel for our counterparty. This
-       /// value ensures that if our counterparty broadcasts a revoked state, we can punish them by
-       /// claiming at least this value on chain.
-       ///
-       /// This value is not included in [`inbound_capacity_msat`] as it can never be spent.
-       ///
-       /// [`inbound_capacity_msat`]: ChannelDetails::inbound_capacity_msat
-       pub to_remote_reserve_satoshis: u64,
+       pub unspendable_punishment_reserve: Option<u64>,
        /// The user_id passed in to create_channel, or 0 if the channel was inbound.
        pub user_id: u64,
        /// The available outbound capacity for sending HTLCs to the remote peer. This does not include
@@ -685,7 +816,7 @@ pub struct ChannelDetails {
        /// time to claim our non-HTLC-encumbered funds.
        ///
        /// This value will be `None` for outbound channels until the counterparty accepts the channel.
-       pub spend_csv_on_our_commitment_funds: Option<u16>,
+       pub force_close_spend_delay: Option<u16>,
        /// True if the channel was initiated (and thus funded) by us.
        pub is_outbound: bool,
        /// True if the channel is confirmed, funding_locked messages have been exchanged, and the
@@ -703,9 +834,6 @@ pub struct ChannelDetails {
        pub is_usable: bool,
        /// True if this channel is (or will be) publicly-announced.
        pub is_public: bool,
-       /// Information on the fees and requirements that the counterparty requires when forwarding
-       /// payments to us through this channel.
-       pub counterparty_forwarding_info: Option<CounterpartyForwardingInfo>,
 }
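
Taken together, this hunk moves all per-counterparty data under `ChannelDetails::counterparty` and renames the reserve and delay fields to describe what they are for. A hedged usage sketch against the new layout (`channel_manager` is assumed to be a constructed `ChannelManager`; field names are as introduced in this diff):

    for chan in channel_manager.list_channels() {
        println!("peer {} reserve {:?} sat, force-close delay {:?} blocks",
            chan.counterparty.node_id,
            chan.unspendable_punishment_reserve, // Option<u64>: ours; None until the peer accepts
            chan.force_close_spend_delay);       // Option<u16>: blocks we wait after force-close
    }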
 
 /// If a payment fails to send, it can be in one of several states. This enum is returned as the
@@ -748,12 +876,13 @@ macro_rules! handle_error {
        ($self: ident, $internal: expr, $counterparty_node_id: expr) => {
                match $internal {
                        Ok(msg) => Ok(msg),
-                       Err(MsgHandleErrInternal { err, shutdown_finish }) => {
+                       Err(MsgHandleErrInternal { err, chan_id, shutdown_finish }) => {
                                #[cfg(debug_assertions)]
                                {
                                        // In testing, ensure there are no deadlocks where the lock is already held upon
                                        // entering the macro.
                                        assert!($self.channel_state.try_lock().is_ok());
+                                       assert!($self.pending_events.try_lock().is_ok());
                                }
 
                                let mut msg_events = Vec::with_capacity(2);
@@ -765,6 +894,9 @@ macro_rules! handle_error {
                                                        msg: update
                                                });
                                        }
+                                       if let Some(channel_id) = chan_id {
+                                               $self.pending_events.lock().unwrap().push(events::Event::ChannelClosed { channel_id, reason: ClosureReason::ProcessingError { err: err.err.clone() } });
+                                       }
                                }
 
                                log_error!($self.logger, "{}", err.err);
@@ -791,6 +923,11 @@ macro_rules! handle_error {
 macro_rules! convert_chan_err {
        ($self: ident, $err: expr, $short_to_id: expr, $channel: expr, $channel_id: expr) => {
                match $err {
+                       ChannelError::Warn(msg) => {
+                               //TODO: Once warning messages are merged, we should send a `warning` message to our
+                               //peer here.
+                               (false, MsgHandleErrInternal::from_chan_no_close(ChannelError::Ignore(msg), $channel_id.clone()))
+                       },
                        ChannelError::Ignore(msg) => {
                                (false, MsgHandleErrInternal::from_chan_no_close(ChannelError::Ignore(msg), $channel_id.clone()))
                        },
@@ -800,7 +937,7 @@ macro_rules! convert_chan_err {
                                        $short_to_id.remove(&short_id);
                                }
                                let shutdown_res = $channel.force_shutdown(true);
-                               (true, MsgHandleErrInternal::from_finish_shutdown(msg, *$channel_id, shutdown_res, $self.get_channel_update(&$channel).ok()))
+                               (true, MsgHandleErrInternal::from_finish_shutdown(msg, *$channel_id, shutdown_res, $self.get_channel_update_for_broadcast(&$channel).ok()))
                        },
                        ChannelError::CloseDelayBroadcast(msg) => {
                                log_error!($self.logger, "Channel {} need to be shutdown but closing transactions not broadcast due to {}", log_bytes!($channel_id[..]), msg);
@@ -808,7 +945,7 @@ macro_rules! convert_chan_err {
                                        $short_to_id.remove(&short_id);
                                }
                                let shutdown_res = $channel.force_shutdown(false);
-                               (true, MsgHandleErrInternal::from_finish_shutdown(msg, *$channel_id, shutdown_res, $self.get_channel_update(&$channel).ok()))
+                               (true, MsgHandleErrInternal::from_finish_shutdown(msg, *$channel_id, shutdown_res, $self.get_channel_update_for_broadcast(&$channel).ok()))
                        }
                }
        }
@@ -844,6 +981,18 @@ macro_rules! try_chan_entry {
        }
 }
 
+macro_rules! remove_channel {
+       ($channel_state: expr, $entry: expr) => {
+               {
+                       let channel = $entry.remove_entry().1;
+                       if let Some(short_id) = channel.get_short_channel_id() {
+                               $channel_state.short_to_id.remove(&short_id);
+                       }
+                       channel
+               }
+       }
+}
+
 macro_rules! handle_monitor_err {
        ($self: ident, $err: expr, $channel_state: expr, $entry: expr, $action_type: path, $resend_raa: expr, $resend_commitment: expr) => {
                handle_monitor_err!($self, $err, $channel_state, $entry, $action_type, $resend_raa, $resend_commitment, Vec::new(), Vec::new())
@@ -864,7 +1013,8 @@ macro_rules! handle_monitor_err {
                                // splitting hairs we'd prefer to claim payments that were to us, but we haven't
                                // given up the preimage yet, so might as well just wait until the payment is
                                // retried, avoiding the on-chain fees.
-                               let res: Result<(), _> = Err(MsgHandleErrInternal::from_finish_shutdown("ChannelMonitor storage failure".to_owned(), *$chan_id, $chan.force_shutdown(true), $self.get_channel_update(&$chan).ok()));
+                               let res: Result<(), _> = Err(MsgHandleErrInternal::from_finish_shutdown("ChannelMonitor storage failure".to_owned(), *$chan_id,
+                                               $chan.force_shutdown(true), $self.get_channel_update_for_broadcast(&$chan).ok() ));
                                (res, true)
                        },
                        ChannelMonitorUpdateErr::TemporaryFailure => {
@@ -1088,7 +1238,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                pending_msg_events: Vec::new(),
                        }),
                        pending_inbound_payments: Mutex::new(HashMap::new()),
-                       pending_outbound_payments: Mutex::new(HashSet::new()),
+                       pending_outbound_payments: Mutex::new(HashMap::new()),
 
                        our_network_key: keys_manager.get_node_secret(),
                        our_network_pubkey: PublicKey::from_secret_key(&secp_ctx, &keys_manager.get_node_secret()),
@@ -1128,13 +1278,27 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
        ///
        /// Raises APIError::APIMisuseError when channel_value_satoshis > 2**24 or push_msat is
        /// greater than channel_value_satoshis * 1k or channel_value_satoshis is < 1000.
+       ///
+       /// Note that we do not check if you are currently connected to the given peer. If no
+       /// connection is available, the outbound `open_channel` message may fail to send, resulting in
+       /// the channel eventually being silently forgotten.
        pub fn create_channel(&self, their_network_key: PublicKey, channel_value_satoshis: u64, push_msat: u64, user_id: u64, override_config: Option<UserConfig>) -> Result<(), APIError> {
                if channel_value_satoshis < 1000 {
                        return Err(APIError::APIMisuseError { err: format!("Channel value must be at least 1000 satoshis. It was {}", channel_value_satoshis) });
                }
 
-               let config = if override_config.is_some() { override_config.as_ref().unwrap() } else { &self.default_configuration };
-               let channel = Channel::new_outbound(&self.fee_estimator, &self.keys_manager, their_network_key, channel_value_satoshis, push_msat, user_id, config)?;
+               let channel = {
+                       let per_peer_state = self.per_peer_state.read().unwrap();
+                       match per_peer_state.get(&their_network_key) {
+                               Some(peer_state) => {
+                                       let peer_state = peer_state.lock().unwrap();
+                                       let their_features = &peer_state.latest_features;
+                                       let config = if override_config.is_some() { override_config.as_ref().unwrap() } else { &self.default_configuration };
+                                       Channel::new_outbound(&self.fee_estimator, &self.keys_manager, their_network_key, their_features, channel_value_satoshis, push_msat, user_id, config)?
+                               },
+                               None => return Err(APIError::ChannelUnavailable { err: format!("Not connected to node: {}", their_network_key) }),
+                       }
+               };
                let res = channel.get_open_channel(self.genesis_hash.clone());
 
                let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
@@ -1170,30 +1334,32 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                        channel.get_holder_counterparty_selected_channel_reserve_satoshis();
                                res.push(ChannelDetails {
                                        channel_id: (*channel_id).clone(),
+                                       counterparty: ChannelCounterparty {
+                                               node_id: channel.get_counterparty_node_id(),
+                                               features: InitFeatures::empty(),
+                                               unspendable_punishment_reserve: to_remote_reserve_satoshis,
+                                               forwarding_info: channel.counterparty_forwarding_info(),
+                                       },
                                        funding_txo: channel.get_funding_txo(),
                                        short_channel_id: channel.get_short_channel_id(),
-                                       remote_network_id: channel.get_counterparty_node_id(),
-                                       counterparty_features: InitFeatures::empty(),
                                        channel_value_satoshis: channel.get_value_satoshis(),
-                                       to_self_reserve_satoshis,
-                                       to_remote_reserve_satoshis,
+                                       unspendable_punishment_reserve: to_self_reserve_satoshis,
                                        inbound_capacity_msat,
                                        outbound_capacity_msat,
                                        user_id: channel.get_user_id(),
                                        confirmations_required: channel.minimum_depth(),
-                                       spend_csv_on_our_commitment_funds: channel.get_counterparty_selected_contest_delay(),
+                                       force_close_spend_delay: channel.get_counterparty_selected_contest_delay(),
                                        is_outbound: channel.is_outbound(),
                                        is_funding_locked: channel.is_usable(),
                                        is_usable: channel.is_live(),
                                        is_public: channel.should_announce(),
-                                       counterparty_forwarding_info: channel.counterparty_forwarding_info(),
                                });
                        }
                }
                let per_peer_state = self.per_peer_state.read().unwrap();
                for chan in res.iter_mut() {
-                       if let Some(peer_state) = per_peer_state.get(&chan.remote_network_id) {
-                               chan.counterparty_features = peer_state.lock().unwrap().latest_features.clone();
+                       if let Some(peer_state) = per_peer_state.get(&chan.counterparty.node_id) {
+                               chan.counterparty.features = peer_state.lock().unwrap().latest_features.clone();
                        }
                }
                res
@@ -1218,53 +1384,117 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                self.list_channels_with_filter(|&(_, ref channel)| channel.is_live())
        }
 
-       /// Begins the process of closing a channel. After this call (plus some timeout), no new HTLCs
-       /// will be accepted on the given channel, and after additional timeout/the closing of all
-       /// pending HTLCs, the channel will be closed on chain.
-       ///
-       /// May generate a SendShutdown message event on success, which should be relayed.
-       pub fn close_channel(&self, channel_id: &[u8; 32]) -> Result<(), APIError> {
+       fn close_channel_internal(&self, channel_id: &[u8; 32], target_feerate_sats_per_1000_weight: Option<u32>) -> Result<(), APIError> {
                let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
 
-               let (mut failed_htlcs, chan_option) = {
+               let counterparty_node_id;
+               let mut failed_htlcs: Vec<(HTLCSource, PaymentHash)>;
+               let result: Result<(), _> = loop {
                        let mut channel_state_lock = self.channel_state.lock().unwrap();
                        let channel_state = &mut *channel_state_lock;
                        match channel_state.by_id.entry(channel_id.clone()) {
                                hash_map::Entry::Occupied(mut chan_entry) => {
-                                       let (shutdown_msg, failed_htlcs) = chan_entry.get_mut().get_shutdown()?;
+                                       counterparty_node_id = chan_entry.get().get_counterparty_node_id();
+                                       let per_peer_state = self.per_peer_state.read().unwrap();
+                                       let (shutdown_msg, monitor_update, htlcs) = match per_peer_state.get(&counterparty_node_id) {
+                                               Some(peer_state) => {
+                                                       let peer_state = peer_state.lock().unwrap();
+                                                       let their_features = &peer_state.latest_features;
+                                                       chan_entry.get_mut().get_shutdown(&self.keys_manager, their_features, target_feerate_sats_per_1000_weight)?
+                                               },
+                                               None => return Err(APIError::ChannelUnavailable { err: format!("Not connected to node: {}", counterparty_node_id) }),
+                                       };
+                                       failed_htlcs = htlcs;
+
+                                       // Update the monitor with the shutdown script if necessary.
+                                       if let Some(monitor_update) = monitor_update {
+                                               if let Err(e) = self.chain_monitor.update_channel(chan_entry.get().get_funding_txo().unwrap(), monitor_update) {
+                                                       let (result, is_permanent) =
+                                                               handle_monitor_err!(self, e, channel_state.short_to_id, chan_entry.get_mut(), RAACommitmentOrder::CommitmentFirst, false, false, Vec::new(), Vec::new(), chan_entry.key());
+                                                       if is_permanent {
+                                                               remove_channel!(channel_state, chan_entry);
+                                                               break result;
+                                                       }
+                                               }
+                                       }
+
                                        channel_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown {
-                                               node_id: chan_entry.get().get_counterparty_node_id(),
+                                               node_id: counterparty_node_id,
                                                msg: shutdown_msg
                                        });
+
                                        if chan_entry.get().is_shutdown() {
-                                               if let Some(short_id) = chan_entry.get().get_short_channel_id() {
-                                                       channel_state.short_to_id.remove(&short_id);
+                                               let channel = remove_channel!(channel_state, chan_entry);
+                                               if let Ok(channel_update) = self.get_channel_update_for_broadcast(&channel) {
+                                                       channel_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
+                                                               msg: channel_update
+                                                       });
+                                               }
+                                               if let Ok(mut pending_events_lock) = self.pending_events.lock() {
+                                                       pending_events_lock.push(events::Event::ChannelClosed {
+                                                               channel_id: *channel_id,
+                                                               reason: ClosureReason::HolderForceClosed
+                                                       });
                                                }
-                                               (failed_htlcs, Some(chan_entry.remove_entry().1))
-                                       } else { (failed_htlcs, None) }
+                                       }
+                                       break Ok(());
                                },
                                hash_map::Entry::Vacant(_) => return Err(APIError::ChannelUnavailable{err: "No such channel".to_owned()})
                        }
                };
+
                for htlc_source in failed_htlcs.drain(..) {
                        self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_source.0, &htlc_source.1, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() });
                }
-               let chan_update = if let Some(chan) = chan_option {
-                       if let Ok(update) = self.get_channel_update(&chan) {
-                               Some(update)
-                       } else { None }
-               } else { None };
-
-               if let Some(update) = chan_update {
-                       let mut channel_state = self.channel_state.lock().unwrap();
-                       channel_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
-                               msg: update
-                       });
-               }
 
+               let _ = handle_error!(self, result, counterparty_node_id);
                Ok(())
        }
 
+       /// Begins the process of closing a channel. After this call (plus some timeout), no new HTLCs
+       /// will be accepted on the given channel, and after additional timeout/the closing of all
+       /// pending HTLCs, the channel will be closed on chain.
+       ///
+       ///  * If we are the channel initiator, we will pay a closing feerate between our
+       ///    [`Background`] estimate and our [`Normal`] estimate plus
+       ///    [`ChannelConfig::force_close_avoidance_max_fee_satoshis`].
+       ///  * If our counterparty is the channel initiator, we will require a channel closing
+       ///    transaction feerate of at least our [`Background`] feerate or the feerate which
+       ///    would appear on a force-closure transaction, whichever is lower. We will allow our
+       ///    counterparty to pay as much fee as they'd like, however.
+       ///
+       /// May generate a SendShutdown message event on success, which should be relayed.
+       ///
+       /// [`ChannelConfig::force_close_avoidance_max_fee_satoshis`]: crate::util::config::ChannelConfig::force_close_avoidance_max_fee_satoshis
+       /// [`Background`]: crate::chain::chaininterface::ConfirmationTarget::Background
+       /// [`Normal`]: crate::chain::chaininterface::ConfirmationTarget::Normal
+       pub fn close_channel(&self, channel_id: &[u8; 32]) -> Result<(), APIError> {
+               self.close_channel_internal(channel_id, None)
+       }
+
+       /// Begins the process of closing a channel. After this call (plus some timeout), no new HTLCs
+       /// will be accepted on the given channel, and after additional timeout/the closing of all
+       /// pending HTLCs, the channel will be closed on chain.
+       ///
+       /// `target_feerate_sat_per_1000_weight` has different meanings depending on if we initiated
+       /// the channel being closed or not:
+       ///  * If we are the channel initiator, we will pay at least this feerate on the closing
+       ///    transaction. The upper-bound is set by
+       ///    [`ChannelConfig::force_close_avoidance_max_fee_satoshis`] plus our [`Normal`] fee
+       ///    estimate (or `target_feerate_sat_per_1000_weight`, if it is greater).
+       ///  * If our counterparty is the channel initiator, we will refuse to accept a channel closure
+       ///    transaction feerate below `target_feerate_sat_per_1000_weight` (or the feerate which
+       ///    will appear on a force-closure transaction, whichever is lower).
+       ///
+       /// May generate a SendShutdown message event on success, which should be relayed.
+       ///
+       /// [`ChannelConfig::force_close_avoidance_max_fee_satoshis`]: crate::util::config::ChannelConfig::force_close_avoidance_max_fee_satoshis
+       /// [`Background`]: crate::chain::chaininterface::ConfirmationTarget::Background
+       /// [`Normal`]: crate::chain::chaininterface::ConfirmationTarget::Normal
+       pub fn close_channel_with_target_feerate(&self, channel_id: &[u8; 32], target_feerate_sats_per_1000_weight: u32) -> Result<(), APIError> {
+               self.close_channel_internal(channel_id, Some(target_feerate_sats_per_1000_weight))
+       }
+
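Both public entry points delegate to `close_channel_internal`; the only difference is whether a feerate floor is passed through. A hedged usage sketch (`channel_manager` and a 32-byte `channel_id`, e.g. from `list_channels()`, are assumed; `?` implies a caller returning `Result`):

    channel_manager.close_channel(&channel_id)?; // default feerate bounds
    // ...or require the closing tx to pay at least this feerate (sat per 1000 weight):
    channel_manager.close_channel_with_target_feerate(&channel_id, 253)?;
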
        #[inline]
        fn finish_force_close_channel(&self, shutdown_res: ShutdownResult) {
                let (monitor_update_option, mut failed_htlcs) = shutdown_res;
@@ -1281,7 +1511,9 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                }
        }
 
-       fn force_close_channel_with_peer(&self, channel_id: &[u8; 32], peer_node_id: Option<&PublicKey>) -> Result<PublicKey, APIError> {
+       /// `peer_node_id` should be set when we receive a message from a peer, but not set when the
+       /// user closes, which will be re-exposed as the `ChannelClosed` reason.
+       fn force_close_channel_with_peer(&self, channel_id: &[u8; 32], peer_node_id: Option<&PublicKey>, peer_msg: Option<&String>) -> Result<PublicKey, APIError> {
                let mut chan = {
                        let mut channel_state_lock = self.channel_state.lock().unwrap();
                        let channel_state = &mut *channel_state_lock;
@@ -1294,6 +1526,14 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                if let Some(short_id) = chan.get().get_short_channel_id() {
                                        channel_state.short_to_id.remove(&short_id);
                                }
+                               let mut pending_events_lock = self.pending_events.lock().unwrap();
+                               if peer_node_id.is_some() {
+                                       if let Some(peer_msg) = peer_msg {
+                                               pending_events_lock.push(events::Event::ChannelClosed { channel_id: *channel_id, reason: ClosureReason::CounterpartyForceClosed { peer_msg: peer_msg.to_string() } });
+                                       }
+                               } else {
+                                       pending_events_lock.push(events::Event::ChannelClosed { channel_id: *channel_id, reason: ClosureReason::HolderForceClosed });
+                               }
                                chan.remove_entry().1
                        } else {
                                return Err(APIError::ChannelUnavailable{err: "No such channel".to_owned()});
@@ -1301,7 +1541,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                };
                log_error!(self.logger, "Force-closing channel {}", log_bytes!(channel_id[..]));
                self.finish_force_close_channel(chan.force_shutdown(true));
-               if let Ok(update) = self.get_channel_update(&chan) {
+               if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
                        let mut channel_state = self.channel_state.lock().unwrap();
                        channel_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
                                msg: update
@@ -1315,7 +1555,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
        /// the chain and rejecting new HTLCs on the given channel. Fails if channel_id is unknown to the manager.
        pub fn force_close_channel(&self, channel_id: &[u8; 32]) -> Result<(), APIError> {
                let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
-               match self.force_close_channel_with_peer(channel_id, None) {
+               match self.force_close_channel_with_peer(channel_id, None, None) {
                        Ok(counterparty_node_id) => {
                                self.channel_state.lock().unwrap().pending_msg_events.push(
                                        events::MessageSendEvent::HandleError {
@@ -1401,8 +1641,8 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
 
                let mut chacha = ChaCha20::new(&rho, &[0u8; 8]);
                let mut chacha_stream = ChaChaReader { chacha: &mut chacha, read: Cursor::new(&msg.onion_routing_packet.hop_data[..]) };
-               let (next_hop_data, next_hop_hmac) = {
-                       match msgs::OnionHopData::read(&mut chacha_stream) {
+               let (next_hop_data, next_hop_hmac): (msgs::OnionHopData, _) = {
+                       match <msgs::OnionHopData as Readable>::read(&mut chacha_stream) {
                                Err(err) => {
                                        let error_code = match err {
                                                msgs::DecodeError::UnknownVersion => 0x4000 | 1, // unknown realm byte
@@ -1424,121 +1664,141 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                };
 
                let pending_forward_info = if next_hop_hmac == [0; 32] {
-                               #[cfg(test)]
-                               {
-                                       // In tests, make sure that the initial onion pcket data is, at least, non-0.
-                                       // We could do some fancy randomness test here, but, ehh, whatever.
-                                       // This checks for the issue where you can calculate the path length given the
-                                       // onion data as all the path entries that the originator sent will be here
-                                       // as-is (and were originally 0s).
-                                       // Of course reverse path calculation is still pretty easy given naive routing
-                                       // algorithms, but this fixes the most-obvious case.
-                                       let mut next_bytes = [0; 32];
-                                       chacha_stream.read_exact(&mut next_bytes).unwrap();
-                                       assert_ne!(next_bytes[..], [0; 32][..]);
-                                       chacha_stream.read_exact(&mut next_bytes).unwrap();
-                                       assert_ne!(next_bytes[..], [0; 32][..]);
-                               }
-
-                               // OUR PAYMENT!
-                               // final_expiry_too_soon
-                               // We have to have some headroom to broadcast on chain if we have the preimage, so make sure we have at least
-                               // HTLC_FAIL_BACK_BUFFER blocks to go.
-                               // Also, ensure that, in the case of an unknown payment hash, our payment logic has enough time to fail the HTLC backward
-                               // before our onchain logic triggers a channel closure (see HTLC_FAIL_BACK_BUFFER rational).
-                               if (msg.cltv_expiry as u64) <= self.best_block.read().unwrap().height() as u64 + HTLC_FAIL_BACK_BUFFER as u64 + 1 {
-                                       return_err!("The final CLTV expiry is too soon to handle", 17, &[0;0]);
-                               }
-                               // final_incorrect_htlc_amount
-                               if next_hop_data.amt_to_forward > msg.amount_msat {
-                                       return_err!("Upstream node sent less than we were supposed to receive in payment", 19, &byte_utils::be64_to_array(msg.amount_msat));
-                               }
-                               // final_incorrect_cltv_expiry
-                               if next_hop_data.outgoing_cltv_value != msg.cltv_expiry {
-                                       return_err!("Upstream node set CLTV to the wrong value", 18, &byte_utils::be32_to_array(msg.cltv_expiry));
-                               }
-
-                               let payment_data = match next_hop_data.format {
-                                       msgs::OnionHopDataFormat::Legacy { .. } => None,
-                                       msgs::OnionHopDataFormat::NonFinalNode { .. } => return_err!("Got non final data with an HMAC of 0", 0x4000 | 22, &[0;0]),
-                                       msgs::OnionHopDataFormat::FinalNode { payment_data } => payment_data,
-                               };
+                       #[cfg(test)]
+                       {
+                               // In tests, make sure that the initial onion packet data is, at least, non-0.
+                               // We could do some fancy randomness test here, but, ehh, whatever.
+                               // This checks for the issue where you can calculate the path length given the
+                               // onion data as all the path entries that the originator sent will be here
+                               // as-is (and were originally 0s).
+                               // Of course reverse path calculation is still pretty easy given naive routing
+                               // algorithms, but this fixes the most-obvious case.
+                               let mut next_bytes = [0; 32];
+                               chacha_stream.read_exact(&mut next_bytes).unwrap();
+                               assert_ne!(next_bytes[..], [0; 32][..]);
+                               chacha_stream.read_exact(&mut next_bytes).unwrap();
+                               assert_ne!(next_bytes[..], [0; 32][..]);
+                       }
 
-                               if payment_data.is_none() {
-                                       return_err!("We require payment_secrets", 0x4000|0x2000|3, &[0;0]);
-                               }
+                       // OUR PAYMENT!
+                       // final_expiry_too_soon
+                       // We have to have some headroom to broadcast on chain if we have the preimage, so make sure
+                       // we have at least HTLC_FAIL_BACK_BUFFER blocks to go.
+                       // Also, ensure that, in the case of an unknown preimage for the received payment hash, our
+                       // payment logic has enough time to fail the HTLC backward before our onchain logic triggers a
+                       // channel closure (see HTLC_FAIL_BACK_BUFFER rationale).
+                       if (msg.cltv_expiry as u64) <= self.best_block.read().unwrap().height() as u64 + HTLC_FAIL_BACK_BUFFER as u64 + 1 {
+                               return_err!("The final CLTV expiry is too soon to handle", 17, &[0;0]);
+                       }
+                       // final_incorrect_htlc_amount
+                       if next_hop_data.amt_to_forward > msg.amount_msat {
+                               return_err!("Upstream node sent less than we were supposed to receive in payment", 19, &byte_utils::be64_to_array(msg.amount_msat));
+                       }
+                       // final_incorrect_cltv_expiry
+                       if next_hop_data.outgoing_cltv_value != msg.cltv_expiry {
+                               return_err!("Upstream node set CLTV to the wrong value", 18, &byte_utils::be32_to_array(msg.cltv_expiry));
+                       }
 
-                               // Note that we could obviously respond immediately with an update_fulfill_htlc
-                               // message, however that would leak that we are the recipient of this payment, so
-                               // instead we stay symmetric with the forwarding case, only responding (after a
-                               // delay) once they've send us a commitment_signed!
+                       let routing = match next_hop_data.format {
+                               msgs::OnionHopDataFormat::Legacy { .. } => return_err!("We require payment_secrets", 0x4000|0x2000|3, &[0;0]),
+                               msgs::OnionHopDataFormat::NonFinalNode { .. } => return_err!("Got non final data with an HMAC of 0", 0x4000 | 22, &[0;0]),
+                               msgs::OnionHopDataFormat::FinalNode { payment_data, keysend_preimage } => {
+                                       if payment_data.is_some() && keysend_preimage.is_some() {
+                                               return_err!("We don't support MPP keysend payments", 0x4000|22, &[0;0]);
+                                       } else if let Some(data) = payment_data {
+                                               PendingHTLCRouting::Receive {
+                                                       payment_data: data,
+                                                       incoming_cltv_expiry: msg.cltv_expiry,
+                                               }
+                                       } else if let Some(payment_preimage) = keysend_preimage {
+                                               // We need to check that the sender knows the keysend preimage before processing this
+                                               // payment further. Otherwise, an intermediary routing hop forwarding non-keysend-HTLC X
+                                               // could discover the final destination of X, by probing the adjacent nodes on the route
+                                               // with a keysend payment of identical payment hash to X and observing the processing
+                                               // time discrepancies due to a hash collision with X.
+                                               let hashed_preimage = PaymentHash(Sha256::hash(&payment_preimage.0).into_inner());
+                                               if hashed_preimage != msg.payment_hash {
+                                                       return_err!("Payment preimage didn't match payment hash", 0x4000|22, &[0;0]);
+                                               }
 
-                               PendingHTLCStatus::Forward(PendingHTLCInfo {
-                                       routing: PendingHTLCRouting::Receive {
-                                               payment_data: payment_data.unwrap(),
-                                               incoming_cltv_expiry: msg.cltv_expiry,
-                                       },
-                                       payment_hash: msg.payment_hash.clone(),
-                                       incoming_shared_secret: shared_secret,
-                                       amt_to_forward: next_hop_data.amt_to_forward,
-                                       outgoing_cltv_value: next_hop_data.outgoing_cltv_value,
-                               })
-                       } else {
-                               let mut new_packet_data = [0; 20*65];
-                               let read_pos = chacha_stream.read(&mut new_packet_data).unwrap();
-                               #[cfg(debug_assertions)]
-                               {
-                                       // Check two things:
-                                       // a) that the behavior of our stream here will return Ok(0) even if the TLV
-                                       //    read above emptied out our buffer and the unwrap() wont needlessly panic
-                                       // b) that we didn't somehow magically end up with extra data.
-                                       let mut t = [0; 1];
-                                       debug_assert!(chacha_stream.read(&mut t).unwrap() == 0);
-                               }
-                               // Once we've emptied the set of bytes our peer gave us, encrypt 0 bytes until we
-                               // fill the onion hop data we'll forward to our next-hop peer.
-                               chacha_stream.chacha.process_in_place(&mut new_packet_data[read_pos..]);
+                                               PendingHTLCRouting::ReceiveKeysend {
+                                                       payment_preimage,
+                                                       incoming_cltv_expiry: msg.cltv_expiry,
+                                               }
+                                       } else {
+                                               return_err!("We require payment_secrets", 0x4000|0x2000|3, &[0;0]);
+                                       }
+                               },
+                       };
 
-                               let mut new_pubkey = msg.onion_routing_packet.public_key.unwrap();
+                       // Note that we could obviously respond immediately with an update_fulfill_htlc
+                       // message, however that would leak that we are the recipient of this payment, so
+                       // instead we stay symmetric with the forwarding case, only responding (after a
+                       // delay) once they've sent us a commitment_signed!
+
+                       PendingHTLCStatus::Forward(PendingHTLCInfo {
+                               routing,
+                               payment_hash: msg.payment_hash.clone(),
+                               incoming_shared_secret: shared_secret,
+                               amt_to_forward: next_hop_data.amt_to_forward,
+                               outgoing_cltv_value: next_hop_data.outgoing_cltv_value,
+                       })
+               } else {
+                       let mut new_packet_data = [0; 20*65];
+                       let read_pos = chacha_stream.read(&mut new_packet_data).unwrap();
+                       #[cfg(debug_assertions)]
+                       {
+                               // Check two things:
+                               // a) that our stream here will return Ok(0) even if the TLV
+                               //    read above emptied out our buffer and the unwrap() won't needlessly panic
+                               // b) that we didn't somehow magically end up with extra data.
+                               let mut t = [0; 1];
+                               debug_assert!(chacha_stream.read(&mut t).unwrap() == 0);
+                       }
+                       // Once we've emptied the set of bytes our peer gave us, encrypt 0 bytes until we
+                       // fill the onion hop data we'll forward to our next-hop peer.
+                       chacha_stream.chacha.process_in_place(&mut new_packet_data[read_pos..]);
 
-                               let blinding_factor = {
-                                       let mut sha = Sha256::engine();
-                                       sha.input(&new_pubkey.serialize()[..]);
-                                       sha.input(&shared_secret);
-                                       Sha256::from_engine(sha).into_inner()
-                               };
+                       let mut new_pubkey = msg.onion_routing_packet.public_key.unwrap();
 
-                               let public_key = if let Err(e) = new_pubkey.mul_assign(&self.secp_ctx, &blinding_factor[..]) {
-                                       Err(e)
-                               } else { Ok(new_pubkey) };
+                       let blinding_factor = {
+                               let mut sha = Sha256::engine();
+                               sha.input(&new_pubkey.serialize()[..]);
+                               sha.input(&shared_secret);
+                               Sha256::from_engine(sha).into_inner()
+                       };
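+                       // (A restatement of the Sphinx construction in BOLT 4: each hop re-blinds the
+                       // ephemeral key as b = SHA256(P || shared_secret), with P' = b*P computed by the
+                       // mul_assign below.)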
 
-                               let outgoing_packet = msgs::OnionPacket {
-                                       version: 0,
-                                       public_key,
-                                       hop_data: new_packet_data,
-                                       hmac: next_hop_hmac.clone(),
-                               };
+                       let public_key = if let Err(e) = new_pubkey.mul_assign(&self.secp_ctx, &blinding_factor[..]) {
+                               Err(e)
+                       } else { Ok(new_pubkey) };
 
-                               let short_channel_id = match next_hop_data.format {
-                                       msgs::OnionHopDataFormat::Legacy { short_channel_id } => short_channel_id,
-                                       msgs::OnionHopDataFormat::NonFinalNode { short_channel_id } => short_channel_id,
-                                       msgs::OnionHopDataFormat::FinalNode { .. } => {
-                                               return_err!("Final Node OnionHopData provided for us as an intermediary node", 0x4000 | 22, &[0;0]);
-                                       },
-                               };
+                       let outgoing_packet = msgs::OnionPacket {
+                               version: 0,
+                               public_key,
+                               hop_data: new_packet_data,
+                               hmac: next_hop_hmac.clone(),
+                       };
 
-                               PendingHTLCStatus::Forward(PendingHTLCInfo {
-                                       routing: PendingHTLCRouting::Forward {
-                                               onion_packet: outgoing_packet,
-                                               short_channel_id,
-                                       },
-                                       payment_hash: msg.payment_hash.clone(),
-                                       incoming_shared_secret: shared_secret,
-                                       amt_to_forward: next_hop_data.amt_to_forward,
-                                       outgoing_cltv_value: next_hop_data.outgoing_cltv_value,
-                               })
+                       let short_channel_id = match next_hop_data.format {
+                               msgs::OnionHopDataFormat::Legacy { short_channel_id } => short_channel_id,
+                               msgs::OnionHopDataFormat::NonFinalNode { short_channel_id } => short_channel_id,
+                               msgs::OnionHopDataFormat::FinalNode { .. } => {
+                                       return_err!("Final Node OnionHopData provided for us as an intermediary node", 0x4000 | 22, &[0;0]);
+                               },
                        };
 
+                       PendingHTLCStatus::Forward(PendingHTLCInfo {
+                               routing: PendingHTLCRouting::Forward {
+                                       onion_packet: outgoing_packet,
+                                       short_channel_id,
+                               },
+                               payment_hash: msg.payment_hash.clone(),
+                               incoming_shared_secret: shared_secret,
+                               amt_to_forward: next_hop_data.amt_to_forward,
+                               outgoing_cltv_value: next_hop_data.outgoing_cltv_value,
+                       })
+               };
+
                channel_state = Some(self.channel_state.lock().unwrap());
                if let &PendingHTLCStatus::Forward(PendingHTLCInfo { ref routing, ref amt_to_forward, ref outgoing_cltv_value, .. }) = &pending_forward_info {
                        // If short_channel_id is 0 here, we'll reject the HTLC as there cannot be a channel
@@ -1546,46 +1806,56 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                        // short_channel_id is non-0 in any ::Forward.
                        if let &PendingHTLCRouting::Forward { ref short_channel_id, .. } = routing {
                                let id_option = channel_state.as_ref().unwrap().short_to_id.get(&short_channel_id).cloned();
-                               let forwarding_id = match id_option {
-                                       None => { // unknown_next_peer
-                                               return_err!("Don't have available channel for forwarding as requested.", 0x4000 | 10, &[0;0]);
-                                       },
-                                       Some(id) => id.clone(),
-                               };
                                if let Some((err, code, chan_update)) = loop {
+                                       let forwarding_id = match id_option {
+                                               None => { // unknown_next_peer
+                                                       break Some(("Don't have available channel for forwarding as requested.", 0x4000 | 10, None));
+                                               },
+                                               Some(id) => id.clone(),
+                                       };
+
                                        let chan = channel_state.as_mut().unwrap().by_id.get_mut(&forwarding_id).unwrap();
 
+                                       if !chan.should_announce() && !self.default_configuration.accept_forwards_to_priv_channels {
+                                               // Note that the behavior here should be identical to the above block - we
+                                               // should NOT reveal the existence or non-existence of a private channel if
+                                               // we don't allow forwards outbound over them.
+                                               break Some(("Don't have available channel for forwarding as requested.", 0x4000 | 10, None));
+                                       }
+
                                        // Note that we could technically not return an error yet here and just hope
                                        // that the connection is reestablished or monitor updated by the time we get
                                        // around to doing the actual forward, but better to fail early if we can and
                                        // hopefully an attacker trying to path-trace payments cannot make this occur
                                        // on a small/per-node/per-channel scale.
                                        if !chan.is_live() { // channel_disabled
-                                               break Some(("Forwarding channel is not in a ready state.", 0x1000 | 20, Some(self.get_channel_update(chan).unwrap())));
+                                               break Some(("Forwarding channel is not in a ready state.", 0x1000 | 20, Some(self.get_channel_update_for_unicast(chan).unwrap())));
                                        }
                                        if *amt_to_forward < chan.get_counterparty_htlc_minimum_msat() { // amount_below_minimum
-                                               break Some(("HTLC amount was below the htlc_minimum_msat", 0x1000 | 11, Some(self.get_channel_update(chan).unwrap())));
+                                               break Some(("HTLC amount was below the htlc_minimum_msat", 0x1000 | 11, Some(self.get_channel_update_for_unicast(chan).unwrap())));
                                        }
-                                       let fee = amt_to_forward.checked_mul(chan.get_fee_proportional_millionths() as u64).and_then(|prop_fee| { (prop_fee / 1000000).checked_add(chan.get_holder_fee_base_msat(&self.fee_estimator) as u64) });
+                                       let fee = amt_to_forward.checked_mul(chan.get_fee_proportional_millionths() as u64)
+                                               .and_then(|prop_fee| { (prop_fee / 1000000)
+                                               .checked_add(chan.get_outbound_forwarding_fee_base_msat() as u64) });
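+                                       // e.g. forwarding 1_000_000 msat at 100 ppm with a 1_000 msat base fee
+                                       // requires 1_000_000 * 100 / 1_000_000 + 1_000 = 1_100 msat in fees.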
                                        if fee.is_none() || msg.amount_msat < fee.unwrap() || (msg.amount_msat - fee.unwrap()) < *amt_to_forward { // fee_insufficient
-                                               break Some(("Prior hop has deviated from specified fees parameters or origin node has obsolete ones", 0x1000 | 12, Some(self.get_channel_update(chan).unwrap())));
+                                               break Some(("Prior hop has deviated from specified fees parameters or origin node has obsolete ones", 0x1000 | 12, Some(self.get_channel_update_for_unicast(chan).unwrap())));
                                        }
                                        if (msg.cltv_expiry as u64) < (*outgoing_cltv_value) as u64 + chan.get_cltv_expiry_delta() as u64 { // incorrect_cltv_expiry
-                                               break Some(("Forwarding node has tampered with the intended HTLC values or origin node has an obsolete cltv_expiry_delta", 0x1000 | 13, Some(self.get_channel_update(chan).unwrap())));
+                                               break Some(("Forwarding node has tampered with the intended HTLC values or origin node has an obsolete cltv_expiry_delta", 0x1000 | 13, Some(self.get_channel_update_for_unicast(chan).unwrap())));
                                        }
                                        let cur_height = self.best_block.read().unwrap().height() + 1;
                        // Theoretically, channel counterparty shouldn't send us a HTLC expiring now, but we want to be robust wrt counterparty
                        // packet sanitization (see HTLC_FAIL_BACK_BUFFER rationale)
                                        if msg.cltv_expiry <= cur_height + HTLC_FAIL_BACK_BUFFER as u32 { // expiry_too_soon
-                                               break Some(("CLTV expiry is too close", 0x1000 | 14, Some(self.get_channel_update(chan).unwrap())));
+                                               break Some(("CLTV expiry is too close", 0x1000 | 14, Some(self.get_channel_update_for_unicast(chan).unwrap())));
                                        }
                                        if msg.cltv_expiry > cur_height + CLTV_FAR_FAR_AWAY as u32 { // expiry_too_far
                                                break Some(("CLTV expiry is too far in the future", 21, None));
                                        }
-                                       // In theory, we would be safe against unitentional channel-closure, if we only required a margin of LATENCY_GRACE_PERIOD_BLOCKS.
-                                       // But, to be safe against policy reception, we use a longuer delay.
+                                       // In theory, we would be safe against unintentional channel-closure, if we only required a margin of LATENCY_GRACE_PERIOD_BLOCKS.
+                                       // But, to be safe against policy reception, we use a longer delay.
                                        if (*outgoing_cltv_value) as u64 <= (cur_height + HTLC_FAIL_BACK_BUFFER) as u64 {
-                                               break Some(("Outgoing CLTV value is too soon", 0x1000 | 14, Some(self.get_channel_update(chan).unwrap())));
+                                               break Some(("Outgoing CLTV value is too soon", 0x1000 | 14, Some(self.get_channel_update_for_unicast(chan).unwrap())));
                                        }
 
                                        break None;
@@ -1613,9 +1883,29 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                (pending_forward_info, channel_state.unwrap())
        }
 
-       /// only fails if the channel does not yet have an assigned short_id
+       /// Gets the current channel_update for the given channel. This first checks if the channel is
+       /// public, and thus should be called whenever the result is going to be passed out in a
+       /// [`MessageSendEvent::BroadcastChannelUpdate`] event.
+       ///
        /// May be called with channel_state already locked!
-       fn get_channel_update(&self, chan: &Channel<Signer>) -> Result<msgs::ChannelUpdate, LightningError> {
+       fn get_channel_update_for_broadcast(&self, chan: &Channel<Signer>) -> Result<msgs::ChannelUpdate, LightningError> {
+               if !chan.should_announce() {
+                       return Err(LightningError {
+                               err: "Cannot broadcast a channel_update for a private channel".to_owned(),
+                               action: msgs::ErrorAction::IgnoreError
+                       });
+               }
+               log_trace!(self.logger, "Attempting to generate broadcast channel update for channel {}", log_bytes!(chan.channel_id()));
+               self.get_channel_update_for_unicast(chan)
+       }
+
+       /// Gets the current channel_update for the given channel. This does not check if the channel
+       /// is public (only returning an Err if the channel does not yet have an assigned short_id),
+       /// and thus MUST NOT be called unless the recipient of the resulting message has already
+       /// provided evidence that they know about the existence of the channel.
+       /// May be called with channel_state already locked!
+       fn get_channel_update_for_unicast(&self, chan: &Channel<Signer>) -> Result<msgs::ChannelUpdate, LightningError> {
+               log_trace!(self.logger, "Attempting to generate channel update for channel {}", log_bytes!(chan.channel_id()));
                let short_channel_id = match chan.get_short_channel_id() {
                        None => return Err(LightningError{err: "Channel not yet established".to_owned(), action: msgs::ErrorAction::IgnoreError}),
                        Some(id) => id,
@@ -1631,7 +1921,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                        cltv_expiry_delta: chan.get_cltv_expiry_delta(),
                        htlc_minimum_msat: chan.get_counterparty_htlc_minimum_msat(),
                        htlc_maximum_msat: OptionalField::Present(chan.get_announced_htlc_max_msat()),
-                       fee_base_msat: chan.get_holder_fee_base_msat(&self.fee_estimator),
+                       fee_base_msat: chan.get_outbound_forwarding_fee_base_msat(),
                        fee_proportional_millionths: chan.get_fee_proportional_millionths(),
                        excess_data: Vec::new(),
                };
@@ -1646,7 +1936,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
        }
 
        // Only public for testing, this should otherwise never be called directly
-       pub(crate) fn send_payment_along_path(&self, path: &Vec<RouteHop>, payment_hash: &PaymentHash, payment_secret: &Option<PaymentSecret>, total_value: u64, cur_height: u32) -> Result<(), APIError> {
+       pub(crate) fn send_payment_along_path(&self, path: &Vec<RouteHop>, payment_hash: &PaymentHash, payment_secret: &Option<PaymentSecret>, total_value: u64, cur_height: u32, payment_id: PaymentId, keysend_preimage: &Option<PaymentPreimage>) -> Result<(), APIError> {
                log_trace!(self.logger, "Attempting to send payment for path with next hop {}", path.first().unwrap().short_channel_id);
                let prng_seed = self.keys_manager.get_secure_random_bytes();
                let session_priv_bytes = self.keys_manager.get_secure_random_bytes();
@@ -1654,14 +1944,13 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
 
                let onion_keys = onion_utils::construct_onion_keys(&self.secp_ctx, &path, &session_priv)
                        .map_err(|_| APIError::RouteError{err: "Pubkey along hop was maliciously selected"})?;
-               let (onion_payloads, htlc_msat, htlc_cltv) = onion_utils::build_onion_payloads(path, total_value, payment_secret, cur_height)?;
+               let (onion_payloads, htlc_msat, htlc_cltv) = onion_utils::build_onion_payloads(path, total_value, payment_secret, cur_height, keysend_preimage)?;
                if onion_utils::route_size_insane(&onion_payloads) {
                        return Err(APIError::RouteError{err: "Route size too large considering onion data"});
                }
                let onion_packet = onion_utils::construct_onion_packet(onion_payloads, onion_keys, prng_seed, payment_hash);
 
                let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
-               assert!(self.pending_outbound_payments.lock().unwrap().insert(session_priv_bytes));
 
                let err: Result<(), _> = loop {
                        let mut channel_lock = self.channel_state.lock().unwrap();
@@ -1679,11 +1968,27 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                        if !chan.get().is_live() {
                                                return Err(APIError::ChannelUnavailable{err: "Peer for first hop currently disconnected/pending monitor update!".to_owned()});
                                        }
-                                       break_chan_entry!(self, chan.get_mut().send_htlc_and_commit(htlc_msat, payment_hash.clone(), htlc_cltv, HTLCSource::OutboundRoute {
-                                               path: path.clone(),
-                                               session_priv: session_priv.clone(),
-                                               first_hop_htlc_msat: htlc_msat,
-                                       }, onion_packet, &self.logger), channel_state, chan)
+                                       let send_res = break_chan_entry!(self, chan.get_mut().send_htlc_and_commit(
+                                               htlc_msat, payment_hash.clone(), htlc_cltv, HTLCSource::OutboundRoute {
+                                                       path: path.clone(),
+                                                       session_priv: session_priv.clone(),
+                                                       first_hop_htlc_msat: htlc_msat,
+                                                       payment_id,
+                                               }, onion_packet, &self.logger),
+                                       channel_state, chan);
+
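+                                       // Each path of a multi-path payment lands here with the same payment_id:
+                                       // the first creates the Retryable entry, later paths only record their
+                                       // session_priv and amount against it.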
+                                       let mut pending_outbounds = self.pending_outbound_payments.lock().unwrap();
+                                       let payment = pending_outbounds.entry(payment_id).or_insert_with(|| PendingOutboundPayment::Retryable {
+                                               session_privs: HashSet::new(),
+                                               pending_amt_msat: 0,
+                                               payment_hash: *payment_hash,
+                                               payment_secret: *payment_secret,
+                                               starting_block_height: self.best_block.read().unwrap().height(),
+                                               total_msat: total_value,
+                                       });
+                                       assert!(payment.insert(session_priv_bytes, path.last().unwrap().fee_msat));
+
+                                       send_res
                                } {
                                        Some((update_add, commitment_signed, monitor_update)) => {
                                                if let Err(e) = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), monitor_update) {
@@ -1762,7 +2067,11 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
        /// If a payment_secret *is* provided, we assume that the invoice had the payment_secret feature
        /// bit set (either as required or as available). If multiple paths are present in the Route,
        /// we assume the invoice had the basic_mpp feature set.
-       pub fn send_payment(&self, route: &Route, payment_hash: PaymentHash, payment_secret: &Option<PaymentSecret>) -> Result<(), PaymentSendFailure> {
+       pub fn send_payment(&self, route: &Route, payment_hash: PaymentHash, payment_secret: &Option<PaymentSecret>) -> Result<PaymentId, PaymentSendFailure> {
+               self.send_payment_internal(route, payment_hash, payment_secret, None, None, None)
+       }
+
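+       /// Common send path for `send_payment`, `retry_payment` and `send_spontaneous_payment`:
+       /// `keysend_preimage` is `Some` only for spontaneous payments, `payment_id` is `Some` only on
+       /// retries, and `recv_value_msat`, when `Some`, restores the payment's original total value
+       /// rather than using the sum of the (possibly partial) retry paths.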
+       fn send_payment_internal(&self, route: &Route, payment_hash: PaymentHash, payment_secret: &Option<PaymentSecret>, keysend_preimage: Option<PaymentPreimage>, payment_id: Option<PaymentId>, recv_value_msat: Option<u64>) -> Result<PaymentId, PaymentSendFailure> {
                if route.paths.len() < 1 {
                        return Err(PaymentSendFailure::ParameterError(APIError::RouteError{err: "There must be at least one path to send over"}));
                }
@@ -1772,9 +2081,13 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                        // for now more than 10 paths likely carries too much one-path failure.
                        return Err(PaymentSendFailure::ParameterError(APIError::RouteError{err: "Sending over more than 10 paths is not currently supported"}));
                }
+               if payment_secret.is_none() && route.paths.len() > 1 {
+                       return Err(PaymentSendFailure::ParameterError(APIError::APIMisuseError{err: "Payment secret is required for multi-path payments".to_string()}));
+               }
                let mut total_value = 0;
                let our_node_id = self.get_our_node_id();
                let mut path_errs = Vec::with_capacity(route.paths.len());
+               let payment_id = if let Some(id) = payment_id { id } else { PaymentId(self.keys_manager.get_secure_random_bytes()) };
                'path_check: for path in route.paths.iter() {
                        if path.len() < 1 || path.len() > 20 {
                                path_errs.push(Err(APIError::RouteError{err: "Path didn't go anywhere/had bogus size"}));
@@ -1792,11 +2105,15 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                if path_errs.iter().any(|e| e.is_err()) {
                        return Err(PaymentSendFailure::PathParameterError(path_errs));
                }
+               if let Some(amt_msat) = recv_value_msat {
+                       debug_assert!(amt_msat >= total_value);
+                       total_value = amt_msat;
+               }
 
                let cur_height = self.best_block.read().unwrap().height() + 1;
                let mut results = Vec::new();
                for path in route.paths.iter() {
-                       results.push(self.send_payment_along_path(&path, &payment_hash, payment_secret, total_value, cur_height));
+                       results.push(self.send_payment_along_path(&path, &payment_hash, payment_secret, total_value, cur_height, payment_id, &keysend_preimage));
                }
                let mut has_ok = false;
                let mut has_err = false;
@@ -1816,7 +2133,81 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                } else if has_err {
                        Err(PaymentSendFailure::AllFailedRetrySafe(results.drain(..).map(|r| r.unwrap_err()).collect()))
                } else {
-                       Ok(())
+                       Ok(payment_id)
+               }
+       }
+
+       /// Retries a payment along the given [`Route`].
+       ///
+       /// Errors returned are a superset of those returned from [`send_payment`], so see
+       /// [`send_payment`] documentation for more details on errors. This method will also error if the
+       /// retry amount puts the payment more than 10% over the payment's total amount, or if the payment
+       /// for the given `payment_id` cannot be found (likely due to timeout or success).
+       ///
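+       /// A usage sketch (hypothetical `channel_manager` and `retry_route` bindings; error handling
+       /// elided):
+       ///
+       /// ```ignore
+       /// let payment_id = channel_manager.send_payment(&route, payment_hash, &payment_secret)?;
+       /// // A path failed; retry the failed amount over a freshly-computed route:
+       /// channel_manager.retry_payment(&retry_route, payment_id)?;
+       /// ```
+       ///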
+       /// [`send_payment`]: Self::send_payment
+       pub fn retry_payment(&self, route: &Route, payment_id: PaymentId) -> Result<(), PaymentSendFailure> {
+               const RETRY_OVERFLOW_PERCENTAGE: u64 = 10;
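+               // e.g. for a payment with total_msat = 100_000, retries may not push the total
+               // in-flight amount (pending_amt_msat + retry_amt_msat) above 110_000 msat.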
+               for path in route.paths.iter() {
+                       if path.len() == 0 {
+                               return Err(PaymentSendFailure::ParameterError(APIError::APIMisuseError {
+                                       err: "length-0 path in route".to_string()
+                               }))
+                       }
+               }
+
+               let (total_msat, payment_hash, payment_secret) = {
+                       let outbounds = self.pending_outbound_payments.lock().unwrap();
+                       if let Some(payment) = outbounds.get(&payment_id) {
+                               match payment {
+                                       PendingOutboundPayment::Retryable {
+                                               total_msat, payment_hash, payment_secret, pending_amt_msat, ..
+                                       } => {
+                                               let retry_amt_msat: u64 = route.paths.iter().map(|path| path.last().unwrap().fee_msat).sum();
+                                               if retry_amt_msat + *pending_amt_msat > *total_msat * (100 + RETRY_OVERFLOW_PERCENTAGE) / 100 {
+                                                       return Err(PaymentSendFailure::ParameterError(APIError::APIMisuseError {
+                                                               err: format!("retry_amt_msat of {} will put pending_amt_msat (currently: {}) more than 10% over total_payment_amt_msat of {}", retry_amt_msat, pending_amt_msat, total_msat)
+                                                       }))
+                                               }
+                                               (*total_msat, *payment_hash, *payment_secret)
+                                       },
+                                       PendingOutboundPayment::Legacy { .. } => {
+                                               return Err(PaymentSendFailure::ParameterError(APIError::APIMisuseError {
+                                                       err: "Unable to retry payments that were initially sent on LDK versions prior to 0.0.102".to_string()
+                                               }))
+                                       }
+                               }
+                       } else {
+                               return Err(PaymentSendFailure::ParameterError(APIError::APIMisuseError {
+                                       err: format!("Payment with ID {} not found", log_bytes!(payment_id.0)),
+                               }))
+                       }
+               };
+               return self.send_payment_internal(route, payment_hash, &payment_secret, None, Some(payment_id), Some(total_msat)).map(|_| ())
+       }
+
+       /// Send a spontaneous payment, which is a payment that does not require the recipient to have
+       /// generated an invoice. Optionally, you may specify the preimage. If you do choose to specify
+       /// the preimage, it must be a cryptographically secure random value that no intermediate node
+       /// would be able to guess -- otherwise, an intermediate node may claim the payment and it will
+       /// never reach the recipient.
+       ///
+       /// See [`send_payment`] documentation for more details on the return value of this function.
+       ///
+       /// Similar to regular payments, you MUST NOT reuse a `payment_preimage` value. See
+       /// [`send_payment`] for more information about the risks of duplicate preimage usage.
+       ///
+       /// Note that `route` must have exactly one path.
+       ///
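+       /// A usage sketch (hypothetical `channel_manager` and `route` bindings; error handling
+       /// elided):
+       ///
+       /// ```ignore
+       /// // Passing `None` asks the manager to draw a fresh, cryptographically secure preimage.
+       /// let (payment_hash, payment_id) = channel_manager.send_spontaneous_payment(&route, None)?;
+       /// ```
+       ///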
+       /// [`send_payment`]: Self::send_payment
+       pub fn send_spontaneous_payment(&self, route: &Route, payment_preimage: Option<PaymentPreimage>) -> Result<(PaymentHash, PaymentId), PaymentSendFailure> {
+               let preimage = match payment_preimage {
+                       Some(p) => p,
+                       None => PaymentPreimage(self.keys_manager.get_secure_random_bytes()),
+               };
+               let payment_hash = PaymentHash(Sha256::hash(&preimage.0).into_inner());
+               match self.send_payment_internal(route, payment_hash, &None, Some(preimage), None, None) {
+                       Ok(payment_id) => Ok((payment_hash, payment_id)),
+                       Err(e) => Err(e)
                }
        }
 
@@ -2008,7 +2399,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                        if let Some(msg) = chan.get_signed_channel_announcement(&self.our_network_key, self.get_our_node_id(), self.genesis_hash.clone()) {
                                channel_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelAnnouncement {
                                        msg,
-                                       update_msg: match self.get_channel_update(chan) {
+                                       update_msg: match self.get_channel_update_for_broadcast(chan) {
                                                Ok(msg) => msg,
                                                Err(_) => continue,
                                        },
@@ -2100,7 +2491,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                                                                        } else {
                                                                                                panic!("Stated return value requirements in send_htlc() were not met");
                                                                                        }
-                                                                                       let chan_update = self.get_channel_update(chan.get()).unwrap();
+                                                                                       let chan_update = self.get_channel_update_for_unicast(chan.get()).unwrap();
                                                                                        failed_forwards.push((htlc_source, payment_hash,
                                                                                                HTLCFailReason::Reason { failure_code: 0x1000 | 7, data: chan_update.encode_with_len() }
                                                                                        ));
@@ -2163,16 +2554,17 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                                                        // close channel and then send error message to peer.
                                                                        let counterparty_node_id = chan.get().get_counterparty_node_id();
                                                                        let err: Result<(), _>  = match e {
-                                                                               ChannelError::Ignore(_) => {
+                                                                               ChannelError::Ignore(_) | ChannelError::Warn(_) => {
                                                                                        panic!("Stated return value requirements in send_commitment() were not met");
-                                                                               },
+                                                                               }
                                                                                ChannelError::Close(msg) => {
                                                                                        log_trace!(self.logger, "Closing channel {} due to Close-required error: {}", log_bytes!(chan.key()[..]), msg);
                                                                                        let (channel_id, mut channel) = chan.remove_entry();
                                                                                        if let Some(short_id) = channel.get_short_channel_id() {
                                                                                                channel_state.short_to_id.remove(&short_id);
                                                                                        }
-                                                                                       Err(MsgHandleErrInternal::from_finish_shutdown(msg, channel_id, channel.force_shutdown(true), self.get_channel_update(&channel).ok()))
+                                                                                       // ChannelClosed event is generated by handle_error for us.
+                                                                                       Err(MsgHandleErrInternal::from_finish_shutdown(msg, channel_id, channel.force_shutdown(true), self.get_channel_update_for_broadcast(&channel).ok()))
                                                                                },
                                                                                ChannelError::CloseDelayBroadcast(_) => { panic!("Wait is only generated on receipt of channel_reestablish, which is handled by try_chan_entry, we don't bother to support it here"); }
                                                                        };
@@ -2205,9 +2597,17 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                        for forward_info in pending_forwards.drain(..) {
                                                match forward_info {
                                                        HTLCForwardInfo::AddHTLC { prev_short_channel_id, prev_htlc_id, forward_info: PendingHTLCInfo {
-                                                                       routing: PendingHTLCRouting::Receive { payment_data, incoming_cltv_expiry },
-                                                                       incoming_shared_secret, payment_hash, amt_to_forward, .. },
+                                                                       routing, incoming_shared_secret, payment_hash, amt_to_forward, .. },
                                                                        prev_funding_outpoint } => {
+                                                               let (cltv_expiry, onion_payload) = match routing {
+                                                                       PendingHTLCRouting::Receive { payment_data, incoming_cltv_expiry } =>
+                                                                               (incoming_cltv_expiry, OnionPayload::Invoice(payment_data)),
+                                                                       PendingHTLCRouting::ReceiveKeysend { payment_preimage, incoming_cltv_expiry } =>
+                                                                               (incoming_cltv_expiry, OnionPayload::Spontaneous(payment_preimage)),
+                                                                       _ => {
+                                                                               panic!("short_channel_id == 0 should imply any pending_forward entries are of type Receive");
+                                                                       }
+                                                               };
                                                                let claimable_htlc = ClaimableHTLC {
                                                                        prev_hop: HTLCPreviousHopData {
                                                                                short_channel_id: prev_short_channel_id,
@@ -2216,8 +2616,8 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                                                                incoming_packet_shared_secret: incoming_shared_secret,
                                                                        },
                                                                        value: amt_to_forward,
-                                                                       payment_data: payment_data.clone(),
-                                                                       cltv_expiry: incoming_cltv_expiry,
+                                                                       cltv_expiry,
+                                                                       onion_payload,
                                                                };
 
                                                                macro_rules! fail_htlc {
@@ -2246,10 +2646,38 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                                                let mut payment_secrets = self.pending_inbound_payments.lock().unwrap();
                                                                match payment_secrets.entry(payment_hash) {
                                                                        hash_map::Entry::Vacant(_) => {
-                                                                               log_trace!(self.logger, "Failing new HTLC with payment_hash {} as we didn't have a corresponding inbound payment.", log_bytes!(payment_hash.0));
-                                                                               fail_htlc!(claimable_htlc);
+                                                                               match claimable_htlc.onion_payload {
+                                                                                       OnionPayload::Invoice(_) => {
+                                                                                               log_trace!(self.logger, "Failing new HTLC with payment_hash {} as we didn't have a corresponding inbound payment.", log_bytes!(payment_hash.0));
+                                                                                               fail_htlc!(claimable_htlc);
+                                                                                       },
+                                                                                       OnionPayload::Spontaneous(preimage) => {
+                                                                                               match channel_state.claimable_htlcs.entry(payment_hash) {
+                                                                                                       hash_map::Entry::Vacant(e) => {
+                                                                                                               e.insert(vec![claimable_htlc]);
+                                                                                                               new_events.push(events::Event::PaymentReceived {
+                                                                                                                       payment_hash,
+                                                                                                                       amt: amt_to_forward,
+                                                                                                                       purpose: events::PaymentPurpose::SpontaneousPayment(preimage),
+                                                                                                               });
+                                                                                                       },
+                                                                                                       hash_map::Entry::Occupied(_) => {
+                                                                                                               log_trace!(self.logger, "Failing new keysend HTLC with payment_hash {} for a duplicative payment hash", log_bytes!(payment_hash.0));
+                                                                                                               fail_htlc!(claimable_htlc);
+                                                                                                       }
+                                                                                               }
+                                                                                       }
+                                                                               }
                                                                        },
                                                                        hash_map::Entry::Occupied(inbound_payment) => {
+                                                                               let payment_data =
+                                                                                       if let OnionPayload::Invoice(ref data) = claimable_htlc.onion_payload {
+                                                                                               data.clone()
+                                                                                       } else {
+                                                                                               log_trace!(self.logger, "Failing new keysend HTLC with payment_hash {} because we already have an inbound payment with the same payment hash", log_bytes!(payment_hash.0));
+                                                                                               fail_htlc!(claimable_htlc);
+                                                                                               continue
+                                                                                       };
                                                                                if inbound_payment.get().payment_secret != payment_data.payment_secret {
                                                                                        log_trace!(self.logger, "Failing new HTLC with payment_hash {} as it didn't match our expected payment secret.", log_bytes!(payment_hash.0));
                                                                                        fail_htlc!(claimable_htlc);
@@ -2261,15 +2689,27 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                                                                        let mut total_value = 0;
                                                                                        let htlcs = channel_state.claimable_htlcs.entry(payment_hash)
                                                                                                .or_insert(Vec::new());
+                                                                                       if htlcs.len() == 1 {
+                                                                                               if let OnionPayload::Spontaneous(_) = htlcs[0].onion_payload {
+                                                                                                       log_trace!(self.logger, "Failing new HTLC with payment_hash {} as we already had an existing keysend HTLC with the same payment hash", log_bytes!(payment_hash.0));
+                                                                                                       fail_htlc!(claimable_htlc);
+                                                                                                       continue
+                                                                                               }
+                                                                                       }
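+                                                                                       // (Checking htlcs[0] when len == 1 suffices: keysend HTLCs are only
+                                                                                       // ever inserted into a vacant entry above, so a keysend entry is always
+                                                                                       // a single-element vec.)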
                                                                                        htlcs.push(claimable_htlc);
                                                                                        for htlc in htlcs.iter() {
                                                                                                total_value += htlc.value;
-                                                                                               if htlc.payment_data.total_msat != payment_data.total_msat {
-                                                                                                       log_trace!(self.logger, "Failing HTLCs with payment_hash {} as the HTLCs had inconsistent total values (eg {} and {})",
-                                                                                                               log_bytes!(payment_hash.0), payment_data.total_msat, htlc.payment_data.total_msat);
-                                                                                                       total_value = msgs::MAX_VALUE_MSAT;
+                                                                                               match &htlc.onion_payload {
+                                                                                                       OnionPayload::Invoice(htlc_payment_data) => {
+                                                                                                               if htlc_payment_data.total_msat != payment_data.total_msat {
+                                                                                                                       log_trace!(self.logger, "Failing HTLCs with payment_hash {} as the HTLCs had inconsistent total values (eg {} and {})",
+                                                                                                                                                                log_bytes!(payment_hash.0), payment_data.total_msat, htlc_payment_data.total_msat);
+                                                                                                                       total_value = msgs::MAX_VALUE_MSAT;
+                                                                                                               }
+                                                                                                               if total_value >= msgs::MAX_VALUE_MSAT { break; }
+                                                                                                       },
+                                                                                                       _ => unreachable!(),
                                                                                                }
-                                                                                               if total_value >= msgs::MAX_VALUE_MSAT { break; }
                                                                                        }
                                                                                        if total_value >= msgs::MAX_VALUE_MSAT || total_value > payment_data.total_msat {
                                                                                                log_trace!(self.logger, "Failing HTLCs with payment_hash {} as the total value {} ran over expected value {} (or HTLCs were inconsistent)",
@@ -2280,10 +2720,12 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                                                                        } else if total_value == payment_data.total_msat {
                                                                                                new_events.push(events::Event::PaymentReceived {
                                                                                                        payment_hash,
-                                                                                                       payment_preimage: inbound_payment.get().payment_preimage,
-                                                                                                       payment_secret: payment_data.payment_secret,
+                                                                                                       purpose: events::PaymentPurpose::InvoicePayment {
+                                                                                                               payment_preimage: inbound_payment.get().payment_preimage,
+                                                                                                               payment_secret: payment_data.payment_secret,
+                                                                                                               user_payment_id: inbound_payment.get().user_payment_id,
+                                                                                                       },
                                                                                                        amt: total_value,
-                                                                                                       user_payment_id: inbound_payment.get().user_payment_id,
                                                                                                });
                                                                                                // Only ever generate at most one PaymentReceived
                                                                                                // per registered payment_hash, even if it isn't
@@ -2298,9 +2740,6 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                                                        },
                                                                };
                                                        },
-                                                       HTLCForwardInfo::AddHTLC { .. } => {
-                                                               panic!("short_channel_id == 0 should imply any pending_forward entries are of type Receive");
-                                                       },
                                                        HTLCForwardInfo::FailHTLC { .. } => {
                                                                panic!("Got pending fail of our own HTLC");
                                                        }
@@ -2354,48 +2793,160 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                self.process_background_events();
        }
 
-       /// If a peer is disconnected we mark any channels with that peer as 'disabled'.
-       /// After some time, if channels are still disabled we need to broadcast a ChannelUpdate
-       /// to inform the network about the uselessness of these channels.
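+       /// Attempts a feerate update on a single outbound channel. The returned tuple indicates
+       /// whether the channel should be retained in the channel map, whether the caller needs to
+       /// persist, and any error to be handled by the caller.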
+       fn update_channel_fee(&self, short_to_id: &mut HashMap<u64, [u8; 32]>, pending_msg_events: &mut Vec<events::MessageSendEvent>, chan_id: &[u8; 32], chan: &mut Channel<Signer>, new_feerate: u32) -> (bool, NotifyOption, Result<(), MsgHandleErrInternal>) {
+               if !chan.is_outbound() { return (true, NotifyOption::SkipPersist, Ok(())); }
+               // If the feerate has decreased by less than half, don't bother
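+               // (e.g. 2_000 -> 1_200 sat/kW is skipped, while 2_000 -> 900 or any increase proceeds)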
+               if new_feerate <= chan.get_feerate() && new_feerate * 2 > chan.get_feerate() {
+                       log_trace!(self.logger, "Channel {} does not qualify for a feerate change from {} to {}.",
+                               log_bytes!(chan_id[..]), chan.get_feerate(), new_feerate);
+                       return (true, NotifyOption::SkipPersist, Ok(()));
+               }
+               if !chan.is_live() {
+                       log_trace!(self.logger, "Channel {} does not qualify for a feerate change from {} to {} as it cannot currently be updated (probably the peer is disconnected).",
+                               log_bytes!(chan_id[..]), chan.get_feerate(), new_feerate);
+                       return (true, NotifyOption::SkipPersist, Ok(()));
+               }
+               log_trace!(self.logger, "Channel {} qualifies for a feerate change from {} to {}.",
+                       log_bytes!(chan_id[..]), chan.get_feerate(), new_feerate);
+
+               let mut retain_channel = true;
+               let res = match chan.send_update_fee_and_commit(new_feerate, &self.logger) {
+                       Ok(res) => Ok(res),
+                       Err(e) => {
+                               let (drop, res) = convert_chan_err!(self, e, short_to_id, chan, chan_id);
+                               if drop { retain_channel = false; }
+                               Err(res)
+                       }
+               };
+               let ret_err = match res {
+                       Ok(Some((update_fee, commitment_signed, monitor_update))) => {
+                               if let Err(e) = self.chain_monitor.update_channel(chan.get_funding_txo().unwrap(), monitor_update) {
+                                       let (res, drop) = handle_monitor_err!(self, e, short_to_id, chan, RAACommitmentOrder::CommitmentFirst, false, true, Vec::new(), Vec::new(), chan_id);
+                                       if drop { retain_channel = false; }
+                                       res
+                               } else {
+                                       pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
+                                               node_id: chan.get_counterparty_node_id(),
+                                               updates: msgs::CommitmentUpdate {
+                                                       update_add_htlcs: Vec::new(),
+                                                       update_fulfill_htlcs: Vec::new(),
+                                                       update_fail_htlcs: Vec::new(),
+                                                       update_fail_malformed_htlcs: Vec::new(),
+                                                       update_fee: Some(update_fee),
+                                                       commitment_signed,
+                                               },
+                                       });
+                                       Ok(())
+                               }
+                       },
+                       Ok(None) => Ok(()),
+                       Err(e) => Err(e),
+               };
+               (retain_channel, NotifyOption::DoPersist, ret_err)
+       }
+
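// A standalone sketch (not part of the patch) of the qualification rule
// enforced at the top of `update_channel_fee` above: an outbound channel only
// gets a fee update when the estimate rises, or falls by at least half. The
// helper name and values below are illustrative, not LDK API.
fn feerate_change_qualifies(current_feerate: u32, new_feerate: u32) -> bool {
    // Mirrors the early-return above: skip when the feerate decreased,
    // but by less than half.
    !(new_feerate <= current_feerate && new_feerate * 2 > current_feerate)
}

#[test]
fn feerate_qualification_thresholds() {
    assert!(feerate_change_qualifies(1000, 2000)); // any increase qualifies
    assert!(!feerate_change_qualifies(1000, 600)); // shallow decrease is skipped
    assert!(feerate_change_qualifies(1000, 400));  // dropped by more than half
}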
+       #[cfg(fuzzing)]
+       /// In chanmon_consistency we sometimes want to perform the channel fee updates that
+       /// timer_tick_occurred does, but we can't generate the disabled-channel updates there, as
+       /// the fuzzer considers those a failure (they usually indicate a channel force-close,
+       /// which is exactly what it is trying to detect). Thus, we expose this variant for its
+       /// benefit.
+       pub fn maybe_update_chan_fees(&self) {
+               PersistenceNotifierGuard::optionally_notify(&self.total_consistency_lock, &self.persistence_notifier, || {
+                       let mut should_persist = NotifyOption::SkipPersist;
+
+                       let new_feerate = self.fee_estimator.get_est_sat_per_1000_weight(ConfirmationTarget::Normal);
+
+                       let mut handle_errors = Vec::new();
+                       {
+                               let mut channel_state_lock = self.channel_state.lock().unwrap();
+                               let channel_state = &mut *channel_state_lock;
+                               let pending_msg_events = &mut channel_state.pending_msg_events;
+                               let short_to_id = &mut channel_state.short_to_id;
+                               channel_state.by_id.retain(|chan_id, chan| {
+                                       let (retain_channel, chan_needs_persist, err) = self.update_channel_fee(short_to_id, pending_msg_events, chan_id, chan, new_feerate);
+                                       if chan_needs_persist == NotifyOption::DoPersist { should_persist = NotifyOption::DoPersist; }
+                                       if err.is_err() {
+                                               handle_errors.push(err);
+                                       }
+                                       retain_channel
+                               });
+                       }
+
+                       should_persist
+               });
+       }
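// In isolation, the retain-and-defer idiom used by both fee-update paths:
// filter the channel map while its lock is held, stash any errors, and only
// handle them once the lock is released (error handling may need to take
// other locks). The types here are local stand-ins, not LDK's.
use std::collections::HashMap;

fn sweep_channels(channels: &mut HashMap<u64, i64>) -> Vec<String> {
    let mut deferred_errors = Vec::new();
    channels.retain(|chan_id, balance_msat| {
        if *balance_msat < 0 {
            // Record the problem, drop the entry, and keep iterating.
            deferred_errors.push(format!("dropping channel {}: negative balance", chan_id));
            return false;
        }
        true
    });
    deferred_errors // handled by the caller after the map's lock is dropped
}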
+
+       /// Performs actions which should happen on startup and roughly once per minute thereafter.
        ///
-       /// This method handles all the details, and must be called roughly once per minute.
+       /// This currently includes:
+       ///  * Increasing or decreasing the on-chain feerate estimates for our outbound channels,
+       ///  * Broadcasting `ChannelUpdate` messages if we've been disconnected from our peer for more
+       ///    than a minute, informing the network that they should no longer attempt to route over
+       ///    the channel.
        ///
-       /// Note that in some rare cases this may generate a `chain::Watch::update_channel` call.
+       /// Note that this may cause reentrancy through `chain::Watch::update_channel` calls or feerate
+       /// estimate fetches.
        pub fn timer_tick_occurred(&self) {
                PersistenceNotifierGuard::optionally_notify(&self.total_consistency_lock, &self.persistence_notifier, || {
                        let mut should_persist = NotifyOption::SkipPersist;
                        if self.process_background_events() { should_persist = NotifyOption::DoPersist; }
 
-                       let mut channel_state_lock = self.channel_state.lock().unwrap();
-                       let channel_state = &mut *channel_state_lock;
-                       for (_, chan) in channel_state.by_id.iter_mut() {
-                               match chan.channel_update_status() {
-                                       ChannelUpdateStatus::Enabled if !chan.is_live() => chan.set_channel_update_status(ChannelUpdateStatus::DisabledStaged),
-                                       ChannelUpdateStatus::Disabled if chan.is_live() => chan.set_channel_update_status(ChannelUpdateStatus::EnabledStaged),
-                                       ChannelUpdateStatus::DisabledStaged if chan.is_live() => chan.set_channel_update_status(ChannelUpdateStatus::Enabled),
-                                       ChannelUpdateStatus::EnabledStaged if !chan.is_live() => chan.set_channel_update_status(ChannelUpdateStatus::Disabled),
-                                       ChannelUpdateStatus::DisabledStaged if !chan.is_live() => {
-                                               if let Ok(update) = self.get_channel_update(&chan) {
-                                                       channel_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
-                                                               msg: update
-                                                       });
-                                               }
-                                               should_persist = NotifyOption::DoPersist;
-                                               chan.set_channel_update_status(ChannelUpdateStatus::Disabled);
-                                       },
-                                       ChannelUpdateStatus::EnabledStaged if chan.is_live() => {
-                                               if let Ok(update) = self.get_channel_update(&chan) {
-                                                       channel_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
-                                                               msg: update
-                                                       });
-                                               }
-                                               should_persist = NotifyOption::DoPersist;
-                                               chan.set_channel_update_status(ChannelUpdateStatus::Enabled);
-                                       },
-                                       _ => {},
-                               }
+                       let new_feerate = self.fee_estimator.get_est_sat_per_1000_weight(ConfirmationTarget::Normal);
+
+                       let mut handle_errors = Vec::new();
+                       {
+                               let mut channel_state_lock = self.channel_state.lock().unwrap();
+                               let channel_state = &mut *channel_state_lock;
+                               let pending_msg_events = &mut channel_state.pending_msg_events;
+                               let short_to_id = &mut channel_state.short_to_id;
+                               channel_state.by_id.retain(|chan_id, chan| {
+                                       let counterparty_node_id = chan.get_counterparty_node_id();
+                                       let (retain_channel, chan_needs_persist, err) = self.update_channel_fee(short_to_id, pending_msg_events, chan_id, chan, new_feerate);
+                                       if chan_needs_persist == NotifyOption::DoPersist { should_persist = NotifyOption::DoPersist; }
+                                       if err.is_err() {
+                                               handle_errors.push((err, counterparty_node_id));
+                                       }
+                                       if !retain_channel { return false; }
+
+                                       if let Err(e) = chan.timer_check_closing_negotiation_progress() {
+                                               let (needs_close, err) = convert_chan_err!(self, e, short_to_id, chan, chan_id);
+                                               handle_errors.push((Err(err), chan.get_counterparty_node_id()));
+                                               if needs_close { return false; }
+                                       }
+
+                                       match chan.channel_update_status() {
+                                               ChannelUpdateStatus::Enabled if !chan.is_live() => chan.set_channel_update_status(ChannelUpdateStatus::DisabledStaged),
+                                               ChannelUpdateStatus::Disabled if chan.is_live() => chan.set_channel_update_status(ChannelUpdateStatus::EnabledStaged),
+                                               ChannelUpdateStatus::DisabledStaged if chan.is_live() => chan.set_channel_update_status(ChannelUpdateStatus::Enabled),
+                                               ChannelUpdateStatus::EnabledStaged if !chan.is_live() => chan.set_channel_update_status(ChannelUpdateStatus::Disabled),
+                                               ChannelUpdateStatus::DisabledStaged if !chan.is_live() => {
+                                                       if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
+                                                               pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
+                                                                       msg: update
+                                                               });
+                                                       }
+                                                       should_persist = NotifyOption::DoPersist;
+                                                       chan.set_channel_update_status(ChannelUpdateStatus::Disabled);
+                                               },
+                                               ChannelUpdateStatus::EnabledStaged if chan.is_live() => {
+                                                       if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
+                                                               pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
+                                                                       msg: update
+                                                               });
+                                                       }
+                                                       should_persist = NotifyOption::DoPersist;
+                                                       chan.set_channel_update_status(ChannelUpdateStatus::Enabled);
+                                               },
+                                               _ => {},
+                                       }
+
+                                       true
+                               });
                        }
 
+                       for (err, counterparty_node_id) in handle_errors.drain(..) {
+                               let _ = handle_error!(self, err, counterparty_node_id);
+                       }
                        should_persist
                });
        }
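// A self-contained model of the two-tick hysteresis the match on
// `channel_update_status()` implements above: a channel must be observed
// offline (or back online) on two consecutive timer ticks before a disabling
// (or enabling) ChannelUpdate is broadcast. Names are ours, not LDK API.
#[derive(Clone, Copy, PartialEq, Debug)]
enum UpdateStatus { Enabled, EnabledStaged, Disabled, DisabledStaged }

fn on_timer_tick(status: UpdateStatus, is_live: bool) -> (UpdateStatus, bool /* broadcast? */) {
    use UpdateStatus::*;
    match (status, is_live) {
        (Enabled, false)        => (DisabledStaged, false), // first offline tick: stage only
        (Disabled, true)        => (EnabledStaged, false),  // first online tick: stage only
        (DisabledStaged, true)  => (Enabled, false),        // recovered before the second tick
        (EnabledStaged, false)  => (Disabled, false),       // dropped before the second tick
        (DisabledStaged, false) => (Disabled, true),        // second offline tick: broadcast disable
        (EnabledStaged, true)   => (Enabled, true),         // second online tick: broadcast enable
        (other, _)              => (other, false),          // steady state: nothing to do
    }
}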
@@ -2434,7 +2985,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                        let (failure_code, onion_failure_data) =
                                                match self.channel_state.lock().unwrap().by_id.entry(channel_id) {
                                                        hash_map::Entry::Occupied(chan_entry) => {
-                                                               if let Ok(upd) = self.get_channel_update(&chan_entry.get()) {
+                                                               if let Ok(upd) = self.get_channel_update_for_unicast(&chan_entry.get()) {
                                                                        (0x1000|7, upd.encode_with_len())
                                                                } else {
                                                                        (0x4000|10, Vec::new())
@@ -2446,22 +2997,26 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                        self.fail_htlc_backwards_internal(channel_state,
                                                htlc_src, &payment_hash, HTLCFailReason::Reason { failure_code, data: onion_failure_data});
                                },
-                               HTLCSource::OutboundRoute { session_priv, .. } => {
-                                       if {
-                                               let mut session_priv_bytes = [0; 32];
-                                               session_priv_bytes.copy_from_slice(&session_priv[..]);
-                                               self.pending_outbound_payments.lock().unwrap().remove(&session_priv_bytes)
-                                       } {
-                                               self.pending_events.lock().unwrap().push(
-                                                       events::Event::PaymentFailed {
-                                                               payment_hash,
-                                                               rejected_by_dest: false,
-#[cfg(test)]
-                                                               error_code: None,
-#[cfg(test)]
-                                                               error_data: None,
-                                                       }
-                                               )
+                               HTLCSource::OutboundRoute { session_priv, payment_id, path, .. } => {
+                                       let mut session_priv_bytes = [0; 32];
+                                       session_priv_bytes.copy_from_slice(&session_priv[..]);
+                                       let mut outbounds = self.pending_outbound_payments.lock().unwrap();
+                                       if let hash_map::Entry::Occupied(mut payment) = outbounds.entry(payment_id) {
+                                               if payment.get_mut().remove(&session_priv_bytes, path.last().unwrap().fee_msat) {
+                                                       self.pending_events.lock().unwrap().push(
+                                                               events::Event::PaymentPathFailed {
+                                                                       payment_hash,
+                                                                       rejected_by_dest: false,
+                                                                       network_update: None,
+                                                                       all_paths_failed: payment.get().remaining_parts() == 0,
+                                                                       path: path.clone(),
+                                                                       #[cfg(test)]
+                                                                       error_code: None,
+                                                                       #[cfg(test)]
+                                                                       error_data: None,
+                                                               }
+                                                       );
+                                               }
                                        } else {
                                                log_trace!(self.logger, "Received duplicative fail for HTLC with payment_hash {}", log_bytes!(payment_hash.0));
                                        }
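// An illustrative model (local types, not LDK's) of the per-payment_id
// bookkeeping consulted above: each outbound payment tracks its in-flight MPP
// parts by session key, letting the handler both ignore duplicative fails and
// detect when the final part has failed (`all_paths_failed`).
use std::collections::HashSet;

struct PendingPayment {
    session_privs: HashSet<[u8; 32]>,
}

impl PendingPayment {
    // True the first time a part is removed; false for a duplicative fail,
    // which is only logged above.
    fn remove(&mut self, session_priv: &[u8; 32]) -> bool {
        self.session_privs.remove(session_priv)
    }
    // When this reaches zero, the failure event is emitted with
    // `all_paths_failed: true`.
    fn remaining_parts(&self) -> usize {
        self.session_privs.len()
    }
}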
@@ -2486,12 +3041,20 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                // from block_connected which may run during initialization prior to the chain_monitor
                // being fully configured. See the docs for `ChannelManagerReadArgs` for more.
                match source {
-                       HTLCSource::OutboundRoute { ref path, session_priv, .. } => {
-                               if {
-                                       let mut session_priv_bytes = [0; 32];
-                                       session_priv_bytes.copy_from_slice(&session_priv[..]);
-                                       !self.pending_outbound_payments.lock().unwrap().remove(&session_priv_bytes)
-                               } {
+                       HTLCSource::OutboundRoute { ref path, session_priv, payment_id, .. } => {
+                               let mut session_priv_bytes = [0; 32];
+                               session_priv_bytes.copy_from_slice(&session_priv[..]);
+                               let mut outbounds = self.pending_outbound_payments.lock().unwrap();
+                               let mut all_paths_failed = false;
+                               if let hash_map::Entry::Occupied(mut sessions) = outbounds.entry(payment_id) {
+                                       if !sessions.get_mut().remove(&session_priv_bytes, path.last().unwrap().fee_msat) {
+                                               log_trace!(self.logger, "Received duplicative fail for HTLC with payment_hash {}", log_bytes!(payment_hash.0));
+                                               return;
+                                       }
+                                       if sessions.get().remaining_parts() == 0 {
+                                               all_paths_failed = true;
+                                       }
+                               } else {
                                        log_trace!(self.logger, "Received duplicative fail for HTLC with payment_hash {}", log_bytes!(payment_hash.0));
                                        return;
                                }
@@ -2500,23 +3063,19 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                match &onion_error {
                                        &HTLCFailReason::LightningError { ref err } => {
 #[cfg(test)]
-                                               let (channel_update, payment_retryable, onion_error_code, onion_error_data) = onion_utils::process_onion_failure(&self.secp_ctx, &self.logger, &source, err.data.clone());
+                                               let (network_update, payment_retryable, onion_error_code, onion_error_data) = onion_utils::process_onion_failure(&self.secp_ctx, &self.logger, &source, err.data.clone());
 #[cfg(not(test))]
-                                               let (channel_update, payment_retryable, _, _) = onion_utils::process_onion_failure(&self.secp_ctx, &self.logger, &source, err.data.clone());
+                                               let (network_update, payment_retryable, _, _) = onion_utils::process_onion_failure(&self.secp_ctx, &self.logger, &source, err.data.clone());
                                                // TODO: If we decided to blame ourselves (or one of our channels) in
                                                // process_onion_failure we should close that channel as it implies our
                                                // next-hop is needlessly blaming us!
-                                               if let Some(update) = channel_update {
-                                                       self.channel_state.lock().unwrap().pending_msg_events.push(
-                                                               events::MessageSendEvent::PaymentFailureNetworkUpdate {
-                                                                       update,
-                                                               }
-                                                       );
-                                               }
                                                self.pending_events.lock().unwrap().push(
-                                                       events::Event::PaymentFailed {
+                                                       events::Event::PaymentPathFailed {
                                                                payment_hash: payment_hash.clone(),
                                                                rejected_by_dest: !payment_retryable,
+                                                               network_update,
+                                                               all_paths_failed,
+                                                               path: path.clone(),
 #[cfg(test)]
                                                                error_code: onion_error_code,
 #[cfg(test)]
@@ -2531,16 +3090,19 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                                        ref data,
                                                        .. } => {
                                                // we get a fail_malformed_htlc from the first hop
-                                               // TODO: We'd like to generate a PaymentFailureNetworkUpdate for temporary
+                                               // TODO: We'd like to generate a NetworkUpdate for temporary
                                                // failures here, but that would be insufficient as get_route
                                                // generally ignores its view of our own channels as we provide them via
                                                // ChannelDetails.
                                                // TODO: For non-temporary failures, we really should be closing the
                                                // channel here as we apparently can't relay through them anyway.
                                                self.pending_events.lock().unwrap().push(
-                                                       events::Event::PaymentFailed {
+                                                       events::Event::PaymentPathFailed {
                                                                payment_hash: payment_hash.clone(),
                                                                rejected_by_dest: path.len() == 1,
+                                                               network_update: None,
+                                                               all_paths_failed,
+                                                               path: path.clone(),
 #[cfg(test)]
                                                                error_code: Some(*failure_code),
 #[cfg(test)]
@@ -2641,16 +3203,22 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                                                         HTLCFailReason::Reason { failure_code: 0x4000|15, data: htlc_msat_height_data });
                                } else {
                                        match self.claim_funds_from_hop(channel_state.as_mut().unwrap(), htlc.prev_hop, payment_preimage) {
-                                               Err(Some(e)) => {
-                                                       if let msgs::ErrorAction::IgnoreError = e.1.err.action {
+                                               ClaimFundsFromHop::MonitorUpdateFail(pk, err, _) => {
+                                                       if let msgs::ErrorAction::IgnoreError = err.err.action {
                                                                // We got a temporary failure updating monitor, but will claim the
                                                                // HTLC when the monitor updating is restored (or on chain).
-                                                               log_error!(self.logger, "Temporary failure claiming HTLC, treating as success: {}", e.1.err.err);
+                                                               log_error!(self.logger, "Temporary failure claiming HTLC, treating as success: {}", err.err.err);
                                                                claimed_any_htlcs = true;
-                                                       } else { errs.push(e); }
+                                                       } else { errs.push((pk, err)); }
                                                },
-                                               Err(None) => unreachable!("We already checked for channel existence, we can't fail here!"),
-                                               Ok(()) => claimed_any_htlcs = true,
+                                               ClaimFundsFromHop::PrevHopForceClosed => unreachable!("We already checked for channel existence, we can't fail here!"),
+                                               ClaimFundsFromHop::DuplicateClaim => {
+                                               // We shouldn't normally get here, but if we do, it likely
+                                               // indicates that the HTLC timed out some time ago and is no longer
+                                               // available to be claimed. Thus, it does not make sense to set
+                                               // `claimed_any_htlcs`.
+                                               },
+                                               ClaimFundsFromHop::Success(_) => claimed_any_htlcs = true,
                                        }
                                }
                        }
@@ -2668,27 +3236,29 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                } else { false }
        }
 
-       fn claim_funds_from_hop(&self, channel_state_lock: &mut MutexGuard<ChannelHolder<Signer>>, prev_hop: HTLCPreviousHopData, payment_preimage: PaymentPreimage) -> Result<(), Option<(PublicKey, MsgHandleErrInternal)>> {
+       fn claim_funds_from_hop(&self, channel_state_lock: &mut MutexGuard<ChannelHolder<Signer>>, prev_hop: HTLCPreviousHopData, payment_preimage: PaymentPreimage) -> ClaimFundsFromHop {
                //TODO: Delay the claimed_funds relaying just like we do outbound relay!
                let channel_state = &mut **channel_state_lock;
                let chan_id = match channel_state.short_to_id.get(&prev_hop.short_channel_id) {
                        Some(chan_id) => chan_id.clone(),
                        None => {
-                               return Err(None)
+                               return ClaimFundsFromHop::PrevHopForceClosed
                        }
                };
 
                if let hash_map::Entry::Occupied(mut chan) = channel_state.by_id.entry(chan_id) {
-                       let was_frozen_for_monitor = chan.get().is_awaiting_monitor_update();
                        match chan.get_mut().get_update_fulfill_htlc_and_commit(prev_hop.htlc_id, payment_preimage, &self.logger) {
                                Ok(msgs_monitor_option) => {
-                                       if let UpdateFulfillCommitFetch::NewClaim { msgs, monitor_update } = msgs_monitor_option {
+                                       if let UpdateFulfillCommitFetch::NewClaim { msgs, htlc_value_msat, monitor_update } = msgs_monitor_option {
                                                if let Err(e) = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), monitor_update) {
-                                                       if was_frozen_for_monitor {
-                                                               assert!(msgs.is_none());
-                                                       } else {
-                                                               return Err(Some((chan.get().get_counterparty_node_id(), handle_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::CommitmentFirst, false, msgs.is_some()).unwrap_err())));
-                                                       }
+                                                       log_given_level!(self.logger, if e == ChannelMonitorUpdateErr::PermanentFailure { Level::Error } else { Level::Debug },
+                                                               "Failed to update channel monitor with preimage {:?}: {:?}",
+                                                               payment_preimage, e);
+                                                       return ClaimFundsFromHop::MonitorUpdateFail(
+                                                               chan.get().get_counterparty_node_id(),
+                                                               handle_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::CommitmentFirst, false, msgs.is_some()).unwrap_err(),
+                                                               Some(htlc_value_msat)
+                                                       );
                                                }
                                                if let Some((msg, commitment_signed)) = msgs {
                                                        log_debug!(self.logger, "Claiming funds for HTLC with preimage {} resulted in a commitment_signed for channel {}",
@@ -2705,66 +3275,97 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                                                }
                                                        });
                                                }
+                                               return ClaimFundsFromHop::Success(htlc_value_msat);
+                                       } else {
+                                               return ClaimFundsFromHop::DuplicateClaim;
                                        }
-                                       return Ok(())
                                },
-                               Err(e) => {
-                                       // TODO: Do something with e?
-                                       // This should only occur if we are claiming an HTLC at the same time as the
-                                       // HTLC is being failed (eg because a block is being connected and this caused
-                                       // an HTLC to time out). This should, of course, only occur if the user is the
-                                       // one doing the claiming (as it being a part of a peer claim would imply we're
-                                       // about to lose funds) and only if the lock in claim_funds was dropped as a
-                                       // previous HTLC was failed (thus not for an MPP payment).
-                                       debug_assert!(false, "This shouldn't be reachable except in absurdly rare cases between monitor updates and HTLC timeouts: {:?}", e);
-                                       return Err(None)
+                               Err((e, monitor_update)) => {
+                                       if let Err(e) = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), monitor_update) {
+                                               log_given_level!(self.logger, if e == ChannelMonitorUpdateErr::PermanentFailure { Level::Error } else { Level::Info },
+                                                       "Failed to update channel monitor with preimage {:?} immediately prior to force-close: {:?}",
+                                                       payment_preimage, e);
+                                       }
+                                       let counterparty_node_id = chan.get().get_counterparty_node_id();
+                                       let (drop, res) = convert_chan_err!(self, e, channel_state.short_to_id, chan.get_mut(), &chan_id);
+                                       if drop {
+                                               chan.remove_entry();
+                                       }
+                                       return ClaimFundsFromHop::MonitorUpdateFail(counterparty_node_id, res, None);
                                },
                        }
                } else { unreachable!(); }
        }
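// The four outcomes threaded through claim_funds_from_hop and its callers; a
// hedged reconstruction of the enum (declared earlier in this file, outside
// this hunk), with the imported types stubbed so the sketch stands alone:
struct PublicKey;            // stand-in for bitcoin::secp256k1::PublicKey
struct MsgHandleErrInternal; // stand-in for the internal error type
enum ClaimFundsFromHop {
    PrevHopForceClosed,      // the short_channel_id no longer maps to an open channel
    DuplicateClaim,          // the HTLC was already claimed (or timed out); nothing to do
    Success(u64),            // claimed; carries htlc_value_msat for fee accounting
    MonitorUpdateFail(PublicKey, MsgHandleErrInternal, Option<u64>),
}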
 
-       fn claim_funds_internal(&self, mut channel_state_lock: MutexGuard<ChannelHolder<Signer>>, source: HTLCSource, payment_preimage: PaymentPreimage) {
+       fn claim_funds_internal(&self, mut channel_state_lock: MutexGuard<ChannelHolder<Signer>>, source: HTLCSource, payment_preimage: PaymentPreimage, forwarded_htlc_value_msat: Option<u64>, from_onchain: bool) {
                match source {
-                       HTLCSource::OutboundRoute { session_priv, .. } => {
+                       HTLCSource::OutboundRoute { session_priv, payment_id, path, .. } => {
                                mem::drop(channel_state_lock);
-                               if {
-                                       let mut session_priv_bytes = [0; 32];
-                                       session_priv_bytes.copy_from_slice(&session_priv[..]);
-                                       self.pending_outbound_payments.lock().unwrap().remove(&session_priv_bytes)
-                               } {
-                                       let mut pending_events = self.pending_events.lock().unwrap();
-                                       pending_events.push(events::Event::PaymentSent {
-                                               payment_preimage
-                                       });
+                               let mut session_priv_bytes = [0; 32];
+                               session_priv_bytes.copy_from_slice(&session_priv[..]);
+                               let mut outbounds = self.pending_outbound_payments.lock().unwrap();
+                               let found_payment = if let Some(mut sessions) = outbounds.remove(&payment_id) {
+                                       sessions.remove(&session_priv_bytes, path.last().unwrap().fee_msat)
+                               } else { false };
+                               if found_payment {
+                                       let payment_hash = PaymentHash(Sha256::hash(&payment_preimage.0).into_inner());
+                                       self.pending_events.lock().unwrap().push(
+                                               events::Event::PaymentSent {
+                                                       payment_preimage,
+                                                       payment_hash,
+                                               }
+                                       );
                                } else {
                                        log_trace!(self.logger, "Received duplicative fulfill for HTLC with payment_preimage {}", log_bytes!(payment_preimage.0));
                                }
                        },
                        HTLCSource::PreviousHopData(hop_data) => {
                                let prev_outpoint = hop_data.outpoint;
-                               if let Err((counterparty_node_id, err)) = match self.claim_funds_from_hop(&mut channel_state_lock, hop_data, payment_preimage) {
-                                       Ok(()) => Ok(()),
-                                       Err(None) => {
-                                               let preimage_update = ChannelMonitorUpdate {
-                                                       update_id: CLOSED_CHANNEL_UPDATE_ID,
-                                                       updates: vec![ChannelMonitorUpdateStep::PaymentPreimage {
-                                                               payment_preimage: payment_preimage.clone(),
-                                                       }],
-                                               };
-                                               // We update the ChannelMonitor on the backward link, after
-                                               // receiving an offchain preimage event from the forward link (the
-                                               // event being update_fulfill_htlc).
-                                               if let Err(e) = self.chain_monitor.update_channel(prev_outpoint, preimage_update) {
-                                                       log_error!(self.logger, "Critical error: failed to update channel monitor with preimage {:?}: {:?}",
-                                                                  payment_preimage, e);
-                                               }
-                                               Ok(())
-                                       },
-                                       Err(Some(res)) => Err(res),
-                               } {
-                                       mem::drop(channel_state_lock);
-                                       let res: Result<(), _> = Err(err);
-                                       let _ = handle_error!(self, res, counterparty_node_id);
+                               let res = self.claim_funds_from_hop(&mut channel_state_lock, hop_data, payment_preimage);
+                               let claimed_htlc = if let ClaimFundsFromHop::DuplicateClaim = res { false } else { true };
+                               let htlc_claim_value_msat = match res {
+                                       ClaimFundsFromHop::MonitorUpdateFail(_, _, amt_opt) => amt_opt,
+                                       ClaimFundsFromHop::Success(amt) => Some(amt),
+                                       _ => None,
+                               };
+                               if let ClaimFundsFromHop::PrevHopForceClosed = res {
+                                       let preimage_update = ChannelMonitorUpdate {
+                                               update_id: CLOSED_CHANNEL_UPDATE_ID,
+                                               updates: vec![ChannelMonitorUpdateStep::PaymentPreimage {
+                                                       payment_preimage: payment_preimage.clone(),
+                                               }],
+                                       };
+                                       // We update the ChannelMonitor on the backward link, after
+                                       // receiving an offchain preimage event from the forward link (the
+                                       // event being update_fulfill_htlc).
+                                       if let Err(e) = self.chain_monitor.update_channel(prev_outpoint, preimage_update) {
+                                               log_error!(self.logger, "Critical error: failed to update channel monitor with preimage {:?}: {:?}",
+                                                       payment_preimage, e);
+                                       }
+                                       // Note that we do *not* set `claimed_htlc` to false here. In fact, this
+                                       // may well be a duplicate claim, but we have no way of knowing
+                                       // without interrogating the `ChannelMonitor` we've provided the above
+                                       // update to. Instead, we simply document in `PaymentForwarded` that this
+                                       // can happen.
+                               }
+                               mem::drop(channel_state_lock);
+                               if let ClaimFundsFromHop::MonitorUpdateFail(pk, err, _) = res {
+                                       let result: Result<(), _> = Err(err);
+                                       let _ = handle_error!(self, result, pk);
+                               }
+
+                               if claimed_htlc {
+                                       if let Some(forwarded_htlc_value) = forwarded_htlc_value_msat {
+                                               let fee_earned_msat = if let Some(claimed_htlc_value) = htlc_claim_value_msat {
+                                                       Some(claimed_htlc_value - forwarded_htlc_value)
+                                               } else { None };
+
+                                               let mut pending_events = self.pending_events.lock().unwrap();
+                                               pending_events.push(events::Event::PaymentForwarded {
+                                                       fee_earned_msat,
+                                                       claim_from_onchain_tx: from_onchain,
+                                               });
+                                       }
                                }
                        },
                }
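// The fee arithmetic feeding `PaymentForwarded` above, in isolation: the HTLC
// claimed on the inbound (previous-hop) channel carries the value we forwarded
// onward plus our routing fee, so the difference is what we earned. The helper
// name is ours.
fn fee_earned_msat(claimed_htlc_value_msat: Option<u64>, forwarded_htlc_value_msat: u64) -> Option<u64> {
    // e.g. claimed 1_000_050 msat after forwarding 1_000_000 msat => 50 msat fee
    claimed_htlc_value_msat.map(|claimed| claimed - forwarded_htlc_value_msat)
}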
@@ -2798,7 +3399,8 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
        pub fn channel_monitor_updated(&self, funding_txo: &OutPoint, highest_applied_update_id: u64) {
                let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
 
-               let (mut pending_failures, chan_restoration_res) = {
+               let chan_restoration_res;
+               let mut pending_failures = {
                        let mut channel_lock = self.channel_state.lock().unwrap();
                        let channel_state = &mut *channel_lock;
                        let mut channel = match channel_state.by_id.entry(funding_txo.to_channel_id()) {
@@ -2810,7 +3412,21 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                        }
 
                        let (raa, commitment_update, order, pending_forwards, pending_failures, funding_broadcastable, funding_locked) = channel.get_mut().monitor_updating_restored(&self.logger);
-                       (pending_failures, handle_chan_restoration_locked!(self, channel_lock, channel_state, channel, raa, commitment_update, order, None, pending_forwards, funding_broadcastable, funding_locked))
+                       let channel_update = if funding_locked.is_some() && channel.get().is_usable() && !channel.get().should_announce() {
+                               // We only send a channel_update when we are just now sending a funding_locked
+                               // and the channel is in a usable state. For public channels we rely on the
+                               // normal announcement_signatures process to send a channel_update, so we only
+                               // generate a unicast channel_update if this is a private channel.
+                               Some(events::MessageSendEvent::SendChannelUpdate {
+                                       node_id: channel.get().get_counterparty_node_id(),
+                                       msg: self.get_channel_update_for_unicast(channel.get()).unwrap(),
+                               })
+                       } else { None };
+                       chan_restoration_res = handle_chan_restoration_locked!(self, channel_lock, channel_state, channel, raa, commitment_update, order, None, pending_forwards, funding_broadcastable, funding_locked);
+                       if let Some(upd) = channel_update {
+                               channel_state.pending_msg_events.push(upd);
+                       }
+                       pending_failures
                };
                post_handle_chan_restoration!(self, chan_restoration_res);
                for failure in pending_failures.drain(..) {
@@ -2823,7 +3439,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                        return Err(MsgHandleErrInternal::send_err_msg_no_close("Unknown genesis block hash".to_owned(), msg.temporary_channel_id.clone()));
                }
 
-               let channel = Channel::new_from_req(&self.fee_estimator, &self.keys_manager, counterparty_node_id.clone(), their_features, msg, 0, &self.default_configuration)
+               let channel = Channel::new_from_req(&self.fee_estimator, &self.keys_manager, counterparty_node_id.clone(), &their_features, msg, 0, &self.default_configuration)
                        .map_err(|e| MsgHandleErrInternal::from_chan_no_close(e, msg.temporary_channel_id))?;
                let mut channel_state_lock = self.channel_state.lock().unwrap();
                let channel_state = &mut *channel_state_lock;
@@ -2849,7 +3465,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                        if chan.get().get_counterparty_node_id() != *counterparty_node_id {
                                                return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.temporary_channel_id));
                                        }
-                                       try_chan_entry!(self, chan.get_mut().accept_channel(&msg, &self.default_configuration, their_features), channel_state, chan);
+                                       try_chan_entry!(self, chan.get_mut().accept_channel(&msg, &self.default_configuration, &their_features), channel_state, chan);
                                        (chan.get().get_value_satoshis(), chan.get().get_funding_redeemscript().to_v0_p2wsh(), chan.get().get_user_id())
                                },
                                hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.temporary_channel_id))
@@ -2937,7 +3553,16 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                                Err(e) => try_chan_entry!(self, Err(e), channel_state, chan),
                                        };
                                        if let Err(e) = self.chain_monitor.watch_channel(chan.get().get_funding_txo().unwrap(), monitor) {
-                                               return_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::RevokeAndACKFirst, false, false);
+                                               let mut res = handle_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::RevokeAndACKFirst, false, false);
+                                               if let Err(MsgHandleErrInternal { ref mut shutdown_finish, .. }) = res {
+                                                       // We weren't able to watch the channel to begin with, so no updates should be made on
+                                                       // it. Previously, full_stack_target found an (unreachable) panic when the
+                                                       // monitor update contained within `shutdown_finish` was applied.
+                                                       if let Some((ref mut shutdown_finish, _)) = shutdown_finish {
+                                                               shutdown_finish.0.take();
+                                                       }
+                                               }
+                                               return res
                                        }
                                        funding_tx
                                },
@@ -2973,6 +3598,11 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                                node_id: counterparty_node_id.clone(),
                                                msg: announcement_sigs,
                                        });
+                               } else if chan.get().is_usable() {
+                                       channel_state.pending_msg_events.push(events::MessageSendEvent::SendChannelUpdate {
+                                               node_id: counterparty_node_id.clone(),
+                                               msg: self.get_channel_update_for_unicast(chan.get()).unwrap(),
+                                       });
                                }
                                Ok(())
                        },
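// The dispatch rule added here (and mirrored in channel_monitor_updated
// above), modeled locally: announced channels get announcement_signatures and
// later a broadcast channel_update, while unannounced-but-usable channels get
// a unicast channel_update so the counterparty can still route over (and
// build invoice hints for) the private channel. Names are ours.
enum FundingLockedFollowUp { AnnouncementSignatures, UnicastChannelUpdate, Nothing }

fn follow_up(have_announcement_sigs: bool, is_usable: bool) -> FundingLockedFollowUp {
    if have_announcement_sigs {
        FundingLockedFollowUp::AnnouncementSignatures
    } else if is_usable {
        FundingLockedFollowUp::UnicastChannelUpdate
    } else {
        FundingLockedFollowUp::Nothing
    }
}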
@@ -2981,7 +3611,8 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
        }
 
        fn internal_shutdown(&self, counterparty_node_id: &PublicKey, their_features: &InitFeatures, msg: &msgs::Shutdown) -> Result<(), MsgHandleErrInternal> {
-               let (mut dropped_htlcs, chan_option) = {
+               let mut dropped_htlcs: Vec<(HTLCSource, PaymentHash)>;
+               let result: Result<(), _> = loop {
                        let mut channel_state_lock = self.channel_state.lock().unwrap();
                        let channel_state = &mut *channel_state_lock;
 
@@ -2990,25 +3621,36 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                        if chan_entry.get().get_counterparty_node_id() != *counterparty_node_id {
                                                return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id));
                                        }
-                                       let (shutdown, closing_signed, dropped_htlcs) = try_chan_entry!(self, chan_entry.get_mut().shutdown(&self.fee_estimator, &their_features, &msg), channel_state, chan_entry);
+
+                                       if !chan_entry.get().received_shutdown() {
+                                               log_info!(self.logger, "Received a shutdown message from our counterparty for channel {}{}.",
+                                                       log_bytes!(msg.channel_id),
+                                                       if chan_entry.get().sent_shutdown() { " after we initiated shutdown" } else { "" });
+                                       }
+
+                                       let (shutdown, monitor_update, htlcs) = try_chan_entry!(self, chan_entry.get_mut().shutdown(&self.keys_manager, &their_features, &msg), channel_state, chan_entry);
+                                       dropped_htlcs = htlcs;
+
+                                       // Update the monitor with the shutdown script if necessary.
+                                       if let Some(monitor_update) = monitor_update {
+                                               if let Err(e) = self.chain_monitor.update_channel(chan_entry.get().get_funding_txo().unwrap(), monitor_update) {
+                                                       let (result, is_permanent) =
+                                                               handle_monitor_err!(self, e, channel_state.short_to_id, chan_entry.get_mut(), RAACommitmentOrder::CommitmentFirst, false, false, Vec::new(), Vec::new(), chan_entry.key());
+                                                       if is_permanent {
+                                                               remove_channel!(channel_state, chan_entry);
+                                                               break result;
+                                                       }
+                                               }
+                                       }
+
                                        if let Some(msg) = shutdown {
                                                channel_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown {
-                                                       node_id: counterparty_node_id.clone(),
-                                                       msg,
-                                               });
-                                       }
-                                       if let Some(msg) = closing_signed {
-                                               channel_state.pending_msg_events.push(events::MessageSendEvent::SendClosingSigned {
-                                                       node_id: counterparty_node_id.clone(),
+                                                       node_id: *counterparty_node_id,
                                                        msg,
                                                });
                                        }
-                                       if chan_entry.get().is_shutdown() {
-                                               if let Some(short_id) = chan_entry.get().get_short_channel_id() {
-                                                       channel_state.short_to_id.remove(&short_id);
-                                               }
-                                               (dropped_htlcs, Some(chan_entry.remove_entry().1))
-                                       } else { (dropped_htlcs, None) }
+
+                                       break Ok(());
                                },
                                hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id))
                        }
@@ -3016,14 +3658,8 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                for htlc_source in dropped_htlcs.drain(..) {
                        self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_source.0, &htlc_source.1, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() });
                }
-               if let Some(chan) = chan_option {
-                       if let Ok(update) = self.get_channel_update(&chan) {
-                               let mut channel_state = self.channel_state.lock().unwrap();
-                               channel_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
-                                       msg: update
-                               });
-                       }
-               }
+
+               let _ = handle_error!(self, result, *counterparty_node_id);
                Ok(())
        }
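// `let result = loop { ... break <value> }` above is the idiom (predating
// stable `break`-with-value from labeled blocks) for a block that can
// early-exit carrying a value; the error is then handled only after all locks
// are dropped. A minimal standalone example:
fn classify(x: i32) -> Result<(), &'static str> {
    let result: Result<(), &'static str> = loop {
        if x < 0 {
            break Err("negative"); // early exit with an error value
        }
        break Ok(()); // fall-through success
    };
    result
}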
 
@@ -3063,12 +3699,13 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                        self.tx_broadcaster.broadcast_transaction(&broadcast_tx);
                }
                if let Some(chan) = chan_option {
-                       if let Ok(update) = self.get_channel_update(&chan) {
+                       if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
                                let mut channel_state = self.channel_state.lock().unwrap();
                                channel_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
                                        msg: update
                                });
                        }
+                       self.pending_events.lock().unwrap().push(events::Event::ChannelClosed { channel_id: msg.channel_id, reason: ClosureReason::CooperativeClosure });
                }
                Ok(())
        }
@@ -3093,33 +3730,34 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                }
 
                                let create_pending_htlc_status = |chan: &Channel<Signer>, pending_forward_info: PendingHTLCStatus, error_code: u16| {
-                                       // Ensure error_code has the UPDATE flag set, since by default we send a
-                                       // channel update along as part of failing the HTLC.
-                                       assert!((error_code & 0x1000) != 0);
                                        // If the update_add is completely bogus, the call will Err and we will close,
                                        // but if we've sent a shutdown and they haven't acknowledged it yet, we just
                                        // want to reject the new HTLC and fail it backwards instead of forwarding.
                                        match pending_forward_info {
                                                PendingHTLCStatus::Forward(PendingHTLCInfo { ref incoming_shared_secret, .. }) => {
-                                                       let reason = if let Ok(upd) = self.get_channel_update(chan) {
-                                                               onion_utils::build_first_hop_failure_packet(incoming_shared_secret, error_code, &{
-                                                                       let mut res = Vec::with_capacity(8 + 128);
-                                                                       // TODO: underspecified, follow https://github.com/lightningnetwork/lightning-rfc/issues/791
-                                                                       res.extend_from_slice(&byte_utils::be16_to_array(0));
-                                                                       res.extend_from_slice(&upd.encode_with_len()[..]);
-                                                                       res
-                                                               }[..])
+                                                       let reason = if (error_code & 0x1000) != 0 {
+                                                               if let Ok(upd) = self.get_channel_update_for_unicast(chan) {
+                                                                       onion_utils::build_first_hop_failure_packet(incoming_shared_secret, error_code, &{
+                                                                               let mut res = Vec::with_capacity(8 + 128);
+                                                                               // TODO: underspecified, follow https://github.com/lightningnetwork/lightning-rfc/issues/791
+                                                                               res.extend_from_slice(&byte_utils::be16_to_array(0));
+                                                                               res.extend_from_slice(&upd.encode_with_len()[..]);
+                                                                               res
+                                                                       }[..])
+                                                               } else {
+                                                                       // The only case where we'd be unable to
+                                                                       // successfully get a channel update is if the
+                                                                       // channel isn't in the fully-funded state yet,
+                                                                       // implying our counterparty is trying to route
+                                                                       // payments over the channel back to themselves
+                                                                       // (because no one else should know the short_id
+                                                                       // is a lightning channel yet). We should have
+                                                                       // no problem just calling this
+                                                                       // unknown_next_peer (0x4000|10).
+                                                                       onion_utils::build_first_hop_failure_packet(incoming_shared_secret, 0x4000|10, &[])
+                                                               }
                                                        } else {
-                                                               // The only case where we'd be unable to
-                                                               // successfully get a channel update is if the
-                                                               // channel isn't in the fully-funded state yet,
-                                                               // implying our counterparty is trying to route
-                                                               // payments over the channel back to themselves
-                                                               // (cause no one else should know the short_id
-                                                               // is a lightning channel yet). We should have
-                                                               // no problem just calling this
-                                                               // unknown_next_peer (0x4000|10).
-                                                               onion_utils::build_first_hop_failure_packet(incoming_shared_secret, 0x4000|10, &[])
+                                                               onion_utils::build_first_hop_failure_packet(incoming_shared_secret, error_code, &[])
                                                        };
                                                        let msg = msgs::UpdateFailHTLC {
                                                                channel_id: msg.channel_id,
@@ -3140,7 +3778,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
 
        fn internal_update_fulfill_htlc(&self, counterparty_node_id: &PublicKey, msg: &msgs::UpdateFulfillHTLC) -> Result<(), MsgHandleErrInternal> {
                let mut channel_lock = self.channel_state.lock().unwrap();
-               let htlc_source = {
+               let (htlc_source, forwarded_htlc_value) = {
                        let channel_state = &mut *channel_lock;
                        match channel_state.by_id.entry(msg.channel_id) {
                                hash_map::Entry::Occupied(mut chan) => {
@@ -3152,7 +3790,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id))
                        }
                };
-               self.claim_funds_internal(channel_lock, htlc_source, msg.payment_preimage.clone());
+               self.claim_funds_internal(channel_lock, htlc_source, msg.payment_preimage.clone(), Some(forwarded_htlc_value), false);
                Ok(())
        }
 
@@ -3198,8 +3836,8 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                if chan.get().get_counterparty_node_id() != *counterparty_node_id {
                                        return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id));
                                }
-                               let (revoke_and_ack, commitment_signed, closing_signed, monitor_update) =
-                                       match chan.get_mut().commitment_signed(&msg, &self.fee_estimator, &self.logger) {
+                               let (revoke_and_ack, commitment_signed, monitor_update) =
+                                       match chan.get_mut().commitment_signed(&msg, &self.logger) {
                                                Err((None, e)) => try_chan_entry!(self, Err(e), channel_state, chan),
                                                Err((Some(update), e)) => {
                                                        assert!(chan.get().is_awaiting_monitor_update());
@@ -3211,7 +3849,6 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                        };
                                if let Err(e) = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), monitor_update) {
                                        return_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::RevokeAndACKFirst, true, commitment_signed.is_some());
-                                       //TODO: Rebroadcast closing_signed if present on monitor update restoration
                                }
                                channel_state.pending_msg_events.push(events::MessageSendEvent::SendRevokeAndACK {
                                        node_id: counterparty_node_id.clone(),
@@ -3230,12 +3867,6 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                                },
                                        });
                                }
-                               if let Some(msg) = closing_signed {
-                                       channel_state.pending_msg_events.push(events::MessageSendEvent::SendClosingSigned {
-                                               node_id: counterparty_node_id.clone(),
-                                               msg,
-                                       });
-                               }
                                Ok(())
                        },
                        hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id))
@@ -3255,6 +3886,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                        match channel_state.forward_htlcs.entry(match forward_info.routing {
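+                                                       // HTLCs to be received by us are keyed under
+                                                       // short_channel_id 0, the sentinel bucket in
+                                                       // forward_htlcs for our own node.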
                                                        PendingHTLCRouting::Forward { short_channel_id, .. } => short_channel_id,
                                                        PendingHTLCRouting::Receive { .. } => 0,
+                                                       PendingHTLCRouting::ReceiveKeysend { .. } => 0,
                                        }) {
                                                hash_map::Entry::Occupied(mut entry) => {
                                                        entry.get_mut().push(HTLCForwardInfo::AddHTLC { prev_short_channel_id, prev_funding_outpoint,
@@ -3290,12 +3922,12 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                                break Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id));
                                        }
                                        let was_frozen_for_monitor = chan.get().is_awaiting_monitor_update();
-                                       let (commitment_update, pending_forwards, pending_failures, closing_signed, monitor_update, htlcs_to_fail_in) =
-                                               break_chan_entry!(self, chan.get_mut().revoke_and_ack(&msg, &self.fee_estimator, &self.logger), channel_state, chan);
+                                       let (commitment_update, pending_forwards, pending_failures, monitor_update, htlcs_to_fail_in) =
+                                               break_chan_entry!(self, chan.get_mut().revoke_and_ack(&msg, &self.logger), channel_state, chan);
                                        htlcs_to_fail = htlcs_to_fail_in;
                                        if let Err(e) = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), monitor_update) {
                                                if was_frozen_for_monitor {
-                                                       assert!(commitment_update.is_none() && closing_signed.is_none() && pending_forwards.is_empty() && pending_failures.is_empty());
+                                                       assert!(commitment_update.is_none() && pending_forwards.is_empty() && pending_failures.is_empty());
                                                        break Err(MsgHandleErrInternal::ignore_no_close("Previous monitor update failure prevented responses to RAA".to_owned()));
                                                } else {
                                                        if let Err(e) = handle_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::CommitmentFirst, false, commitment_update.is_some(), pending_forwards, pending_failures) {
@@ -3309,12 +3941,6 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                                        updates,
                                                });
                                        }
-                                       if let Some(msg) = closing_signed {
-                                               channel_state.pending_msg_events.push(events::MessageSendEvent::SendClosingSigned {
-                                                       node_id: counterparty_node_id.clone(),
-                                                       msg,
-                                               });
-                                       }
                                        break Ok((pending_forwards, pending_failures, chan.get().get_short_channel_id().expect("RAA should only work on a short-id-available channel"), chan.get().get_funding_txo().unwrap()))
                                },
                                hash_map::Entry::Vacant(_) => break Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id))
@@ -3363,7 +3989,9 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
 
                                channel_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelAnnouncement {
                                        msg: try_chan_entry!(self, chan.get_mut().announcement_signatures(&self.our_network_key, self.get_our_node_id(), self.genesis_hash.clone(), msg), channel_state, chan),
-                                       update_msg: self.get_channel_update(chan.get()).unwrap(), // can only fail if we're not in a ready state
+                                       // Note that announcement_signatures fails if the channel cannot be announced,
+                                       // so get_channel_update_for_broadcast will never fail by the time we get here.
+                                       update_msg: self.get_channel_update_for_broadcast(chan.get()).unwrap(),
                                });
                        },
                        hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id))
@@ -3393,7 +4021,13 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                        }
                                        return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a channel_update for a channel from the wrong node - it shouldn't know about our private channels!".to_owned(), chan_id));
                                }
-                               try_chan_entry!(self, chan.get_mut().channel_update(&msg), channel_state, chan);
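+                               // BOLT 7: bit 0 of channel_update.contents.flags encodes the direction,
+                               // and is 0 when the update was generated by the node with the
+                               // lexicographically lesser public key ("node one"). An update for our
+                               // own direction tells us nothing new, so skip it (and skip persisting).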
+                               let were_node_one = self.get_our_node_id().serialize()[..] < chan.get().get_counterparty_node_id().serialize()[..];
+                               let msg_from_node_one = msg.contents.flags & 1 == 0;
+                               if were_node_one == msg_from_node_one {
+                                       return Ok(NotifyOption::SkipPersist);
+                               } else {
+                                       try_chan_entry!(self, chan.get_mut().channel_update(&msg), channel_state, chan);
+                               }
                        },
                        hash_map::Entry::Vacant(_) => unreachable!()
                }
@@ -3401,7 +4035,8 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
        }
 
        fn internal_channel_reestablish(&self, counterparty_node_id: &PublicKey, msg: &msgs::ChannelReestablish) -> Result<(), MsgHandleErrInternal> {
-               let (htlcs_failed_forward, need_lnd_workaround, chan_restoration_res) = {
+               let chan_restoration_res;
+               let (htlcs_failed_forward, need_lnd_workaround) = {
                        let mut channel_state_lock = self.channel_state.lock().unwrap();
                        let channel_state = &mut *channel_state_lock;
 
@@ -3416,15 +4051,27 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                        // add-HTLCs on disconnect, we may be handed HTLCs to fail backwards here.
                                        let (funding_locked, revoke_and_ack, commitment_update, monitor_update_opt, order, htlcs_failed_forward, shutdown) =
                                                try_chan_entry!(self, chan.get_mut().channel_reestablish(msg, &self.logger), channel_state, chan);
+                                       let mut channel_update = None;
                                        if let Some(msg) = shutdown {
                                                channel_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown {
                                                        node_id: counterparty_node_id.clone(),
                                                        msg,
                                                });
+                                       } else if chan.get().is_usable() {
+                                               // If the channel is in a usable state (ie the channel is not being shut
+                                               // down), send a unicast channel_update to our counterparty to make sure
+                                               // they have the latest channel parameters.
+                                               channel_update = Some(events::MessageSendEvent::SendChannelUpdate {
+                                                       node_id: chan.get().get_counterparty_node_id(),
+                                                       msg: self.get_channel_update_for_unicast(chan.get()).unwrap(),
+                                               });
                                        }
                                        let need_lnd_workaround = chan.get_mut().workaround_lnd_bug_4006.take();
-                                       (htlcs_failed_forward, need_lnd_workaround,
-                                               handle_chan_restoration_locked!(self, channel_state_lock, channel_state, chan, revoke_and_ack, commitment_update, order, monitor_update_opt, Vec::new(), None, funding_locked))
+                                       chan_restoration_res = handle_chan_restoration_locked!(self, channel_state_lock, channel_state, chan, revoke_and_ack, commitment_update, order, monitor_update_opt, Vec::new(), None, funding_locked);
+                                       if let Some(upd) = channel_update {
+                                               channel_state.pending_msg_events.push(upd);
+                                       }
+                                       (htlcs_failed_forward, need_lnd_workaround)
                                },
                                hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id))
                        }
@@ -3438,79 +4085,23 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                Ok(())
        }
 
-       /// Begin Update fee process. Allowed only on an outbound channel.
-       /// If successful, will generate a UpdateHTLCs event, so you should probably poll
-       /// PeerManager::process_events afterwards.
-       /// Note: This API is likely to change!
-       /// (C-not exported) Cause its doc(hidden) anyway
-       #[doc(hidden)]
-       pub fn update_fee(&self, channel_id: [u8;32], feerate_per_kw: u32) -> Result<(), APIError> {
-               let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
-               let counterparty_node_id;
-               let err: Result<(), _> = loop {
-                       let mut channel_state_lock = self.channel_state.lock().unwrap();
-                       let channel_state = &mut *channel_state_lock;
-
-                       match channel_state.by_id.entry(channel_id) {
-                               hash_map::Entry::Vacant(_) => return Err(APIError::APIMisuseError{err: format!("Failed to find corresponding channel for id {}", channel_id.to_hex())}),
-                               hash_map::Entry::Occupied(mut chan) => {
-                                       if !chan.get().is_outbound() {
-                                               return Err(APIError::APIMisuseError{err: "update_fee cannot be sent for an inbound channel".to_owned()});
-                                       }
-                                       if chan.get().is_awaiting_monitor_update() {
-                                               return Err(APIError::MonitorUpdateFailed);
-                                       }
-                                       if !chan.get().is_live() {
-                                               return Err(APIError::ChannelUnavailable{err: "Channel is either not yet fully established or peer is currently disconnected".to_owned()});
-                                       }
-                                       counterparty_node_id = chan.get().get_counterparty_node_id();
-                                       if let Some((update_fee, commitment_signed, monitor_update)) =
-                                                       break_chan_entry!(self, chan.get_mut().send_update_fee_and_commit(feerate_per_kw, &self.logger), channel_state, chan)
-                                       {
-                                               if let Err(_e) = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), monitor_update) {
-                                                       unimplemented!();
-                                               }
-                                               log_debug!(self.logger, "Updating fee resulted in a commitment_signed for channel {}", log_bytes!(chan.get().channel_id()));
-                                               channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
-                                                       node_id: chan.get().get_counterparty_node_id(),
-                                                       updates: msgs::CommitmentUpdate {
-                                                               update_add_htlcs: Vec::new(),
-                                                               update_fulfill_htlcs: Vec::new(),
-                                                               update_fail_htlcs: Vec::new(),
-                                                               update_fail_malformed_htlcs: Vec::new(),
-                                                               update_fee: Some(update_fee),
-                                                               commitment_signed,
-                                                       },
-                                               });
-                                       }
-                               },
-                       }
-                       return Ok(())
-               };
-
-               match handle_error!(self, err, counterparty_node_id) {
-                       Ok(_) => unreachable!(),
-                       Err(e) => { Err(APIError::APIMisuseError { err: e.err })}
-               }
-       }
-
        /// Process pending events from the `chain::Watch`, returning whether any events were processed.
        fn process_pending_monitor_events(&self) -> bool {
                let mut failed_channels = Vec::new();
-               let pending_monitor_events = self.chain_monitor.release_pending_monitor_events();
+               let mut pending_monitor_events = self.chain_monitor.release_pending_monitor_events();
                let has_pending_monitor_events = !pending_monitor_events.is_empty();
-               for monitor_event in pending_monitor_events {
+               for monitor_event in pending_monitor_events.drain(..) {
                        match monitor_event {
                                MonitorEvent::HTLCEvent(htlc_update) => {
                                        if let Some(preimage) = htlc_update.payment_preimage {
                                                log_trace!(self.logger, "Claiming HTLC with preimage {} from our monitor", log_bytes!(preimage.0));
-                                               self.claim_funds_internal(self.channel_state.lock().unwrap(), htlc_update.source, preimage);
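+                                               // The monitor reports the claimed value in satoshis, so convert
+                                               // to msat for the forwarded-value bookkeeping. The final argument
+                                               // is true here, unlike at the update_fulfill_htlc call site,
+                                               // distinguishing claims learned on-chain from message-based ones.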
+                                               self.claim_funds_internal(self.channel_state.lock().unwrap(), htlc_update.source, preimage, htlc_update.onchain_value_satoshis.map(|v| v * 1000), true);
                                        } else {
                                                log_trace!(self.logger, "Failing HTLC with hash {} from our monitor", log_bytes!(htlc_update.payment_hash.0));
                                                self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_update.source, &htlc_update.payment_hash, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() });
                                        }
                                },
-                               MonitorEvent::CommitmentTxBroadcasted(funding_outpoint) => {
+                               MonitorEvent::CommitmentTxConfirmed(funding_outpoint) => {
                                        let mut channel_lock = self.channel_state.lock().unwrap();
                                        let channel_state = &mut *channel_lock;
                                        let by_id = &mut channel_state.by_id;
@@ -3521,11 +4112,12 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                                        short_to_id.remove(&short_id);
                                                }
                                                failed_channels.push(chan.force_shutdown(false));
-                                               if let Ok(update) = self.get_channel_update(&chan) {
+                                               if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
                                                        pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
                                                                msg: update
                                                        });
                                                }
+                                               self.pending_events.lock().unwrap().push(events::Event::ChannelClosed { channel_id: chan.channel_id(), reason: ClosureReason::CommitmentTxConfirmed });
                                                pending_msg_events.push(events::MessageSendEvent::HandleError {
                                                        node_id: chan.get_counterparty_node_id(),
                                                        action: msgs::ErrorAction::SendErrorMessage {
@@ -3587,13 +4179,14 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                        Err(e) => {
                                                let (close_channel, res) = convert_chan_err!(self, e, short_to_id, chan, channel_id);
                                                handle_errors.push((chan.get_counterparty_node_id(), Err(res)));
+                                               // ChannelClosed event is generated by handle_error for us
                                                !close_channel
                                        }
                                }
                        });
                }
 
-               let has_update = has_monitor_update || !failed_htlcs.is_empty();
+               let has_update = has_monitor_update || !failed_htlcs.is_empty() || !handle_errors.is_empty();
                for (failures, channel_id) in failed_htlcs.drain(..) {
                        self.fail_holding_cell_htlcs(failures, channel_id);
                }
@@ -3605,6 +4198,70 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                has_update
        }
 
+       /// Check whether any channels have finished removing all pending updates after a shutdown
+       /// exchange and can now send a closing_signed.
+       /// Returns whether any closing_signed messages were generated.
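+       /// This runs as part of message-event processing (see the
+       /// MessageSendEventsProvider implementation below), so a channel which has just
+       /// drained its pending updates gets its closing_signed out promptly.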
+       fn maybe_generate_initial_closing_signed(&self) -> bool {
+               let mut handle_errors: Vec<(PublicKey, Result<(), _>)> = Vec::new();
+               let mut has_update = false;
+               {
+                       let mut channel_state_lock = self.channel_state.lock().unwrap();
+                       let channel_state = &mut *channel_state_lock;
+                       let by_id = &mut channel_state.by_id;
+                       let short_to_id = &mut channel_state.short_to_id;
+                       let pending_msg_events = &mut channel_state.pending_msg_events;
+
+                       by_id.retain(|channel_id, chan| {
+                               match chan.maybe_propose_closing_signed(&self.fee_estimator, &self.logger) {
+                                       Ok((msg_opt, tx_opt)) => {
+                                               if let Some(msg) = msg_opt {
+                                                       has_update = true;
+                                                       pending_msg_events.push(events::MessageSendEvent::SendClosingSigned {
+                                                               node_id: chan.get_counterparty_node_id(), msg,
+                                                       });
+                                               }
+                                               if let Some(tx) = tx_opt {
+                                                       // We're done with this channel. We got a closing_signed and sent back
+                                                       // a closing_signed with a closing transaction to broadcast.
+                                                       if let Some(short_id) = chan.get_short_channel_id() {
+                                                               short_to_id.remove(&short_id);
+                                                       }
+
+                                                       if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
+                                                               pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
+                                                                       msg: update
+                                                               });
+                                                       }
+
+                                                       if let Ok(mut pending_events_lock) = self.pending_events.lock() {
+                                                               pending_events_lock.push(events::Event::ChannelClosed {
+                                                                       channel_id: *channel_id,
+                                                                       reason: ClosureReason::CooperativeClosure
+                                                               });
+                                                       }
+
+                                                       log_info!(self.logger, "Broadcasting {}", log_tx!(tx));
+                                                       self.tx_broadcaster.broadcast_transaction(&tx);
+                                                       false
+                                               } else { true }
+                                       },
+                                       Err(e) => {
+                                               has_update = true;
+                                               let (close_channel, res) = convert_chan_err!(self, e, short_to_id, chan, channel_id);
+                                               handle_errors.push((chan.get_counterparty_node_id(), Err(res)));
+                                               !close_channel
+                                       }
+                               }
+                       });
+               }
+
+               for (counterparty_node_id, err) in handle_errors.drain(..) {
+                       let _ = handle_error!(self, err, counterparty_node_id);
+               }
+
+               has_update
+       }
+
        /// Handle a list of channel failures during a block_connected or block_disconnected call,
        /// pushing the channel monitor update (if any) to the background events queue and removing the
        /// Channel object.
@@ -3689,7 +4346,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
        /// The [`PaymentHash`] (and corresponding [`PaymentPreimage`]) must be globally unique. This
        /// method may return an Err if another payment with the same payment_hash is still pending.
        ///
-       /// `user_payment_id` will be provided back in [`PaymentReceived::user_payment_id`] events to
+       /// `user_payment_id` will be provided back, via [`PaymentPurpose::InvoicePayment::user_payment_id`] in [`PaymentReceived`] events, to
        /// allow tracking of which events correspond with which calls to this and
        /// [`create_inbound_payment`]. `user_payment_id` has no meaning inside of LDK, it is simply
        /// copied to events and otherwise ignored. It may be used to correlate PaymentReceived events
@@ -3723,7 +4380,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
        ///
        /// [`create_inbound_payment`]: Self::create_inbound_payment
        /// [`PaymentReceived`]: events::Event::PaymentReceived
-       /// [`PaymentReceived::user_payment_id`]: events::Event::PaymentReceived::user_payment_id
+       /// [`PaymentPurpose::InvoicePayment::user_payment_id`]: events::PaymentPurpose::InvoicePayment::user_payment_id
        pub fn create_inbound_payment_for_hash(&self, payment_hash: PaymentHash, min_value_msat: Option<u64>, invoice_expiry_delta_secs: u32, user_payment_id: u64) -> Result<PaymentSecret, APIError> {
                self.set_payment_hash_secret_map(payment_hash, None, min_value_msat, invoice_expiry_delta_secs, user_payment_id)
        }
@@ -3731,10 +4388,15 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
        #[cfg(any(test, feature = "fuzztarget", feature = "_test_utils"))]
        pub fn get_and_clear_pending_events(&self) -> Vec<events::Event> {
                let events = core::cell::RefCell::new(Vec::new());
-               let event_handler = |event| events.borrow_mut().push(event);
+               let event_handler = |event: &events::Event| events.borrow_mut().push(event.clone());
                self.process_pending_events(&event_handler);
                events.into_inner()
        }
+
+       #[cfg(test)]
+       pub fn has_pending_payments(&self) -> bool {
+               !self.pending_outbound_payments.lock().unwrap().is_empty()
+       }
 }
 
 impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> MessageSendEventsProvider for ChannelManager<Signer, M, T, K, F, L>
@@ -3758,6 +4420,9 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> MessageSend
                        if self.check_free_holding_cells() {
                                result = NotifyOption::DoPersist;
                        }
+                       if self.maybe_generate_initial_closing_signed() {
+                               result = NotifyOption::DoPersist;
+                       }
 
                        let mut pending_events = Vec::new();
                        let mut channel_state = self.channel_state.lock().unwrap();
@@ -3799,13 +4464,13 @@ where
                                result = NotifyOption::DoPersist;
                        }
 
-                       let mut pending_events = std::mem::replace(&mut *self.pending_events.lock().unwrap(), vec![]);
+                       let mut pending_events = mem::replace(&mut *self.pending_events.lock().unwrap(), vec![]);
                        if !pending_events.is_empty() {
                                result = NotifyOption::DoPersist;
                        }
 
                        for event in pending_events.drain(..) {
-                               handler.handle_event(event);
+                               handler.handle_event(&event);
                        }
 
                        result
@@ -3907,6 +4572,16 @@ where
                payment_secrets.retain(|_, inbound_payment| {
                        inbound_payment.expiry_time > header.time as u64
                });
+
+               let mut outbounds = self.pending_outbound_payments.lock().unwrap();
+               outbounds.retain(|_, payment| {
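+                       // Keep a payment while any of its parts are still in flight; once fully
+                       // resolved, a Retryable payment is retained only for PAYMENT_EXPIRY_BLOCKS
+                       // past its starting block, a short window in which it may still be retried.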
+                       const PAYMENT_EXPIRY_BLOCKS: u32 = 3;
+                       if payment.remaining_parts() != 0 { return true }
+                       if let PendingOutboundPayment::Retryable { starting_block_height, .. } = payment {
+                               return *starting_block_height + PAYMENT_EXPIRY_BLOCKS > height
+                       }
+                       true
+               });
        }
 
        fn get_relevant_txids(&self) -> Vec<Txid> {
@@ -3960,7 +4635,7 @@ where
                                let res = f(channel);
                                if let Ok((chan_res, mut timed_out_pending_htlcs)) = res {
                                        for (source, payment_hash) in timed_out_pending_htlcs.drain(..) {
-                                               let chan_update = self.get_channel_update(&channel).map(|u| u.encode_with_len()).unwrap(); // Cannot add/recv HTLCs before we have a short_id so unwrap is safe
+                                               let chan_update = self.get_channel_update_for_unicast(&channel).map(|u| u.encode_with_len()).unwrap(); // Cannot add/recv HTLCs before we have a short_id so unwrap is safe
                                                timed_out_htlcs.push((source, payment_hash,  HTLCFailReason::Reason {
                                                        failure_code: 0x1000 | 14, // expiry_too_soon, or at least it is now
                                                        data: chan_update,
@@ -3977,6 +4652,12 @@ where
                                                                node_id: channel.get_counterparty_node_id(),
                                                                msg: announcement_sigs,
                                                        });
+                                               } else if channel.is_usable() {
+                                                       log_trace!(self.logger, "Sending funding_locked WITHOUT announcement_signatures but with private channel_update for our counterparty on channel {}", log_bytes!(channel.channel_id()));
+                                                       pending_msg_events.push(events::MessageSendEvent::SendChannelUpdate {
+                                                               node_id: channel.get_counterparty_node_id(),
+                                                               msg: self.get_channel_update_for_unicast(channel).unwrap(),
+                                                       });
                                                } else {
                                                        log_trace!(self.logger, "Sending funding_locked WITHOUT announcement_signatures for {}", log_bytes!(channel.channel_id()));
                                                }
@@ -3989,11 +4670,12 @@ where
                                        // It looks like our counterparty went on-chain or funding transaction was
                                        // reorged out of the main chain. Close the channel.
                                        failed_channels.push(channel.force_shutdown(true));
-                                       if let Ok(update) = self.get_channel_update(&channel) {
+                                       if let Ok(update) = self.get_channel_update_for_broadcast(&channel) {
                                                pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
                                                        msg: update
                                                });
                                        }
+                                       self.pending_events.lock().unwrap().push(events::Event::ChannelClosed { channel_id: channel.channel_id(), reason: ClosureReason::CommitmentTxConfirmed });
                                        pending_msg_events.push(events::MessageSendEvent::HandleError {
                                                node_id: channel.get_counterparty_node_id(),
                                                action: msgs::ErrorAction::SendErrorMessage { msg: e },
@@ -4179,11 +4861,12 @@ impl<Signer: Sign, M: Deref , T: Deref , K: Deref , F: Deref , L: Deref >
                                                        short_to_id.remove(&short_id);
                                                }
                                                failed_channels.push(chan.force_shutdown(true));
-                                               if let Ok(update) = self.get_channel_update(&chan) {
+                                               if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
                                                        pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
                                                                msg: update
                                                        });
                                                }
+                                               self.pending_events.lock().unwrap().push(events::Event::ChannelClosed { channel_id: chan.channel_id(), reason: ClosureReason::DisconnectedPeer });
                                                false
                                        } else {
                                                true
@@ -4198,6 +4881,7 @@ impl<Signer: Sign, M: Deref , T: Deref , K: Deref , F: Deref , L: Deref >
                                                        if let Some(short_id) = chan.get_short_channel_id() {
                                                                short_to_id.remove(&short_id);
                                                        }
+                                                       self.pending_events.lock().unwrap().push(events::Event::ChannelClosed { channel_id: chan.channel_id(), reason: ClosureReason::DisconnectedPeer });
                                                        return false;
                                                } else {
                                                        no_channels_remain = false;
@@ -4222,8 +4906,8 @@ impl<Signer: Sign, M: Deref , T: Deref , K: Deref , F: Deref , L: Deref >
                                        &events::MessageSendEvent::BroadcastChannelAnnouncement { .. } => true,
                                        &events::MessageSendEvent::BroadcastNodeAnnouncement { .. } => true,
                                        &events::MessageSendEvent::BroadcastChannelUpdate { .. } => true,
+                                       &events::MessageSendEvent::SendChannelUpdate { ref node_id, .. } => node_id != counterparty_node_id,
                                        &events::MessageSendEvent::HandleError { ref node_id, .. } => node_id != counterparty_node_id,
-                                       &events::MessageSendEvent::PaymentFailureNetworkUpdate { .. } => true,
                                        &events::MessageSendEvent::SendChannelRangeQuery { .. } => false,
                                        &events::MessageSendEvent::SendShortIdsQuery { .. } => false,
                                        &events::MessageSendEvent::SendReplyChannelRange { .. } => false,
@@ -4286,14 +4970,14 @@ impl<Signer: Sign, M: Deref , T: Deref , K: Deref , F: Deref , L: Deref >
 
                if msg.channel_id == [0; 32] {
                        for chan in self.list_channels() {
-                               if chan.remote_network_id == *counterparty_node_id {
+                               if chan.counterparty.node_id == *counterparty_node_id {
                                        // Untrusted messages from peer, we throw away the error if id points to a non-existent channel
-                                       let _ = self.force_close_channel_with_peer(&chan.channel_id, Some(counterparty_node_id));
+                                       let _ = self.force_close_channel_with_peer(&chan.channel_id, Some(counterparty_node_id), Some(&msg.data));
                                }
                        }
                } else {
                        // Untrusted messages from peer, we throw away the error if id points to a non-existent channel
-                       let _ = self.force_close_channel_with_peer(&msg.channel_id, Some(counterparty_node_id));
+                       let _ = self.force_close_channel_with_peer(&msg.channel_id, Some(counterparty_node_id), Some(&msg.data));
                }
        }
 }
@@ -4380,7 +5064,11 @@ impl_writeable_tlv_based_enum!(PendingHTLCRouting,
        (1, Receive) => {
                (0, payment_data, required),
                (2, incoming_cltv_expiry, required),
-       }
+       },
+       (2, ReceiveKeysend) => {
+               (0, payment_preimage, required),
+               (2, incoming_cltv_expiry, required),
+       },
 ;);
 
 impl_writeable_tlv_based!(PendingHTLCInfo, {
@@ -4391,10 +5079,74 @@ impl_writeable_tlv_based!(PendingHTLCInfo, {
        (8, outgoing_cltv_value, required)
 });
 
-impl_writeable_tlv_based_enum!(HTLCFailureMsg, ;
-       (0, Relay),
-       (1, Malformed),
-);
+impl Writeable for HTLCFailureMsg {
+       fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
+               match self {
+                       HTLCFailureMsg::Relay(msgs::UpdateFailHTLC { channel_id, htlc_id, reason }) => {
+                               0u8.write(writer)?;
+                               channel_id.write(writer)?;
+                               htlc_id.write(writer)?;
+                               reason.write(writer)?;
+                       },
+                       HTLCFailureMsg::Malformed(msgs::UpdateFailMalformedHTLC {
+                               channel_id, htlc_id, sha256_of_onion, failure_code
+                       }) => {
+                               1u8.write(writer)?;
+                               channel_id.write(writer)?;
+                               htlc_id.write(writer)?;
+                               sha256_of_onion.write(writer)?;
+                               failure_code.write(writer)?;
+                       },
+               }
+               Ok(())
+       }
+}
+
+impl Readable for HTLCFailureMsg {
+       fn read<R: Read>(reader: &mut R) -> Result<Self, DecodeError> {
+               let id: u8 = Readable::read(reader)?;
+               match id {
+                       0 => {
+                               Ok(HTLCFailureMsg::Relay(msgs::UpdateFailHTLC {
+                                       channel_id: Readable::read(reader)?,
+                                       htlc_id: Readable::read(reader)?,
+                                       reason: Readable::read(reader)?,
+                               }))
+                       },
+                       1 => {
+                               Ok(HTLCFailureMsg::Malformed(msgs::UpdateFailMalformedHTLC {
+                                       channel_id: Readable::read(reader)?,
+                                       htlc_id: Readable::read(reader)?,
+                                       sha256_of_onion: Readable::read(reader)?,
+                                       failure_code: Readable::read(reader)?,
+                               }))
+                       },
+                       // In versions prior to 0.0.101, HTLCFailureMsg objects were written with type 0 or 1 but
+                       // weren't length-prefixed and thus didn't support reading the TLV stream suffix of the network
+                       // messages contained in the variants.
+                       // In version 0.0.101, support for reading the variants with these types was added, and
+                       // we should migrate to writing these variants when UpdateFailHTLC or
+                       // UpdateFailMalformedHTLC get TLV fields.
+                       2 => {
+                               let length: BigSize = Readable::read(reader)?;
+                               let mut s = FixedLengthReader::new(reader, length.0);
+                               let res = Readable::read(&mut s)?;
+                               s.eat_remaining()?; // Return ShortRead if there aren't actually enough bytes
+                               Ok(HTLCFailureMsg::Relay(res))
+                       },
+                       3 => {
+                               let length: BigSize = Readable::read(reader)?;
+                               let mut s = FixedLengthReader::new(reader, length.0);
+                               let res = Readable::read(&mut s)?;
+                               s.eat_remaining()?; // Return ShortRead if there aren't actually enough bytes
+                               Ok(HTLCFailureMsg::Malformed(res))
+                       },
+                       _ => Err(DecodeError::UnknownRequiredFeature),
+               }
+       }
+}
+
 impl_writeable_tlv_based_enum!(PendingHTLCStatus, ;
        (0, Forward),
        (1, Fail),
@@ -4407,21 +5159,118 @@ impl_writeable_tlv_based!(HTLCPreviousHopData, {
        (6, incoming_packet_shared_secret, required)
 });
 
-impl_writeable_tlv_based!(ClaimableHTLC, {
-       (0, prev_hop, required),
-       (2, value, required),
-       (4, payment_data, required),
-       (6, cltv_expiry, required),
-});
+impl Writeable for ClaimableHTLC {
+       fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
+               let payment_data = match &self.onion_payload {
+                       OnionPayload::Invoice(data) => Some(data.clone()),
+                       _ => None,
+               };
+               let keysend_preimage = match self.onion_payload {
+                       OnionPayload::Invoice(_) => None,
+                       OnionPayload::Spontaneous(preimage) => Some(preimage.clone()),
+               };
+               write_tlv_fields!(writer, {
+                       (0, self.prev_hop, required),
+                       (2, self.value, required),
+                       (4, payment_data, option),
+                       (6, self.cltv_expiry, required),
+                       (8, keysend_preimage, option),
+               });
+               Ok(())
+       }
+}
 
-impl_writeable_tlv_based_enum!(HTLCSource,
-       (0, OutboundRoute) => {
-               (0, session_priv, required),
-               (2, first_hop_htlc_msat, required),
-               (4, path, vec_type),
-       }, ;
-       (1, PreviousHopData)
-);
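+// A ClaimableHTLC carries exactly one of `payment_data` (an invoice payment) or
+// `keysend_preimage` (a spontaneous payment), so the Readable implementation below
+// rejects serialized HTLCs which set both or neither.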
+impl Readable for ClaimableHTLC {
+       fn read<R: Read>(reader: &mut R) -> Result<Self, DecodeError> {
+               let mut prev_hop = ::util::ser::OptionDeserWrapper(None);
+               let mut value = 0;
+               let mut payment_data: Option<msgs::FinalOnionHopData> = None;
+               let mut cltv_expiry = 0;
+               let mut keysend_preimage: Option<PaymentPreimage> = None;
+               read_tlv_fields!(reader, {
+                       (0, prev_hop, required),
+                       (2, value, required),
+                       (4, payment_data, option),
+                       (6, cltv_expiry, required),
+                       (8, keysend_preimage, option),
+               });
+               let onion_payload = match keysend_preimage {
+                       Some(p) => {
+                               if payment_data.is_some() {
+                                       return Err(DecodeError::InvalidValue)
+                               }
+                               OnionPayload::Spontaneous(p)
+                       },
+                       None => {
+                               if payment_data.is_none() {
+                                       return Err(DecodeError::InvalidValue)
+                               }
+                               OnionPayload::Invoice(payment_data.unwrap())
+                       },
+               };
+               Ok(Self {
+                       prev_hop: prev_hop.0.unwrap(),
+                       value,
+                       onion_payload,
+                       cltv_expiry,
+               })
+       }
+}
+
+impl Readable for HTLCSource {
+       fn read<R: Read>(reader: &mut R) -> Result<Self, DecodeError> {
+               let id: u8 = Readable::read(reader)?;
+               match id {
+                       0 => {
+                               let mut session_priv: ::util::ser::OptionDeserWrapper<SecretKey> = ::util::ser::OptionDeserWrapper(None);
+                               let mut first_hop_htlc_msat: u64 = 0;
+                               let mut path = Some(Vec::new());
+                               let mut payment_id = None;
+                               read_tlv_fields!(reader, {
+                                       (0, session_priv, required),
+                                       (1, payment_id, option),
+                                       (2, first_hop_htlc_msat, required),
+                                       (4, path, vec_type),
+                               });
+                               if payment_id.is_none() {
+                                       // For backwards compat, if there was no payment_id written, use the session_priv bytes
+                                       // instead.
+                                       payment_id = Some(PaymentId(*session_priv.0.unwrap().as_ref()));
+                               }
+                               Ok(HTLCSource::OutboundRoute {
+                                       session_priv: session_priv.0.unwrap(),
+                                       first_hop_htlc_msat,
+                                       path: path.unwrap(),
+                                       payment_id: payment_id.unwrap(),
+                               })
+                       }
+                       1 => Ok(HTLCSource::PreviousHopData(Readable::read(reader)?)),
+                       _ => Err(DecodeError::UnknownRequiredFeature),
+               }
+       }
+}
+
+impl Writeable for HTLCSource {
+       fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ::io::Error> {
+               match self {
+                       HTLCSource::OutboundRoute { ref session_priv, ref first_hop_htlc_msat, ref path, payment_id } => {
+                               0u8.write(writer)?;
+                               let payment_id_opt = Some(payment_id);
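+                               // payment_id is written under an odd TLV type so that readers which
+                               // predate it skip the field instead of failing to deserialize.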
+                               write_tlv_fields!(writer, {
+                                       (0, session_priv, required),
+                                       (1, payment_id_opt, option),
+                                       (2, first_hop_htlc_msat, required),
+                                       (4, path, vec_type),
+                                });
+                       }
+                       HTLCSource::PreviousHopData(ref field) => {
+                               1u8.write(writer)?;
+                               field.write(writer)?;
+                       }
+               }
+               Ok(())
+       }
+}
 
 impl_writeable_tlv_based_enum!(HTLCFailReason,
        (0, LightningError) => {
@@ -4454,6 +5303,20 @@ impl_writeable_tlv_based!(PendingInboundPayment, {
        (8, min_value_msat, required),
 });
 
+impl_writeable_tlv_based_enum!(PendingOutboundPayment,
+       (0, Legacy) => {
+               (0, session_privs, required),
+       },
+       (2, Retryable) => {
+               (0, session_privs, required),
+               (2, payment_hash, required),
+               (4, payment_secret, option),
+               (6, total_msat, required),
+               (8, pending_amt_msat, required),
+               (10, starting_block_height, required),
+       },
+;);
+
 impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> Writeable for ChannelManager<Signer, M, T, K, F, L>
        where M::Target: chain::Watch<Signer>,
         T::Target: BroadcasterInterface,
@@ -4461,7 +5324,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> Writeable f
         F::Target: FeeEstimator,
         L::Target: Logger,
 {
-       fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ::std::io::Error> {
+       fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
                let _consistency_lock = self.total_consistency_lock.write().unwrap();
 
                write_ver_prefix!(writer, SERIALIZATION_VERSION, MIN_SERIALIZATION_VERSION);
@@ -4542,12 +5405,37 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> Writeable f
                }
 
                let pending_outbound_payments = self.pending_outbound_payments.lock().unwrap();
-               (pending_outbound_payments.len() as u64).write(writer)?;
-               for session_priv in pending_outbound_payments.iter() {
-                       session_priv.write(writer)?;
+               // For backwards compat, write the session privs and their total length.
+               let mut num_pending_outbounds_compat: u64 = 0;
+               for (_, outbound) in pending_outbound_payments.iter() {
+                       num_pending_outbounds_compat += outbound.remaining_parts() as u64;
+               }
+               num_pending_outbounds_compat.write(writer)?;
+               for (_, outbound) in pending_outbound_payments.iter() {
+                       match outbound {
+                               PendingOutboundPayment::Legacy { session_privs } |
+                               PendingOutboundPayment::Retryable { session_privs, .. } => {
+                                       for session_priv in session_privs.iter() {
+                                               session_priv.write(writer)?;
+                                       }
+                               }
+                       }
                }
 
-               write_tlv_fields!(writer, {});
+               // Encode without retry info for 0.0.101 compatibility.
+               let mut pending_outbound_payments_no_retry: HashMap<PaymentId, HashSet<[u8; 32]>> = HashMap::new();
+               for (id, outbound) in pending_outbound_payments.iter() {
+                       match outbound {
+                               PendingOutboundPayment::Legacy { session_privs } |
+                               PendingOutboundPayment::Retryable { session_privs, .. } => {
+                                       pending_outbound_payments_no_retry.insert(*id, session_privs.clone());
+                               }
+                       }
+               }
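+
+               // Sanity-check sketch (not part of this change): the legacy count written
+               // above and the no-retry map built here describe the same parts, assuming
+               // `remaining_parts()` counts the live session_privs of each payment.
+               debug_assert_eq!(num_pending_outbounds_compat as usize,
+                       pending_outbound_payments_no_retry.values().map(|s| s.len()).sum::<usize>());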
+               write_tlv_fields!(writer, {
+                       (1, pending_outbound_payments_no_retry, required),
+                       (3, pending_outbound_payments, required),
+               });
 
                Ok(())
        }
@@ -4657,7 +5545,7 @@ impl<'a, Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
         F::Target: FeeEstimator,
         L::Target: Logger,
 {
-       fn read<R: ::std::io::Read>(reader: &mut R, args: ChannelManagerReadArgs<'a, Signer, M, T, K, F, L>) -> Result<Self, DecodeError> {
+       fn read<R: io::Read>(reader: &mut R, args: ChannelManagerReadArgs<'a, Signer, M, T, K, F, L>) -> Result<Self, DecodeError> {
                let (blockhash, chan_manager) = <(BlockHash, ChannelManager<Signer, M, T, K, F, L>)>::read(reader, args)?;
                Ok((blockhash, Arc::new(chan_manager)))
        }
@@ -4671,7 +5559,7 @@ impl<'a, Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
         F::Target: FeeEstimator,
         L::Target: Logger,
 {
-       fn read<R: ::std::io::Read>(reader: &mut R, mut args: ChannelManagerReadArgs<'a, Signer, M, T, K, F, L>) -> Result<Self, DecodeError> {
+       fn read<R: io::Read>(reader: &mut R, mut args: ChannelManagerReadArgs<'a, Signer, M, T, K, F, L>) -> Result<Self, DecodeError> {
                let _ver = read_ver_prefix!(reader, SERIALIZATION_VERSION);
 
                let genesis_hash: BlockHash = Readable::read(reader)?;
@@ -4684,6 +5572,7 @@ impl<'a, Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
                let mut funding_txo_set = HashSet::with_capacity(cmp::min(channel_count as usize, 128));
                let mut by_id = HashMap::with_capacity(cmp::min(channel_count as usize, 128));
                let mut short_to_id = HashMap::with_capacity(cmp::min(channel_count as usize, 128));
+               let mut channel_closures = Vec::new();
                for _ in 0..channel_count {
                        let mut channel: Channel<Signer> = Channel::read(reader, &args.keys_manager)?;
                        let funding_txo = channel.get_funding_txo().ok_or(DecodeError::InvalidValue)?;
@@ -4707,9 +5596,17 @@ impl<'a, Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
                                                channel.get_cur_counterparty_commitment_transaction_number() > monitor.get_cur_counterparty_commitment_number() ||
                                                channel.get_latest_monitor_update_id() < monitor.get_latest_update_id() {
                                        // But if the channel is behind the monitor, close the channel:
+                                       log_error!(args.logger, "A ChannelManager is stale compared to the current ChannelMonitor!");
+                                       log_error!(args.logger, " The channel will be force-closed and the latest commitment transaction from the ChannelMonitor broadcast.");
+                                       log_error!(args.logger, " The ChannelMonitor for channel {} is at update_id {} but the ChannelManager is at update_id {}.",
+                                               log_bytes!(channel.channel_id()), monitor.get_latest_update_id(), channel.get_latest_monitor_update_id());
                                        let (_, mut new_failed_htlcs) = channel.force_shutdown(true);
                                        failed_htlcs.append(&mut new_failed_htlcs);
                                        monitor.broadcast_latest_holder_commitment_txn(&args.tx_broadcaster, &args.logger);
+                                       channel_closures.push(events::Event::ChannelClosed {
+                                               channel_id: channel.channel_id(),
+                                               reason: ClosureReason::OutdatedChannelManager
+                                       });
                                } else {
                                        if let Some(short_channel_id) = channel.get_short_channel_id() {
                                                short_to_id.insert(short_channel_id, channel.channel_id());
@@ -4775,6 +5672,16 @@ impl<'a, Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
                                None => continue,
                        }
                }
+               if forward_htlcs_count > 0 {
+                       // If we have pending HTLCs to forward, assume we either dropped a
+                       // `PendingHTLCsForwardable` or the user received it but never processed it as they
+                       // shut down before the timer hit. Either way, set the time_forwardable to a small
+                       // constant as enough time has likely passed that we should simply handle the forwards
+                       // now, or at least after the user gets a chance to reconnect to our peers.
+                       pending_events_read.push(events::Event::PendingHTLCsForwardable {
+                               time_forwardable: Duration::from_secs(2),
+                       });
+               }
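+
+               // Consumer-side sketch (illustration, not part of this change): handlers
+               // receive the regenerated event like any other and should wait roughly
+               // `time_forwardable` before calling process_pending_htlc_forwards().
+               fn _forwardable_delay(ev: &events::Event) -> Option<Duration> {
+                       match ev {
+                               events::Event::PendingHTLCsForwardable { time_forwardable } => Some(*time_forwardable),
+                               _ => None,
+                       }
+               }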
 
                let background_event_count: u64 = Readable::read(reader)?;
                let mut pending_background_events_read: Vec<BackgroundEvent> = Vec::with_capacity(cmp::min(background_event_count as usize, MAX_ALLOC_SIZE/mem::size_of::<BackgroundEvent>()));
@@ -4796,19 +5703,43 @@ impl<'a, Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
                        }
                }
 
-               let pending_outbound_payments_count: u64 = Readable::read(reader)?;
-               let mut pending_outbound_payments: HashSet<[u8; 32]> = HashSet::with_capacity(cmp::min(pending_outbound_payments_count as usize, MAX_ALLOC_SIZE/32));
-               for _ in 0..pending_outbound_payments_count {
-                       if !pending_outbound_payments.insert(Readable::read(reader)?) {
-                               return Err(DecodeError::InvalidValue);
-                       }
+               let pending_outbound_payments_count_compat: u64 = Readable::read(reader)?;
+               let mut pending_outbound_payments_compat: HashMap<PaymentId, PendingOutboundPayment> =
+                       HashMap::with_capacity(cmp::min(pending_outbound_payments_count_compat as usize, MAX_ALLOC_SIZE/32));
+               for _ in 0..pending_outbound_payments_count_compat {
+                       let session_priv = Readable::read(reader)?;
+                       let payment = PendingOutboundPayment::Legacy {
+                               session_privs: [session_priv].iter().cloned().collect()
+                       };
+                       if pending_outbound_payments_compat.insert(PaymentId(session_priv), payment).is_some() {
+                               return Err(DecodeError::InvalidValue);
+                       }
                }
 
-               read_tlv_fields!(reader, {});
+               // pending_outbound_payments_no_retry is for compatibility with 0.0.101 clients.
+               let mut pending_outbound_payments_no_retry: Option<HashMap<PaymentId, HashSet<[u8; 32]>>> = None;
+               let mut pending_outbound_payments = None;
+               read_tlv_fields!(reader, {
+                       (1, pending_outbound_payments_no_retry, option),
+                       (3, pending_outbound_payments, option),
+               });
+               if pending_outbound_payments.is_none() && pending_outbound_payments_no_retry.is_none() {
+                       pending_outbound_payments = Some(pending_outbound_payments_compat);
+               } else if pending_outbound_payments.is_none() {
+                       let mut outbounds = HashMap::new();
+                       for (id, session_privs) in pending_outbound_payments_no_retry.unwrap().drain() {
+                               outbounds.insert(id, PendingOutboundPayment::Legacy { session_privs });
+                       }
+                       pending_outbound_payments = Some(outbounds);
+               }
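+
+               // Precedence sketch (illustration only, not upstream code): the ladder
+               // above restated over plain maps. TLV type 3 (full retry data) wins,
+               // then TLV type 1 upgraded to Legacy, then the pre-TLV compat list.
+               fn _select_outbounds(
+                       compat: HashMap<PaymentId, PendingOutboundPayment>,
+                       no_retry: Option<HashMap<PaymentId, HashSet<[u8; 32]>>>,
+                       full: Option<HashMap<PaymentId, PendingOutboundPayment>>,
+               ) -> HashMap<PaymentId, PendingOutboundPayment> {
+                       if let Some(full) = full { full }
+                       else if let Some(no_retry) = no_retry {
+                               no_retry.into_iter().map(|(id, session_privs)|
+                                       (id, PendingOutboundPayment::Legacy { session_privs })).collect()
+                       } else { compat }
+               }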
 
                let mut secp_ctx = Secp256k1::new();
                secp_ctx.seeded_randomize(&args.keys_manager.get_secure_random_bytes());
 
+               if !channel_closures.is_empty() {
+                       pending_events_read.append(&mut channel_closures);
+               }
+
                let channel_manager = ChannelManager {
                        genesis_hash,
                        fee_estimator: args.fee_estimator,
@@ -4825,7 +5756,7 @@ impl<'a, Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
                                pending_msg_events: Vec::new(),
                        }),
                        pending_inbound_payments: Mutex::new(pending_inbound_payments),
-                       pending_outbound_payments: Mutex::new(pending_outbound_payments),
+                       pending_outbound_payments: Mutex::new(pending_outbound_payments.unwrap()),
 
                        our_network_key: args.keys_manager.get_node_secret(),
                        our_network_pubkey: PublicKey::from_secret_key(&secp_ctx, &args.keys_manager.get_node_secret()),
@@ -4859,17 +5790,28 @@ impl<'a, Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
 
 #[cfg(test)]
 mod tests {
-       use ln::channelmanager::PersistenceNotifier;
-       use std::sync::Arc;
-       use core::sync::atomic::{AtomicBool, Ordering};
-       use std::thread;
+       use bitcoin::hashes::Hash;
+       use bitcoin::hashes::sha256::Hash as Sha256;
        use core::time::Duration;
+       use ln::{PaymentPreimage, PaymentHash, PaymentSecret};
+       use ln::channelmanager::{PaymentId, PaymentSendFailure};
+       use ln::features::{InitFeatures, InvoiceFeatures};
        use ln::functional_test_utils::*;
-       use ln::features::InitFeatures;
+       use ln::msgs;
        use ln::msgs::ChannelMessageHandler;
+       use routing::router::{get_keysend_route, get_route};
+       use util::errors::APIError;
+       use util::events::{Event, MessageSendEvent, MessageSendEventsProvider};
+       use util::test_utils;
 
+       #[cfg(feature = "std")]
        #[test]
        fn test_wait_timeout() {
+               use ln::channelmanager::PersistenceNotifier;
+               use sync::Arc;
+               use core::sync::atomic::{AtomicBool, Ordering};
+               use std::thread;
+
                let persistence_notifier = Arc::new(PersistenceNotifier::new());
                let thread_notifier = Arc::clone(&persistence_notifier);
 
@@ -4919,6 +5861,12 @@ mod tests {
                let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
                let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
 
+               // All nodes start with a persistable update pending as `create_network` connects each node
+               // with all other nodes to make most tests simpler.
+               assert!(nodes[0].node.await_persistable_update_timeout(Duration::from_millis(1)));
+               assert!(nodes[1].node.await_persistable_update_timeout(Duration::from_millis(1)));
+               assert!(nodes[2].node.await_persistable_update_timeout(Duration::from_millis(1)));
+
                let mut chan = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
 
                // We check that the channel info nodes have doesn't change too early, even though we try
@@ -4955,6 +5903,320 @@ mod tests {
                // At this point the channel info given by peers should still be the same.
                assert_eq!(nodes[0].node.list_channels()[0], node_a_chan_info);
                assert_eq!(nodes[1].node.list_channels()[0], node_b_chan_info);
+
+               // An earlier version of handle_channel_update didn't check the directionality of the
+               // update message and would always update the local fee info, even if our peer was
+               // (spuriously) forwarding us our own channel_update.
+               let as_node_one = nodes[0].node.get_our_node_id().serialize()[..] < nodes[1].node.get_our_node_id().serialize()[..];
+               let as_update = if as_node_one == (chan.0.contents.flags & 1 == 0 /* chan.0 is from node one */) { &chan.0 } else { &chan.1 };
+               let bs_update = if as_node_one == (chan.0.contents.flags & 1 == 0 /* chan.0 is from node one */) { &chan.1 } else { &chan.0 };
+
+               // First deliver each peer's own message, checking that the node doesn't need to be
+               // persisted and that its channel info remains the same.
+               nodes[0].node.handle_channel_update(&nodes[1].node.get_our_node_id(), &as_update);
+               nodes[1].node.handle_channel_update(&nodes[0].node.get_our_node_id(), &bs_update);
+               assert!(!nodes[0].node.await_persistable_update_timeout(Duration::from_millis(1)));
+               assert!(!nodes[1].node.await_persistable_update_timeout(Duration::from_millis(1)));
+               assert_eq!(nodes[0].node.list_channels()[0], node_a_chan_info);
+               assert_eq!(nodes[1].node.list_channels()[0], node_b_chan_info);
+
+               // Finally, deliver the other peer's message, ensuring each node needs to be persisted and
+               // the channel info has updated.
+               nodes[0].node.handle_channel_update(&nodes[1].node.get_our_node_id(), &bs_update);
+               nodes[1].node.handle_channel_update(&nodes[0].node.get_our_node_id(), &as_update);
+               assert!(nodes[0].node.await_persistable_update_timeout(Duration::from_millis(1)));
+               assert!(nodes[1].node.await_persistable_update_timeout(Duration::from_millis(1)));
+               assert_ne!(nodes[0].node.list_channels()[0], node_a_chan_info);
+               assert_ne!(nodes[1].node.list_channels()[0], node_b_chan_info);
+       }
+
+       #[test]
+       fn test_keysend_dup_hash_partial_mpp() {
+               // Test that a keysend payment with a duplicate hash to an existing partial MPP payment fails as
+               // expected.
+               let chanmon_cfgs = create_chanmon_cfgs(2);
+               let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+               let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+               let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+               create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
+               let logger = test_utils::TestLogger::new();
+
+               // First, send a partial MPP payment.
+               let net_graph_msg_handler = &nodes[0].net_graph_msg_handler;
+               let route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph, &nodes[1].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), 100_000, TEST_FINAL_CLTV, &logger).unwrap();
+               let (payment_preimage, our_payment_hash, payment_secret) = get_payment_preimage_hash!(&nodes[1]);
+               let payment_id = PaymentId([42; 32]);
+               // Use the utility function send_payment_along_path to send the payment with MPP data which
+               // indicates there are more HTLCs coming.
+               let cur_height = CHAN_CONFIRM_DEPTH + 1; // route_payment calls send_payment, which adds 1 to the current height. So we do the same here to match.
+               nodes[0].node.send_payment_along_path(&route.paths[0], &our_payment_hash, &Some(payment_secret), 200_000, cur_height, payment_id, &None).unwrap();
+               check_added_monitors!(nodes[0], 1);
+               let mut events = nodes[0].node.get_and_clear_pending_msg_events();
+               assert_eq!(events.len(), 1);
+               pass_along_path(&nodes[0], &[&nodes[1]], 200_000, our_payment_hash, Some(payment_secret), events.drain(..).next().unwrap(), false, None);
+
+               // Next, send a keysend payment with the same payment_hash and make sure it fails.
+               nodes[0].node.send_spontaneous_payment(&route, Some(payment_preimage)).unwrap();
+               check_added_monitors!(nodes[0], 1);
+               let mut events = nodes[0].node.get_and_clear_pending_msg_events();
+               assert_eq!(events.len(), 1);
+               let ev = events.drain(..).next().unwrap();
+               let payment_event = SendEvent::from_event(ev);
+               nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
+               check_added_monitors!(nodes[1], 0);
+               commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
+               expect_pending_htlcs_forwardable!(nodes[1]);
+               expect_pending_htlcs_forwardable!(nodes[1]);
+               check_added_monitors!(nodes[1], 1);
+               let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
+               assert!(updates.update_add_htlcs.is_empty());
+               assert!(updates.update_fulfill_htlcs.is_empty());
+               assert_eq!(updates.update_fail_htlcs.len(), 1);
+               assert!(updates.update_fail_malformed_htlcs.is_empty());
+               assert!(updates.update_fee.is_none());
+               nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fail_htlcs[0]);
+               commitment_signed_dance!(nodes[0], nodes[1], updates.commitment_signed, true, true);
+               expect_payment_failed!(nodes[0], our_payment_hash, true);
+
+               // Send the second half of the original MPP payment.
+               nodes[0].node.send_payment_along_path(&route.paths[0], &our_payment_hash, &Some(payment_secret), 200_000, cur_height, payment_id, &None).unwrap();
+               check_added_monitors!(nodes[0], 1);
+               let mut events = nodes[0].node.get_and_clear_pending_msg_events();
+               assert_eq!(events.len(), 1);
+               pass_along_path(&nodes[0], &[&nodes[1]], 200_000, our_payment_hash, Some(payment_secret), events.drain(..).next().unwrap(), true, None);
+
+               // Claim the full MPP payment. Note that we can't use a test utility like
+               // claim_funds_along_route because the ordering of the messages causes the second half of the
+               // payment to be put in the holding cell, which confuses the test utilities. So we exchange the
+               // lightning messages manually.
+               assert!(nodes[1].node.claim_funds(payment_preimage));
+               check_added_monitors!(nodes[1], 2);
+               let bs_first_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
+               nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_first_updates.update_fulfill_htlcs[0]);
+               nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_first_updates.commitment_signed);
+               check_added_monitors!(nodes[0], 1);
+               let (as_first_raa, as_first_cs) = get_revoke_commit_msgs!(nodes[0], nodes[1].node.get_our_node_id());
+               nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_first_raa);
+               check_added_monitors!(nodes[1], 1);
+               let bs_second_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
+               nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_first_cs);
+               check_added_monitors!(nodes[1], 1);
+               let bs_first_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
+               nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_second_updates.update_fulfill_htlcs[0]);
+               nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_second_updates.commitment_signed);
+               check_added_monitors!(nodes[0], 1);
+               let as_second_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
+               nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_first_raa);
+               let as_second_updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
+               check_added_monitors!(nodes[0], 1);
+               nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_second_raa);
+               check_added_monitors!(nodes[1], 1);
+               nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_second_updates.commitment_signed);
+               check_added_monitors!(nodes[1], 1);
+               let bs_third_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
+               nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_third_raa);
+               check_added_monitors!(nodes[0], 1);
+
+               // Note that successful MPP payments will generate a single PaymentSent event upon the
+               // first path's success. No further events will be generated for subsequent path successes.
+               let events = nodes[0].node.get_and_clear_pending_events();
+               match events[0] {
+                       Event::PaymentSent { payment_preimage: ref preimage, payment_hash: ref hash } => {
+                               assert_eq!(payment_preimage, *preimage);
+                               assert_eq!(our_payment_hash, *hash);
+                       },
+                       _ => panic!("Unexpected event"),
+               }
+       }
+
+       #[test]
+       fn test_keysend_dup_payment_hash() {
+               // (1): Test that a keysend payment with a duplicate payment hash to an existing pending
+               //      outbound regular payment fails as expected.
+               // (2): Test that a regular payment with a duplicate payment hash to an existing keysend payment
+               //      fails as expected.
+               let chanmon_cfgs = create_chanmon_cfgs(2);
+               let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+               let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+               let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+               create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
+               let logger = test_utils::TestLogger::new();
+
+               // To start (1), send a regular payment but don't claim it.
+               let expected_route = [&nodes[1]];
+               let (payment_preimage, payment_hash, _) = route_payment(&nodes[0], &expected_route, 100_000);
+
+               // Next, attempt a keysend payment and make sure it fails.
+               let route = get_route(&nodes[0].node.get_our_node_id(), &nodes[0].net_graph_msg_handler.network_graph, &expected_route.last().unwrap().node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), 100_000, TEST_FINAL_CLTV, &logger).unwrap();
+               nodes[0].node.send_spontaneous_payment(&route, Some(payment_preimage)).unwrap();
+               check_added_monitors!(nodes[0], 1);
+               let mut events = nodes[0].node.get_and_clear_pending_msg_events();
+               assert_eq!(events.len(), 1);
+               let ev = events.drain(..).next().unwrap();
+               let payment_event = SendEvent::from_event(ev);
+               nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
+               check_added_monitors!(nodes[1], 0);
+               commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
+               expect_pending_htlcs_forwardable!(nodes[1]);
+               expect_pending_htlcs_forwardable!(nodes[1]);
+               check_added_monitors!(nodes[1], 1);
+               let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
+               assert!(updates.update_add_htlcs.is_empty());
+               assert!(updates.update_fulfill_htlcs.is_empty());
+               assert_eq!(updates.update_fail_htlcs.len(), 1);
+               assert!(updates.update_fail_malformed_htlcs.is_empty());
+               assert!(updates.update_fee.is_none());
+               nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fail_htlcs[0]);
+               commitment_signed_dance!(nodes[0], nodes[1], updates.commitment_signed, true, true);
+               expect_payment_failed!(nodes[0], payment_hash, true);
+
+               // Finally, claim the original payment.
+               claim_payment(&nodes[0], &expected_route, payment_preimage);
+
+               // To start (2), send a keysend payment but don't claim it.
+               let payment_preimage = PaymentPreimage([42; 32]);
+               let route = get_route(&nodes[0].node.get_our_node_id(), &nodes[0].net_graph_msg_handler.network_graph, &expected_route.last().unwrap().node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), 100_000, TEST_FINAL_CLTV, &logger).unwrap();
+               let (payment_hash, _) = nodes[0].node.send_spontaneous_payment(&route, Some(payment_preimage)).unwrap();
+               check_added_monitors!(nodes[0], 1);
+               let mut events = nodes[0].node.get_and_clear_pending_msg_events();
+               assert_eq!(events.len(), 1);
+               let event = events.pop().unwrap();
+               let path = vec![&nodes[1]];
+               pass_along_path(&nodes[0], &path, 100_000, payment_hash, None, event, true, Some(payment_preimage));
+
+               // Next, attempt a regular payment and make sure it fails.
+               let payment_secret = PaymentSecret([43; 32]);
+               nodes[0].node.send_payment(&route, payment_hash, &Some(payment_secret)).unwrap();
+               check_added_monitors!(nodes[0], 1);
+               let mut events = nodes[0].node.get_and_clear_pending_msg_events();
+               assert_eq!(events.len(), 1);
+               let ev = events.drain(..).next().unwrap();
+               let payment_event = SendEvent::from_event(ev);
+               nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
+               check_added_monitors!(nodes[1], 0);
+               commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
+               expect_pending_htlcs_forwardable!(nodes[1]);
+               expect_pending_htlcs_forwardable!(nodes[1]);
+               check_added_monitors!(nodes[1], 1);
+               let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
+               assert!(updates.update_add_htlcs.is_empty());
+               assert!(updates.update_fulfill_htlcs.is_empty());
+               assert_eq!(updates.update_fail_htlcs.len(), 1);
+               assert!(updates.update_fail_malformed_htlcs.is_empty());
+               assert!(updates.update_fee.is_none());
+               nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fail_htlcs[0]);
+               commitment_signed_dance!(nodes[0], nodes[1], updates.commitment_signed, true, true);
+               expect_payment_failed!(nodes[0], payment_hash, true);
+
+               // Finally, succeed the keysend payment.
+               claim_payment(&nodes[0], &expected_route, payment_preimage);
+       }
+
+       #[test]
+       fn test_keysend_hash_mismatch() {
+               // Test that if we receive a keysend `update_add_htlc` msg, we fail as expected if the keysend
+               // preimage doesn't match the msg's payment hash.
+               let chanmon_cfgs = create_chanmon_cfgs(2);
+               let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+               let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+               let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+               let payer_pubkey = nodes[0].node.get_our_node_id();
+               let payee_pubkey = nodes[1].node.get_our_node_id();
+               nodes[0].node.peer_connected(&payee_pubkey, &msgs::Init { features: InitFeatures::known() });
+               nodes[1].node.peer_connected(&payer_pubkey, &msgs::Init { features: InitFeatures::known() });
+
+               let _chan = create_chan_between_nodes(&nodes[0], &nodes[1], InitFeatures::known(), InitFeatures::known());
+               let network_graph = &nodes[0].net_graph_msg_handler.network_graph;
+               let first_hops = nodes[0].node.list_usable_channels();
+               let route = get_keysend_route(&payer_pubkey, network_graph, &payee_pubkey,
+                                  Some(&first_hops.iter().collect::<Vec<_>>()), &vec![], 10000, 40,
+                                  nodes[0].logger).unwrap();
+
+               let test_preimage = PaymentPreimage([42; 32]);
+               let mismatch_payment_hash = PaymentHash([43; 32]);
+               let _ = nodes[0].node.send_payment_internal(&route, mismatch_payment_hash, &None, Some(test_preimage), None, None).unwrap();
+               check_added_monitors!(nodes[0], 1);
+
+               let updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
+               assert_eq!(updates.update_add_htlcs.len(), 1);
+               assert!(updates.update_fulfill_htlcs.is_empty());
+               assert!(updates.update_fail_htlcs.is_empty());
+               assert!(updates.update_fail_malformed_htlcs.is_empty());
+               assert!(updates.update_fee.is_none());
+               nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);
+
+               nodes[1].logger.assert_log_contains("lightning::ln::channelmanager".to_string(), "Payment preimage didn't match payment hash".to_string(), 1);
+       }
+
+       #[test]
+       fn test_keysend_msg_with_secret_err() {
+               // Test that we error as expected if we receive a keysend payment that includes a payment secret.
+               let chanmon_cfgs = create_chanmon_cfgs(2);
+               let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+               let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+               let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+               let payer_pubkey = nodes[0].node.get_our_node_id();
+               let payee_pubkey = nodes[1].node.get_our_node_id();
+               nodes[0].node.peer_connected(&payee_pubkey, &msgs::Init { features: InitFeatures::known() });
+               nodes[1].node.peer_connected(&payer_pubkey, &msgs::Init { features: InitFeatures::known() });
+
+               let _chan = create_chan_between_nodes(&nodes[0], &nodes[1], InitFeatures::known(), InitFeatures::known());
+               let network_graph = &nodes[0].net_graph_msg_handler.network_graph;
+               let first_hops = nodes[0].node.list_usable_channels();
+               let route = get_keysend_route(&payer_pubkey, network_graph, &payee_pubkey,
+                                  Some(&first_hops.iter().collect::<Vec<_>>()), &vec![], 10000, 40,
+                                  nodes[0].logger).unwrap();
+
+               let test_preimage = PaymentPreimage([42; 32]);
+               let test_secret = PaymentSecret([43; 32]);
+               let payment_hash = PaymentHash(Sha256::hash(&test_preimage.0).into_inner());
+               let _ = nodes[0].node.send_payment_internal(&route, payment_hash, &Some(test_secret), Some(test_preimage), None, None).unwrap();
+               check_added_monitors!(nodes[0], 1);
+
+               let updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
+               assert_eq!(updates.update_add_htlcs.len(), 1);
+               assert!(updates.update_fulfill_htlcs.is_empty());
+               assert!(updates.update_fail_htlcs.is_empty());
+               assert!(updates.update_fail_malformed_htlcs.is_empty());
+               assert!(updates.update_fee.is_none());
+               nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);
+
+               nodes[1].logger.assert_log_contains("lightning::ln::channelmanager".to_string(), "We don't support MPP keysend payments".to_string(), 1);
+       }
+
+       #[test]
+       fn test_multi_hop_missing_secret() {
+               let chanmon_cfgs = create_chanmon_cfgs(4);
+               let node_cfgs = create_node_cfgs(4, &chanmon_cfgs);
+               let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[None, None, None, None]);
+               let nodes = create_network(4, &node_cfgs, &node_chanmgrs);
+
+               let chan_1_id = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()).0.contents.short_channel_id;
+               let chan_2_id = create_announced_chan_between_nodes(&nodes, 0, 2, InitFeatures::known(), InitFeatures::known()).0.contents.short_channel_id;
+               let chan_3_id = create_announced_chan_between_nodes(&nodes, 1, 3, InitFeatures::known(), InitFeatures::known()).0.contents.short_channel_id;
+               let chan_4_id = create_announced_chan_between_nodes(&nodes, 2, 3, InitFeatures::known(), InitFeatures::known()).0.contents.short_channel_id;
+               let logger = test_utils::TestLogger::new();
+
+               // Marshall an MPP route.
+               let (_, payment_hash, _) = get_payment_preimage_hash!(&nodes[3]);
+               let net_graph_msg_handler = &nodes[0].net_graph_msg_handler;
+               let mut route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph, &nodes[3].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &[], 100000, TEST_FINAL_CLTV, &logger).unwrap();
+               let path = route.paths[0].clone();
+               route.paths.push(path);
+               route.paths[0][0].pubkey = nodes[1].node.get_our_node_id();
+               route.paths[0][0].short_channel_id = chan_1_id;
+               route.paths[0][1].short_channel_id = chan_3_id;
+               route.paths[1][0].pubkey = nodes[2].node.get_our_node_id();
+               route.paths[1][0].short_channel_id = chan_2_id;
+               route.paths[1][1].short_channel_id = chan_4_id;
+
+               match nodes[0].node.send_payment(&route, payment_hash, &None).unwrap_err() {
+                       PaymentSendFailure::ParameterError(APIError::APIMisuseError { ref err }) => {
+                               assert!(regex::Regex::new(r"Payment secret is required for multi-path payments").unwrap().is_match(err))
+                       },
+                       _ => panic!("unexpected error")
+               }
        }
 }
 
@@ -4967,18 +6229,18 @@ pub mod bench {
        use ln::channelmanager::{BestBlock, ChainParameters, ChannelManager, PaymentHash, PaymentPreimage};
        use ln::features::{InitFeatures, InvoiceFeatures};
        use ln::functional_test_utils::*;
-       use ln::msgs::ChannelMessageHandler;
+       use ln::msgs::{ChannelMessageHandler, Init};
        use routing::network_graph::NetworkGraph;
        use routing::router::get_route;
        use util::test_utils;
        use util::config::UserConfig;
-       use util::events::{Event, MessageSendEvent, MessageSendEventsProvider};
+       use util::events::{Event, MessageSendEvent, MessageSendEventsProvider, PaymentPurpose};
 
        use bitcoin::hashes::Hash;
        use bitcoin::hashes::sha256::Hash as Sha256;
        use bitcoin::{Block, BlockHeader, Transaction, TxOut};
 
-       use std::sync::{Arc, Mutex};
+       use sync::{Arc, Mutex};
 
        use test::Bencher;
 
@@ -5005,7 +6267,7 @@ pub mod bench {
                let genesis_hash = bitcoin::blockdata::constants::genesis_block(network).header.block_hash();
 
                let tx_broadcaster = test_utils::TestBroadcaster{txn_broadcasted: Mutex::new(Vec::new()), blocks: Arc::new(Mutex::new(Vec::new()))};
-               let fee_estimator = test_utils::TestFeeEstimator { sat_per_kw: 253 };
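+       // sat_per_kw now sits behind a Mutex so benches and tests can adjust the
+       // feerate at runtime; 253 sat/kW keeps the value used before this change.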
+               let fee_estimator = test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) };
 
                let mut config: UserConfig = Default::default();
                config.own_channel_config.minimum_depth = 1;
@@ -5030,6 +6292,8 @@ pub mod bench {
                });
                let node_b_holder = NodeHolder { node: &node_b };
 
+               node_a.peer_connected(&node_b.get_our_node_id(), &Init { features: InitFeatures::known() });
+               node_b.peer_connected(&node_a.get_our_node_id(), &Init { features: InitFeatures::known() });
                node_a.create_channel(node_b.get_our_node_id(), 8_000_000, 100_000_000, 42, None).unwrap();
                node_b.handle_open_channel(&node_a.get_our_node_id(), InitFeatures::known(), &get_event_msg!(node_a_holder, MessageSendEvent::SendOpenChannel, node_b.get_our_node_id()));
                node_a.handle_accept_channel(&node_b.get_our_node_id(), InitFeatures::known(), &get_event_msg!(node_b_holder, MessageSendEvent::SendAcceptChannel, node_a.get_our_node_id()));
@@ -5055,7 +6319,19 @@ pub mod bench {
                Listen::block_connected(&node_b, &block, 1);
 
                node_a.handle_funding_locked(&node_b.get_our_node_id(), &get_event_msg!(node_b_holder, MessageSendEvent::SendFundingLocked, node_a.get_our_node_id()));
-               node_b.handle_funding_locked(&node_a.get_our_node_id(), &get_event_msg!(node_a_holder, MessageSendEvent::SendFundingLocked, node_b.get_our_node_id()));
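+               // funding_locked handling now also emits a SendChannelUpdate for the new
+               // channel, so two message events are expected and drained here.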
+               let msg_events = node_a.get_and_clear_pending_msg_events();
+               assert_eq!(msg_events.len(), 2);
+               match msg_events[0] {
+                       MessageSendEvent::SendFundingLocked { ref msg, .. } => {
+                               node_b.handle_funding_locked(&node_a.get_our_node_id(), msg);
+                               get_event_msg!(node_b_holder, MessageSendEvent::SendChannelUpdate, node_a.get_our_node_id());
+                       },
+                       _ => panic!(),
+               }
+               match msg_events[1] {
+                       MessageSendEvent::SendChannelUpdate { .. } => {},
+                       _ => panic!(),
+               }
 
                let dummy_graph = NetworkGraph::new(genesis_hash);