Fix no_std warnings due to unused includes
diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs
index 52bd8ad1cdbd82fdba046839f3497c49c8a775f2..224581595442f2954b2ad698957b8433e7df0565 100644
@@ -36,8 +36,7 @@ use bitcoin::secp256k1::ecdh::SharedSecret;
 use bitcoin::secp256k1;
 
 use chain;
-use chain::Confirm;
-use chain::Watch;
+use chain::{Confirm, Watch, BestBlock};
 use chain::chaininterface::{BroadcasterInterface, FeeEstimator};
 use chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, ChannelMonitorUpdateStep, ChannelMonitorUpdateErr, HTLC_FAIL_BACK_BUFFER, CLTV_CLAIM_BUFFER, LATENCY_GRACE_PERIOD_BLOCKS, ANTI_REORG_DELAY, MonitorEvent, CLOSED_CHANNEL_UPDATE_ID};
 use chain::transaction::{OutPoint, TransactionData};
@@ -45,7 +44,7 @@ use chain::transaction::{OutPoint, TransactionData};
 // construct one themselves.
 use ln::{PaymentHash, PaymentPreimage, PaymentSecret};
 pub use ln::channel::CounterpartyForwardingInfo;
-use ln::channel::{Channel, ChannelError, ChannelUpdateStatus};
+use ln::channel::{Channel, ChannelError, ChannelUpdateStatus, UpdateFulfillCommitFetch};
 use ln::features::{InitFeatures, NodeFeatures};
 use routing::router::{Route, RouteHop};
 use ln::msgs;
@@ -54,22 +53,23 @@ use ln::onion_utils;
 use ln::msgs::{ChannelMessageHandler, DecodeError, LightningError, OptionalField};
 use chain::keysinterface::{Sign, KeysInterface, KeysManager, InMemorySigner};
 use util::config::UserConfig;
-use util::events::{Event, EventsProvider, MessageSendEvent, MessageSendEventsProvider};
+use util::events::{EventHandler, EventsProvider, MessageSendEvent, MessageSendEventsProvider};
 use util::{byte_utils, events};
 use util::ser::{Readable, ReadableArgs, MaybeReadable, Writeable, Writer};
 use util::chacha20::{ChaCha20, ChaChaReader};
-use util::logger::Logger;
+use util::logger::{Logger, Level};
 use util::errors::APIError;
 
-use std::{cmp, mem};
-use std::collections::{HashMap, hash_map, HashSet};
+use prelude::*;
+use core::{cmp, mem};
+use core::cell::RefCell;
 use std::io::{Cursor, Read};
-use std::sync::{Arc, Condvar, Mutex, MutexGuard, RwLock, RwLockReadGuard};
-use std::sync::atomic::{AtomicUsize, Ordering};
-use std::time::Duration;
+use sync::{Arc, Condvar, Mutex, MutexGuard, RwLock, RwLockReadGuard};
+use core::sync::atomic::{AtomicUsize, Ordering};
+use core::time::Duration;
 #[cfg(any(test, feature = "allow_wallclock_use"))]
 use std::time::Instant;
-use std::ops::Deref;
+use core::ops::Deref;
 use bitcoin::hashes::hex::ToHex;
 
 // We hold various information about HTLC relay in the HTLC objects in Channel itself:
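The `use prelude::*;` and `use sync::{...}` lines above rely on crate-local shims that paper over std/no_std differences. A minimal sketch of that pattern (the module bodies here are hypothetical, not part of this patch):

#[cfg(feature = "std")]
mod sync {
    // With std available, simply re-export the standard primitives.
    pub use std::sync::{Arc, Condvar, Mutex, MutexGuard, RwLock, RwLockReadGuard};
}
#[cfg(not(feature = "std"))]
mod sync {
    // Without std, Arc comes from alloc; the lock types would need
    // no_std-compatible replacements (elided here).
    pub use alloc::sync::Arc;
}

Note that `std::io` and the `allow_wallclock_use`-gated `std::time::Instant` remain std-only, which is why they keep their `std::` paths in the imports above.
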
@@ -99,6 +99,10 @@ enum PendingHTLCRouting {
                payment_data: msgs::FinalOnionHopData,
                incoming_cltv_expiry: u32, // Used to track when we should expire pending HTLCs that go unclaimed
        },
+       ReceiveKeysend {
+               payment_preimage: PaymentPreimage,
+               incoming_cltv_expiry: u32, // Used to track when we should expire pending HTLCs that go unclaimed
+       },
 }
 
 #[derive(Clone)] // See Channel::revoke_and_ack for why, tl;dr: Rust bug
@@ -153,14 +157,20 @@ pub(crate) struct HTLCPreviousHopData {
        outpoint: OutPoint,
 }
 
-struct ClaimableHTLC {
-       prev_hop: HTLCPreviousHopData,
-       value: u64,
+enum OnionPayload {
        /// Contains a total_msat (which may differ from value if this is a Multi-Path Payment) and a
        /// payment_secret which prevents path-probing attacks and can associate different HTLCs which
        /// are part of the same payment.
-       payment_data: msgs::FinalOnionHopData,
+       Invoice(msgs::FinalOnionHopData),
+       /// Contains the payer-provided preimage.
+       Spontaneous(PaymentPreimage),
+}
+
+struct ClaimableHTLC {
+       prev_hop: HTLCPreviousHopData,
        cltv_expiry: u32,
+       value: u64,
+       onion_payload: OnionPayload,
 }
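With the payload split out, claim-side code can branch on how the HTLC was received. A hedged illustration (this helper is hypothetical, not part of the patch):

// For invoice payments the preimage must come from our own payment store; for
// spontaneous (keysend) payments the payer supplied it inside the onion.
fn claim_preimage(htlc: &ClaimableHTLC, stored: Option<PaymentPreimage>) -> Option<PaymentPreimage> {
    match &htlc.onion_payload {
        OnionPayload::Invoice(_) => stored,
        OnionPayload::Spontaneous(preimage) => Some(*preimage),
    }
}
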
 
 /// Tracks the inbound corresponding to an outbound HTLC
@@ -496,6 +506,7 @@ pub struct ChannelManager<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref,
 /// Typically, the block-specific parameters are derived from the best block hash for the network,
 /// as a newly constructed `ChannelManager` will not have created any channels yet. These parameters
 /// are not needed when deserializing a previously constructed `ChannelManager`.
+#[derive(Clone, Copy, PartialEq)]
 pub struct ChainParameters {
        /// The network for determining the `chain_hash` in Lightning messages.
        pub network: Network,
@@ -506,34 +517,6 @@ pub struct ChainParameters {
        pub best_block: BestBlock,
 }
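Construction is unchanged by moving `BestBlock` into the `chain` module (now imported at the top of the file). A small sketch, assuming the moved type keeps the constructors shown in the removed block below:

use bitcoin::network::constants::Network;

let chain_params = ChainParameters {
    // Start a fresh ChannelManager from the testnet genesis block.
    network: Network::Testnet,
    best_block: BestBlock::from_genesis(Network::Testnet),
};
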
 
-/// The best known block as identified by its hash and height.
-#[derive(Clone, Copy)]
-pub struct BestBlock {
-       block_hash: BlockHash,
-       height: u32,
-}
-
-impl BestBlock {
-       /// Returns the best block from the genesis of the given network.
-       pub fn from_genesis(network: Network) -> Self {
-               BestBlock {
-                       block_hash: genesis_block(network).header.block_hash(),
-                       height: 0,
-               }
-       }
-
-       /// Returns the best block as identified by the given block hash and height.
-       pub fn new(block_hash: BlockHash, height: u32) -> Self {
-               BestBlock { block_hash, height }
-       }
-
-       /// Returns the best block hash.
-       pub fn block_hash(&self) -> BlockHash { self.block_hash }
-
-       /// Returns the best block height.
-       pub fn height(&self) -> u32 { self.height }
-}
-
 #[derive(Copy, Clone, PartialEq)]
 enum NotifyOption {
        DoPersist,
@@ -624,19 +607,44 @@ pub const MIN_FINAL_CLTV_EXPIRY: u32 = HTLC_FAIL_BACK_BUFFER + 3;
 const CHECK_CLTV_EXPIRY_SANITY: u32 = MIN_CLTV_EXPIRY_DELTA as u32 - LATENCY_GRACE_PERIOD_BLOCKS - CLTV_CLAIM_BUFFER - ANTI_REORG_DELAY - LATENCY_GRACE_PERIOD_BLOCKS;
 
 // Check for ability of an attacker to make us fail on-chain by delaying an HTLC claim. See
-// ChannelMontior::would_broadcast_at_height for a description of why this is needed.
+// ChannelMonitor::should_broadcast_holder_commitment_txn for a description of why this is needed.
 #[deny(const_err)]
 #[allow(dead_code)]
 const CHECK_CLTV_EXPIRY_SANITY_2: u32 = MIN_CLTV_EXPIRY_DELTA as u32 - LATENCY_GRACE_PERIOD_BLOCKS - 2*CLTV_CLAIM_BUFFER;
 
+/// Channel parameters which apply to our counterparty. These are split out from [`ChannelDetails`]
+/// to better separate the counterparty's parameters from those of the channel itself.
+#[derive(Clone, Debug, PartialEq)]
+pub struct ChannelCounterparty {
+       /// The node_id of our counterparty
+       pub node_id: PublicKey,
+       /// The Features the channel counterparty provided upon last connection.
+       /// Useful for routing as it is the most up-to-date copy of the counterparty's features and
+       /// many routing-relevant features are present in the init context.
+       pub features: InitFeatures,
+       /// The value, in satoshis, that must always be held in the channel for our counterparty. This
+       /// value ensures that if our counterparty broadcasts a revoked state, we can punish them by
+       /// claiming at least this value on chain.
+       ///
+       /// This value is not included in [`inbound_capacity_msat`] as it can never be spent.
+       ///
+       /// [`inbound_capacity_msat`]: ChannelDetails::inbound_capacity_msat
+       pub unspendable_punishment_reserve: u64,
+       /// Information on the fees and requirements that the counterparty requires when forwarding
+       /// payments to us through this channel.
+       pub forwarding_info: Option<CounterpartyForwardingInfo>,
+}
+
 /// Details of a channel, as returned by ChannelManager::list_channels and ChannelManager::list_usable_channels
-#[derive(Clone)]
+#[derive(Clone, Debug, PartialEq)]
 pub struct ChannelDetails {
        /// The channel's ID (prior to funding transaction generation, this is a random 32 bytes,
        /// thereafter this is the txid of the funding transaction xor the funding transaction output).
        /// Note that this means this value is *not* persistent - it can change once during the
        /// lifetime of the channel.
        pub channel_id: [u8; 32],
+       /// Parameters which apply to our counterparty. See individual fields for more information.
+       pub counterparty: ChannelCounterparty,
        /// The Channel's funding transaction output, if we've negotiated the funding transaction with
        /// our counterparty already.
        ///
@@ -646,45 +654,76 @@ pub struct ChannelDetails {
        /// The position of the funding transaction in the chain. None if the funding transaction has
        /// not yet been confirmed and the channel fully opened.
        pub short_channel_id: Option<u64>,
-       /// The node_id of our counterparty
-       pub remote_network_id: PublicKey,
-       /// The Features the channel counterparty provided upon last connection.
-       /// Useful for routing as it is the most up-to-date copy of the counterparty's features and
-       /// many routing-relevant features are present in the init context.
-       pub counterparty_features: InitFeatures,
        /// The value, in satoshis, of this channel as appears in the funding output
        pub channel_value_satoshis: u64,
+       /// The value, in satoshis, that must always be held in the channel for us. This value ensures
+       /// that if we broadcast a revoked state, our counterparty can punish us by claiming at least
+       /// this value on chain.
+       ///
+       /// This value is not included in [`outbound_capacity_msat`] as it can never be spent.
+       ///
+       /// This value will be `None` for outbound channels until the counterparty accepts the channel.
+       ///
+       /// [`outbound_capacity_msat`]: ChannelDetails::outbound_capacity_msat
+       pub unspendable_punishment_reserve: Option<u64>,
        /// The user_id passed in to create_channel, or 0 if the channel was inbound.
        pub user_id: u64,
        /// The available outbound capacity for sending HTLCs to the remote peer. This does not include
        /// any pending HTLCs which are not yet fully resolved (and, thus, whose balance is not
        /// available for inclusion in new outbound HTLCs). This further does not include any pending
        /// outgoing HTLCs which are awaiting some other resolution to be sent.
+       ///
+       /// This value is not exact. Due to various in-flight changes, feerate changes, and our
+       /// conflict-avoidance policy, exactly this amount is not likely to be spendable. However, we
+       /// should be able to spend nearly this amount.
        pub outbound_capacity_msat: u64,
        /// The available inbound capacity for the remote peer to send HTLCs to us. This does not
        /// include any pending HTLCs which are not yet fully resolved (and, thus, whose balance is not
        /// available for inclusion in new inbound HTLCs).
        /// Note that there are some corner cases not fully handled here, so the actual available
        /// inbound capacity may be slightly higher than this.
+       ///
+       /// This value is not exact. Due to various in-flight changes, feerate changes, and our
+       /// counterparty's conflict-avoidance policy, exactly this amount is not likely to be spendable.
+       /// However, our counterparty should be able to spend nearly this amount.
        pub inbound_capacity_msat: u64,
+       /// The number of required confirmations on the funding transaction before the funding will be
+       /// considered "locked". This number is selected by the channel fundee (i.e. us if
+       /// [`is_outbound`] is *not* set), and can be selected for inbound channels with
+       /// [`ChannelHandshakeConfig::minimum_depth`] or limited for outbound channels with
+       /// [`ChannelHandshakeLimits::max_minimum_depth`].
+       ///
+       /// This value will be `None` for outbound channels until the counterparty accepts the channel.
+       ///
+       /// [`is_outbound`]: ChannelDetails::is_outbound
+       /// [`ChannelHandshakeConfig::minimum_depth`]: crate::util::config::ChannelHandshakeConfig::minimum_depth
+       /// [`ChannelHandshakeLimits::max_minimum_depth`]: crate::util::config::ChannelHandshakeLimits::max_minimum_depth
+       pub confirmations_required: Option<u32>,
+       /// The number of blocks (after our commitment transaction confirms) that we will need to wait
+       /// until we can claim our funds after we force-close the channel. During this time our
+       /// counterparty is allowed to punish us if we broadcasted a stale state. If our counterparty
+       /// force-closes the channel and broadcasts a commitment transaction we do not have to wait any
+       /// time to claim our non-HTLC-encumbered funds.
+       ///
+       /// This value will be `None` for outbound channels until the counterparty accepts the channel.
+       pub force_close_spend_delay: Option<u16>,
        /// True if the channel was initiated (and thus funded) by us.
        pub is_outbound: bool,
        /// True if the channel is confirmed, funding_locked messages have been exchanged, and the
        /// channel is not currently being shut down. `funding_locked` message exchange implies the
        /// required confirmation count has been reached (and we were connected to the peer at some
-       /// point after the funding transaction received enough confirmations).
+       /// point after the funding transaction received enough confirmations). The required
+       /// confirmation count is provided in [`confirmations_required`].
+       ///
+       /// [`confirmations_required`]: ChannelDetails::confirmations_required
        pub is_funding_locked: bool,
        /// True if the channel is (a) confirmed and funding_locked messages have been exchanged, (b)
-       /// the peer is connected, (c) no monitor update failure is pending resolution, and (d) the
-       /// channel is not currently negotiating a shutdown.
+       /// the peer is connected, and (c) the channel is not currently negotiating a shutdown.
        ///
        /// This is a strict superset of `is_funding_locked`.
        pub is_usable: bool,
        /// True if this channel is (or will be) publicly-announced.
        pub is_public: bool,
-       /// Information on the fees and requirements that the counterparty requires when forwarding
-       /// payments to us through this channel.
-       pub counterparty_forwarding_info: Option<CounterpartyForwardingInfo>,
 }
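A usage sketch for the reworked struct (hedged; assumes a `channel_manager` in scope):

for details in channel_manager.list_channels() {
    // node_id and forwarding info now live under `counterparty`.
    println!("peer {}: outbound capacity {} msat, confirmations required {:?}",
        details.counterparty.node_id,
        details.outbound_capacity_msat,
        // None until the counterparty has accepted the channel.
        details.confirmations_required);
}
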
 
 /// If a payment fails to send, it can be in one of several states. This enum is returned as the
@@ -766,22 +805,44 @@ macro_rules! handle_error {
        }
 }
 
+/// Returns (boolean indicating if we should remove the Channel object from memory, a mapped error)
+macro_rules! convert_chan_err {
+       ($self: ident, $err: expr, $short_to_id: expr, $channel: expr, $channel_id: expr) => {
+               match $err {
+                       ChannelError::Ignore(msg) => {
+                               (false, MsgHandleErrInternal::from_chan_no_close(ChannelError::Ignore(msg), $channel_id.clone()))
+                       },
+                       ChannelError::Close(msg) => {
+                               log_error!($self.logger, "Closing channel {} due to close-required error: {}", log_bytes!($channel_id[..]), msg);
+                               if let Some(short_id) = $channel.get_short_channel_id() {
+                                       $short_to_id.remove(&short_id);
+                               }
+                               let shutdown_res = $channel.force_shutdown(true);
+                               (true, MsgHandleErrInternal::from_finish_shutdown(msg, *$channel_id, shutdown_res, $self.get_channel_update_for_broadcast(&$channel).ok()))
+                       },
+                       ChannelError::CloseDelayBroadcast(msg) => {
+                               log_error!($self.logger, "Channel {} needs to be shut down, but closing transactions were not broadcast due to {}", log_bytes!($channel_id[..]), msg);
+                               if let Some(short_id) = $channel.get_short_channel_id() {
+                                       $short_to_id.remove(&short_id);
+                               }
+                               let shutdown_res = $channel.force_shutdown(false);
+                               (true, MsgHandleErrInternal::from_finish_shutdown(msg, *$channel_id, shutdown_res, $self.get_channel_update_for_broadcast(&$channel).ok()))
+                       }
+               }
+       }
+}
+
 macro_rules! break_chan_entry {
        ($self: ident, $res: expr, $channel_state: expr, $entry: expr) => {
                match $res {
                        Ok(res) => res,
-                       Err(ChannelError::Ignore(msg)) => {
-                               break Err(MsgHandleErrInternal::from_chan_no_close(ChannelError::Ignore(msg), $entry.key().clone()))
-                       },
-                       Err(ChannelError::Close(msg)) => {
-                               log_trace!($self.logger, "Closing channel {} due to Close-required error: {}", log_bytes!($entry.key()[..]), msg);
-                               let (channel_id, mut chan) = $entry.remove_entry();
-                               if let Some(short_id) = chan.get_short_channel_id() {
-                                       $channel_state.short_to_id.remove(&short_id);
+                       Err(e) => {
+                               let (drop, res) = convert_chan_err!($self, e, $channel_state.short_to_id, $entry.get_mut(), $entry.key());
+                               if drop {
+                                       $entry.remove_entry();
                                }
-                               break Err(MsgHandleErrInternal::from_finish_shutdown(msg, channel_id, chan.force_shutdown(true), $self.get_channel_update(&chan).ok()))
-                       },
-                       Err(ChannelError::CloseDelayBroadcast(_)) => { panic!("Wait is only generated on receipt of channel_reestablish, which is handled by try_chan_entry, we don't bother to support it here"); }
+                               break Err(res);
+                       }
                }
        }
 }
@@ -790,25 +851,12 @@ macro_rules! try_chan_entry {
        ($self: ident, $res: expr, $channel_state: expr, $entry: expr) => {
                match $res {
                        Ok(res) => res,
-                       Err(ChannelError::Ignore(msg)) => {
-                               return Err(MsgHandleErrInternal::from_chan_no_close(ChannelError::Ignore(msg), $entry.key().clone()))
-                       },
-                       Err(ChannelError::Close(msg)) => {
-                               log_trace!($self.logger, "Closing channel {} due to Close-required error: {}", log_bytes!($entry.key()[..]), msg);
-                               let (channel_id, mut chan) = $entry.remove_entry();
-                               if let Some(short_id) = chan.get_short_channel_id() {
-                                       $channel_state.short_to_id.remove(&short_id);
-                               }
-                               return Err(MsgHandleErrInternal::from_finish_shutdown(msg, channel_id, chan.force_shutdown(true), $self.get_channel_update(&chan).ok()))
-                       },
-                       Err(ChannelError::CloseDelayBroadcast(msg)) => {
-                               log_error!($self.logger, "Channel {} need to be shutdown but closing transactions not broadcast due to {}", log_bytes!($entry.key()[..]), msg);
-                               let (channel_id, mut chan) = $entry.remove_entry();
-                               if let Some(short_id) = chan.get_short_channel_id() {
-                                       $channel_state.short_to_id.remove(&short_id);
+                       Err(e) => {
+                               let (drop, res) = convert_chan_err!($self, e, $channel_state.short_to_id, $entry.get_mut(), $entry.key());
+                               if drop {
+                                       $entry.remove_entry();
                                }
-                               let shutdown_res = chan.force_shutdown(false);
-                               return Err(MsgHandleErrInternal::from_finish_shutdown(msg, channel_id, shutdown_res, $self.get_channel_update(&chan).ok()))
+                               return Err(res);
                        }
                }
        }
@@ -818,13 +866,12 @@ macro_rules! handle_monitor_err {
        ($self: ident, $err: expr, $channel_state: expr, $entry: expr, $action_type: path, $resend_raa: expr, $resend_commitment: expr) => {
                handle_monitor_err!($self, $err, $channel_state, $entry, $action_type, $resend_raa, $resend_commitment, Vec::new(), Vec::new())
        };
-       ($self: ident, $err: expr, $channel_state: expr, $entry: expr, $action_type: path, $resend_raa: expr, $resend_commitment: expr, $failed_forwards: expr, $failed_fails: expr) => {
+       ($self: ident, $err: expr, $short_to_id: expr, $chan: expr, $action_type: path, $resend_raa: expr, $resend_commitment: expr, $failed_forwards: expr, $failed_fails: expr, $chan_id: expr) => {
                match $err {
                        ChannelMonitorUpdateErr::PermanentFailure => {
-                               log_error!($self.logger, "Closing channel {} due to monitor update PermanentFailure", log_bytes!($entry.key()[..]));
-                               let (channel_id, mut chan) = $entry.remove_entry();
-                               if let Some(short_id) = chan.get_short_channel_id() {
-                                       $channel_state.short_to_id.remove(&short_id);
+                               log_error!($self.logger, "Closing channel {} due to monitor update ChannelMonitorUpdateErr::PermanentFailure", log_bytes!($chan_id[..]));
+                               if let Some(short_id) = $chan.get_short_channel_id() {
+                                       $short_to_id.remove(&short_id);
                                }
                                // TODO: $failed_fails is dropped here, which will cause other channels to hit the
                                // chain in a confused state! We need to move them into the ChannelMonitor which
@@ -835,12 +882,13 @@ macro_rules! handle_monitor_err {
                                // splitting hairs we'd prefer to claim payments that were to us, but we haven't
                                // given up the preimage yet, so might as well just wait until the payment is
                                // retried, avoiding the on-chain fees.
-                               let res: Result<(), _> = Err(MsgHandleErrInternal::from_finish_shutdown("ChannelMonitor storage failure".to_owned(), channel_id, chan.force_shutdown(true), $self.get_channel_update(&chan).ok()));
-                               res
+                               let res: Result<(), _> = Err(MsgHandleErrInternal::from_finish_shutdown("ChannelMonitor storage failure".to_owned(), *$chan_id,
+                                               $chan.force_shutdown(true), $self.get_channel_update_for_broadcast(&$chan).ok() ));
+                               (res, true)
                        },
                        ChannelMonitorUpdateErr::TemporaryFailure => {
                                log_info!($self.logger, "Disabling channel {} due to monitor update TemporaryFailure. On restore will send {} and process {} forwards and {} fails",
-                                               log_bytes!($entry.key()[..]),
+                                               log_bytes!($chan_id[..]),
                                                if $resend_commitment && $resend_raa {
                                                                match $action_type {
                                                                        RAACommitmentOrder::CommitmentFirst => { "commitment then RAA" },
@@ -857,11 +905,18 @@ macro_rules! handle_monitor_err {
                                if !$resend_raa {
                                        debug_assert!($action_type == RAACommitmentOrder::CommitmentFirst || !$resend_commitment);
                                }
-                               $entry.get_mut().monitor_update_failed($resend_raa, $resend_commitment, $failed_forwards, $failed_fails);
-                               Err(MsgHandleErrInternal::from_chan_no_close(ChannelError::Ignore("Failed to update ChannelMonitor".to_owned()), *$entry.key()))
+                               $chan.monitor_update_failed($resend_raa, $resend_commitment, $failed_forwards, $failed_fails);
+                               (Err(MsgHandleErrInternal::from_chan_no_close(ChannelError::Ignore("Failed to update ChannelMonitor".to_owned()), *$chan_id)), false)
                        },
                }
-       }
+       };
+       ($self: ident, $err: expr, $channel_state: expr, $entry: expr, $action_type: path, $resend_raa: expr, $resend_commitment: expr, $failed_forwards: expr, $failed_fails: expr) => { {
+               let (res, drop) = handle_monitor_err!($self, $err, $channel_state.short_to_id, $entry.get_mut(), $action_type, $resend_raa, $resend_commitment, $failed_forwards, $failed_fails, $entry.key());
+               if drop {
+                       $entry.remove_entry();
+               }
+               res
+       } };
 }
 
 macro_rules! return_monitor_err {
@@ -885,6 +940,133 @@ macro_rules! maybe_break_monitor_err {
        }
 }
 
+macro_rules! handle_chan_restoration_locked {
+       ($self: ident, $channel_lock: expr, $channel_state: expr, $channel_entry: expr,
+        $raa: expr, $commitment_update: expr, $order: expr, $chanmon_update: expr,
+        $pending_forwards: expr, $funding_broadcastable: expr, $funding_locked: expr) => { {
+               let mut htlc_forwards = None;
+               let counterparty_node_id = $channel_entry.get().get_counterparty_node_id();
+
+               let chanmon_update: Option<ChannelMonitorUpdate> = $chanmon_update; // Force type-checking to resolve
+               let chanmon_update_is_none = chanmon_update.is_none();
+               let res = loop {
+                       let forwards: Vec<(PendingHTLCInfo, u64)> = $pending_forwards; // Force type-checking to resolve
+                       if !forwards.is_empty() {
+                               htlc_forwards = Some(($channel_entry.get().get_short_channel_id().expect("We can't have pending forwards before funding confirmation"),
+                                       $channel_entry.get().get_funding_txo().unwrap(), forwards));
+                       }
+
+                       if chanmon_update.is_some() {
+                               // On reconnect, we, by definition, only resend a funding_locked if there have been
+                               // no commitment updates, so the only channel monitor update which could also be
+                               // associated with a funding_locked would be the funding_created/funding_signed
+                               // monitor update. That monitor update failing implies that we won't send
+                               // funding_locked until it's been updated, so we can't have a funding_locked and a
+                               // monitor update here (so we don't bother to handle it correctly below).
+                               assert!($funding_locked.is_none());
+                               // A channel monitor update makes no sense without either a funding_locked or a
+                               // commitment update to process after it. Since we can't have a funding_locked, we
+                               // only bother to handle the monitor-update + commitment_update case below.
+                               assert!($commitment_update.is_some());
+                       }
+
+                       if let Some(msg) = $funding_locked {
+                               // Similar to the above, this implies that we're letting the funding_locked fly
+                               // before it should be allowed to.
+                               assert!(chanmon_update.is_none());
+                               $channel_state.pending_msg_events.push(events::MessageSendEvent::SendFundingLocked {
+                                       node_id: counterparty_node_id,
+                                       msg,
+                               });
+                               if let Some(announcement_sigs) = $self.get_announcement_sigs($channel_entry.get()) {
+                                       $channel_state.pending_msg_events.push(events::MessageSendEvent::SendAnnouncementSignatures {
+                                               node_id: counterparty_node_id,
+                                               msg: announcement_sigs,
+                                       });
+                               }
+                               $channel_state.short_to_id.insert($channel_entry.get().get_short_channel_id().unwrap(), $channel_entry.get().channel_id());
+                       }
+
+                       let funding_broadcastable: Option<Transaction> = $funding_broadcastable; // Force type-checking to resolve
+                       if let Some(monitor_update) = chanmon_update {
+                               // We only ever broadcast a funding transaction in response to a funding_signed
+                               // message and the resulting monitor update. Thus, on channel_reestablish
+                               // message handling we can't have a funding transaction to broadcast. When
+                               // processing a completed monitor update which resulted in a funding broadcast, we
+                               // cannot have a second monitor update, thus this case would indicate a bug.
+                               assert!(funding_broadcastable.is_none());
+                               // Given we were just reconnected or finished updating a channel monitor, the
+                               // only case where we can get a new ChannelMonitorUpdate would be if we also
+                               // have some commitment updates to send as well.
+                               assert!($commitment_update.is_some());
+                               if let Err(e) = $self.chain_monitor.update_channel($channel_entry.get().get_funding_txo().unwrap(), monitor_update) {
+                                       // channel_reestablish doesn't guarantee the order of the messages it
+                                       // returns is sensible, but if we're setting what messages to
+                                       // re-transmit on monitor update success, we need to make sure it is sane.
+                                       let mut order = $order;
+                                       if $raa.is_none() {
+                                               order = RAACommitmentOrder::CommitmentFirst;
+                                       }
+                                       break handle_monitor_err!($self, e, $channel_state, $channel_entry, order, $raa.is_some(), true);
+                               }
+                       }
+
+                       macro_rules! handle_cs { () => {
+                               if let Some(update) = $commitment_update {
+                                       $channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
+                                               node_id: counterparty_node_id,
+                                               updates: update,
+                                       });
+                               }
+                       } }
+                       macro_rules! handle_raa { () => {
+                               if let Some(revoke_and_ack) = $raa {
+                                       $channel_state.pending_msg_events.push(events::MessageSendEvent::SendRevokeAndACK {
+                                               node_id: counterparty_node_id,
+                                               msg: revoke_and_ack,
+                                       });
+                               }
+                       } }
+                       match $order {
+                               RAACommitmentOrder::CommitmentFirst => {
+                                       handle_cs!();
+                                       handle_raa!();
+                               },
+                               RAACommitmentOrder::RevokeAndACKFirst => {
+                                       handle_raa!();
+                                       handle_cs!();
+                               },
+                       }
+                       if let Some(tx) = funding_broadcastable {
+                               log_info!($self.logger, "Broadcasting funding transaction with txid {}", tx.txid());
+                               $self.tx_broadcaster.broadcast_transaction(&tx);
+                       }
+                       break Ok(());
+               };
+
+               if chanmon_update_is_none {
+                       // If there was no ChannelMonitorUpdate, we should never generate an Err in the res loop
+                       // above. Doing so would imply calling handle_error!() from channel_monitor_updated() which
+                       // should *never* end up calling back to `chain_monitor.update_channel()`.
+                       assert!(res.is_ok());
+               }
+
+               (htlc_forwards, res, counterparty_node_id)
+       } }
+}
+
+macro_rules! post_handle_chan_restoration {
+       ($self: ident, $locked_res: expr) => { {
+               let (htlc_forwards, res, counterparty_node_id) = $locked_res;
+
+               let _ = handle_error!($self, res, counterparty_node_id);
+
+               if let Some(forwards) = htlc_forwards {
+                       $self.forward_htlcs(&mut [forwards][..]);
+               }
+       } }
+}
+
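These two macros are meant to be called in sequence, releasing the channel lock in between; a sketch of the call shape (the variable names are illustrative, not from the patch):

// While holding the channel_state lock:
let chan_restoration_res = handle_chan_restoration_locked!(self, channel_lock,
    channel_state, channel_entry, raa, commitment_update, order,
    chanmon_update, pending_forwards, funding_broadcastable, funding_locked);
drop(channel_lock); // Release the lock before error handling and HTLC forwarding.
post_handle_chan_restoration!(self, chan_restoration_res);
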
 impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelManager<Signer, M, T, K, F, L>
        where M::Target: chain::Watch<Signer>,
         T::Target: BroadcasterInterface,
@@ -965,6 +1147,10 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
        ///
        /// Raises APIError::APIMisuseError when channel_value_satoshis > 2**24 or push_msat is
        /// greater than channel_value_satoshis * 1k or channel_value_satoshis is < 1000.
+       ///
+       /// Note that we do not check if you are currently connected to the given peer. If no
+       /// connection is available, the outbound `open_channel` message may fail to send, resulting in
+       /// the channel eventually being silently forgotten.
        pub fn create_channel(&self, their_network_key: PublicKey, channel_value_satoshis: u64, push_msat: u64, user_id: u64, override_config: Option<UserConfig>) -> Result<(), APIError> {
                if channel_value_satoshis < 1000 {
                        return Err(APIError::APIMisuseError { err: format!("Channel value must be at least 1000 satoshis. It was {}", channel_value_satoshis) });
@@ -1003,28 +1189,36 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                        res.reserve(channel_state.by_id.len());
                        for (channel_id, channel) in channel_state.by_id.iter().filter(f) {
                                let (inbound_capacity_msat, outbound_capacity_msat) = channel.get_inbound_outbound_available_balance_msat();
+                               let (to_remote_reserve_satoshis, to_self_reserve_satoshis) =
+                                       channel.get_holder_counterparty_selected_channel_reserve_satoshis();
                                res.push(ChannelDetails {
                                        channel_id: (*channel_id).clone(),
+                                       counterparty: ChannelCounterparty {
+                                               node_id: channel.get_counterparty_node_id(),
+                                               features: InitFeatures::empty(),
+                                               unspendable_punishment_reserve: to_remote_reserve_satoshis,
+                                               forwarding_info: channel.counterparty_forwarding_info(),
+                                       },
                                        funding_txo: channel.get_funding_txo(),
                                        short_channel_id: channel.get_short_channel_id(),
-                                       remote_network_id: channel.get_counterparty_node_id(),
-                                       counterparty_features: InitFeatures::empty(),
                                        channel_value_satoshis: channel.get_value_satoshis(),
+                                       unspendable_punishment_reserve: to_self_reserve_satoshis,
                                        inbound_capacity_msat,
                                        outbound_capacity_msat,
                                        user_id: channel.get_user_id(),
+                                       confirmations_required: channel.minimum_depth(),
+                                       force_close_spend_delay: channel.get_counterparty_selected_contest_delay(),
                                        is_outbound: channel.is_outbound(),
                                        is_funding_locked: channel.is_usable(),
                                        is_usable: channel.is_live(),
                                        is_public: channel.should_announce(),
-                                       counterparty_forwarding_info: channel.counterparty_forwarding_info(),
                                });
                        }
                }
                let per_peer_state = self.per_peer_state.read().unwrap();
                for chan in res.iter_mut() {
-                       if let Some(peer_state) = per_peer_state.get(&chan.remote_network_id) {
-                               chan.counterparty_features = peer_state.lock().unwrap().latest_features.clone();
+                       if let Some(peer_state) = per_peer_state.get(&chan.counterparty.node_id) {
+                               chan.counterparty.features = peer_state.lock().unwrap().latest_features.clone();
                        }
                }
                res
@@ -1081,9 +1275,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                        self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_source.0, &htlc_source.1, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() });
                }
                let chan_update = if let Some(chan) = chan_option {
-                       if let Ok(update) = self.get_channel_update(&chan) {
-                               Some(update)
-                       } else { None }
+                       self.get_channel_update_for_broadcast(&chan).ok()
                } else { None };
 
                if let Some(update) = chan_update {
@@ -1099,7 +1291,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
        #[inline]
        fn finish_force_close_channel(&self, shutdown_res: ShutdownResult) {
                let (monitor_update_option, mut failed_htlcs) = shutdown_res;
-               log_trace!(self.logger, "Finishing force-closure of channel {} HTLCs to fail", failed_htlcs.len());
+               log_debug!(self.logger, "Finishing force-closure of channel with {} HTLCs to fail", failed_htlcs.len());
                for htlc_source in failed_htlcs.drain(..) {
                        self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_source.0, &htlc_source.1, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() });
                }
@@ -1130,9 +1322,9 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                return Err(APIError::ChannelUnavailable{err: "No such channel".to_owned()});
                        }
                };
-               log_trace!(self.logger, "Force-closing channel {}", log_bytes!(channel_id[..]));
+               log_error!(self.logger, "Force-closing channel {}", log_bytes!(channel_id[..]));
                self.finish_force_close_channel(chan.force_shutdown(true));
-               if let Ok(update) = self.get_channel_update(&chan) {
+               if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
                        let mut channel_state = self.channel_state.lock().unwrap();
                        channel_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
                                msg: update
@@ -1255,121 +1447,141 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                };
 
                let pending_forward_info = if next_hop_hmac == [0; 32] {
-                               #[cfg(test)]
-                               {
-                                       // In tests, make sure that the initial onion pcket data is, at least, non-0.
-                                       // We could do some fancy randomness test here, but, ehh, whatever.
-                                       // This checks for the issue where you can calculate the path length given the
-                                       // onion data as all the path entries that the originator sent will be here
-                                       // as-is (and were originally 0s).
-                                       // Of course reverse path calculation is still pretty easy given naive routing
-                                       // algorithms, but this fixes the most-obvious case.
-                                       let mut next_bytes = [0; 32];
-                                       chacha_stream.read_exact(&mut next_bytes).unwrap();
-                                       assert_ne!(next_bytes[..], [0; 32][..]);
-                                       chacha_stream.read_exact(&mut next_bytes).unwrap();
-                                       assert_ne!(next_bytes[..], [0; 32][..]);
-                               }
-
-                               // OUR PAYMENT!
-                               // final_expiry_too_soon
-                               // We have to have some headroom to broadcast on chain if we have the preimage, so make sure we have at least
-                               // HTLC_FAIL_BACK_BUFFER blocks to go.
-                               // Also, ensure that, in the case of an unknown payment hash, our payment logic has enough time to fail the HTLC backward
-                               // before our onchain logic triggers a channel closure (see HTLC_FAIL_BACK_BUFFER rational).
-                               if (msg.cltv_expiry as u64) <= self.best_block.read().unwrap().height() as u64 + HTLC_FAIL_BACK_BUFFER as u64 + 1 {
-                                       return_err!("The final CLTV expiry is too soon to handle", 17, &[0;0]);
-                               }
-                               // final_incorrect_htlc_amount
-                               if next_hop_data.amt_to_forward > msg.amount_msat {
-                                       return_err!("Upstream node sent less than we were supposed to receive in payment", 19, &byte_utils::be64_to_array(msg.amount_msat));
-                               }
-                               // final_incorrect_cltv_expiry
-                               if next_hop_data.outgoing_cltv_value != msg.cltv_expiry {
-                                       return_err!("Upstream node set CLTV to the wrong value", 18, &byte_utils::be32_to_array(msg.cltv_expiry));
-                               }
-
-                               let payment_data = match next_hop_data.format {
-                                       msgs::OnionHopDataFormat::Legacy { .. } => None,
-                                       msgs::OnionHopDataFormat::NonFinalNode { .. } => return_err!("Got non final data with an HMAC of 0", 0x4000 | 22, &[0;0]),
-                                       msgs::OnionHopDataFormat::FinalNode { payment_data } => payment_data,
-                               };
+                       #[cfg(test)]
+                       {
+                               // In tests, make sure that the initial onion packet data is, at least, non-0.
+                               // We could do some fancy randomness test here, but, ehh, whatever.
+                               // This checks for the issue where you can calculate the path length given the
+                               // onion data as all the path entries that the originator sent will be here
+                               // as-is (and were originally 0s).
+                               // Of course reverse path calculation is still pretty easy given naive routing
+                               // algorithms, but this fixes the most-obvious case.
+                               let mut next_bytes = [0; 32];
+                               chacha_stream.read_exact(&mut next_bytes).unwrap();
+                               assert_ne!(next_bytes[..], [0; 32][..]);
+                               chacha_stream.read_exact(&mut next_bytes).unwrap();
+                               assert_ne!(next_bytes[..], [0; 32][..]);
+                       }
 
-                               if payment_data.is_none() {
-                                       return_err!("We require payment_secrets", 0x4000|0x2000|3, &[0;0]);
-                               }
+                       // OUR PAYMENT!
+                       // final_expiry_too_soon
+                       // We have to have some headroom to broadcast on chain if we have the preimage, so make sure
+                       // we have at least HTLC_FAIL_BACK_BUFFER blocks to go.
+                       // Also, ensure that, in the case of an unknown preimage for the received payment hash, our
+                       // payment logic has enough time to fail the HTLC backward before our onchain logic triggers a
+                       // channel closure (see HTLC_FAIL_BACK_BUFFER rationale).
+                       if (msg.cltv_expiry as u64) <= self.best_block.read().unwrap().height() as u64 + HTLC_FAIL_BACK_BUFFER as u64 + 1 {
+                               return_err!("The final CLTV expiry is too soon to handle", 17, &[0;0]);
+                       }
+                       // final_incorrect_htlc_amount
+                       if next_hop_data.amt_to_forward > msg.amount_msat {
+                               return_err!("Upstream node sent less than we were supposed to receive in payment", 19, &byte_utils::be64_to_array(msg.amount_msat));
+                       }
+                       // final_incorrect_cltv_expiry
+                       if next_hop_data.outgoing_cltv_value != msg.cltv_expiry {
+                               return_err!("Upstream node set CLTV to the wrong value", 18, &byte_utils::be32_to_array(msg.cltv_expiry));
+                       }
 
-                               // Note that we could obviously respond immediately with an update_fulfill_htlc
-                               // message, however that would leak that we are the recipient of this payment, so
-                               // instead we stay symmetric with the forwarding case, only responding (after a
-                               // delay) once they've send us a commitment_signed!
+                       let routing = match next_hop_data.format {
+                               msgs::OnionHopDataFormat::Legacy { .. } => return_err!("We require payment_secrets", 0x4000|0x2000|3, &[0;0]),
+                               msgs::OnionHopDataFormat::NonFinalNode { .. } => return_err!("Got non final data with an HMAC of 0", 0x4000 | 22, &[0;0]),
+                               msgs::OnionHopDataFormat::FinalNode { payment_data, keysend_preimage } => {
+                                       if payment_data.is_some() && keysend_preimage.is_some() {
+                                               return_err!("We don't support MPP keysend payments", 0x4000|22, &[0;0]);
+                                       } else if let Some(data) = payment_data {
+                                               PendingHTLCRouting::Receive {
+                                                       payment_data: data,
+                                                       incoming_cltv_expiry: msg.cltv_expiry,
+                                               }
+                                       } else if let Some(payment_preimage) = keysend_preimage {
+                                               // We need to check that the sender knows the keysend preimage before processing this
+                                               // payment further. Otherwise, an intermediary routing hop forwarding non-keysend-HTLC X
+                                               // could discover the final destination of X, by probing the adjacent nodes on the route
+                                               // with a keysend payment of identical payment hash to X and observing the processing
+                                               // time discrepancies due to a hash collision with X.
+                                               let hashed_preimage = PaymentHash(Sha256::hash(&payment_preimage.0).into_inner());
+                                               if hashed_preimage != msg.payment_hash {
+                                                       return_err!("Payment preimage didn't match payment hash", 0x4000|22, &[0;0]);
+                                               }
 
-                               PendingHTLCStatus::Forward(PendingHTLCInfo {
-                                       routing: PendingHTLCRouting::Receive {
-                                               payment_data: payment_data.unwrap(),
-                                               incoming_cltv_expiry: msg.cltv_expiry,
-                                       },
-                                       payment_hash: msg.payment_hash.clone(),
-                                       incoming_shared_secret: shared_secret,
-                                       amt_to_forward: next_hop_data.amt_to_forward,
-                                       outgoing_cltv_value: next_hop_data.outgoing_cltv_value,
-                               })
-                       } else {
-                               let mut new_packet_data = [0; 20*65];
-                               let read_pos = chacha_stream.read(&mut new_packet_data).unwrap();
-                               #[cfg(debug_assertions)]
-                               {
-                                       // Check two things:
-                                       // a) that the behavior of our stream here will return Ok(0) even if the TLV
-                                       //    read above emptied out our buffer and the unwrap() wont needlessly panic
-                                       // b) that we didn't somehow magically end up with extra data.
-                                       let mut t = [0; 1];
-                                       debug_assert!(chacha_stream.read(&mut t).unwrap() == 0);
-                               }
-                               // Once we've emptied the set of bytes our peer gave us, encrypt 0 bytes until we
-                               // fill the onion hop data we'll forward to our next-hop peer.
-                               chacha_stream.chacha.process_in_place(&mut new_packet_data[read_pos..]);
+                                               PendingHTLCRouting::ReceiveKeysend {
+                                                       payment_preimage,
+                                                       incoming_cltv_expiry: msg.cltv_expiry,
+                                               }
+                                       } else {
+                                               return_err!("We require payment_secrets", 0x4000|0x2000|3, &[0;0]);
+                                       }
+                               },
+                       };
 
-                               let mut new_pubkey = msg.onion_routing_packet.public_key.unwrap();
+                       // Note that we could obviously respond immediately with an update_fulfill_htlc
+                       // message, however that would leak that we are the recipient of this payment, so
+                       // instead we stay symmetric with the forwarding case, only responding (after a
+                       // delay) once they've sent us a commitment_signed!
+
+                       PendingHTLCStatus::Forward(PendingHTLCInfo {
+                               routing,
+                               payment_hash: msg.payment_hash.clone(),
+                               incoming_shared_secret: shared_secret,
+                               amt_to_forward: next_hop_data.amt_to_forward,
+                               outgoing_cltv_value: next_hop_data.outgoing_cltv_value,
+                       })
+               } else {
+                       let mut new_packet_data = [0; 20*65];
+                       let read_pos = chacha_stream.read(&mut new_packet_data).unwrap();
+                       #[cfg(debug_assertions)]
+                       {
+                               // Check two things:
+                               // a) that the behavior of our stream here will return Ok(0) even if the TLV
+                               //    read above emptied out our buffer and the unwrap() won't needlessly panic
+                               // b) that we didn't somehow magically end up with extra data.
+                               let mut t = [0; 1];
+                               debug_assert!(chacha_stream.read(&mut t).unwrap() == 0);
+                       }
+                       // Once we've emptied the set of bytes our peer gave us, encrypt 0 bytes until we
+                       // fill the onion hop data we'll forward to our next-hop peer.
+                       chacha_stream.chacha.process_in_place(&mut new_packet_data[read_pos..]);
 
-                               let blinding_factor = {
-                                       let mut sha = Sha256::engine();
-                                       sha.input(&new_pubkey.serialize()[..]);
-                                       sha.input(&shared_secret);
-                                       Sha256::from_engine(sha).into_inner()
-                               };
+                       let mut new_pubkey = msg.onion_routing_packet.public_key.unwrap();
 
-                               let public_key = if let Err(e) = new_pubkey.mul_assign(&self.secp_ctx, &blinding_factor[..]) {
-                                       Err(e)
-                               } else { Ok(new_pubkey) };
+                       let blinding_factor = {
+                               let mut sha = Sha256::engine();
+                               sha.input(&new_pubkey.serialize()[..]);
+                               sha.input(&shared_secret);
+                               Sha256::from_engine(sha).into_inner()
+                       };
 
-                               let outgoing_packet = msgs::OnionPacket {
-                                       version: 0,
-                                       public_key,
-                                       hop_data: new_packet_data,
-                                       hmac: next_hop_hmac.clone(),
-                               };
+                       let public_key = if let Err(e) = new_pubkey.mul_assign(&self.secp_ctx, &blinding_factor[..]) {
+                               Err(e)
+                       } else { Ok(new_pubkey) };
 
-                               let short_channel_id = match next_hop_data.format {
-                                       msgs::OnionHopDataFormat::Legacy { short_channel_id } => short_channel_id,
-                                       msgs::OnionHopDataFormat::NonFinalNode { short_channel_id } => short_channel_id,
-                                       msgs::OnionHopDataFormat::FinalNode { .. } => {
-                                               return_err!("Final Node OnionHopData provided for us as an intermediary node", 0x4000 | 22, &[0;0]);
-                                       },
-                               };
+                       let outgoing_packet = msgs::OnionPacket {
+                               version: 0,
+                               public_key,
+                               hop_data: new_packet_data,
+                               hmac: next_hop_hmac.clone(),
+                       };
 
-                               PendingHTLCStatus::Forward(PendingHTLCInfo {
-                                       routing: PendingHTLCRouting::Forward {
-                                               onion_packet: outgoing_packet,
-                                               short_channel_id,
-                                       },
-                                       payment_hash: msg.payment_hash.clone(),
-                                       incoming_shared_secret: shared_secret,
-                                       amt_to_forward: next_hop_data.amt_to_forward,
-                                       outgoing_cltv_value: next_hop_data.outgoing_cltv_value,
-                               })
+                       let short_channel_id = match next_hop_data.format {
+                               msgs::OnionHopDataFormat::Legacy { short_channel_id } => short_channel_id,
+                               msgs::OnionHopDataFormat::NonFinalNode { short_channel_id } => short_channel_id,
+                               msgs::OnionHopDataFormat::FinalNode { .. } => {
+                                       return_err!("Final Node OnionHopData provided for us as an intermediary node", 0x4000 | 22, &[0;0]);
+                               },
                        };
 
+                       PendingHTLCStatus::Forward(PendingHTLCInfo {
+                               routing: PendingHTLCRouting::Forward {
+                                       onion_packet: outgoing_packet,
+                                       short_channel_id,
+                               },
+                               payment_hash: msg.payment_hash.clone(),
+                               incoming_shared_secret: shared_secret,
+                               amt_to_forward: next_hop_data.amt_to_forward,
+                               outgoing_cltv_value: next_hop_data.outgoing_cltv_value,
+                       })
+               };
+
                channel_state = Some(self.channel_state.lock().unwrap());
                if let &PendingHTLCStatus::Forward(PendingHTLCInfo { ref routing, ref amt_to_forward, ref outgoing_cltv_value, .. }) = &pending_forward_info {
                        // If short_channel_id is 0 here, we'll reject the HTLC as there cannot be a channel
@@ -1377,46 +1589,56 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                        // short_channel_id is non-0 in any ::Forward.
                        if let &PendingHTLCRouting::Forward { ref short_channel_id, .. } = routing {
                                let id_option = channel_state.as_ref().unwrap().short_to_id.get(&short_channel_id).cloned();
-                               let forwarding_id = match id_option {
-                                       None => { // unknown_next_peer
-                                               return_err!("Don't have available channel for forwarding as requested.", 0x4000 | 10, &[0;0]);
-                                       },
-                                       Some(id) => id.clone(),
-                               };
                                if let Some((err, code, chan_update)) = loop {
+                                       let forwarding_id = match id_option {
+                                               None => { // unknown_next_peer
+                                                       break Some(("Don't have available channel for forwarding as requested.", 0x4000 | 10, None));
+                                               },
+                                               Some(id) => id.clone(),
+                                       };
+
                                        let chan = channel_state.as_mut().unwrap().by_id.get_mut(&forwarding_id).unwrap();
 
+                                       if !chan.should_announce() && !self.default_configuration.accept_forwards_to_priv_channels {
+                                               // Note that the behavior here should be identical to the above block - we
+                                               // should NOT reveal the existence or non-existence of a private channel if
+                                               // we don't allow forwards outbound over them.
+                                               break Some(("Don't have available channel for forwarding as requested.", 0x4000 | 10, None));
+                                       }
+
                                        // Note that we could technically not return an error yet here and just hope
                                        // that the connection is reestablished or monitor updated by the time we get
                                        // around to doing the actual forward, but better to fail early if we can and
                                        // hopefully an attacker trying to path-trace payments cannot make this occur
                                        // on a small/per-node/per-channel scale.
                                        if !chan.is_live() { // channel_disabled
-                                               break Some(("Forwarding channel is not in a ready state.", 0x1000 | 20, Some(self.get_channel_update(chan).unwrap())));
+                                               break Some(("Forwarding channel is not in a ready state.", 0x1000 | 20, Some(self.get_channel_update_for_unicast(chan).unwrap())));
                                        }
                                        if *amt_to_forward < chan.get_counterparty_htlc_minimum_msat() { // amount_below_minimum
-                                               break Some(("HTLC amount was below the htlc_minimum_msat", 0x1000 | 11, Some(self.get_channel_update(chan).unwrap())));
+                                               break Some(("HTLC amount was below the htlc_minimum_msat", 0x1000 | 11, Some(self.get_channel_update_for_unicast(chan).unwrap())));
                                        }
-                                       let fee = amt_to_forward.checked_mul(chan.get_fee_proportional_millionths() as u64).and_then(|prop_fee| { (prop_fee / 1000000).checked_add(chan.get_holder_fee_base_msat(&self.fee_estimator) as u64) });
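+                                       // Expected forwarding fee: fee_base_msat + amt_to_forward * fee_proportional_millionths / 1_000_000.
+                                       // For example, forwarding 1_000_000 msat with a 1_000 msat base and 100 ppm requires at least 1_100 msat in fees.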
+                                       let fee = amt_to_forward.checked_mul(chan.get_fee_proportional_millionths() as u64)
+                                               .and_then(|prop_fee| { (prop_fee / 1000000)
+                                               .checked_add(chan.get_outbound_forwarding_fee_base_msat() as u64) });
                                        if fee.is_none() || msg.amount_msat < fee.unwrap() || (msg.amount_msat - fee.unwrap()) < *amt_to_forward { // fee_insufficient
-                                               break Some(("Prior hop has deviated from specified fees parameters or origin node has obsolete ones", 0x1000 | 12, Some(self.get_channel_update(chan).unwrap())));
+                                               break Some(("Prior hop has deviated from specified fees parameters or origin node has obsolete ones", 0x1000 | 12, Some(self.get_channel_update_for_unicast(chan).unwrap())));
                                        }
                                        if (msg.cltv_expiry as u64) < (*outgoing_cltv_value) as u64 + chan.get_cltv_expiry_delta() as u64 { // incorrect_cltv_expiry
-                                               break Some(("Forwarding node has tampered with the intended HTLC values or origin node has an obsolete cltv_expiry_delta", 0x1000 | 13, Some(self.get_channel_update(chan).unwrap())));
+                                               break Some(("Forwarding node has tampered with the intended HTLC values or origin node has an obsolete cltv_expiry_delta", 0x1000 | 13, Some(self.get_channel_update_for_unicast(chan).unwrap())));
                                        }
                                        let cur_height = self.best_block.read().unwrap().height() + 1;
                                        // Theoretically, the channel counterparty shouldn't send us an HTLC expiring now, but we want to be robust
                                        // with respect to counterparty packet sanitization (see the HTLC_FAIL_BACK_BUFFER rationale)
                                        if msg.cltv_expiry <= cur_height + HTLC_FAIL_BACK_BUFFER as u32 { // expiry_too_soon
-                                               break Some(("CLTV expiry is too close", 0x1000 | 14, Some(self.get_channel_update(chan).unwrap())));
+                                               break Some(("CLTV expiry is too close", 0x1000 | 14, Some(self.get_channel_update_for_unicast(chan).unwrap())));
                                        }
                                        if msg.cltv_expiry > cur_height + CLTV_FAR_FAR_AWAY as u32 { // expiry_too_far
                                                break Some(("CLTV expiry is too far in the future", 21, None));
                                        }
-                                       // In theory, we would be safe against unitentional channel-closure, if we only required a margin of LATENCY_GRACE_PERIOD_BLOCKS.
-                                       // But, to be safe against policy reception, we use a longuer delay.
+                                       // In theory, we would be safe against unintentional channel-closure if we only required a margin of LATENCY_GRACE_PERIOD_BLOCKS.
+                                       // But, to be safe against policy reception, we use a longer delay.
                                        if (*outgoing_cltv_value) as u64 <= (cur_height + HTLC_FAIL_BACK_BUFFER) as u64 {
-                                               break Some(("Outgoing CLTV value is too soon", 0x1000 | 14, Some(self.get_channel_update(chan).unwrap())));
+                                               break Some(("Outgoing CLTV value is too soon", 0x1000 | 14, Some(self.get_channel_update_for_unicast(chan).unwrap())));
                                        }
 
                                        break None;
@@ -1444,9 +1666,29 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                (pending_forward_info, channel_state.unwrap())
        }
 
-       /// only fails if the channel does not yet have an assigned short_id
+       /// Gets the current channel_update for the given channel. This first checks if the channel is
+       /// public, and thus should be called whenever the result is going to be passed out in a
+       /// [`MessageSendEvent::BroadcastChannelUpdate`] event.
+       ///
+       /// May be called with channel_state already locked!
+       fn get_channel_update_for_broadcast(&self, chan: &Channel<Signer>) -> Result<msgs::ChannelUpdate, LightningError> {
+               if !chan.should_announce() {
+                       return Err(LightningError {
+                               err: "Cannot broadcast a channel_update for a private channel".to_owned(),
+                               action: msgs::ErrorAction::IgnoreError
+                       });
+               }
+               log_trace!(self.logger, "Attempting to generate broadcast channel update for channel {}", log_bytes!(chan.channel_id()));
+               self.get_channel_update_for_unicast(chan)
+       }
+
+       /// Gets the current channel_update for the given channel. This does not check if the channel
+       /// is public (only returning an Err if the channel does not yet have an assigned short_id),
+       /// and thus MUST NOT be called unless the recipient of the resulting message has already
+       /// provided evidence that they know about the existence of the channel.
        /// May be called with channel_state already locked!
-       fn get_channel_update(&self, chan: &Channel<Signer>) -> Result<msgs::ChannelUpdate, LightningError> {
+       fn get_channel_update_for_unicast(&self, chan: &Channel<Signer>) -> Result<msgs::ChannelUpdate, LightningError> {
+               log_trace!(self.logger, "Attempting to generate channel update for channel {}", log_bytes!(chan.channel_id()));
                let short_channel_id = match chan.get_short_channel_id() {
                        None => return Err(LightningError{err: "Channel not yet established".to_owned(), action: msgs::ErrorAction::IgnoreError}),
                        Some(id) => id,
@@ -1462,7 +1704,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                        cltv_expiry_delta: chan.get_cltv_expiry_delta(),
                        htlc_minimum_msat: chan.get_counterparty_htlc_minimum_msat(),
                        htlc_maximum_msat: OptionalField::Present(chan.get_announced_htlc_max_msat()),
-                       fee_base_msat: chan.get_holder_fee_base_msat(&self.fee_estimator),
+                       fee_base_msat: chan.get_outbound_forwarding_fee_base_msat(),
                        fee_proportional_millionths: chan.get_fee_proportional_millionths(),
                        excess_data: Vec::new(),
                };
@@ -1477,7 +1719,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
        }
 
        // Only public for testing; this should otherwise never be called directly
-       pub(crate) fn send_payment_along_path(&self, path: &Vec<RouteHop>, payment_hash: &PaymentHash, payment_secret: &Option<PaymentSecret>, total_value: u64, cur_height: u32) -> Result<(), APIError> {
+       pub(crate) fn send_payment_along_path(&self, path: &Vec<RouteHop>, payment_hash: &PaymentHash, payment_secret: &Option<PaymentSecret>, total_value: u64, cur_height: u32, keysend_preimage: &Option<PaymentPreimage>) -> Result<(), APIError> {
                log_trace!(self.logger, "Attempting to send payment for path with next hop {}", path.first().unwrap().short_channel_id);
                let prng_seed = self.keys_manager.get_secure_random_bytes();
                let session_priv_bytes = self.keys_manager.get_secure_random_bytes();
@@ -1485,7 +1727,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
 
                let onion_keys = onion_utils::construct_onion_keys(&self.secp_ctx, &path, &session_priv)
                        .map_err(|_| APIError::RouteError{err: "Pubkey along hop was maliciously selected"})?;
-               let (onion_payloads, htlc_msat, htlc_cltv) = onion_utils::build_onion_payloads(path, total_value, payment_secret, cur_height)?;
+               let (onion_payloads, htlc_msat, htlc_cltv) = onion_utils::build_onion_payloads(path, total_value, payment_secret, cur_height, keysend_preimage)?;
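+               // The serialized hop payloads must fit in the fixed 1300-byte (20 * 65) onion,
+               // which the sanity check below enforces.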
                if onion_utils::route_size_insane(&onion_payloads) {
                        return Err(APIError::RouteError{err: "Route size too large considering onion data"});
                }
@@ -1527,6 +1769,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                                        return Err(APIError::MonitorUpdateFailed);
                                                }
 
+                                               log_debug!(self.logger, "Sending payment along path resulted in a commitment_signed for channel {}", log_bytes!(chan.get().channel_id()));
                                                channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
                                                        node_id: path.first().unwrap().pubkey,
                                                        updates: msgs::CommitmentUpdate {
@@ -1593,6 +1836,10 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
        /// bit set (either as required or as available). If multiple paths are present in the Route,
        /// we assume the invoice had the basic_mpp feature set.
        pub fn send_payment(&self, route: &Route, payment_hash: PaymentHash, payment_secret: &Option<PaymentSecret>) -> Result<(), PaymentSendFailure> {
+               self.send_payment_internal(route, payment_hash, payment_secret, None)
+       }
+
+       fn send_payment_internal(&self, route: &Route, payment_hash: PaymentHash, payment_secret: &Option<PaymentSecret>, keysend_preimage: Option<PaymentPreimage>) -> Result<(), PaymentSendFailure> {
                if route.paths.len() < 1 {
                        return Err(PaymentSendFailure::ParameterError(APIError::RouteError{err: "There must be at least one path to send over"}));
                }
@@ -1626,7 +1873,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                let cur_height = self.best_block.read().unwrap().height() + 1;
                let mut results = Vec::new();
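+               // With a payment_secret, each path's final payload carries total_value so the
+               // recipient can tell when all MPP parts have arrived.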
                for path in route.paths.iter() {
-                       results.push(self.send_payment_along_path(&path, &payment_hash, payment_secret, total_value, cur_height));
+                       results.push(self.send_payment_along_path(&path, &payment_hash, payment_secret, total_value, cur_height, &keysend_preimage));
                }
                let mut has_ok = false;
                let mut has_err = false;
@@ -1650,6 +1897,28 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                }
        }
 
+       /// Send a spontaneous payment, which is a payment that does not require the recipient to have
+       /// generated an invoice. Optionally, you may specify the preimage. If you do choose to specify
+       /// the preimage, it must be a cryptographically secure random value that no intermediate node
+       /// would be able to guess -- otherwise, an intermediate node may claim the payment and it will
+       /// never reach the recipient.
+       ///
+       /// Similar to regular payments, you MUST NOT reuse a `payment_preimage` value. See
+       /// [`send_payment`] for more information about the risks of duplicate preimage usage.
+       ///
+       /// [`send_payment`]: Self::send_payment
+       pub fn send_spontaneous_payment(&self, route: &Route, payment_preimage: Option<PaymentPreimage>) -> Result<PaymentHash, PaymentSendFailure> {
+               let preimage = match payment_preimage {
+                       Some(p) => p,
+                       None => PaymentPreimage(self.keys_manager.get_secure_random_bytes()),
+               };
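+               // For keysend, the payment hash is simply the SHA256 of the preimage; the
+               // recipient learns the preimage from the onion and can claim immediately.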
+               let payment_hash = PaymentHash(Sha256::hash(&preimage.0).into_inner());
+               match self.send_payment_internal(route, payment_hash, &None, Some(preimage)) {
+                       Ok(()) => Ok(payment_hash),
+                       Err(e) => Err(e)
+               }
+       }
+
        /// Handles the generation of a funding transaction, optionally (for tests) with a function
        /// which checks the correctness of the funding transaction given the associated channel.
        fn funding_transaction_generated_intern<FundingOutput: Fn(&Channel<Signer>, &Transaction) -> Result<OutPoint, APIError>>
@@ -1718,6 +1987,8 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
        /// Note that this includes RBF or similar transaction replacement strategies - lightning does
        /// not currently support replacing a funding transaction on an existing channel. Instead,
        /// create a new channel with a conflicting funding transaction.
+       ///
+       /// [`Event::FundingGenerationReady`]: crate::util::events::Event::FundingGenerationReady
        pub fn funding_transaction_generated(&self, temporary_channel_id: &[u8; 32], funding_transaction: Transaction) -> Result<(), APIError> {
                let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
 
@@ -1781,26 +2052,31 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
        // be absurd. We ensure this by checking that at least 500 (our stated public contract on when
        // broadcast_node_announcement panics) of the maximum-length addresses would fit in a 64KB
        // message...
-       const HALF_MESSAGE_IS_ADDRS: u32 = ::std::u16::MAX as u32 / (NetAddress::MAX_LEN as u32 + 1) / 2;
+       const HALF_MESSAGE_IS_ADDRS: u32 = ::core::u16::MAX as u32 / (NetAddress::MAX_LEN as u32 + 1) / 2;
        #[deny(const_err)]
        #[allow(dead_code)]
        // ...by failing to compile if the number of addresses that would be half of a message is
        // smaller than 500:
        const STATIC_ASSERT: u32 = Self::HALF_MESSAGE_IS_ADDRS - 500;
 
-       /// Generates a signed node_announcement from the given arguments and creates a
-       /// BroadcastNodeAnnouncement event. Note that such messages will be ignored unless peers have
-       /// seen a channel_announcement from us (ie unless we have public channels open).
+       /// Regenerates channel_announcements and generates a signed node_announcement from the given
+       /// arguments, providing them in corresponding events via
+       /// [`get_and_clear_pending_msg_events`], if at least one public channel has been confirmed
+       /// on-chain. This effectively re-broadcasts all channel announcements and sends our node
+       /// announcement to ensure that the lightning P2P network is aware of the channels we have and
+       /// our network addresses.
        ///
-       /// RGB is a node "color" and alias is a printable human-readable string to describe this node
-       /// to humans. They carry no in-protocol meaning.
+       /// `rgb` is a node "color" and `alias` is a printable human-readable string to describe this
+       /// node to humans. They carry no in-protocol meaning.
        ///
-       /// addresses represent the set (possibly empty) of socket addresses on which this node accepts
-       /// incoming connections. These will be broadcast to the network, publicly tying these
-       /// addresses together. If you wish to preserve user privacy, addresses should likely contain
-       /// only Tor Onion addresses.
+       /// `addresses` represent the set (possibly empty) of socket addresses on which this node
+       /// accepts incoming connections. These will be included in the node_announcement, publicly
+       /// tying these addresses together and to this node. If you wish to preserve user privacy,
+       /// addresses should likely contain only Tor Onion addresses.
        ///
-       /// Panics if addresses is absurdly large (more than 500).
+       /// Panics if `addresses` is absurdly large (more than 500).
+       ///
+       /// [`get_and_clear_pending_msg_events`]: MessageSendEventsProvider::get_and_clear_pending_msg_events
        pub fn broadcast_node_announcement(&self, rgb: [u8; 3], alias: [u8; 32], mut addresses: Vec<NetAddress>) {
                let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
 
@@ -1821,14 +2097,37 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                        excess_data: Vec::new(),
                };
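+               // BOLT 7 signatures sign the double-SHA256 of the serialized contents, so we
+               // hash the announcement first and sign the digest with our node key.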
                let msghash = hash_to_message!(&Sha256dHash::hash(&announcement.encode()[..])[..]);
+               let node_announce_sig = self.secp_ctx.sign(&msghash, &self.our_network_key);
 
-               let mut channel_state = self.channel_state.lock().unwrap();
-               channel_state.pending_msg_events.push(events::MessageSendEvent::BroadcastNodeAnnouncement {
-                       msg: msgs::NodeAnnouncement {
-                               signature: self.secp_ctx.sign(&msghash, &self.our_network_key),
-                               contents: announcement
-                       },
-               });
+               let mut channel_state_lock = self.channel_state.lock().unwrap();
+               let channel_state = &mut *channel_state_lock;
+
+               let mut announced_chans = false;
+               for (_, chan) in channel_state.by_id.iter() {
+                       if let Some(msg) = chan.get_signed_channel_announcement(&self.our_network_key, self.get_our_node_id(), self.genesis_hash.clone()) {
+                               channel_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelAnnouncement {
+                                       msg,
+                                       update_msg: match self.get_channel_update_for_broadcast(chan) {
+                                               Ok(msg) => msg,
+                                               Err(_) => continue,
+                                       },
+                               });
+                               announced_chans = true;
+                       } else {
+                               // If the channel is not public or has not yet reached funding_locked, check the
+                               // next channel. If we don't yet have any public channels, we'll skip the broadcast
+                               // below as peers may not accept it without channels on chain first.
+                       }
+               }
+
+               if announced_chans {
+                       channel_state.pending_msg_events.push(events::MessageSendEvent::BroadcastNodeAnnouncement {
+                               msg: msgs::NodeAnnouncement {
+                                       signature: node_announce_sig,
+                                       contents: announcement
+                               },
+                       });
+               }
        }
 
        /// Processes HTLCs which are pending waiting on random forward delay.
@@ -1886,7 +2185,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                                                                        onion_packet, ..
                                                                                }, incoming_shared_secret, payment_hash, amt_to_forward, outgoing_cltv_value },
                                                                                prev_funding_outpoint } => {
-                                                                       log_trace!(self.logger, "Adding HTLC from short id {} with payment_hash {} to channel with short id {} after delay", log_bytes!(payment_hash.0), prev_short_channel_id, short_chan_id);
+                                                                       log_trace!(self.logger, "Adding HTLC from short id {} with payment_hash {} to channel with short id {} after delay", prev_short_channel_id, log_bytes!(payment_hash.0), short_chan_id);
                                                                        let htlc_source = HTLCSource::PreviousHopData(HTLCPreviousHopData {
                                                                                short_channel_id: prev_short_channel_id,
                                                                                outpoint: prev_funding_outpoint,
@@ -1900,7 +2199,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                                                                        } else {
                                                                                                panic!("Stated return value requirements in send_htlc() were not met");
                                                                                        }
-                                                                                       let chan_update = self.get_channel_update(chan.get()).unwrap();
+                                                                                       let chan_update = self.get_channel_update_for_unicast(chan.get()).unwrap();
                                                                                        failed_forwards.push((htlc_source, payment_hash,
                                                                                                HTLCFailReason::Reason { failure_code: 0x1000 | 7, data: chan_update.encode_with_len() }
                                                                                        ));
@@ -1926,11 +2225,11 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                                                        panic!("short_channel_id != 0 should imply any pending_forward entries are of type Forward");
                                                                },
                                                                HTLCForwardInfo::FailHTLC { htlc_id, err_packet } => {
-                                                                       log_trace!(self.logger, "Failing HTLC back to channel with short id {} after delay", short_chan_id);
-                                                                       match chan.get_mut().get_update_fail_htlc(htlc_id, err_packet) {
+                                                                       log_trace!(self.logger, "Failing HTLC back to channel with short id {} (backward HTLC ID {}) after delay", short_chan_id, htlc_id);
+                                                                       match chan.get_mut().get_update_fail_htlc(htlc_id, err_packet, &self.logger) {
                                                                                Err(e) => {
                                                                                        if let ChannelError::Ignore(msg) = e {
-                                                                                               log_trace!(self.logger, "Failed to fail backwards to short_id {}: {}", short_chan_id, msg);
+                                                                                               log_trace!(self.logger, "Failed to fail HTLC with ID {} backwards to short_id {}: {}", htlc_id, short_chan_id, msg);
                                                                                        } else {
                                                                                                panic!("Stated return value requirements in get_update_fail_htlc() were not met");
                                                                                        }
@@ -1972,7 +2271,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                                                                        if let Some(short_id) = channel.get_short_channel_id() {
                                                                                                channel_state.short_to_id.remove(&short_id);
                                                                                        }
-                                                                                       Err(MsgHandleErrInternal::from_finish_shutdown(msg, channel_id, channel.force_shutdown(true), self.get_channel_update(&channel).ok()))
+                                                                                       Err(MsgHandleErrInternal::from_finish_shutdown(msg, channel_id, channel.force_shutdown(true), self.get_channel_update_for_broadcast(&channel).ok()))
                                                                                },
                                                                                ChannelError::CloseDelayBroadcast(_) => { panic!("Wait is only generated on receipt of channel_reestablish, which is handled by try_chan_entry, we don't bother to support it here"); }
                                                                        };
@@ -1984,6 +2283,8 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                                                handle_errors.push((chan.get().get_counterparty_node_id(), handle_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::CommitmentFirst, false, true)));
                                                                continue;
                                                        }
+                                                       log_debug!(self.logger, "Forwarding HTLCs resulted in a commitment update with {} HTLCs added and {} HTLCs failed for channel {}",
+                                                               add_htlc_msgs.len(), fail_htlc_msgs.len(), log_bytes!(chan.get().channel_id()));
                                                        channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
                                                                node_id: chan.get().get_counterparty_node_id(),
                                                                updates: msgs::CommitmentUpdate {
@@ -2003,9 +2304,17 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                        for forward_info in pending_forwards.drain(..) {
                                                match forward_info {
                                                        HTLCForwardInfo::AddHTLC { prev_short_channel_id, prev_htlc_id, forward_info: PendingHTLCInfo {
-                                                                       routing: PendingHTLCRouting::Receive { payment_data, incoming_cltv_expiry },
-                                                                       incoming_shared_secret, payment_hash, amt_to_forward, .. },
+                                                                       routing, incoming_shared_secret, payment_hash, amt_to_forward, .. },
                                                                        prev_funding_outpoint } => {
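+                                                               // Pull out what we need to make the HTLC claimable: invoice
+                                                               // payments carry payment_data, keysend payments carry the preimage.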
+                                                               let (cltv_expiry, onion_payload) = match routing {
+                                                                       PendingHTLCRouting::Receive { payment_data, incoming_cltv_expiry } =>
+                                                                               (incoming_cltv_expiry, OnionPayload::Invoice(payment_data)),
+                                                                       PendingHTLCRouting::ReceiveKeysend { payment_preimage, incoming_cltv_expiry } =>
+                                                                               (incoming_cltv_expiry, OnionPayload::Spontaneous(payment_preimage)),
+                                                                       _ => {
+                                                                               panic!("short_channel_id == 0 should imply any pending_forward entries are of type Receive");
+                                                                       }
+                                                               };
                                                                let claimable_htlc = ClaimableHTLC {
                                                                        prev_hop: HTLCPreviousHopData {
                                                                                short_channel_id: prev_short_channel_id,
@@ -2014,8 +2323,8 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                                                                incoming_packet_shared_secret: incoming_shared_secret,
                                                                        },
                                                                        value: amt_to_forward,
-                                                                       payment_data: payment_data.clone(),
-                                                                       cltv_expiry: incoming_cltv_expiry,
+                                                                       cltv_expiry,
+                                                                       onion_payload,
                                                                };
 
                                                                macro_rules! fail_htlc {
@@ -2044,10 +2353,38 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                                                let mut payment_secrets = self.pending_inbound_payments.lock().unwrap();
                                                                match payment_secrets.entry(payment_hash) {
                                                                        hash_map::Entry::Vacant(_) => {
-                                                                               log_trace!(self.logger, "Failing new HTLC with payment_hash {} as we didn't have a corresponding inbound payment.", log_bytes!(payment_hash.0));
-                                                                               fail_htlc!(claimable_htlc);
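+                                                                               // Keysend HTLCs are never registered in pending_inbound_payments, so
+                                                                               // a vacant entry is the expected path for a spontaneous payment.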
+                                                                               match claimable_htlc.onion_payload {
+                                                                                       OnionPayload::Invoice(_) => {
+                                                                                               log_trace!(self.logger, "Failing new HTLC with payment_hash {} as we didn't have a corresponding inbound payment.", log_bytes!(payment_hash.0));
+                                                                                               fail_htlc!(claimable_htlc);
+                                                                                       },
+                                                                                       OnionPayload::Spontaneous(preimage) => {
+                                                                                               match channel_state.claimable_htlcs.entry(payment_hash) {
+                                                                                                       hash_map::Entry::Vacant(e) => {
+                                                                                                               e.insert(vec![claimable_htlc]);
+                                                                                                               new_events.push(events::Event::PaymentReceived {
+                                                                                                                       payment_hash,
+                                                                                                                       amt: amt_to_forward,
+                                                                                                                       purpose: events::PaymentPurpose::SpontaneousPayment(preimage),
+                                                                                                               });
+                                                                                                       },
+                                                                                                       hash_map::Entry::Occupied(_) => {
+                                                                                                               log_trace!(self.logger, "Failing new keysend HTLC with payment_hash {} for a duplicative payment hash", log_bytes!(payment_hash.0));
+                                                                                                               fail_htlc!(claimable_htlc);
+                                                                                                       }
+                                                                                               }
+                                                                                       }
+                                                                               }
                                                                        },
                                                                        hash_map::Entry::Occupied(inbound_payment) => {
+                                                                               let payment_data =
+                                                                                       if let OnionPayload::Invoice(ref data) = claimable_htlc.onion_payload {
+                                                                                               data.clone()
+                                                                                       } else {
+                                                                                               log_trace!(self.logger, "Failing new keysend HTLC with payment_hash {} because we already have an inbound payment with the same payment hash", log_bytes!(payment_hash.0));
+                                                                                               fail_htlc!(claimable_htlc);
+                                                                                               continue
+                                                                                       };
                                                                                if inbound_payment.get().payment_secret != payment_data.payment_secret {
                                                                                        log_trace!(self.logger, "Failing new HTLC with payment_hash {} as it didn't match our expected payment secret.", log_bytes!(payment_hash.0));
                                                                                        fail_htlc!(claimable_htlc);
@@ -2059,15 +2396,27 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                                                                        let mut total_value = 0;
                                                                                        let htlcs = channel_state.claimable_htlcs.entry(payment_hash)
                                                                                                .or_insert(Vec::new());
+                                                                                       if htlcs.len() == 1 {
+                                                                                               if let OnionPayload::Spontaneous(_) = htlcs[0].onion_payload {
+                                                                                                       log_trace!(self.logger, "Failing new HTLC with payment_hash {} as we already had an existing keysend HTLC with the same payment hash", log_bytes!(payment_hash.0));
+                                                                                                       fail_htlc!(claimable_htlc);
+                                                                                                       continue
+                                                                                               }
+                                                                                       }
                                                                                        htlcs.push(claimable_htlc);
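+                                                                                       // Sum up the MPP parts; any part disagreeing on total_msat poisons
+                                                                                       // the sum with MAX_VALUE_MSAT so the whole set is failed below.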
                                                                                        for htlc in htlcs.iter() {
                                                                                                total_value += htlc.value;
-                                                                                               if htlc.payment_data.total_msat != payment_data.total_msat {
-                                                                                                       log_trace!(self.logger, "Failing HTLCs with payment_hash {} as the HTLCs had inconsistent total values (eg {} and {})",
-                                                                                                               log_bytes!(payment_hash.0), payment_data.total_msat, htlc.payment_data.total_msat);
-                                                                                                       total_value = msgs::MAX_VALUE_MSAT;
+                                                                                               match &htlc.onion_payload {
+                                                                                                       OnionPayload::Invoice(htlc_payment_data) => {
+                                                                                                               if htlc_payment_data.total_msat != payment_data.total_msat {
+                                                                                                                       log_trace!(self.logger, "Failing HTLCs with payment_hash {} as the HTLCs had inconsistent total values (eg {} and {})",
+                                                                                                                                                                log_bytes!(payment_hash.0), payment_data.total_msat, htlc_payment_data.total_msat);
+                                                                                                                       total_value = msgs::MAX_VALUE_MSAT;
+                                                                                                               }
+                                                                                                               if total_value >= msgs::MAX_VALUE_MSAT { break; }
+                                                                                                       },
+                                                                                                       _ => unreachable!(),
                                                                                                }
-                                                                                               if total_value >= msgs::MAX_VALUE_MSAT { break; }
                                                                                        }
                                                                                        if total_value >= msgs::MAX_VALUE_MSAT || total_value > payment_data.total_msat {
                                                                                                log_trace!(self.logger, "Failing HTLCs with payment_hash {} as the total value {} ran over expected value {} (or HTLCs were inconsistent)",
@@ -2078,10 +2427,12 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                                                                        } else if total_value == payment_data.total_msat {
                                                                                                new_events.push(events::Event::PaymentReceived {
                                                                                                        payment_hash,
-                                                                                                       payment_preimage: inbound_payment.get().payment_preimage,
-                                                                                                       payment_secret: payment_data.payment_secret,
+                                                                                                       purpose: events::PaymentPurpose::InvoicePayment {
+                                                                                                               payment_preimage: inbound_payment.get().payment_preimage,
+                                                                                                               payment_secret: payment_data.payment_secret,
+                                                                                                               user_payment_id: inbound_payment.get().user_payment_id,
+                                                                                                       },
                                                                                                        amt: total_value,
-                                                                                                       user_payment_id: inbound_payment.get().user_payment_id,
                                                                                                });
                                                                                                // Only ever generate at most one PaymentReceived
                                                                                                // per registered payment_hash, even if it isn't
@@ -2096,9 +2447,6 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                                                        },
                                                                };
                                                        },
-                                                       HTLCForwardInfo::AddHTLC { .. } => {
-                                                               panic!("short_channel_id == 0 should imply any pending_forward entries are of type Receive");
-                                                       },
                                                        HTLCForwardInfo::FailHTLC { .. } => {
                                                                panic!("Got pending fail of our own HTLC");
                                                        }
@@ -2147,7 +2495,8 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
        }
 
        #[cfg(any(test, feature = "_test_utils"))]
-       pub(crate) fn test_process_background_events(&self) {
+       /// Process background events, for functional testing
+       pub fn test_process_background_events(&self) {
                self.process_background_events();
        }
 
@@ -2172,7 +2521,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                        ChannelUpdateStatus::DisabledStaged if chan.is_live() => chan.set_channel_update_status(ChannelUpdateStatus::Enabled),
                                        ChannelUpdateStatus::EnabledStaged if !chan.is_live() => chan.set_channel_update_status(ChannelUpdateStatus::Disabled),
                                        ChannelUpdateStatus::DisabledStaged if !chan.is_live() => {
-                                               if let Ok(update) = self.get_channel_update(&chan) {
+                                               if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
                                                        channel_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
                                                                msg: update
                                                        });
@@ -2181,7 +2530,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                                chan.set_channel_update_status(ChannelUpdateStatus::Disabled);
                                        },
                                        ChannelUpdateStatus::EnabledStaged if chan.is_live() => {
-                                               if let Ok(update) = self.get_channel_update(&chan) {
+                                               if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
                                                        channel_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
                                                                msg: update
                                                        });
@@ -2231,7 +2580,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                        let (failure_code, onion_failure_data) =
                                                match self.channel_state.lock().unwrap().by_id.entry(channel_id) {
                                                        hash_map::Entry::Occupied(chan_entry) => {
-                                                               if let Ok(upd) = self.get_channel_update(&chan_entry.get()) {
+                                                               if let Ok(upd) = self.get_channel_update_for_unicast(&chan_entry.get()) {
                                                                        (0x1000|7, upd.encode_with_len())
                                                                } else {
                                                                        (0x4000|10, Vec::new())
@@ -2476,43 +2825,48 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                };
 
                if let hash_map::Entry::Occupied(mut chan) = channel_state.by_id.entry(chan_id) {
-                       let was_frozen_for_monitor = chan.get().is_awaiting_monitor_update();
                        match chan.get_mut().get_update_fulfill_htlc_and_commit(prev_hop.htlc_id, payment_preimage, &self.logger) {
-                               Ok((msgs, monitor_option)) => {
-                                       if let Some(monitor_update) = monitor_option {
+                               Ok(msgs_monitor_option) => {
+                                       if let UpdateFulfillCommitFetch::NewClaim { msgs, monitor_update } = msgs_monitor_option {
                                                if let Err(e) = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), monitor_update) {
-                                                       if was_frozen_for_monitor {
-                                                               assert!(msgs.is_none());
-                                                       } else {
-                                                               return Err(Some((chan.get().get_counterparty_node_id(), handle_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::CommitmentFirst, false, msgs.is_some()).unwrap_err())));
-                                                       }
+                                                       log_given_level!(self.logger, if e == ChannelMonitorUpdateErr::PermanentFailure { Level::Error } else { Level::Debug },
+                                                               "Failed to update channel monitor with preimage {:?}: {:?}",
+                                                               payment_preimage, e);
+                                                       return Err(Some((
+                                                               chan.get().get_counterparty_node_id(),
+                                                               handle_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::CommitmentFirst, false, msgs.is_some()).unwrap_err(),
+                                                       )));
+                                               }
+                                               if let Some((msg, commitment_signed)) = msgs {
+                                                       log_debug!(self.logger, "Claiming funds for HTLC with preimage {} resulted in a commitment_signed for channel {}",
+                                                               log_bytes!(payment_preimage.0), log_bytes!(chan.get().channel_id()));
+                                                       channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
+                                                               node_id: chan.get().get_counterparty_node_id(),
+                                                               updates: msgs::CommitmentUpdate {
+                                                                       update_add_htlcs: Vec::new(),
+                                                                       update_fulfill_htlcs: vec![msg],
+                                                                       update_fail_htlcs: Vec::new(),
+                                                                       update_fail_malformed_htlcs: Vec::new(),
+                                                                       update_fee: None,
+                                                                       commitment_signed,
+                                                               }
+                                                       });
                                                }
-                                       }
-                                       if let Some((msg, commitment_signed)) = msgs {
-                                               channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
-                                                       node_id: chan.get().get_counterparty_node_id(),
-                                                       updates: msgs::CommitmentUpdate {
-                                                               update_add_htlcs: Vec::new(),
-                                                               update_fulfill_htlcs: vec![msg],
-                                                               update_fail_htlcs: Vec::new(),
-                                                               update_fail_malformed_htlcs: Vec::new(),
-                                                               update_fee: None,
-                                                               commitment_signed,
-                                                       }
-                                               });
                                        }
                                        return Ok(())
                                },
-                               Err(e) => {
-                                       // TODO: Do something with e?
-                                       // This should only occur if we are claiming an HTLC at the same time as the
-                                       // HTLC is being failed (eg because a block is being connected and this caused
-                                       // an HTLC to time out). This should, of course, only occur if the user is the
-                                       // one doing the claiming (as it being a part of a peer claim would imply we're
-                                       // about to lose funds) and only if the lock in claim_funds was dropped as a
-                                       // previous HTLC was failed (thus not for an MPP payment).
-                                       debug_assert!(false, "This shouldn't be reachable except in absurdly rare cases between monitor updates and HTLC timeouts: {:?}", e);
-                                       return Err(None)
+                               Err((e, monitor_update)) => {
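+                                       // Even though the channel is about to be closed, we still apply the
+                                       // monitor update so the preimage has the best chance of being persisted
+                                       // before we force-close and lose the off-chain state.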
+                                       if let Err(e) = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), monitor_update) {
+                                               log_given_level!(self.logger, if e == ChannelMonitorUpdateErr::PermanentFailure { Level::Error } else { Level::Info },
+                                                       "Failed to update channel monitor with preimage {:?} immediately prior to force-close: {:?}",
+                                                       payment_preimage, e);
+                                       }
+                                       let counterparty_node_id = chan.get().get_counterparty_node_id();
+                                       let (drop, res) = convert_chan_err!(self, e, channel_state.short_to_id, chan.get_mut(), &chan_id);
+                                       if drop {
+                                               chan.remove_entry();
+                                       }
+                                       return Err(Some((counterparty_node_id, res)));
                                },
                        }
                } else { unreachable!(); }
@@ -2593,86 +2947,40 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
        pub fn channel_monitor_updated(&self, funding_txo: &OutPoint, highest_applied_update_id: u64) {
                let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
 
-               let mut close_results = Vec::new();
-               let mut htlc_forwards = Vec::new();
-               let mut htlc_failures = Vec::new();
-               let mut pending_events = Vec::new();
-
-               {
+               let chan_restoration_res;
+               let mut pending_failures = {
                        let mut channel_lock = self.channel_state.lock().unwrap();
                        let channel_state = &mut *channel_lock;
-                       let short_to_id = &mut channel_state.short_to_id;
-                       let pending_msg_events = &mut channel_state.pending_msg_events;
-                       let channel = match channel_state.by_id.get_mut(&funding_txo.to_channel_id()) {
-                               Some(chan) => chan,
-                               None => return,
+                       let mut channel = match channel_state.by_id.entry(funding_txo.to_channel_id()) {
+                               hash_map::Entry::Occupied(chan) => chan,
+                               hash_map::Entry::Vacant(_) => return,
                        };
-                       if !channel.is_awaiting_monitor_update() || channel.get_latest_monitor_update_id() != highest_applied_update_id {
+                       if !channel.get().is_awaiting_monitor_update() || channel.get().get_latest_monitor_update_id() != highest_applied_update_id {
                                return;
                        }
 
-                       let (raa, commitment_update, order, pending_forwards, mut pending_failures, funding_broadcastable, funding_locked) = channel.monitor_updating_restored(&self.logger);
-                       if !pending_forwards.is_empty() {
-                               htlc_forwards.push((channel.get_short_channel_id().expect("We can't have pending forwards before funding confirmation"), funding_txo.clone(), pending_forwards));
+                       let (raa, commitment_update, order, pending_forwards, pending_failures, funding_broadcastable, funding_locked) = channel.get_mut().monitor_updating_restored(&self.logger);
+                       let channel_update = if funding_locked.is_some() && channel.get().is_usable() && !channel.get().should_announce() {
+                               // We only send a channel_update in the case where we are just now sending a
+                               // funding_locked and the channel is in a usable state. Further, we rely on the
+                               // normal announcement_signatures process to send a channel_update for public
+                               // channels, only generating a unicast channel_update if this is a private channel.
+                               Some(events::MessageSendEvent::SendChannelUpdate {
+                                       node_id: channel.get().get_counterparty_node_id(),
+                                       msg: self.get_channel_update_for_unicast(channel.get()).unwrap(),
+                               })
+                       } else { None };
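+                       // handle_chan_restoration_locked! queues the RAA/commitment messages in the
+                       // correct order (replacing the inline handle_cs!/handle_raa! logic removed
+                       // below) and hands back anything, like a funding broadcast, that must be
+                       // processed after the channel_state lock is released.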
+                       chan_restoration_res = handle_chan_restoration_locked!(self, channel_lock, channel_state, channel, raa, commitment_update, order, None, pending_forwards, funding_broadcastable, funding_locked);
+                       if let Some(upd) = channel_update {
+                               channel_state.pending_msg_events.push(upd);
                        }
-                       htlc_failures.append(&mut pending_failures);
-
-                       macro_rules! handle_cs { () => {
-                               if let Some(update) = commitment_update {
-                                       pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
-                                               node_id: channel.get_counterparty_node_id(),
-                                               updates: update,
-                                       });
-                               }
-                       } }
-                       macro_rules! handle_raa { () => {
-                               if let Some(revoke_and_ack) = raa {
-                                       pending_msg_events.push(events::MessageSendEvent::SendRevokeAndACK {
-                                               node_id: channel.get_counterparty_node_id(),
-                                               msg: revoke_and_ack,
-                                       });
-                               }
-                       } }
-                       match order {
-                               RAACommitmentOrder::CommitmentFirst => {
-                                       handle_cs!();
-                                       handle_raa!();
-                               },
-                               RAACommitmentOrder::RevokeAndACKFirst => {
-                                       handle_raa!();
-                                       handle_cs!();
-                               },
-                       }
-                       if let Some(tx) = funding_broadcastable {
-                               log_info!(self.logger, "Broadcasting funding transaction with txid {}", tx.txid());
-                               self.tx_broadcaster.broadcast_transaction(&tx);
-                       }
-                       if let Some(msg) = funding_locked {
-                               pending_msg_events.push(events::MessageSendEvent::SendFundingLocked {
-                                       node_id: channel.get_counterparty_node_id(),
-                                       msg,
-                               });
-                               if let Some(announcement_sigs) = self.get_announcement_sigs(channel) {
-                                       pending_msg_events.push(events::MessageSendEvent::SendAnnouncementSignatures {
-                                               node_id: channel.get_counterparty_node_id(),
-                                               msg: announcement_sigs,
-                                       });
-                               }
-                               short_to_id.insert(channel.get_short_channel_id().unwrap(), channel.channel_id());
-                       }
-               }
-
-               self.pending_events.lock().unwrap().append(&mut pending_events);
-
-               for failure in htlc_failures.drain(..) {
-                       self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), failure.0, &failure.1, failure.2);
-               }
-               self.forward_htlcs(&mut htlc_forwards[..]);
-
-               for res in close_results.drain(..) {
-                       self.finish_force_close_channel(res);
-               }
-       }
+                       pending_failures
+               };
+               post_handle_chan_restoration!(self, chan_restoration_res);
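+               // Failures are failed back only after the lock scope above has ended, since
+               // fail_htlc_backwards_internal re-acquires the channel_state lock per call.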
+               for failure in pending_failures.drain(..) {
+                       self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), failure.0, &failure.1, failure.2);
+               }
+       }
 
        fn internal_open_channel(&self, counterparty_node_id: &PublicKey, their_features: InitFeatures, msg: &msgs::OpenChannel) -> Result<(), MsgHandleErrInternal> {
                if msg.chain_hash != self.genesis_hash {
@@ -2813,7 +3121,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                if chan.get().get_counterparty_node_id() != *counterparty_node_id {
                                        return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id));
                                }
-                               try_chan_entry!(self, chan.get_mut().funding_locked(&msg), channel_state, chan);
+                               try_chan_entry!(self, chan.get_mut().funding_locked(&msg, &self.logger), channel_state, chan);
                                if let Some(announcement_sigs) = self.get_announcement_sigs(chan.get()) {
                                        log_trace!(self.logger, "Sending announcement_signatures for {} in response to funding_locked", log_bytes!(chan.get().channel_id()));
                                        // If we see locking block before receiving remote funding_locked, we broadcast our
@@ -2829,6 +3137,11 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                                node_id: counterparty_node_id.clone(),
                                                msg: announcement_sigs,
                                        });
+                               } else if chan.get().is_usable() {
+                                       channel_state.pending_msg_events.push(events::MessageSendEvent::SendChannelUpdate {
+                                               node_id: counterparty_node_id.clone(),
+                                               msg: self.get_channel_update_for_unicast(chan.get()).unwrap(),
+                                       });
                                }
                                Ok(())
                        },
@@ -2873,7 +3186,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                        self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_source.0, &htlc_source.1, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() });
                }
                if let Some(chan) = chan_option {
-                       if let Ok(update) = self.get_channel_update(&chan) {
+                       if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
                                let mut channel_state = self.channel_state.lock().unwrap();
                                channel_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
                                        msg: update
@@ -2919,7 +3232,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                        self.tx_broadcaster.broadcast_transaction(&broadcast_tx);
                }
                if let Some(chan) = chan_option {
-                       if let Ok(update) = self.get_channel_update(&chan) {
+                       if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
                                let mut channel_state = self.channel_state.lock().unwrap();
                                channel_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
                                        msg: update
@@ -2957,7 +3270,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                        // want to reject the new HTLC and fail it backwards instead of forwarding.
                                        match pending_forward_info {
                                                PendingHTLCStatus::Forward(PendingHTLCInfo { ref incoming_shared_secret, .. }) => {
-                                                       let reason = if let Ok(upd) = self.get_channel_update(chan) {
+                                                       let reason = if let Ok(upd) = self.get_channel_update_for_unicast(chan) {
                                                                onion_utils::build_first_hop_failure_packet(incoming_shared_secret, error_code, &{
                                                                        let mut res = Vec::with_capacity(8 + 128);
                                                                        // TODO: underspecified, follow https://github.com/lightningnetwork/lightning-rfc/issues/791
@@ -3111,6 +3424,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                        match channel_state.forward_htlcs.entry(match forward_info.routing {
                                                        PendingHTLCRouting::Forward { short_channel_id, .. } => short_channel_id,
                                                        PendingHTLCRouting::Receive { .. } => 0,
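+                                                       // Keysend (spontaneous) payments also terminate here, so they
+                                                       // share the catch-all short_channel_id of 0 with Receive.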
+                                                       PendingHTLCRouting::ReceiveKeysend { .. } => 0,
                                        }) {
                                                hash_map::Entry::Occupied(mut entry) => {
                                                        entry.get_mut().push(HTLCForwardInfo::AddHTLC { prev_short_channel_id, prev_funding_outpoint,
@@ -3217,40 +3531,11 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                        return Err(MsgHandleErrInternal::from_no_close(LightningError{err: "Got an announcement_signatures before we were ready for it".to_owned(), action: msgs::ErrorAction::IgnoreError}));
                                }
 
-                               let our_node_id = self.get_our_node_id();
-                               let (announcement, our_bitcoin_sig) =
-                                       try_chan_entry!(self, chan.get_mut().get_channel_announcement(our_node_id.clone(), self.genesis_hash.clone()), channel_state, chan);
-
-                               let were_node_one = announcement.node_id_1 == our_node_id;
-                               let msghash = hash_to_message!(&Sha256dHash::hash(&announcement.encode()[..])[..]);
-                               {
-                                       let their_node_key = if were_node_one { &announcement.node_id_2 } else { &announcement.node_id_1 };
-                                       let their_bitcoin_key = if were_node_one { &announcement.bitcoin_key_2 } else { &announcement.bitcoin_key_1 };
-                                       match (self.secp_ctx.verify(&msghash, &msg.node_signature, their_node_key),
-                                                  self.secp_ctx.verify(&msghash, &msg.bitcoin_signature, their_bitcoin_key)) {
-                                               (Err(e), _) => {
-                                                       let chan_err: ChannelError = ChannelError::Close(format!("Bad announcement_signatures. Failed to verify node_signature: {:?}. Maybe using different node_secret for transport and routing msg? UnsignedChannelAnnouncement used for verification is {:?}. their_node_key is {:?}", e, &announcement, their_node_key));
-                                                       try_chan_entry!(self, Err(chan_err), channel_state, chan);
-                                               },
-                                               (_, Err(e)) => {
-                                                       let chan_err: ChannelError = ChannelError::Close(format!("Bad announcement_signatures. Failed to verify bitcoin_signature: {:?}. UnsignedChannelAnnouncement used for verification is {:?}. their_bitcoin_key is ({:?})", e, &announcement, their_bitcoin_key));
-                                                       try_chan_entry!(self, Err(chan_err), channel_state, chan);
-                                               },
-                                               _ => {}
-                                       }
-                               }
-
-                               let our_node_sig = self.secp_ctx.sign(&msghash, &self.our_network_key);
-
                                channel_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelAnnouncement {
-                                       msg: msgs::ChannelAnnouncement {
-                                               node_signature_1: if were_node_one { our_node_sig } else { msg.node_signature },
-                                               node_signature_2: if were_node_one { msg.node_signature } else { our_node_sig },
-                                               bitcoin_signature_1: if were_node_one { our_bitcoin_sig } else { msg.bitcoin_signature },
-                                               bitcoin_signature_2: if were_node_one { msg.bitcoin_signature } else { our_bitcoin_sig },
-                                               contents: announcement,
-                                       },
-                                       update_msg: self.get_channel_update(chan.get()).unwrap(), // can only fail if we're not in a ready state
+                                       msg: try_chan_entry!(self, chan.get_mut().announcement_signatures(&self.our_network_key, self.get_our_node_id(), self.genesis_hash.clone(), msg), channel_state, chan),
+                                       // Note that announcement_signatures fails if the channel cannot be announced,
+                                       // so get_channel_update_for_broadcast will never fail by the time we get here.
+                                       update_msg: self.get_channel_update_for_broadcast(chan.get()).unwrap(),
                                });
                        },
                        hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id))
@@ -3258,101 +3543,90 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                Ok(())
        }
 
-       fn internal_channel_update(&self, counterparty_node_id: &PublicKey, msg: &msgs::ChannelUpdate) -> Result<(), MsgHandleErrInternal> {
+       /// Returns DoPersist if anything changed, otherwise either SkipPersist or an Err.
+       fn internal_channel_update(&self, counterparty_node_id: &PublicKey, msg: &msgs::ChannelUpdate) -> Result<NotifyOption, MsgHandleErrInternal> {
                let mut channel_state_lock = self.channel_state.lock().unwrap();
                let channel_state = &mut *channel_state_lock;
                let chan_id = match channel_state.short_to_id.get(&msg.contents.short_channel_id) {
                        Some(chan_id) => chan_id.clone(),
                        None => {
                                // It's not a local channel
-                               return Ok(())
+                               return Ok(NotifyOption::SkipPersist)
                        }
                };
                match channel_state.by_id.entry(chan_id) {
                        hash_map::Entry::Occupied(mut chan) => {
                                if chan.get().get_counterparty_node_id() != *counterparty_node_id {
-                                       // TODO: see issue #153, need a consistent behavior on obnoxious behavior from random node
-                                       return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), chan_id));
+                                       if chan.get().should_announce() {
+                                               // If the announcement is about a channel of ours which is public, some
+                                               // other peer may simply be forwarding all its gossip to us. Don't provide
+                                               // a scary-looking error message and return Ok instead.
+                                               return Ok(NotifyOption::SkipPersist);
+                                       }
+                                       return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a channel_update for a channel from the wrong node - it shouldn't know about our private channels!".to_owned(), chan_id));
+                               }
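+                               // Per BOLT 7, node_1 is the side with the lexicographically lesser pubkey
+                               // and the low bit of the flags field gives the update's direction, so an
+                               // update for our own direction carries no new information and is skipped.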
+                               let were_node_one = self.get_our_node_id().serialize()[..] < chan.get().get_counterparty_node_id().serialize()[..];
+                               let msg_from_node_one = msg.contents.flags & 1 == 0;
+                               if were_node_one == msg_from_node_one {
+                                       return Ok(NotifyOption::SkipPersist);
+                               } else {
+                                       try_chan_entry!(self, chan.get_mut().channel_update(&msg), channel_state, chan);
                                }
-                               try_chan_entry!(self, chan.get_mut().channel_update(&msg), channel_state, chan);
                        },
                        hash_map::Entry::Vacant(_) => unreachable!()
                }
-               Ok(())
+               Ok(NotifyOption::DoPersist)
        }
 
        fn internal_channel_reestablish(&self, counterparty_node_id: &PublicKey, msg: &msgs::ChannelReestablish) -> Result<(), MsgHandleErrInternal> {
-               let mut channel_state_lock = self.channel_state.lock().unwrap();
-               let channel_state = &mut *channel_state_lock;
+               let chan_restoration_res;
+               let (htlcs_failed_forward, need_lnd_workaround) = {
+                       let mut channel_state_lock = self.channel_state.lock().unwrap();
+                       let channel_state = &mut *channel_state_lock;
 
-               match channel_state.by_id.entry(msg.channel_id) {
-                       hash_map::Entry::Occupied(mut chan) => {
-                               if chan.get().get_counterparty_node_id() != *counterparty_node_id {
-                                       return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id));
-                               }
-                               // Currently, we expect all holding cell update_adds to be dropped on peer
-                               // disconnect, so Channel's reestablish will never hand us any holding cell
-                               // freed HTLCs to fail backwards. If in the future we no longer drop pending
-                               // add-HTLCs on disconnect, we may be handed HTLCs to fail backwards here.
-                               let (funding_locked, revoke_and_ack, commitment_update, monitor_update_opt, mut order, shutdown) =
-                                       try_chan_entry!(self, chan.get_mut().channel_reestablish(msg, &self.logger), channel_state, chan);
-                               if let Some(monitor_update) = monitor_update_opt {
-                                       if let Err(e) = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), monitor_update) {
-                                               // channel_reestablish doesn't guarantee the order it returns is sensical
-                                               // for the messages it returns, but if we're setting what messages to
-                                               // re-transmit on monitor update success, we need to make sure it is sane.
-                                               if revoke_and_ack.is_none() {
-                                                       order = RAACommitmentOrder::CommitmentFirst;
-                                               }
-                                               if commitment_update.is_none() {
-                                                       order = RAACommitmentOrder::RevokeAndACKFirst;
-                                               }
-                                               return_monitor_err!(self, e, channel_state, chan, order, revoke_and_ack.is_some(), commitment_update.is_some());
-                                               //TODO: Resend the funding_locked if needed once we get the monitor running again
+                       match channel_state.by_id.entry(msg.channel_id) {
+                               hash_map::Entry::Occupied(mut chan) => {
+                                       if chan.get().get_counterparty_node_id() != *counterparty_node_id {
+                                               return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id));
                                        }
-                               }
-                               if let Some(msg) = funding_locked {
-                                       channel_state.pending_msg_events.push(events::MessageSendEvent::SendFundingLocked {
-                                               node_id: counterparty_node_id.clone(),
-                                               msg
-                                       });
-                               }
-                               macro_rules! send_raa { () => {
-                                       if let Some(msg) = revoke_and_ack {
-                                               channel_state.pending_msg_events.push(events::MessageSendEvent::SendRevokeAndACK {
+                                       // Currently, we expect all holding cell update_adds to be dropped on peer
+                                       // disconnect, so Channel's reestablish will never hand us any holding cell
+                                       // freed HTLCs to fail backwards. If in the future we no longer drop pending
+                                       // add-HTLCs on disconnect, we may be handed HTLCs to fail backwards here.
+                                       let (funding_locked, revoke_and_ack, commitment_update, monitor_update_opt, order, htlcs_failed_forward, shutdown) =
+                                               try_chan_entry!(self, chan.get_mut().channel_reestablish(msg, &self.logger), channel_state, chan);
+                                       let mut channel_update = None;
+                                       if let Some(msg) = shutdown {
+                                               channel_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown {
                                                        node_id: counterparty_node_id.clone(),
-                                                       msg
+                                                       msg,
                                                });
-                                       }
-                               } }
-                               macro_rules! send_cu { () => {
-                                       if let Some(updates) = commitment_update {
-                                               channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
-                                                       node_id: counterparty_node_id.clone(),
-                                                       updates
+                                       } else if chan.get().is_usable() {
+                                               // If the channel is in a usable state (ie the channel is not being shut
+                                               // down), send a unicast channel_update to our counterparty to make sure
+                                               // they have the latest channel parameters.
+                                               channel_update = Some(events::MessageSendEvent::SendChannelUpdate {
+                                                       node_id: chan.get().get_counterparty_node_id(),
+                                                       msg: self.get_channel_update_for_unicast(chan.get()).unwrap(),
                                                });
                                        }
-                               } }
-                               match order {
-                                       RAACommitmentOrder::RevokeAndACKFirst => {
-                                               send_raa!();
-                                               send_cu!();
-                                       },
-                                       RAACommitmentOrder::CommitmentFirst => {
-                                               send_cu!();
-                                               send_raa!();
-                                       },
-                               }
-                               if let Some(msg) = shutdown {
-                                       channel_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown {
-                                               node_id: counterparty_node_id.clone(),
-                                               msg,
-                                       });
-                               }
-                               Ok(())
-                       },
-                       hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id))
+                                       let need_lnd_workaround = chan.get_mut().workaround_lnd_bug_4006.take();
+                                       chan_restoration_res = handle_chan_restoration_locked!(self, channel_state_lock, channel_state, chan, revoke_and_ack, commitment_update, order, monitor_update_opt, Vec::new(), None, funding_locked);
+                                       if let Some(upd) = channel_update {
+                                               channel_state.pending_msg_events.push(upd);
+                                       }
+                                       (htlcs_failed_forward, need_lnd_workaround)
+                               },
+                               hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id))
+                       }
+               };
+               post_handle_chan_restoration!(self, chan_restoration_res);
+               self.fail_holding_cell_htlcs(htlcs_failed_forward, msg.channel_id);
+
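+               // Works around an lnd bug (lnd#4006, hence the field name) where funding_locked
+               // may arrive before channel_reestablish; any funding_locked the Channel stashed
+               // is replayed now that the reestablish itself has been handled.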
+               if let Some(funding_locked_msg) = need_lnd_workaround {
+                       self.internal_funding_locked(counterparty_node_id, &funding_locked_msg)?;
                }
+               Ok(())
        }
 
        /// Begin Update fee process. Allowed only on an outbound channel.
@@ -3387,6 +3661,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                                if let Err(_e) = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), monitor_update) {
                                                        unimplemented!();
                                                }
+                                               log_debug!(self.logger, "Updating fee resulted in a commitment_signed for channel {}", log_bytes!(chan.get().channel_id()));
                                                channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
                                                        node_id: chan.get().get_counterparty_node_id(),
                                                        updates: msgs::CommitmentUpdate {
@@ -3410,52 +3685,115 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                }
        }
 
-       /// Process pending events from the `chain::Watch`.
-       fn process_pending_monitor_events(&self) {
+       /// Process pending events from the `chain::Watch`, returning whether any events were processed.
+       fn process_pending_monitor_events(&self) -> bool {
                let mut failed_channels = Vec::new();
+               let pending_monitor_events = self.chain_monitor.release_pending_monitor_events();
+               let has_pending_monitor_events = !pending_monitor_events.is_empty();
+               for monitor_event in pending_monitor_events {
+                       match monitor_event {
+                               MonitorEvent::HTLCEvent(htlc_update) => {
+                                       if let Some(preimage) = htlc_update.payment_preimage {
+                                               log_trace!(self.logger, "Claiming HTLC with preimage {} from our monitor", log_bytes!(preimage.0));
+                                               self.claim_funds_internal(self.channel_state.lock().unwrap(), htlc_update.source, preimage);
+                                       } else {
+                                               log_trace!(self.logger, "Failing HTLC with hash {} from our monitor", log_bytes!(htlc_update.payment_hash.0));
+                                               self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_update.source, &htlc_update.payment_hash, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() });
+                                       }
+                               },
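+                               // The monitor has gone on-chain (or seen the counterparty do so), so the
+                               // off-chain state is stale; drop the channel and finish the force-close.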
+                               MonitorEvent::CommitmentTxBroadcasted(funding_outpoint) => {
+                                       let mut channel_lock = self.channel_state.lock().unwrap();
+                                       let channel_state = &mut *channel_lock;
+                                       let by_id = &mut channel_state.by_id;
+                                       let short_to_id = &mut channel_state.short_to_id;
+                                       let pending_msg_events = &mut channel_state.pending_msg_events;
+                                       if let Some(mut chan) = by_id.remove(&funding_outpoint.to_channel_id()) {
+                                               if let Some(short_id) = chan.get_short_channel_id() {
+                                                       short_to_id.remove(&short_id);
+                                               }
+                                               failed_channels.push(chan.force_shutdown(false));
+                                               if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
+                                                       pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
+                                                               msg: update
+                                                       });
+                                               }
+                                               pending_msg_events.push(events::MessageSendEvent::HandleError {
+                                                       node_id: chan.get_counterparty_node_id(),
+                                                       action: msgs::ErrorAction::SendErrorMessage {
+                                                               msg: msgs::ErrorMessage { channel_id: chan.channel_id(), data: "Channel force-closed".to_owned() }
+                                                       },
+                                               });
+                                       }
+                               },
+                       }
+               }
+
+               for failure in failed_channels.drain(..) {
+                       self.finish_force_close_channel(failure);
+               }
+
+               has_pending_monitor_events
+       }
+
+       /// Check the holding cell in each channel and free any pending HTLCs in them if possible.
+       /// Returns whether any updates occurred, such as pending HTLCs being freed or a monitor
+       /// update being applied.
+       ///
+       /// This should only apply to HTLCs which were added to the holding cell because we were
+       /// waiting on a monitor update to finish. In that case, we don't want to free the holding cell
+       /// directly in `channel_monitor_updated` as it may introduce deadlocks calling back into user
+       /// code to inform them of a channel monitor update.
+       fn check_free_holding_cells(&self) -> bool {
+               let mut has_monitor_update = false;
+               let mut failed_htlcs = Vec::new();
+               let mut handle_errors = Vec::new();
                {
-                       for monitor_event in self.chain_monitor.release_pending_monitor_events() {
-                               match monitor_event {
-                                       MonitorEvent::HTLCEvent(htlc_update) => {
-                                               if let Some(preimage) = htlc_update.payment_preimage {
-                                                       log_trace!(self.logger, "Claiming HTLC with preimage {} from our monitor", log_bytes!(preimage.0));
-                                                       self.claim_funds_internal(self.channel_state.lock().unwrap(), htlc_update.source, preimage);
-                                               } else {
-                                                       log_trace!(self.logger, "Failing HTLC with hash {} from our monitor", log_bytes!(htlc_update.payment_hash.0));
-                                                       self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_update.source, &htlc_update.payment_hash, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() });
+                       let mut channel_state_lock = self.channel_state.lock().unwrap();
+                       let channel_state = &mut *channel_state_lock;
+                       let by_id = &mut channel_state.by_id;
+                       let short_to_id = &mut channel_state.short_to_id;
+                       let pending_msg_events = &mut channel_state.pending_msg_events;
+
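+                       // retain() lets us free holding cells and, in the same pass, drop any
+                       // channel whose error handling says it must be closed.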
+                       by_id.retain(|channel_id, chan| {
+                               match chan.maybe_free_holding_cell_htlcs(&self.logger) {
+                                       Ok((commitment_opt, holding_cell_failed_htlcs)) => {
+                                               if !holding_cell_failed_htlcs.is_empty() {
+                                                       failed_htlcs.push((holding_cell_failed_htlcs, *channel_id));
                                                }
-                                       },
-                                       MonitorEvent::CommitmentTxBroadcasted(funding_outpoint) => {
-                                               let mut channel_lock = self.channel_state.lock().unwrap();
-                                               let channel_state = &mut *channel_lock;
-                                               let by_id = &mut channel_state.by_id;
-                                               let short_to_id = &mut channel_state.short_to_id;
-                                               let pending_msg_events = &mut channel_state.pending_msg_events;
-                                               if let Some(mut chan) = by_id.remove(&funding_outpoint.to_channel_id()) {
-                                                       if let Some(short_id) = chan.get_short_channel_id() {
-                                                               short_to_id.remove(&short_id);
-                                                       }
-                                                       failed_channels.push(chan.force_shutdown(false));
-                                                       if let Ok(update) = self.get_channel_update(&chan) {
-                                                               pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
-                                                                       msg: update
+                                               if let Some((commitment_update, monitor_update)) = commitment_opt {
+                                                       if let Err(e) = self.chain_monitor.update_channel(chan.get_funding_txo().unwrap(), monitor_update) {
+                                                               has_monitor_update = true;
+                                                               let (res, close_channel) = handle_monitor_err!(self, e, short_to_id, chan, RAACommitmentOrder::CommitmentFirst, false, true, Vec::new(), Vec::new(), channel_id);
+                                                               handle_errors.push((chan.get_counterparty_node_id(), res));
+                                                               if close_channel { return false; }
+                                                       } else {
+                                                               pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
+                                                                       node_id: chan.get_counterparty_node_id(),
+                                                                       updates: commitment_update,
                                                                });
                                                        }
-                                                       pending_msg_events.push(events::MessageSendEvent::HandleError {
-                                                               node_id: chan.get_counterparty_node_id(),
-                                                               action: msgs::ErrorAction::SendErrorMessage {
-                                                                       msg: msgs::ErrorMessage { channel_id: chan.channel_id(), data: "Channel force-closed".to_owned() }
-                                                               },
-                                                       });
                                                }
+                                               true
                                        },
+                                       Err(e) => {
+                                               let (close_channel, res) = convert_chan_err!(self, e, short_to_id, chan, channel_id);
+                                               handle_errors.push((chan.get_counterparty_node_id(), Err(res)));
+                                               !close_channel
+                                       }
                                }
-                       }
+                       });
                }
 
-               for failure in failed_channels.drain(..) {
-                       self.finish_force_close_channel(failure);
+               let has_update = has_monitor_update || !failed_htlcs.is_empty();
+               for (failures, channel_id) in failed_htlcs.drain(..) {
+                       self.fail_holding_cell_htlcs(failures, channel_id);
+               }
+
+               for (counterparty_node_id, err) in handle_errors.drain(..) {
+                       let _ = handle_error!(self, err, counterparty_node_id);
                }
+
+               has_update
        }
 
        /// Handle a list of channel failures during a block_connected or block_disconnected call,
@@ -3542,7 +3880,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
        /// The [`PaymentHash`] (and corresponding [`PaymentPreimage`]) must be globally unique. This
        /// method may return an Err if another payment with the same payment_hash is still pending.
        ///
-       /// `user_payment_id` will be provided back in [`PaymentReceived::user_payment_id`] events to
+       /// `user_payment_id` will be provided back in [`PaymentPurpose::InvoicePayment::user_payment_id`] events to
        /// allow tracking of which events correspond with which calls to this and
        /// [`create_inbound_payment`]. `user_payment_id` has no meaning inside of LDK, it is simply
        /// copied to events and otherwise ignored. It may be used to correlate PaymentReceived events
@@ -3576,10 +3914,18 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
        ///
        /// [`create_inbound_payment`]: Self::create_inbound_payment
        /// [`PaymentReceived`]: events::Event::PaymentReceived
-       /// [`PaymentReceived::user_payment_id`]: events::Event::PaymentReceived::user_payment_id
+       /// [`PaymentPurpose::InvoicePayment::user_payment_id`]: events::PaymentPurpose::InvoicePayment::user_payment_id
        pub fn create_inbound_payment_for_hash(&self, payment_hash: PaymentHash, min_value_msat: Option<u64>, invoice_expiry_delta_secs: u32, user_payment_id: u64) -> Result<PaymentSecret, APIError> {
                self.set_payment_hash_secret_map(payment_hash, None, min_value_msat, invoice_expiry_delta_secs, user_payment_id)
        }
+
+       #[cfg(any(test, feature = "fuzztarget", feature = "_test_utils"))]
+       pub fn get_and_clear_pending_events(&self) -> Vec<events::Event> {
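+               // This relies on EventHandler being implemented for Fn(Event) closures, so a
+               // capturing closure pushing into a RefCell suffices to collect events in tests.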
+               let events = core::cell::RefCell::new(Vec::new());
+               let event_handler = |event| events.borrow_mut().push(event);
+               self.process_pending_events(&event_handler);
+               events.into_inner()
+       }
 }
 
 impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> MessageSendEventsProvider for ChannelManager<Signer, M, T, K, F, L>
@@ -3590,33 +3936,71 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> MessageSend
                                L::Target: Logger,
 {
        fn get_and_clear_pending_msg_events(&self) -> Vec<MessageSendEvent> {
-               //TODO: This behavior should be documented. It's non-intuitive that we query
-               // ChannelMonitors when clearing other events.
-               self.process_pending_monitor_events();
+               let events = RefCell::new(Vec::new());
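+               // The closure passed to optionally_notify can only return a NotifyOption, so
+               // the drained message events escape through this RefCell instead.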
+               PersistenceNotifierGuard::optionally_notify(&self.total_consistency_lock, &self.persistence_notifier, || {
+                       let mut result = NotifyOption::SkipPersist;
 
-               let mut ret = Vec::new();
-               let mut channel_state = self.channel_state.lock().unwrap();
-               mem::swap(&mut ret, &mut channel_state.pending_msg_events);
-               ret
+                       // TODO: This behavior should be documented. It's unintuitive that we query
+                       // ChannelMonitors when clearing other events.
+                       if self.process_pending_monitor_events() {
+                               result = NotifyOption::DoPersist;
+                       }
+
+                       if self.check_free_holding_cells() {
+                               result = NotifyOption::DoPersist;
+                       }
+
+                       let mut pending_events = Vec::new();
+                       let mut channel_state = self.channel_state.lock().unwrap();
+                       mem::swap(&mut pending_events, &mut channel_state.pending_msg_events);
+
+                       if !pending_events.is_empty() {
+                               events.replace(pending_events);
+                       }
+
+                       result
+               });
+               events.into_inner()
        }
 }
 
 impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> EventsProvider for ChannelManager<Signer, M, T, K, F, L>
-       where M::Target: chain::Watch<Signer>,
-        T::Target: BroadcasterInterface,
-        K::Target: KeysInterface<Signer = Signer>,
-        F::Target: FeeEstimator,
-                               L::Target: Logger,
+where
+       M::Target: chain::Watch<Signer>,
+       T::Target: BroadcasterInterface,
+       K::Target: KeysInterface<Signer = Signer>,
+       F::Target: FeeEstimator,
+       L::Target: Logger,
 {
-       fn get_and_clear_pending_events(&self) -> Vec<Event> {
-               //TODO: This behavior should be documented. It's non-intuitive that we query
-               // ChannelMonitors when clearing other events.
-               self.process_pending_monitor_events();
+       /// Processes events that must be periodically handled.
+       ///
+       /// An [`EventHandler`] may safely call back to the provider in order to handle an event.
+       /// However, it must not call [`Writeable::write`] as doing so would result in a deadlock.
+       ///
+       /// Pending events are persisted as part of [`ChannelManager`]. While these events are cleared
+       /// when processed, an [`EventHandler`] must be able to handle previously seen events when
+       /// restarting from an old state.
+       fn process_pending_events<H: Deref>(&self, handler: H) where H::Target: EventHandler {
+               PersistenceNotifierGuard::optionally_notify(&self.total_consistency_lock, &self.persistence_notifier, || {
+                       let mut result = NotifyOption::SkipPersist;
 
-               let mut ret = Vec::new();
-               let mut pending_events = self.pending_events.lock().unwrap();
-               mem::swap(&mut ret, &mut *pending_events);
-               ret
+                       // TODO: This behavior should be documented. It's unintuitive that we query
+                       // ChannelMonitors when clearing other events.
+                       if self.process_pending_monitor_events() {
+                               result = NotifyOption::DoPersist;
+                       }
+
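+                       // Take the whole pending event queue in one shot so the lock is held only
+                       // briefly and the handler runs without it.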
+                       let mut pending_events = mem::replace(&mut *self.pending_events.lock().unwrap(), vec![]);
+                       if !pending_events.is_empty() {
+                               result = NotifyOption::DoPersist;
+                       }
+
+                       for event in pending_events.drain(..) {
+                               handler.handle_event(event);
+                       }
+
+                       result
+               });
        }
 }
 
@@ -3654,7 +4038,7 @@ where
                        *best_block = BestBlock::new(header.prev_blockhash, new_height)
                }
 
-               self.do_chain_event(Some(new_height), |channel| channel.best_block_updated(new_height, header.time));
+               self.do_chain_event(Some(new_height), |channel| channel.best_block_updated(new_height, header.time, &self.logger));
        }
 }
 
@@ -3690,7 +4074,7 @@ where
 
                *self.best_block.write().unwrap() = BestBlock::new(block_hash, height);
 
-               self.do_chain_event(Some(height), |channel| channel.best_block_updated(height, header.time));
+               self.do_chain_event(Some(height), |channel| channel.best_block_updated(height, header.time, &self.logger));
 
                macro_rules! max_time {
                        ($timestamp: expr) => {
@@ -3732,7 +4116,7 @@ where
                self.do_chain_event(None, |channel| {
                        if let Some(funding_txo) = channel.get_funding_txo() {
                                if funding_txo.txid == *txid {
-                                       channel.funding_transaction_unconfirmed().map(|_| (None, Vec::new()))
+                                       channel.funding_transaction_unconfirmed(&self.logger).map(|_| (None, Vec::new()))
                                } else { Ok((None, Vec::new())) }
                        } else { Ok((None, Vec::new())) }
                });
@@ -3767,7 +4151,7 @@ where
                                let res = f(channel);
                                if let Ok((chan_res, mut timed_out_pending_htlcs)) = res {
                                        for (source, payment_hash) in timed_out_pending_htlcs.drain(..) {
-                                               let chan_update = self.get_channel_update(&channel).map(|u| u.encode_with_len()).unwrap(); // Cannot add/recv HTLCs before we have a short_id so unwrap is safe
+                                               let chan_update = self.get_channel_update_for_unicast(&channel).map(|u| u.encode_with_len()).unwrap(); // Cannot add/recv HTLCs before we have a short_id so unwrap is safe
                                                timed_out_htlcs.push((source, payment_hash,  HTLCFailReason::Reason {
                                                        failure_code: 0x1000 | 14, // expiry_too_soon, or at least it is now
                                                        data: chan_update,
@@ -3784,6 +4168,12 @@ where
                                                                node_id: channel.get_counterparty_node_id(),
                                                                msg: announcement_sigs,
                                                        });
+                                               } else if channel.is_usable() {
+                                                       log_trace!(self.logger, "Sending funding_locked WITHOUT announcement_signatures but with private channel_update for our counterparty on channel {}", log_bytes!(channel.channel_id()));
+                                                       pending_msg_events.push(events::MessageSendEvent::SendChannelUpdate {
+                                                               node_id: channel.get_counterparty_node_id(),
+                                                               msg: self.get_channel_update_for_unicast(channel).unwrap(),
+                                                       });
                                                } else {
                                                        log_trace!(self.logger, "Sending funding_locked WITHOUT announcement_signatures for {}", log_bytes!(channel.channel_id()));
                                                }
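
The new `else if channel.is_usable()` arm above slots between the announced case and the silent fallback: a usable but unannounced channel now gets a unicast channel_update so the counterparty can still route to us. A self-contained sketch of the three-way decision, using toy types rather than the actual message enums:

enum FundingLockedFollowup { AnnouncementSigs, UnicastChannelUpdate, Nothing }

fn followup(announcement_sigs_ready: bool, is_usable: bool) -> FundingLockedFollowup {
    if announcement_sigs_ready {
        FundingLockedFollowup::AnnouncementSigs
    } else if is_usable {
        FundingLockedFollowup::UnicastChannelUpdate
    } else {
        FundingLockedFollowup::Nothing
    }
}

fn main() {
    assert!(matches!(followup(false, true), FundingLockedFollowup::UnicastChannelUpdate));
    assert!(matches!(followup(false, false), FundingLockedFollowup::Nothing));
}
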
@@ -3796,7 +4186,7 @@ where
                                        // It looks like our counterparty went on-chain or funding transaction was
                                        // reorged out of the main chain. Close the channel.
                                        failed_channels.push(channel.force_shutdown(true));
-                                       if let Ok(update) = self.get_channel_update(&channel) {
+                                       if let Ok(update) = self.get_channel_update_for_broadcast(&channel) {
                                                pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
                                                        msg: update
                                                });
@@ -3863,6 +4253,12 @@ where
                let guard = mtx.lock().unwrap();
                *guard
        }
+
+       /// Gets the latest best block which was connected either via the [`chain::Listen`] or
+       /// [`chain::Confirm`] interfaces.
+       pub fn current_best_block(&self) -> BestBlock {
+               self.best_block.read().unwrap().clone()
+       }
 }
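
A hedged usage sketch for the new accessor (the `channel_manager` binding is hypothetical; `block_hash()` and `height()` are the accessors `chain::BestBlock` exposes):

// Minimal sketch, assuming an initialized `channel_manager` in scope.
let best = channel_manager.current_best_block();
println!("resuming chain sync from block {} at height {}", best.block_hash(), best.height());
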
 
 impl<Signer: Sign, M: Deref , T: Deref , K: Deref , F: Deref , L: Deref >
@@ -3949,8 +4345,13 @@ impl<Signer: Sign, M: Deref , T: Deref , K: Deref , F: Deref , L: Deref >
        }
 
        fn handle_channel_update(&self, counterparty_node_id: &PublicKey, msg: &msgs::ChannelUpdate) {
-               let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
-               let _ = handle_error!(self, self.internal_channel_update(counterparty_node_id, msg), *counterparty_node_id);
+               PersistenceNotifierGuard::optionally_notify(&self.total_consistency_lock, &self.persistence_notifier, || {
+                       if let Ok(persist) = handle_error!(self, self.internal_channel_update(counterparty_node_id, msg), *counterparty_node_id) {
+                               persist
+                       } else {
+                               NotifyOption::SkipPersist
+                       }
+               });
        }
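
Switching from the unconditional notify-on-drop guard to `optionally_notify` is what lets redundant channel_update deliveries skip a full ChannelManager re-persist. A self-contained sketch of the pattern with toy types (not LDK's actual PersistenceNotifierGuard; `DoPersist` is assumed as the counterpart of the `SkipPersist` variant used above):

use std::sync::{Condvar, Mutex};

enum NotifyOption { DoPersist, SkipPersist }

struct PersistenceNotifier { needs_persist: Mutex<bool>, cvar: Condvar }

impl PersistenceNotifier {
    // Run the handler, waking persistence waiters only if it asks us to.
    fn optionally_notify<F: FnOnce() -> NotifyOption>(&self, handler: F) {
        if let NotifyOption::DoPersist = handler() {
            *self.needs_persist.lock().unwrap() = true;
            self.cvar.notify_all();
        }
    }
}

fn main() {
    let n = PersistenceNotifier { needs_persist: Mutex::new(false), cvar: Condvar::new() };
    n.optionally_notify(|| NotifyOption::SkipPersist);
    assert!(!*n.needs_persist.lock().unwrap());
    n.optionally_notify(|| NotifyOption::DoPersist);
    assert!(*n.needs_persist.lock().unwrap());
}
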
 
        fn handle_channel_reestablish(&self, counterparty_node_id: &PublicKey, msg: &msgs::ChannelReestablish) {
@@ -3961,7 +4362,6 @@ impl<Signer: Sign, M: Deref , T: Deref , K: Deref , F: Deref , L: Deref >
        fn peer_disconnected(&self, counterparty_node_id: &PublicKey, no_connection_possible: bool) {
                let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
                let mut failed_channels = Vec::new();
-               let mut failed_payments = Vec::new();
                let mut no_channels_remain = true;
                {
                        let mut channel_state_lock = self.channel_state.lock().unwrap();
@@ -3976,7 +4376,7 @@ impl<Signer: Sign, M: Deref , T: Deref , K: Deref , F: Deref , L: Deref >
                                                        short_to_id.remove(&short_id);
                                                }
                                                failed_channels.push(chan.force_shutdown(true));
-                                               if let Ok(update) = self.get_channel_update(&chan) {
+                                               if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
                                                        pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
                                                                msg: update
                                                        });
@@ -3990,15 +4390,7 @@ impl<Signer: Sign, M: Deref , T: Deref , K: Deref , F: Deref , L: Deref >
                                log_debug!(self.logger, "Marking channels with {} disconnected and generating channel_updates", log_pubkey!(counterparty_node_id));
                                channel_state.by_id.retain(|_, chan| {
                                        if chan.get_counterparty_node_id() == *counterparty_node_id {
-                                               // Note that currently on channel reestablish we assert that there are no
-                                               // holding cell add-HTLCs, so if in the future we stop removing uncommitted HTLCs
-                                               // on peer disconnect here, there will need to be corresponding changes in
-                                               // reestablish logic.
-                                               let failed_adds = chan.remove_uncommitted_htlcs_and_mark_paused(&self.logger);
-                                               if !failed_adds.is_empty() {
-                                                       let chan_update = self.get_channel_update(&chan).map(|u| u.encode_with_len()).unwrap(); // Cannot add/recv HTLCs before we have a short_id so unwrap is safe
-                                                       failed_payments.push((chan_update, failed_adds));
-                                               }
+                                               chan.remove_uncommitted_htlcs_and_mark_paused(&self.logger);
                                                if chan.is_shutdown() {
                                                        if let Some(short_id) = chan.get_short_channel_id() {
                                                                short_to_id.remove(&short_id);
@@ -4027,6 +4419,7 @@ impl<Signer: Sign, M: Deref , T: Deref , K: Deref , F: Deref , L: Deref >
                                        &events::MessageSendEvent::BroadcastChannelAnnouncement { .. } => true,
                                        &events::MessageSendEvent::BroadcastNodeAnnouncement { .. } => true,
                                        &events::MessageSendEvent::BroadcastChannelUpdate { .. } => true,
+                                       &events::MessageSendEvent::SendChannelUpdate { ref node_id, .. } => node_id != counterparty_node_id,
                                        &events::MessageSendEvent::HandleError { ref node_id, .. } => node_id != counterparty_node_id,
                                        &events::MessageSendEvent::PaymentFailureNetworkUpdate { .. } => true,
                                        &events::MessageSendEvent::SendChannelRangeQuery { .. } => false,
@@ -4042,11 +4435,6 @@ impl<Signer: Sign, M: Deref , T: Deref , K: Deref , F: Deref , L: Deref >
                for failure in failed_channels.drain(..) {
                        self.finish_force_close_channel(failure);
                }
-               for (chan_update, mut htlc_sources) in failed_payments {
-                       for (htlc_source, payment_hash) in htlc_sources.drain(..) {
-                               self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_source, &payment_hash, HTLCFailReason::Reason { failure_code: 0x1000 | 7, data: chan_update.clone() });
-                       }
-               }
        }
 
        fn peer_connected(&self, counterparty_node_id: &PublicKey, init_msg: &msgs::Init) {
@@ -4096,7 +4484,7 @@ impl<Signer: Sign, M: Deref , T: Deref , K: Deref , F: Deref , L: Deref >
 
                if msg.channel_id == [0; 32] {
                        for chan in self.list_channels() {
-                               if chan.remote_network_id == *counterparty_node_id {
+                               if chan.counterparty.node_id == *counterparty_node_id {
                                        // Untrusted messages from peer, we throw away the error if id points to a non-existent channel
                                        let _ = self.force_close_channel_with_peer(&chan.channel_id, Some(counterparty_node_id));
                                }
@@ -4182,243 +4570,141 @@ impl PersistenceNotifier {
 const SERIALIZATION_VERSION: u8 = 1;
 const MIN_SERIALIZATION_VERSION: u8 = 1;
 
-impl Writeable for PendingHTLCInfo {
-       fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ::std::io::Error> {
-               match &self.routing {
-                       &PendingHTLCRouting::Forward { ref onion_packet, ref short_channel_id } => {
-                               0u8.write(writer)?;
-                               onion_packet.write(writer)?;
-                               short_channel_id.write(writer)?;
-                       },
-                       &PendingHTLCRouting::Receive { ref payment_data, ref incoming_cltv_expiry } => {
-                               1u8.write(writer)?;
-                               payment_data.payment_secret.write(writer)?;
-                               payment_data.total_msat.write(writer)?;
-                               incoming_cltv_expiry.write(writer)?;
-                       },
-               }
-               self.incoming_shared_secret.write(writer)?;
-               self.payment_hash.write(writer)?;
-               self.amt_to_forward.write(writer)?;
-               self.outgoing_cltv_value.write(writer)?;
-               Ok(())
-       }
-}
-
-impl Readable for PendingHTLCInfo {
-       fn read<R: ::std::io::Read>(reader: &mut R) -> Result<PendingHTLCInfo, DecodeError> {
-               Ok(PendingHTLCInfo {
-                       routing: match Readable::read(reader)? {
-                               0u8 => PendingHTLCRouting::Forward {
-                                       onion_packet: Readable::read(reader)?,
-                                       short_channel_id: Readable::read(reader)?,
-                               },
-                               1u8 => PendingHTLCRouting::Receive {
-                                       payment_data: msgs::FinalOnionHopData {
-                                               payment_secret: Readable::read(reader)?,
-                                               total_msat: Readable::read(reader)?,
-                                       },
-                                       incoming_cltv_expiry: Readable::read(reader)?,
-                               },
-                               _ => return Err(DecodeError::InvalidValue),
-                       },
-                       incoming_shared_secret: Readable::read(reader)?,
-                       payment_hash: Readable::read(reader)?,
-                       amt_to_forward: Readable::read(reader)?,
-                       outgoing_cltv_value: Readable::read(reader)?,
-               })
-       }
-}
-
-impl Writeable for HTLCFailureMsg {
-       fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ::std::io::Error> {
-               match self {
-                       &HTLCFailureMsg::Relay(ref fail_msg) => {
-                               0u8.write(writer)?;
-                               fail_msg.write(writer)?;
-                       },
-                       &HTLCFailureMsg::Malformed(ref fail_msg) => {
-                               1u8.write(writer)?;
-                               fail_msg.write(writer)?;
-                       }
-               }
-               Ok(())
-       }
-}
-
-impl Readable for HTLCFailureMsg {
-       fn read<R: ::std::io::Read>(reader: &mut R) -> Result<HTLCFailureMsg, DecodeError> {
-               match <u8 as Readable>::read(reader)? {
-                       0 => Ok(HTLCFailureMsg::Relay(Readable::read(reader)?)),
-                       1 => Ok(HTLCFailureMsg::Malformed(Readable::read(reader)?)),
-                       _ => Err(DecodeError::InvalidValue),
-               }
-       }
-}
-
-impl Writeable for PendingHTLCStatus {
-       fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ::std::io::Error> {
-               match self {
-                       &PendingHTLCStatus::Forward(ref forward_info) => {
-                               0u8.write(writer)?;
-                               forward_info.write(writer)?;
-                       },
-                       &PendingHTLCStatus::Fail(ref fail_msg) => {
-                               1u8.write(writer)?;
-                               fail_msg.write(writer)?;
-                       }
-               }
-               Ok(())
-       }
-}
-
-impl Readable for PendingHTLCStatus {
-       fn read<R: ::std::io::Read>(reader: &mut R) -> Result<PendingHTLCStatus, DecodeError> {
-               match <u8 as Readable>::read(reader)? {
-                       0 => Ok(PendingHTLCStatus::Forward(Readable::read(reader)?)),
-                       1 => Ok(PendingHTLCStatus::Fail(Readable::read(reader)?)),
-                       _ => Err(DecodeError::InvalidValue),
-               }
-       }
-}
+impl_writeable_tlv_based_enum!(PendingHTLCRouting,
+       (0, Forward) => {
+               (0, onion_packet, required),
+               (2, short_channel_id, required),
+       },
+       (1, Receive) => {
+               (0, payment_data, required),
+               (2, incoming_cltv_expiry, required),
+       },
+       (2, ReceiveKeysend) => {
+               (0, payment_preimage, required),
+               (2, incoming_cltv_expiry, required),
+       },
+;);
+
+impl_writeable_tlv_based!(PendingHTLCInfo, {
+       (0, routing, required),
+       (2, incoming_shared_secret, required),
+       (4, payment_hash, required),
+       (6, amt_to_forward, required),
+       (8, outgoing_cltv_value, required)
+});
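
These macros replace the hand-rolled Writeable/Readable impls deleted above with TLV streams: each field is keyed by the type number in its tuple, so new optional fields can be added later without breaking older readers. A toy sketch of the record layout, not the macros' real expansion (the actual encoding uses BigSize varints for type and length, and follows the lightning convention that unknown odd types may be skipped while unknown even types are a decode error):

// Serialize one (type, length, value) record into a buffer.
fn push_tlv(buf: &mut Vec<u8>, typ: u8, val: &[u8]) {
    buf.push(typ);
    buf.push(val.len() as u8); // toy: single-byte type and length
    buf.extend_from_slice(val);
}

fn main() {
    let mut buf = Vec::new();
    push_tlv(&mut buf, 4, &[0x11; 32]);               // e.g. payment_hash
    push_tlv(&mut buf, 6, &100_000u64.to_be_bytes()); // e.g. amt_to_forward
    assert_eq!(buf.len(), (2 + 32) + (2 + 8));
}
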
 
-impl_writeable!(HTLCPreviousHopData, 0, {
-       short_channel_id,
-       outpoint,
-       htlc_id,
-       incoming_packet_shared_secret
+impl_writeable_tlv_based_enum!(HTLCFailureMsg, ;
+       (0, Relay),
+       (1, Malformed),
+);
+impl_writeable_tlv_based_enum!(PendingHTLCStatus, ;
+       (0, Forward),
+       (1, Fail),
+);
+
+impl_writeable_tlv_based!(HTLCPreviousHopData, {
+       (0, short_channel_id, required),
+       (2, outpoint, required),
+       (4, htlc_id, required),
+       (6, incoming_packet_shared_secret, required)
 });
 
 impl Writeable for ClaimableHTLC {
        fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ::std::io::Error> {
-               self.prev_hop.write(writer)?;
-               self.value.write(writer)?;
-               self.payment_data.payment_secret.write(writer)?;
-               self.payment_data.total_msat.write(writer)?;
-               self.cltv_expiry.write(writer)
-       }
-}
-
-impl Readable for ClaimableHTLC {
-       fn read<R: ::std::io::Read>(reader: &mut R) -> Result<Self, DecodeError> {
-               Ok(ClaimableHTLC {
-                       prev_hop: Readable::read(reader)?,
-                       value: Readable::read(reader)?,
-                       payment_data: msgs::FinalOnionHopData {
-                               payment_secret: Readable::read(reader)?,
-                               total_msat: Readable::read(reader)?,
-                       },
-                       cltv_expiry: Readable::read(reader)?,
-               })
-       }
-}
-
-impl Writeable for HTLCSource {
-       fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ::std::io::Error> {
-               match self {
-                       &HTLCSource::PreviousHopData(ref hop_data) => {
-                               0u8.write(writer)?;
-                               hop_data.write(writer)?;
-                       },
-                       &HTLCSource::OutboundRoute { ref path, ref session_priv, ref first_hop_htlc_msat } => {
-                               1u8.write(writer)?;
-                               path.write(writer)?;
-                               session_priv.write(writer)?;
-                               first_hop_htlc_msat.write(writer)?;
-                       }
-               }
-               Ok(())
-       }
-}
-
-impl Readable for HTLCSource {
-       fn read<R: ::std::io::Read>(reader: &mut R) -> Result<HTLCSource, DecodeError> {
-               match <u8 as Readable>::read(reader)? {
-                       0 => Ok(HTLCSource::PreviousHopData(Readable::read(reader)?)),
-                       1 => Ok(HTLCSource::OutboundRoute {
-                               path: Readable::read(reader)?,
-                               session_priv: Readable::read(reader)?,
-                               first_hop_htlc_msat: Readable::read(reader)?,
-                       }),
-                       _ => Err(DecodeError::InvalidValue),
-               }
-       }
-}
-
-impl Writeable for HTLCFailReason {
-       fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ::std::io::Error> {
-               match self {
-                       &HTLCFailReason::LightningError { ref err } => {
-                               0u8.write(writer)?;
-                               err.write(writer)?;
-                       },
-                       &HTLCFailReason::Reason { ref failure_code, ref data } => {
-                               1u8.write(writer)?;
-                               failure_code.write(writer)?;
-                               data.write(writer)?;
-                       }
-               }
+               let payment_data = match &self.onion_payload {
+                       OnionPayload::Invoice(data) => Some(data.clone()),
+                       _ => None,
+               };
+               let keysend_preimage = match self.onion_payload {
+                       OnionPayload::Invoice(_) => None,
+                       OnionPayload::Spontaneous(preimage) => Some(preimage.clone()),
+               };
+               write_tlv_fields!(writer, {
+                       (0, self.prev_hop, required),
+                       (2, self.value, required),
+                       (4, payment_data, option),
+                       (6, self.cltv_expiry, required),
+                       (8, keysend_preimage, option),
+               });
                Ok(())
        }
 }
 
-impl Readable for HTLCFailReason {
-       fn read<R: ::std::io::Read>(reader: &mut R) -> Result<HTLCFailReason, DecodeError> {
-               match <u8 as Readable>::read(reader)? {
-                       0 => Ok(HTLCFailReason::LightningError { err: Readable::read(reader)? }),
-                       1 => Ok(HTLCFailReason::Reason {
-                               failure_code: Readable::read(reader)?,
-                               data: Readable::read(reader)?,
-                       }),
-                       _ => Err(DecodeError::InvalidValue),
-               }
-       }
-}
-
-impl Writeable for HTLCForwardInfo {
-       fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ::std::io::Error> {
-               match self {
-                       &HTLCForwardInfo::AddHTLC { ref prev_short_channel_id, ref prev_funding_outpoint, ref prev_htlc_id, ref forward_info } => {
-                               0u8.write(writer)?;
-                               prev_short_channel_id.write(writer)?;
-                               prev_funding_outpoint.write(writer)?;
-                               prev_htlc_id.write(writer)?;
-                               forward_info.write(writer)?;
+impl Readable for ClaimableHTLC {
+       fn read<R: Read>(reader: &mut R) -> Result<Self, DecodeError> {
+               let mut prev_hop = ::util::ser::OptionDeserWrapper(None);
+               let mut value = 0;
+               let mut payment_data: Option<msgs::FinalOnionHopData> = None;
+               let mut cltv_expiry = 0;
+               let mut keysend_preimage: Option<PaymentPreimage> = None;
+               read_tlv_fields!(reader, {
+                       (0, prev_hop, required),
+                       (2, value, required),
+                       (4, payment_data, option),
+                       (6, cltv_expiry, required),
+                       (8, keysend_preimage, option),
+               });
+               let onion_payload = match keysend_preimage {
+                       Some(p) => {
+                               if payment_data.is_some() {
+                                       return Err(DecodeError::InvalidValue)
+                               }
+                               OnionPayload::Spontaneous(p)
                        },
-                       &HTLCForwardInfo::FailHTLC { ref htlc_id, ref err_packet } => {
-                               1u8.write(writer)?;
-                               htlc_id.write(writer)?;
-                               err_packet.write(writer)?;
+                       None => {
+                               if payment_data.is_none() {
+                                       return Err(DecodeError::InvalidValue)
+                               }
+                               OnionPayload::Invoice(payment_data.unwrap())
                        },
-               }
-               Ok(())
-       }
-}
-
-impl Readable for HTLCForwardInfo {
-       fn read<R: ::std::io::Read>(reader: &mut R) -> Result<HTLCForwardInfo, DecodeError> {
-               match <u8 as Readable>::read(reader)? {
-                       0 => Ok(HTLCForwardInfo::AddHTLC {
-                               prev_short_channel_id: Readable::read(reader)?,
-                               prev_funding_outpoint: Readable::read(reader)?,
-                               prev_htlc_id: Readable::read(reader)?,
-                               forward_info: Readable::read(reader)?,
-                       }),
-                       1 => Ok(HTLCForwardInfo::FailHTLC {
-                               htlc_id: Readable::read(reader)?,
-                               err_packet: Readable::read(reader)?,
-                       }),
-                       _ => Err(DecodeError::InvalidValue),
-               }
+               };
+               Ok(Self {
+                       prev_hop: prev_hop.0.unwrap(),
+                       value,
+                       onion_payload,
+                       cltv_expiry,
+               })
        }
 }
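
The reader above accepts exactly one of `payment_data` (an invoice payment) or `keysend_preimage` (a spontaneous payment) and rejects everything else. The invariant, reduced to a sketch with booleans standing in for the two Option fields:

fn onion_payload_is_valid(has_payment_data: bool, has_keysend_preimage: bool) -> bool {
    has_payment_data != has_keysend_preimage // exactly one must be present
}

fn main() {
    assert!(onion_payload_is_valid(true, false));   // OnionPayload::Invoice
    assert!(onion_payload_is_valid(false, true));   // OnionPayload::Spontaneous
    assert!(!onion_payload_is_valid(true, true));   // both set: DecodeError::InvalidValue
    assert!(!onion_payload_is_valid(false, false)); // neither set: DecodeError::InvalidValue
}
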
 
-impl_writeable!(PendingInboundPayment, 0, {
-       payment_secret,
-       expiry_time,
-       user_payment_id,
-       payment_preimage,
-       min_value_msat
+impl_writeable_tlv_based_enum!(HTLCSource,
+       (0, OutboundRoute) => {
+               (0, session_priv, required),
+               (2, first_hop_htlc_msat, required),
+               (4, path, vec_type),
+       }, ;
+       (1, PreviousHopData)
+);
+
+impl_writeable_tlv_based_enum!(HTLCFailReason,
+       (0, LightningError) => {
+               (0, err, required),
+       },
+       (1, Reason) => {
+               (0, failure_code, required),
+               (2, data, vec_type),
+       },
+;);
+
+impl_writeable_tlv_based_enum!(HTLCForwardInfo,
+       (0, AddHTLC) => {
+               (0, forward_info, required),
+               (2, prev_short_channel_id, required),
+               (4, prev_htlc_id, required),
+               (6, prev_funding_outpoint, required),
+       },
+       (1, FailHTLC) => {
+               (0, htlc_id, required),
+               (2, err_packet, required),
+       },
+;);
+
+impl_writeable_tlv_based!(PendingInboundPayment, {
+       (0, payment_secret, required),
+       (2, expiry_time, required),
+       (4, user_payment_id, required),
+       (6, payment_preimage, required),
+       (8, min_value_msat, required),
 });
 
 impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> Writeable for ChannelManager<Signer, M, T, K, F, L>
@@ -4431,8 +4717,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> Writeable f
        fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ::std::io::Error> {
                let _consistency_lock = self.total_consistency_lock.write().unwrap();
 
-               writer.write_all(&[SERIALIZATION_VERSION; 1])?;
-               writer.write_all(&[MIN_SERIALIZATION_VERSION; 1])?;
+               write_ver_prefix!(writer, SERIALIZATION_VERSION, MIN_SERIALIZATION_VERSION);
 
                self.genesis_hash.write(writer)?;
                {
@@ -4515,6 +4800,8 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> Writeable f
                        session_priv.write(writer)?;
                }
 
+               write_tlv_fields!(writer, {});
+
                Ok(())
        }
 }
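
write_ver_prefix! and the empty write_tlv_fields!/read_tlv_fields! pairs added here centralize the forward-compatibility plumbing that the open-coded reader removed below used to do by hand. A minimal sketch of the convention, assuming the two-byte (version, min_version) prefix visible in that deleted code:

// Reject streams whose writer demands a newer reader than we support.
fn read_ver_prefix(data: &[u8], max_supported: u8) -> Result<u8, &'static str> {
    let (ver, min_ver) = (data[0], data[1]);
    if min_ver > max_supported {
        return Err("DecodeError::UnknownVersion");
    }
    Ok(ver)
}

fn main() {
    assert_eq!(read_ver_prefix(&[1, 1], 1), Ok(1)); // SERIALIZATION_VERSION = 1
    assert!(read_ver_prefix(&[2, 2], 1).is_err());  // writer requires a newer reader
}
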
@@ -4638,11 +4925,7 @@ impl<'a, Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
         L::Target: Logger,
 {
        fn read<R: ::std::io::Read>(reader: &mut R, mut args: ChannelManagerReadArgs<'a, Signer, M, T, K, F, L>) -> Result<Self, DecodeError> {
-               let _ver: u8 = Readable::read(reader)?;
-               let min_ver: u8 = Readable::read(reader)?;
-               if min_ver > SERIALIZATION_VERSION {
-                       return Err(DecodeError::UnknownVersion);
-               }
+               let _ver = read_ver_prefix!(reader, SERIALIZATION_VERSION);
 
                let genesis_hash: BlockHash = Readable::read(reader)?;
                let best_block_height: u32 = Readable::read(reader)?;
@@ -4664,6 +4947,13 @@ impl<'a, Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
                                                channel.get_cur_counterparty_commitment_transaction_number() < monitor.get_cur_counterparty_commitment_number() ||
                                                channel.get_latest_monitor_update_id() > monitor.get_latest_update_id() {
                                        // If the channel is ahead of the monitor, return InvalidValue:
+                                       log_error!(args.logger, "A ChannelMonitor is stale compared to the current ChannelManager! This indicates a potentially-critical violation of the chain::Watch API!");
+                                       log_error!(args.logger, " The ChannelMonitor for channel {} is at update_id {} but the ChannelManager is at update_id {}.",
+                                               log_bytes!(channel.channel_id()), monitor.get_latest_update_id(), channel.get_latest_monitor_update_id());
+                                       log_error!(args.logger, " The chain::Watch API *requires* that monitors are persisted durably before returning,");
+                                       log_error!(args.logger, " client applications must ensure that ChannelMonitor data is always available and the latest to avoid funds loss!");
+                                       log_error!(args.logger, " Without the latest ChannelMonitor we cannot continue without risking funds.");
+                                       log_error!(args.logger, " Please ensure the chain::Watch API requirements are met and file a bug report at https://github.com/rust-bitcoin/rust-lightning");
                                        return Err(DecodeError::InvalidValue);
                                } else if channel.get_cur_holder_commitment_transaction_number() > monitor.get_cur_holder_commitment_number() ||
                                                channel.get_revoked_counterparty_commitment_transaction_number() > monitor.get_min_seen_secret() ||
@@ -4680,6 +4970,11 @@ impl<'a, Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
                                        by_id.insert(channel.channel_id(), channel);
                                }
                        } else {
+                               log_error!(args.logger, "Missing ChannelMonitor for channel {} needed by ChannelManager.", log_bytes!(channel.channel_id()));
+                               log_error!(args.logger, " The chain::Watch API *requires* that monitors are persisted durably before returning,");
+                               log_error!(args.logger, " client applications must ensure that ChannelMonitor data is always available and the latest to avoid funds loss!");
+                               log_error!(args.logger, " Without the ChannelMonitor we cannot continue without risking funds.");
+                               log_error!(args.logger, " Please ensure the chain::Watch API requirements are met and file a bug report at https://github.com/rust-bitcoin/rust-lightning");
                                return Err(DecodeError::InvalidValue);
                        }
                }
@@ -4762,6 +5057,8 @@ impl<'a, Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
                        }
                }
 
+               read_tlv_fields!(reader, {});
+
                let mut secp_ctx = Secp256k1::new();
                secp_ctx.seeded_randomize(&args.keys_manager.get_secure_random_bytes());
 
@@ -4815,14 +5112,26 @@ impl<'a, Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
 
 #[cfg(test)]
 mod tests {
-       use ln::channelmanager::PersistenceNotifier;
-       use std::sync::Arc;
-       use std::sync::atomic::{AtomicBool, Ordering};
-       use std::thread;
-       use std::time::Duration;
+       use bitcoin::hashes::Hash;
+       use bitcoin::hashes::sha256::Hash as Sha256;
+       use core::time::Duration;
+       use ln::{PaymentPreimage, PaymentHash, PaymentSecret};
+       use ln::features::{InitFeatures, InvoiceFeatures};
+       use ln::functional_test_utils::*;
+       use ln::msgs;
+       use ln::msgs::ChannelMessageHandler;
+       use routing::router::{get_keysend_route, get_route};
+       use util::events::{Event, MessageSendEvent, MessageSendEventsProvider};
+       use util::test_utils;
 
+       #[cfg(feature = "std")]
        #[test]
        fn test_wait_timeout() {
+               use ln::channelmanager::PersistenceNotifier;
+               use sync::Arc;
+               use core::sync::atomic::{AtomicBool, Ordering};
+               use std::thread;
+
                let persistence_notifier = Arc::new(PersistenceNotifier::new());
                let thread_notifier = Arc::clone(&persistence_notifier);
 
@@ -4862,6 +5171,337 @@ mod tests {
                        }
                }
        }
+
+       #[test]
+       fn test_notify_limits() {
+               // Check that a few cases which don't require the persistence of a new ChannelManager,
+               // indeed, do not cause the persistence of a new ChannelManager.
+               let chanmon_cfgs = create_chanmon_cfgs(3);
+               let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
+               let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
+               let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
+
+               let mut chan = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
+
+               // We check that the channel info the nodes have doesn't change too early, even though
+               // we try to deliver channel_update messages carrying new values.
+               chan.0.contents.fee_base_msat *= 2;
+               chan.1.contents.fee_base_msat *= 2;
+               let node_a_chan_info = nodes[0].node.list_channels()[0].clone();
+               let node_b_chan_info = nodes[1].node.list_channels()[0].clone();
+
+               // The first two nodes (which opened a channel) should now require fresh persistence
+               assert!(nodes[0].node.await_persistable_update_timeout(Duration::from_millis(1)));
+               assert!(nodes[1].node.await_persistable_update_timeout(Duration::from_millis(1)));
+               // ... but the last node should not.
+               assert!(!nodes[2].node.await_persistable_update_timeout(Duration::from_millis(1)));
+               // After persisting the first two nodes they should no longer need fresh persistence.
+               assert!(!nodes[0].node.await_persistable_update_timeout(Duration::from_millis(1)));
+               assert!(!nodes[1].node.await_persistable_update_timeout(Duration::from_millis(1)));
+
+               // Node 3, unrelated to the only channel, shouldn't care if it receives a channel_update
+               // about the channel.
+               nodes[2].node.handle_channel_update(&nodes[1].node.get_our_node_id(), &chan.0);
+               nodes[2].node.handle_channel_update(&nodes[1].node.get_our_node_id(), &chan.1);
+               assert!(!nodes[2].node.await_persistable_update_timeout(Duration::from_millis(1)));
+
+               // The nodes which are a party to the channel should also ignore messages from unrelated
+               // parties.
+               nodes[0].node.handle_channel_update(&nodes[2].node.get_our_node_id(), &chan.0);
+               nodes[0].node.handle_channel_update(&nodes[2].node.get_our_node_id(), &chan.1);
+               nodes[1].node.handle_channel_update(&nodes[2].node.get_our_node_id(), &chan.0);
+               nodes[1].node.handle_channel_update(&nodes[2].node.get_our_node_id(), &chan.1);
+               assert!(!nodes[0].node.await_persistable_update_timeout(Duration::from_millis(1)));
+               assert!(!nodes[1].node.await_persistable_update_timeout(Duration::from_millis(1)));
+
+               // At this point the channel info given by peers should still be the same.
+               assert_eq!(nodes[0].node.list_channels()[0], node_a_chan_info);
+               assert_eq!(nodes[1].node.list_channels()[0], node_b_chan_info);
+
+               // An earlier version of handle_channel_update didn't check the directionality of the
+               // update message and would always update the local fee info, even if our peer was
+               // (spuriously) forwarding us our own channel_update.
+               let as_node_one = nodes[0].node.get_our_node_id().serialize()[..] < nodes[1].node.get_our_node_id().serialize()[..];
+               let as_update = if as_node_one == (chan.0.contents.flags & 1 == 0 /* chan.0 is from node one */) { &chan.0 } else { &chan.1 };
+               let bs_update = if as_node_one == (chan.0.contents.flags & 1 == 0 /* chan.0 is from node one */) { &chan.1 } else { &chan.0 };
+
+               // First deliver each peers' own message, checking that the node doesn't need to be
+               // persisted and that its channel info remains the same.
+               nodes[0].node.handle_channel_update(&nodes[1].node.get_our_node_id(), &as_update);
+               nodes[1].node.handle_channel_update(&nodes[0].node.get_our_node_id(), &bs_update);
+               assert!(!nodes[0].node.await_persistable_update_timeout(Duration::from_millis(1)));
+               assert!(!nodes[1].node.await_persistable_update_timeout(Duration::from_millis(1)));
+               assert_eq!(nodes[0].node.list_channels()[0], node_a_chan_info);
+               assert_eq!(nodes[1].node.list_channels()[0], node_b_chan_info);
+
+               // Finally, deliver the other peers' message, ensuring each node needs to be persisted and
+               // the channel info has updated.
+               nodes[0].node.handle_channel_update(&nodes[1].node.get_our_node_id(), &bs_update);
+               nodes[1].node.handle_channel_update(&nodes[0].node.get_our_node_id(), &as_update);
+               assert!(nodes[0].node.await_persistable_update_timeout(Duration::from_millis(1)));
+               assert!(nodes[1].node.await_persistable_update_timeout(Duration::from_millis(1)));
+               assert_ne!(nodes[0].node.list_channels()[0], node_a_chan_info);
+               assert_ne!(nodes[1].node.list_channels()[0], node_b_chan_info);
+       }
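+
The `flags & 1` computation above applies the BOLT 7 directionality rule: bit 0 of a channel_update's flags is 0 when the update was signed by node_1 (the node with the lexicographically lesser node_id) and 1 when signed by node_2. Reduced to a sketch:

// Direction bit per BOLT 7; compare with the `as_node_one` check in
// test_notify_limits above.
fn update_is_from_node_one(channel_flags: u8) -> bool {
    channel_flags & 1 == 0
}

fn main() {
    assert!(update_is_from_node_one(0b0));
    assert!(!update_is_from_node_one(0b1));
}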
+
+       #[test]
+       fn test_keysend_dup_hash_partial_mpp() {
+               // Test that a keysend payment with a duplicate hash to an existing partial MPP payment fails as
+               // expected.
+               let chanmon_cfgs = create_chanmon_cfgs(2);
+               let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+               let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+               let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+               create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
+               let logger = test_utils::TestLogger::new();
+
+               // First, send a partial MPP payment.
+               let net_graph_msg_handler = &nodes[0].net_graph_msg_handler;
+               let route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[1].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), 100_000, TEST_FINAL_CLTV, &logger).unwrap();
+               let (payment_preimage, our_payment_hash, payment_secret) = get_payment_preimage_hash!(&nodes[1]);
+               // Use the utility function send_payment_along_path to send the payment with MPP data which
+               // indicates there are more HTLCs coming.
+               let cur_height = CHAN_CONFIRM_DEPTH + 1; // route_payment calls send_payment, which adds 1 to the current height. So we do the same here to match.
+               nodes[0].node.send_payment_along_path(&route.paths[0], &our_payment_hash, &Some(payment_secret), 200_000, cur_height, &None).unwrap();
+               check_added_monitors!(nodes[0], 1);
+               let mut events = nodes[0].node.get_and_clear_pending_msg_events();
+               assert_eq!(events.len(), 1);
+               pass_along_path(&nodes[0], &[&nodes[1]], 200_000, our_payment_hash, Some(payment_secret), events.drain(..).next().unwrap(), false, None);
+
+               // Next, send a keysend payment with the same payment_hash and make sure it fails.
+               nodes[0].node.send_spontaneous_payment(&route, Some(payment_preimage)).unwrap();
+               check_added_monitors!(nodes[0], 1);
+               let mut events = nodes[0].node.get_and_clear_pending_msg_events();
+               assert_eq!(events.len(), 1);
+               let ev = events.drain(..).next().unwrap();
+               let payment_event = SendEvent::from_event(ev);
+               nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
+               check_added_monitors!(nodes[1], 0);
+               commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
+               expect_pending_htlcs_forwardable!(nodes[1]);
+               expect_pending_htlcs_forwardable!(nodes[1]);
+               check_added_monitors!(nodes[1], 1);
+               let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
+               assert!(updates.update_add_htlcs.is_empty());
+               assert!(updates.update_fulfill_htlcs.is_empty());
+               assert_eq!(updates.update_fail_htlcs.len(), 1);
+               assert!(updates.update_fail_malformed_htlcs.is_empty());
+               assert!(updates.update_fee.is_none());
+               nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fail_htlcs[0]);
+               commitment_signed_dance!(nodes[0], nodes[1], updates.commitment_signed, true, true);
+               expect_payment_failed!(nodes[0], our_payment_hash, true);
+
+               // Send the second half of the original MPP payment.
+               nodes[0].node.send_payment_along_path(&route.paths[0], &our_payment_hash, &Some(payment_secret), 200_000, cur_height, &None).unwrap();
+               check_added_monitors!(nodes[0], 1);
+               let mut events = nodes[0].node.get_and_clear_pending_msg_events();
+               assert_eq!(events.len(), 1);
+               pass_along_path(&nodes[0], &[&nodes[1]], 200_000, our_payment_hash, Some(payment_secret), events.drain(..).next().unwrap(), true, None);
+
+               // Claim the full MPP payment. Note that we can't use a test utility like
+               // claim_funds_along_route because the ordering of the messages causes the second half of the
+               // payment to be put in the holding cell, which confuses the test utilities. So we exchange the
+               // lightning messages manually.
+               assert!(nodes[1].node.claim_funds(payment_preimage));
+               check_added_monitors!(nodes[1], 2);
+               let bs_first_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
+               nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_first_updates.update_fulfill_htlcs[0]);
+               nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_first_updates.commitment_signed);
+               check_added_monitors!(nodes[0], 1);
+               let (as_first_raa, as_first_cs) = get_revoke_commit_msgs!(nodes[0], nodes[1].node.get_our_node_id());
+               nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_first_raa);
+               check_added_monitors!(nodes[1], 1);
+               let bs_second_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
+               nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_first_cs);
+               check_added_monitors!(nodes[1], 1);
+               let bs_first_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
+               nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_second_updates.update_fulfill_htlcs[0]);
+               nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_second_updates.commitment_signed);
+               check_added_monitors!(nodes[0], 1);
+               let as_second_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
+               nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_first_raa);
+               let as_second_updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
+               check_added_monitors!(nodes[0], 1);
+               nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_second_raa);
+               check_added_monitors!(nodes[1], 1);
+               nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_second_updates.commitment_signed);
+               check_added_monitors!(nodes[1], 1);
+               let bs_third_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
+               nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_third_raa);
+               check_added_monitors!(nodes[0], 1);
+
+               // There's an existing bug that generates a PaymentSent event for each MPP path, so handle that here.
+               let events = nodes[0].node.get_and_clear_pending_events();
+               match events[0] {
+                       Event::PaymentSent { payment_preimage: ref preimage } => {
+                               assert_eq!(payment_preimage, *preimage);
+                       },
+                       _ => panic!("Unexpected event"),
+               }
+               match events[1] {
+                       Event::PaymentSent { payment_preimage: ref preimage } => {
+                               assert_eq!(payment_preimage, *preimage);
+                       },
+                       _ => panic!("Unexpected event"),
+               }
+       }
+
+       #[test]
+       fn test_keysend_dup_payment_hash() {
+               // (1): Test that a keysend payment with a duplicate payment hash to an existing pending
+               //      outbound regular payment fails as expected.
+               // (2): Test that a regular payment with a duplicate payment hash to an existing keysend payment
+               //      fails as expected.
+               let chanmon_cfgs = create_chanmon_cfgs(2);
+               let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+               let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+               let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+               create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
+               let logger = test_utils::TestLogger::new();
+
+               // To start (1), send a regular payment but don't claim it.
+               let expected_route = [&nodes[1]];
+               let (payment_preimage, payment_hash, _) = route_payment(&nodes[0], &expected_route, 100_000);
+
+               // Next, attempt a keysend payment and make sure it fails.
+               let route = get_route(&nodes[0].node.get_our_node_id(), &nodes[0].net_graph_msg_handler.network_graph.read().unwrap(), &expected_route.last().unwrap().node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), 100_000, TEST_FINAL_CLTV, &logger).unwrap();
+               nodes[0].node.send_spontaneous_payment(&route, Some(payment_preimage)).unwrap();
+               check_added_monitors!(nodes[0], 1);
+               let mut events = nodes[0].node.get_and_clear_pending_msg_events();
+               assert_eq!(events.len(), 1);
+               let ev = events.drain(..).next().unwrap();
+               let payment_event = SendEvent::from_event(ev);
+               nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
+               check_added_monitors!(nodes[1], 0);
+               commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
+               expect_pending_htlcs_forwardable!(nodes[1]);
+               expect_pending_htlcs_forwardable!(nodes[1]);
+               check_added_monitors!(nodes[1], 1);
+               let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
+               assert!(updates.update_add_htlcs.is_empty());
+               assert!(updates.update_fulfill_htlcs.is_empty());
+               assert_eq!(updates.update_fail_htlcs.len(), 1);
+               assert!(updates.update_fail_malformed_htlcs.is_empty());
+               assert!(updates.update_fee.is_none());
+               nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fail_htlcs[0]);
+               commitment_signed_dance!(nodes[0], nodes[1], updates.commitment_signed, true, true);
+               expect_payment_failed!(nodes[0], payment_hash, true);
+
+               // Finally, claim the original payment.
+               claim_payment(&nodes[0], &expected_route, payment_preimage);
+
+               // To start (2), send a keysend payment but don't claim it.
+               let payment_preimage = PaymentPreimage([42; 32]);
+               let route = get_route(&nodes[0].node.get_our_node_id(), &nodes[0].net_graph_msg_handler.network_graph.read().unwrap(), &expected_route.last().unwrap().node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), 100_000, TEST_FINAL_CLTV, &logger).unwrap();
+               let payment_hash = nodes[0].node.send_spontaneous_payment(&route, Some(payment_preimage)).unwrap();
+               check_added_monitors!(nodes[0], 1);
+               let mut events = nodes[0].node.get_and_clear_pending_msg_events();
+               assert_eq!(events.len(), 1);
+               let event = events.pop().unwrap();
+               let path = vec![&nodes[1]];
+               pass_along_path(&nodes[0], &path, 100_000, payment_hash, None, event, true, Some(payment_preimage));
+
+               // Next, attempt a regular payment and make sure it fails.
+               let payment_secret = PaymentSecret([43; 32]);
+               nodes[0].node.send_payment(&route, payment_hash, &Some(payment_secret)).unwrap();
+               check_added_monitors!(nodes[0], 1);
+               let mut events = nodes[0].node.get_and_clear_pending_msg_events();
+               assert_eq!(events.len(), 1);
+               let ev = events.drain(..).next().unwrap();
+               let payment_event = SendEvent::from_event(ev);
+               nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
+               check_added_monitors!(nodes[1], 0);
+               commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
+               expect_pending_htlcs_forwardable!(nodes[1]);
+               expect_pending_htlcs_forwardable!(nodes[1]);
+               check_added_monitors!(nodes[1], 1);
+               let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
+               assert!(updates.update_add_htlcs.is_empty());
+               assert!(updates.update_fulfill_htlcs.is_empty());
+               assert_eq!(updates.update_fail_htlcs.len(), 1);
+               assert!(updates.update_fail_malformed_htlcs.is_empty());
+               assert!(updates.update_fee.is_none());
+               nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fail_htlcs[0]);
+               commitment_signed_dance!(nodes[0], nodes[1], updates.commitment_signed, true, true);
+               expect_payment_failed!(nodes[0], payment_hash, true);
+
+               // Finally, succeed the keysend payment.
+               claim_payment(&nodes[0], &expected_route, payment_preimage);
+       }
+
+       #[test]
+       fn test_keysend_hash_mismatch() {
+               // Test that if we receive a keysend `update_add_htlc` msg, we fail as expected if the keysend
+               // preimage doesn't match the msg's payment hash.
+               let chanmon_cfgs = create_chanmon_cfgs(2);
+               let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+               let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+               let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+               let payer_pubkey = nodes[0].node.get_our_node_id();
+               let payee_pubkey = nodes[1].node.get_our_node_id();
+               nodes[0].node.peer_connected(&payee_pubkey, &msgs::Init { features: InitFeatures::known() });
+               nodes[1].node.peer_connected(&payer_pubkey, &msgs::Init { features: InitFeatures::known() });
+
+               let _chan = create_chan_between_nodes(&nodes[0], &nodes[1], InitFeatures::known(), InitFeatures::known());
+               let network_graph = nodes[0].net_graph_msg_handler.network_graph.read().unwrap();
+               let first_hops = nodes[0].node.list_usable_channels();
+               let route = get_keysend_route(&payer_pubkey, &network_graph, &payee_pubkey,
+                                  Some(&first_hops.iter().collect::<Vec<_>>()), &vec![], 10000, 40,
+                                  nodes[0].logger).unwrap();
+
+               let test_preimage = PaymentPreimage([42; 32]);
+               let mismatch_payment_hash = PaymentHash([43; 32]);
+               let _ = nodes[0].node.send_payment_internal(&route, mismatch_payment_hash, &None, Some(test_preimage)).unwrap();
+               check_added_monitors!(nodes[0], 1);
+
+               let updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
+               assert_eq!(updates.update_add_htlcs.len(), 1);
+               assert!(updates.update_fulfill_htlcs.is_empty());
+               assert!(updates.update_fail_htlcs.is_empty());
+               assert!(updates.update_fail_malformed_htlcs.is_empty());
+               assert!(updates.update_fee.is_none());
+               nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);
+
+               nodes[1].logger.assert_log_contains("lightning::ln::channelmanager".to_string(), "Payment preimage didn't match payment hash".to_string(), 1);
+       }
+
+       #[test]
+       fn test_keysend_msg_with_secret_err() {
+               // Test that we error as expected if we receive a keysend payment that includes a payment secret.
+               let chanmon_cfgs = create_chanmon_cfgs(2);
+               let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+               let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+               let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+               let payer_pubkey = nodes[0].node.get_our_node_id();
+               let payee_pubkey = nodes[1].node.get_our_node_id();
+               nodes[0].node.peer_connected(&payee_pubkey, &msgs::Init { features: InitFeatures::known() });
+               nodes[1].node.peer_connected(&payer_pubkey, &msgs::Init { features: InitFeatures::known() });
+
+               let _chan = create_chan_between_nodes(&nodes[0], &nodes[1], InitFeatures::known(), InitFeatures::known());
+               let network_graph = nodes[0].net_graph_msg_handler.network_graph.read().unwrap();
+               let first_hops = nodes[0].node.list_usable_channels();
+               let route = get_keysend_route(&payer_pubkey, &network_graph, &payee_pubkey,
+                                  Some(&first_hops.iter().collect::<Vec<_>>()), &vec![], 10000, 40,
+                                  nodes[0].logger).unwrap();
+
+               let test_preimage = PaymentPreimage([42; 32]);
+               let test_secret = PaymentSecret([43; 32]);
+               let payment_hash = PaymentHash(Sha256::hash(&test_preimage.0).into_inner());
+               let _ = nodes[0].node.send_payment_internal(&route, payment_hash, &Some(test_secret), Some(test_preimage)).unwrap();
+               check_added_monitors!(nodes[0], 1);
+
+               let updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
+               assert_eq!(updates.update_add_htlcs.len(), 1);
+               assert!(updates.update_fulfill_htlcs.is_empty());
+               assert!(updates.update_fail_htlcs.is_empty());
+               assert!(updates.update_fail_malformed_htlcs.is_empty());
+               assert!(updates.update_fee.is_none());
+               nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);
+
+               nodes[1].logger.assert_log_contains("lightning::ln::channelmanager".to_string(), "We don't support MPP keysend payments".to_string(), 1);
+       }
 }
 
 #[cfg(all(any(test, feature = "_test_utils"), feature = "unstable"))]
@@ -4878,13 +5518,13 @@ pub mod bench {
        use routing::router::get_route;
        use util::test_utils;
        use util::config::UserConfig;
-       use util::events::{Event, EventsProvider, MessageSendEvent, MessageSendEventsProvider};
+       use util::events::{Event, MessageSendEvent, MessageSendEventsProvider, PaymentPurpose};
 
        use bitcoin::hashes::Hash;
        use bitcoin::hashes::sha256::Hash as Sha256;
        use bitcoin::{Block, BlockHeader, Transaction, TxOut};
 
-       use std::sync::Mutex;
+       use sync::{Arc, Mutex};
 
        use test::Bencher;
 
@@ -4910,8 +5550,8 @@ pub mod bench {
                let network = bitcoin::Network::Testnet;
                let genesis_hash = bitcoin::blockdata::constants::genesis_block(network).header.block_hash();
 
-               let tx_broadcaster = test_utils::TestBroadcaster{txn_broadcasted: Mutex::new(Vec::new())};
-               let fee_estimator = test_utils::TestFeeEstimator { sat_per_kw: 253 };
+               let tx_broadcaster = test_utils::TestBroadcaster{txn_broadcasted: Mutex::new(Vec::new()), blocks: Arc::new(Mutex::new(Vec::new()))};
+               let fee_estimator = test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) };
 
                let mut config: UserConfig = Default::default();
                config.own_channel_config.minimum_depth = 1;
@@ -4961,7 +5601,19 @@ pub mod bench {
                Listen::block_connected(&node_b, &block, 1);
 
                node_a.handle_funding_locked(&node_b.get_our_node_id(), &get_event_msg!(node_b_holder, MessageSendEvent::SendFundingLocked, node_a.get_our_node_id()));
-               node_b.handle_funding_locked(&node_a.get_our_node_id(), &get_event_msg!(node_a_holder, MessageSendEvent::SendFundingLocked, node_b.get_our_node_id()));
+               let msg_events = node_a.get_and_clear_pending_msg_events();
+               assert_eq!(msg_events.len(), 2);
+               match msg_events[0] {
+                       MessageSendEvent::SendFundingLocked { ref msg, .. } => {
+                               node_b.handle_funding_locked(&node_a.get_our_node_id(), msg);
+                               get_event_msg!(node_b_holder, MessageSendEvent::SendChannelUpdate, node_a.get_our_node_id());
+                       },
+                       _ => panic!(),
+               }
+               match msg_events[1] {
+                       MessageSendEvent::SendChannelUpdate { .. } => {},
+                       _ => panic!(),
+               }
 
                let dummy_graph = NetworkGraph::new(genesis_hash);