Avoid unnecessarily retaking the `per_peer_state` lock immediately
[rust-lightning] / lightning / src / ln / channelmanager.rs
index 7e6d72a8f5156d23fc447ad651d4965506ac61ad..a76e8b71b1ef35f5f3147e82d1387bdae6a15ba5 100644 (file)
@@ -46,12 +46,18 @@ use crate::ln::channel::{Channel, ChannelError, ChannelUpdateStatus, UpdateFulfi
 use crate::ln::features::{ChannelFeatures, ChannelTypeFeatures, InitFeatures, NodeFeatures};
 #[cfg(any(feature = "_test_utils", test))]
 use crate::ln::features::InvoiceFeatures;
-use crate::routing::router::{InFlightHtlcs, PaymentParameters, Route, RouteHop, RoutePath, RouteParameters};
+use crate::routing::gossip::NetworkGraph;
+use crate::routing::router::{DefaultRouter, InFlightHtlcs, PaymentParameters, Route, RouteHop, RoutePath, Router};
+use crate::routing::scoring::ProbabilisticScorer;
 use crate::ln::msgs;
 use crate::ln::onion_utils;
+use crate::ln::onion_utils::HTLCFailReason;
 use crate::ln::msgs::{ChannelMessageHandler, DecodeError, LightningError, MAX_VALUE_MSAT};
+#[cfg(test)]
+use crate::ln::outbound_payment;
+use crate::ln::outbound_payment::{OutboundPayments, PendingOutboundPayment};
 use crate::ln::wire::Encode;
-use crate::chain::keysinterface::{Sign, KeysInterface, KeysManager, Recipient};
+use crate::chain::keysinterface::{EntropySource, KeysInterface, KeysManager, NodeSigner, Recipient, Sign, SignerProvider};
 use crate::util::config::{UserConfig, ChannelConfig};
 use crate::util::events::{Event, EventHandler, EventsProvider, MessageSendEvent, MessageSendEventsProvider, ClosureReason, HTLCDestination};
 use crate::util::events;
@@ -71,6 +77,9 @@ use core::sync::atomic::{AtomicUsize, Ordering};
 use core::time::Duration;
 use core::ops::Deref;
 
+// Re-export this for use in the public API.
+pub use crate::ln::outbound_payment::PaymentSendFailure;
+
 // We hold various information about HTLC relay in the HTLC objects in Channel itself:
 //
 // Upon receipt of an HTLC from a peer, we'll give it a PendingHTLCStatus indicating if it should
@@ -276,47 +285,18 @@ impl HTLCSource {
        }
 }
 
-#[derive(Clone)] // See Channel::revoke_and_ack for why, tl;dr: Rust bug
-pub(super) enum HTLCFailReason {
-       LightningError {
-               err: msgs::OnionErrorPacket,
-       },
-       Reason {
-               failure_code: u16,
-               data: Vec<u8>,
-       }
-}
-
-impl HTLCFailReason {
-       pub(super) fn reason(failure_code: u16, data: Vec<u8>) -> Self {
-               Self::Reason { failure_code, data }
-       }
-
-       pub(super) fn from_failure_code(failure_code: u16) -> Self {
-               Self::Reason { failure_code, data: Vec::new() }
-       }
-}
-
 struct ReceiveError {
        err_code: u16,
        err_data: Vec<u8>,
        msg: &'static str,
 }
 
-/// Return value for claim_funds_from_hop
-enum ClaimFundsFromHop {
-       PrevHopForceClosed,
-       MonitorUpdateFail(PublicKey, MsgHandleErrInternal, Option<u64>),
-       Success(u64),
-       DuplicateClaim,
-}
-
 type ShutdownResult = (Option<(OutPoint, ChannelMonitorUpdate)>, Vec<(HTLCSource, PaymentHash, PublicKey, [u8; 32])>);
 
-/// Error type returned across the channel_state mutex boundary. When an Err is generated for a
+/// Error type returned across the peer_state mutex boundary. When an Err is generated for a
 /// Channel, we generally end up with a ChannelError::Close for which we have to close the channel
 /// immediately (ie with no further calls on it made). Thus, this step happens inside a
-/// channel_state lock. We then return the set of things that need to be done outside the lock in
+/// peer_state lock. We then return the set of things that need to be done outside the lock in
 /// this struct and call handle_error!() on it.
 struct MsgHandleErrInternal {
@@ -455,8 +435,7 @@ struct ClaimablePayments {
 }
 
 // Note this is only exposed in cfg(test):
-pub(super) struct ChannelHolder<Signer: Sign> {
-       pub(super) by_id: HashMap<[u8; 32], Channel<Signer>>,
+pub(super) struct ChannelHolder {
        /// Messages to send to peers - pushed to in the same lock that they are generated in (except
        /// for broadcast messages, where ordering isn't as strict).
        pub(super) pending_msg_events: Vec<MessageSendEvent>,
@@ -471,9 +450,25 @@ enum BackgroundEvent {
        ClosingMonitorUpdate((OutPoint, ChannelMonitorUpdate)),
 }
 
-/// State we hold per-peer. In the future we should put channels in here, but for now we only hold
-/// the latest Init features we heard from the peer.
-struct PeerState {
+pub(crate) enum MonitorUpdateCompletionAction {
+       /// Indicates that a payment ultimately destined for us was claimed and we should emit an
+       /// [`events::Event::PaymentClaimed`] to the user if we haven't yet generated such an event for
+       /// this payment. Note that this is only best-effort: on restart, a duplicate of this event
+       /// may still be generated.
+       PaymentClaimed { payment_hash: PaymentHash },
+       /// Indicates an [`events::Event`] should be surfaced to the user.
+       EmitEvent { event: events::Event },
+}
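
These actions are queued while a `ChannelMonitorUpdate` is in flight and drained once it completes. A minimal sketch of such a dispatcher, assuming the `pending_events` queue defined on `ChannelManager` later in this diff; `take_claimed_payment` is a hypothetical helper, not part of this patch:

```rust
// Hedged sketch: drain completion actions after a monitor update completes.
// `take_claimed_payment` is hypothetical; it stands in for removing the
// pending claim so that, in normal operation, at most one PaymentClaimed
// event fires per payment (duplicates remain possible across restarts).
fn run_completion_actions(&self, actions: Vec<MonitorUpdateCompletionAction>) {
    let mut pending_events = self.pending_events.lock().unwrap();
    for action in actions {
        match action {
            MonitorUpdateCompletionAction::PaymentClaimed { payment_hash } => {
                if let Some(event) = self.take_claimed_payment(&payment_hash) {
                    pending_events.push(event);
                }
            },
            MonitorUpdateCompletionAction::EmitEvent { event } => pending_events.push(event),
        }
    }
}
```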
+
+/// State we hold per-peer.
+pub(super) struct PeerState<Signer: Sign> {
+       /// `temporary_channel_id` or `channel_id` -> `channel`.
+       ///
+       /// Holds all channels where the peer is the counterparty. Once a channel has been assigned a
+       /// `channel_id`, its entry in this map is re-keyed from the `temporary_channel_id` to the
+       /// final `channel_id`.
+       pub(super) channel_by_id: HashMap<[u8; 32], Channel<Signer>>,
+       /// The latest `InitFeatures` we heard from the peer.
        latest_features: InitFeatures,
 }
 
@@ -498,182 +493,39 @@ struct PendingInboundPayment {
        min_value_msat: Option<u64>,
 }
 
-/// Stores the session_priv for each part of a payment that is still pending. For versions 0.0.102
-/// and later, also stores information for retrying the payment.
-pub(crate) enum PendingOutboundPayment {
-       Legacy {
-               session_privs: HashSet<[u8; 32]>,
-       },
-       Retryable {
-               session_privs: HashSet<[u8; 32]>,
-               payment_hash: PaymentHash,
-               payment_secret: Option<PaymentSecret>,
-               pending_amt_msat: u64,
-               /// Used to track the fee paid. Only present if the payment was serialized on 0.0.103+.
-               pending_fee_msat: Option<u64>,
-               /// The total payment amount across all paths, used to verify that a retry is not overpaying.
-               total_msat: u64,
-               /// Our best known block height at the time this payment was initiated.
-               starting_block_height: u32,
-       },
-       /// When a pending payment is fulfilled, we continue tracking it until all pending HTLCs have
-       /// been resolved. This ensures we don't look up pending payments in ChannelMonitors on restart
-       /// and add a pending payment that was already fulfilled.
-       Fulfilled {
-               session_privs: HashSet<[u8; 32]>,
-               payment_hash: Option<PaymentHash>,
-               timer_ticks_without_htlcs: u8,
-       },
-       /// When a payer gives up trying to retry a payment, they inform us, letting us generate a
-       /// `PaymentFailed` event when all HTLCs have irrevocably failed. This avoids a number of race
-       /// conditions in MPP-aware payment retriers (1), where the possibility of multiple
-       /// `PaymentPathFailed` events with `all_paths_failed` can be pending at once, confusing a
-       /// downstream event handler as to when a payment has actually failed.
-       ///
-       /// (1) https://github.com/lightningdevkit/rust-lightning/issues/1164
-       Abandoned {
-               session_privs: HashSet<[u8; 32]>,
-               payment_hash: PaymentHash,
-       },
-}
-
-impl PendingOutboundPayment {
-       fn is_fulfilled(&self) -> bool {
-               match self {
-                       PendingOutboundPayment::Fulfilled { .. } => true,
-                       _ => false,
-               }
-       }
-       fn abandoned(&self) -> bool {
-               match self {
-                       PendingOutboundPayment::Abandoned { .. } => true,
-                       _ => false,
-               }
-       }
-       fn get_pending_fee_msat(&self) -> Option<u64> {
-               match self {
-                       PendingOutboundPayment::Retryable { pending_fee_msat, .. } => pending_fee_msat.clone(),
-                       _ => None,
-               }
-       }
-
-       fn payment_hash(&self) -> Option<PaymentHash> {
-               match self {
-                       PendingOutboundPayment::Legacy { .. } => None,
-                       PendingOutboundPayment::Retryable { payment_hash, .. } => Some(*payment_hash),
-                       PendingOutboundPayment::Fulfilled { payment_hash, .. } => *payment_hash,
-                       PendingOutboundPayment::Abandoned { payment_hash, .. } => Some(*payment_hash),
-               }
-       }
-
-       fn mark_fulfilled(&mut self) {
-               let mut session_privs = HashSet::new();
-               core::mem::swap(&mut session_privs, match self {
-                       PendingOutboundPayment::Legacy { session_privs } |
-                       PendingOutboundPayment::Retryable { session_privs, .. } |
-                       PendingOutboundPayment::Fulfilled { session_privs, .. } |
-                       PendingOutboundPayment::Abandoned { session_privs, .. }
-                               => session_privs,
-               });
-               let payment_hash = self.payment_hash();
-               *self = PendingOutboundPayment::Fulfilled { session_privs, payment_hash, timer_ticks_without_htlcs: 0 };
-       }
-
-       fn mark_abandoned(&mut self) -> Result<(), ()> {
-               let mut session_privs = HashSet::new();
-               let our_payment_hash;
-               core::mem::swap(&mut session_privs, match self {
-                       PendingOutboundPayment::Legacy { .. } |
-                       PendingOutboundPayment::Fulfilled { .. } =>
-                               return Err(()),
-                       PendingOutboundPayment::Retryable { session_privs, payment_hash, .. } |
-                       PendingOutboundPayment::Abandoned { session_privs, payment_hash, .. } => {
-                               our_payment_hash = *payment_hash;
-                               session_privs
-                       },
-               });
-               *self = PendingOutboundPayment::Abandoned { session_privs, payment_hash: our_payment_hash };
-               Ok(())
-       }
-
-       /// panics if path is None and !self.is_fulfilled
-       fn remove(&mut self, session_priv: &[u8; 32], path: Option<&Vec<RouteHop>>) -> bool {
-               let remove_res = match self {
-                       PendingOutboundPayment::Legacy { session_privs } |
-                       PendingOutboundPayment::Retryable { session_privs, .. } |
-                       PendingOutboundPayment::Fulfilled { session_privs, .. } |
-                       PendingOutboundPayment::Abandoned { session_privs, .. } => {
-                               session_privs.remove(session_priv)
-                       }
-               };
-               if remove_res {
-                       if let PendingOutboundPayment::Retryable { ref mut pending_amt_msat, ref mut pending_fee_msat, .. } = self {
-                               let path = path.expect("Fulfilling a payment should always come with a path");
-                               let path_last_hop = path.last().expect("Outbound payments must have had a valid path");
-                               *pending_amt_msat -= path_last_hop.fee_msat;
-                               if let Some(fee_msat) = pending_fee_msat.as_mut() {
-                                       *fee_msat -= path.get_path_fees();
-                               }
-                       }
-               }
-               remove_res
-       }
-
-       fn insert(&mut self, session_priv: [u8; 32], path: &Vec<RouteHop>) -> bool {
-               let insert_res = match self {
-                       PendingOutboundPayment::Legacy { session_privs } |
-                       PendingOutboundPayment::Retryable { session_privs, .. } => {
-                               session_privs.insert(session_priv)
-                       }
-                       PendingOutboundPayment::Fulfilled { .. } => false,
-                       PendingOutboundPayment::Abandoned { .. } => false,
-               };
-               if insert_res {
-                       if let PendingOutboundPayment::Retryable { ref mut pending_amt_msat, ref mut pending_fee_msat, .. } = self {
-                               let path_last_hop = path.last().expect("Outbound payments must have had a valid path");
-                               *pending_amt_msat += path_last_hop.fee_msat;
-                               if let Some(fee_msat) = pending_fee_msat.as_mut() {
-                                       *fee_msat += path.get_path_fees();
-                               }
-                       }
-               }
-               insert_res
-       }
-
-       fn remaining_parts(&self) -> usize {
-               match self {
-                       PendingOutboundPayment::Legacy { session_privs } |
-                       PendingOutboundPayment::Retryable { session_privs, .. } |
-                       PendingOutboundPayment::Fulfilled { session_privs, .. } |
-                       PendingOutboundPayment::Abandoned { session_privs, .. } => {
-                               session_privs.len()
-                       }
-               }
-       }
-}
-
 /// SimpleArcChannelManager is useful when you need a ChannelManager with a static lifetime, e.g.
 /// when you're using lightning-net-tokio (since tokio::spawn requires parameters with static
 /// lifetimes). Other times you can afford a reference, which is more efficient, in which case
 /// SimpleRefChannelManager is the more appropriate type. Defining these type aliases prevents
-/// issues such as overly long function definitions. Note that the ChannelManager can take any
-/// type that implements KeysInterface for its keys manager, but this type alias chooses the
-/// concrete type of the KeysManager.
+/// issues such as overly long function definitions. Note that the ChannelManager can take any type
+/// that implements KeysInterface or Router for its keys manager and router, respectively, but this
+/// type alias chooses the concrete types of KeysManager and DefaultRouter.
 ///
 /// (C-not exported) as Arcs don't make sense in bindings
-pub type SimpleArcChannelManager<M, T, F, L> = ChannelManager<Arc<M>, Arc<T>, Arc<KeysManager>, Arc<F>, Arc<L>>;
+pub type SimpleArcChannelManager<M, T, F, L> = ChannelManager<
+       Arc<M>,
+       Arc<T>,
+       Arc<KeysManager>,
+       Arc<F>,
+       Arc<DefaultRouter<
+               Arc<NetworkGraph<Arc<L>>>,
+               Arc<L>,
+               Arc<Mutex<ProbabilisticScorer<Arc<NetworkGraph<Arc<L>>>, Arc<L>>>>
+       >>,
+       Arc<L>
+>;
 
 /// SimpleRefChannelManager is a type alias for a ChannelManager reference, and is the reference
 /// counterpart to the SimpleArcChannelManager type alias. Use this type by default when you don't
 /// need a ChannelManager with a static lifetime. You'll need a static lifetime in cases such as
 /// usage of lightning-net-tokio (since tokio::spawn requires parameters with static lifetimes).
 /// But if this is not necessary, using a reference is more efficient. Defining these type aliases
-/// helps with issues such as long function definitions. Note that the ChannelManager can take any
-/// type that implements KeysInterface for its keys manager, but this type alias chooses the
-/// concrete type of the KeysManager.
+/// helps with issues such as overly long function definitions. Note that the ChannelManager can
+/// take any type that implements KeysInterface or Router for its keys manager and router,
+/// respectively, but this type alias chooses the concrete types of KeysManager and DefaultRouter.
 ///
 /// (C-not exported) as Arcs don't make sense in bindings
-pub type SimpleRefChannelManager<'a, 'b, 'c, 'd, 'e, M, T, F, L> = ChannelManager<&'a M, &'b T, &'c KeysManager, &'d F, &'e L>;
+pub type SimpleRefChannelManager<'a, 'b, 'c, 'd, 'e, 'f, 'g, 'h, M, T, F, L> = ChannelManager<&'a M, &'b T, &'c KeysManager, &'d F, &'e DefaultRouter<&'f NetworkGraph<&'g L>, &'g L, &'h Mutex<ProbabilisticScorer<&'f NetworkGraph<&'g L>, &'g L>>>, &'g L>;
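
A sketch of what both aliases buy: with `SimpleArcChannelManager`, downstream signatures name four concrete types instead of spelling out all six `ChannelManager` parameters plus the nested `DefaultRouter`/`ProbabilisticScorer` types. `MyChainMonitor`, `MyBroadcaster`, `MyFeeEstimator` and `MyLogger` are hypothetical trait implementations:

```rust
// Illustrative only; the concrete types are stand-ins for implementations of
// chain::Watch, BroadcasterInterface, FeeEstimator and Logger respectively.
type NodeManager = SimpleArcChannelManager<MyChainMonitor, MyBroadcaster, MyFeeEstimator, MyLogger>;

// Because every inner reference in the alias is an Arc, an Arc<NodeManager>
// satisfies the 'static bound that tokio::spawn places on its argument.
fn hand_to_background_task(manager: Arc<NodeManager>) {
    let _ = manager;
}
```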
 
 /// Manager which keeps track of a number of channels and sends messages to the appropriate
 /// channel, also tracking HTLC preimages and forwarding onion packets appropriately.
@@ -731,36 +583,42 @@ pub type SimpleRefChannelManager<'a, 'b, 'c, 'd, 'e, M, T, F, L> = ChannelManage
 //  |   |
 //  |   |__`claimable_payments`
 //  |   |
-//  |   |__`pending_outbound_payments`
+//  |   |__`pending_outbound_payments` // This field's struct contains a map of pending outbounds
 //  |       |
 //  |       |__`channel_state`
 //  |           |
-//  |           |__`id_to_peer`
-//  |           |
-//  |           |__`short_to_chan_info`
-//  |           |
 //  |           |__`per_peer_state`
 //  |               |
-//  |               |__`outbound_scid_aliases`
-//  |               |
-//  |               |__`best_block`
-//  |               |
-//  |               |__`pending_events`
+//  |               |__`peer_state`
+//  |                   |
+//  |                   |__`id_to_peer`
+//  |                   |
+//  |                   |__`short_to_chan_info`
+//  |                   |
+//  |                   |__`outbound_scid_aliases`
+//  |                   |
+//  |                   |__`best_block`
 //  |                   |
-//  |                   |__`pending_background_events`
+//  |                   |__`pending_events`
+//  |                       |
+//  |                       |__`pending_background_events`
 //
-pub struct ChannelManager<M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
-       where M::Target: chain::Watch<<K::Target as KeysInterface>::Signer>,
-        T::Target: BroadcasterInterface,
-        K::Target: KeysInterface,
-        F::Target: FeeEstimator,
-                               L::Target: Logger,
+pub struct ChannelManager<M: Deref, T: Deref, K: Deref, F: Deref, R: Deref, L: Deref>
+where
+       M::Target: chain::Watch<<K::Target as SignerProvider>::Signer>,
+       T::Target: BroadcasterInterface,
+       K::Target: KeysInterface,
+       F::Target: FeeEstimator,
+       R::Target: Router,
+       L::Target: Logger,
 {
        default_configuration: UserConfig,
        genesis_hash: BlockHash,
        fee_estimator: LowerBoundedFeeEstimator<F>,
        chain_monitor: M,
        tx_broadcaster: T,
+       #[allow(unused)]
+       router: R,
 
        /// See `ChannelManager` struct-level documentation for lock order requirements.
        #[cfg(test)]
@@ -771,9 +629,9 @@ pub struct ChannelManager<M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
 
        /// See `ChannelManager` struct-level documentation for lock order requirements.
        #[cfg(any(test, feature = "_test_utils"))]
-       pub(super) channel_state: Mutex<ChannelHolder<<K::Target as KeysInterface>::Signer>>,
+       pub(super) channel_state: Mutex<ChannelHolder>,
        #[cfg(not(any(test, feature = "_test_utils")))]
-       channel_state: Mutex<ChannelHolder<<K::Target as KeysInterface>::Signer>>,
+       channel_state: Mutex<ChannelHolder>,
 
        /// Storage for PaymentSecrets and any requirements on future inbound payments before we will
        /// expose them to users via a PaymentClaimable event. HTLCs which do not meet the requirements
@@ -795,7 +653,7 @@ pub struct ChannelManager<M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
        /// See `PendingOutboundPayment` documentation for more info.
        ///
        /// See `ChannelManager` struct-level documentation for lock order requirements.
-       pending_outbound_payments: Mutex<HashMap<PaymentId, PendingOutboundPayment>>,
+       pending_outbound_payments: OutboundPayments,
 
        /// SCID/SCID Alias -> forward infos. Key of 0 means payments received.
        ///
@@ -890,15 +748,24 @@ pub struct ChannelManager<M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
        /// very far in the past, and can only ever be up to two hours in the future.
        highest_seen_timestamp: AtomicUsize,
 
-       /// The bulk of our storage will eventually be here (channels and message queues and the like).
+       /// The bulk of our storage will eventually be here (message queues and the like). Currently
+       /// the `per_peer_state` stores our channels on a per-peer basis, as well as the peer's latest
+       /// features.
+       ///
        /// If we are connected to a peer we always at least have an entry here, even if no channels
        /// are currently open with that peer.
+       ///
        /// Because adding or removing an entry is rare, we usually take an outer read lock and then
-       /// operate on the inner value freely. Sadly, this prevents parallel operation when opening a
-       /// new channel.
+       /// operate on the inner value freely. This allows parallel per-peer operation on
+       /// channels.
+       ///
+       /// Note that the same thread must never acquire two inner `PeerState` locks at the same time.
        ///
        /// See `ChannelManager` struct-level documentation for lock order requirements.
-       per_peer_state: RwLock<HashMap<PublicKey, Mutex<PeerState>>>,
+       #[cfg(not(any(test, feature = "_test_utils")))]
+       per_peer_state: FairRwLock<HashMap<PublicKey, Mutex<PeerState<<K::Target as SignerProvider>::Signer>>>>,
+       #[cfg(any(test, feature = "_test_utils"))]
+       pub(super) per_peer_state: FairRwLock<HashMap<PublicKey, Mutex<PeerState<<K::Target as SignerProvider>::Signer>>>>,
 
        /// See `ChannelManager` struct-level documentation for lock order requirements.
        pending_events: Mutex<Vec<events::Event>>,
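
The lock-order tree and the `per_peer_state` docs above pin down a single legal acquisition pattern, which the handlers in the rest of this patch follow. A minimal sketch, mirroring the call sites below rather than introducing new API:

```rust
// Outer read lock first, then exactly one inner per-peer mutex; the same
// thread must never hold two inner `PeerState` locks at once.
let per_peer_state = self.per_peer_state.read().unwrap();
if let Some(peer_state_mutex) = per_peer_state.get(&counterparty_node_id) {
    let mut peer_state_lock = peer_state_mutex.lock().unwrap();
    let peer_state = &mut *peer_state_lock;
    // ... work with peer_state.channel_by_id; both locks release on drop ...
}
```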
@@ -1041,7 +908,7 @@ pub(crate) const MPP_TIMEOUT_TICKS: u8 = 3;
 
 /// The number of ticks of [`ChannelManager::timer_tick_occurred`] until we time-out the
 /// idempotency of payments by [`PaymentId`]. See
-/// [`ChannelManager::remove_stale_resolved_payments`].
+/// [`OutboundPayments::remove_stale_resolved_payments`].
 pub(crate) const IDEMPOTENCY_TIMEOUT_TICKS: u8 = 7;
 
 /// Information needed for constructing an invoice route hint for this channel.
@@ -1270,70 +1137,6 @@ impl ChannelDetails {
        }
 }
 
-/// If a payment fails to send, it can be in one of several states. This enum is returned as the
-/// Err() type describing which state the payment is in, see the description of individual enum
-/// states for more.
-#[derive(Clone, Debug)]
-pub enum PaymentSendFailure {
-       /// A parameter which was passed to send_payment was invalid, preventing us from attempting to
-       /// send the payment at all.
-       ///
-       /// You can freely resend the payment in full (with the parameter error fixed).
-       ///
-       /// Because the payment failed outright, no payment tracking is done, you do not need to call
-       /// [`ChannelManager::abandon_payment`] and [`ChannelManager::retry_payment`] will *not* work
-       /// for this payment.
-       ParameterError(APIError),
-       /// A parameter in a single path which was passed to send_payment was invalid, preventing us
-       /// from attempting to send the payment at all.
-       ///
-       /// You can freely resend the payment in full (with the parameter error fixed).
-       ///
-       /// The results here are ordered the same as the paths in the route object which was passed to
-       /// send_payment.
-       ///
-       /// Because the payment failed outright, no payment tracking is done, you do not need to call
-       /// [`ChannelManager::abandon_payment`] and [`ChannelManager::retry_payment`] will *not* work
-       /// for this payment.
-       PathParameterError(Vec<Result<(), APIError>>),
-       /// All paths which were attempted failed to send, with no channel state change taking place.
-       /// You can freely resend the payment in full (though you probably want to do so over different
-       /// paths than the ones selected).
-       ///
-       /// Because the payment failed outright, no payment tracking is done, you do not need to call
-       /// [`ChannelManager::abandon_payment`] and [`ChannelManager::retry_payment`] will *not* work
-       /// for this payment.
-       AllFailedResendSafe(Vec<APIError>),
-       /// Indicates that a payment for the provided [`PaymentId`] is already in-flight and has not
-       /// yet completed (i.e. generated an [`Event::PaymentSent`]) or been abandoned (via
-       /// [`ChannelManager::abandon_payment`]).
-       ///
-       /// [`Event::PaymentSent`]: events::Event::PaymentSent
-       DuplicatePayment,
-       /// Some paths which were attempted failed to send, though possibly not all. At least some
-       /// paths have irrevocably committed to the HTLC and retrying the payment in full would result
-       /// in over-/re-payment.
-       ///
-       /// The results here are ordered the same as the paths in the route object which was passed to
-       /// send_payment, and any `Err`s which are not [`APIError::MonitorUpdateInProgress`] can be
-       /// safely retried via [`ChannelManager::retry_payment`].
-       ///
-       /// Any entries which contain `Err(APIError::MonitorUpdateInprogress)` or `Ok(())` MUST NOT be
-       /// retried as they will result in over-/re-payment. These HTLCs all either successfully sent
-       /// (in the case of `Ok(())`) or will send once a [`MonitorEvent::Completed`] is provided for
-       /// the next-hop channel with the latest update_id.
-       PartialFailure {
-               /// The errors themselves, in the same order as the route hops.
-               results: Vec<Result<(), APIError>>,
-               /// If some paths failed without irrevocably committing to the new HTLC(s), this will
-               /// contain a [`RouteParameters`] object which can be used to calculate a new route that
-               /// will pay all remaining unpaid balance.
-               failed_paths_retry: Option<RouteParameters>,
-               /// The payment id for the payment, which is now at least partially pending.
-               payment_id: PaymentId,
-       },
-}
-
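
The enum and its variant semantics move verbatim to `outbound_payment` (and are re-exported near the top of this file). As the removed docs spell out, the variants differ mainly in whether a resend is safe. A hedged caller-side sketch; `RetryDecision` is illustrative, not part of the API:

```rust
use lightning::ln::channelmanager::PaymentSendFailure;

/// Illustrative only: what a caller may safely do next for each variant.
enum RetryDecision { ResendInFull, RetryFailedPathsOnly, DoNotResend }

/// A sketch mapping each failure state to the retry behaviour the docs
/// above describe; assumes the `lightning` crate at this commit.
fn retry_decision(err: &PaymentSendFailure) -> RetryDecision {
    match err {
        // Nothing was committed on any path: fix parameters, resend in full.
        PaymentSendFailure::ParameterError(_)
        | PaymentSendFailure::PathParameterError(_)
        | PaymentSendFailure::AllFailedResendSafe(_) => RetryDecision::ResendInFull,
        // A payment with this PaymentId is already pending; resending would pay twice.
        PaymentSendFailure::DuplicatePayment => RetryDecision::DoNotResend,
        // Some HTLCs are irrevocably committed: only the failed remainder may be retried.
        PaymentSendFailure::PartialFailure { .. } => RetryDecision::RetryFailedPathsOnly,
    }
}
```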
 /// Route hints used in constructing invoices for [phantom node payents].
 ///
 /// [phantom node payments]: crate::chain::keysinterface::PhantomKeysManager
@@ -1588,12 +1391,14 @@ macro_rules! emit_channel_ready_event {
        }
 }
 
-impl<M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelManager<M, T, K, F, L>
-       where M::Target: chain::Watch<<K::Target as KeysInterface>::Signer>,
-        T::Target: BroadcasterInterface,
-        K::Target: KeysInterface,
-        F::Target: FeeEstimator,
-        L::Target: Logger,
+impl<M: Deref, T: Deref, K: Deref, F: Deref, R: Deref, L: Deref> ChannelManager<M, T, K, F, R, L>
+where
+       M::Target: chain::Watch<<K::Target as SignerProvider>::Signer>,
+       T::Target: BroadcasterInterface,
+       K::Target: KeysInterface,
+       F::Target: FeeEstimator,
+       R::Target: Router,
+       L::Target: Logger,
 {
        /// Constructs a new ChannelManager to hold several channels and route between them.
        ///
@@ -1605,7 +1410,7 @@ impl<M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelManager<M, T, K, F
        /// Users need to notify the new ChannelManager when a new block is connected or
        /// disconnected using its `block_connected` and `block_disconnected` methods, starting
        /// from after `params.latest_hash`.
-       pub fn new(fee_est: F, chain_monitor: M, tx_broadcaster: T, logger: L, keys_manager: K, config: UserConfig, params: ChainParameters) -> Self {
+       pub fn new(fee_est: F, chain_monitor: M, tx_broadcaster: T, router: R, logger: L, keys_manager: K, config: UserConfig, params: ChainParameters) -> Self {
                let mut secp_ctx = Secp256k1::new();
                secp_ctx.seeded_randomize(&keys_manager.get_secure_random_bytes());
                let inbound_pmt_key_material = keys_manager.get_inbound_payment_key_material();
@@ -1616,16 +1421,16 @@ impl<M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelManager<M, T, K, F
                        fee_estimator: LowerBoundedFeeEstimator::new(fee_est),
                        chain_monitor,
                        tx_broadcaster,
+                       router,
 
                        best_block: RwLock::new(params.best_block),
 
                        channel_state: Mutex::new(ChannelHolder{
-                               by_id: HashMap::new(),
                                pending_msg_events: Vec::new(),
                        }),
                        outbound_scid_aliases: Mutex::new(HashSet::new()),
                        pending_inbound_payments: Mutex::new(HashMap::new()),
-                       pending_outbound_payments: Mutex::new(HashMap::new()),
+                       pending_outbound_payments: OutboundPayments::new(),
                        forward_htlcs: Mutex::new(HashMap::new()),
                        claimable_payments: Mutex::new(ClaimablePayments { claimable_htlcs: HashMap::new(), pending_claiming_payments: HashMap::new() }),
                        pending_intercepted_htlcs: Mutex::new(HashMap::new()),
@@ -1643,7 +1448,7 @@ impl<M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelManager<M, T, K, F
 
                        highest_seen_timestamp: AtomicUsize::new(0),
 
-                       per_peer_state: RwLock::new(HashMap::new()),
+                       per_peer_state: FairRwLock::new(HashMap::new()),
 
                        pending_events: Mutex::new(Vec::new()),
                        pending_background_events: Mutex::new(Vec::new()),
@@ -1710,12 +1515,19 @@ impl<M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelManager<M, T, K, F
                        return Err(APIError::APIMisuseError { err: format!("Channel value must be at least 1000 satoshis. It was {}", channel_value_satoshis) });
                }
 
-               let channel = {
-                       let per_peer_state = self.per_peer_state.read().unwrap();
-                       match per_peer_state.get(&their_network_key) {
-                               Some(peer_state) => {
+               let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
+               // We want to make sure the lock is actually acquired by PersistenceNotifierGuard.
+               debug_assert!(&self.total_consistency_lock.try_write().is_err());
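
Why the `debug_assert` above holds: `PersistenceNotifierGuard::notify_on_drop` takes the read side of `total_consistency_lock`, so `try_write` on the same lock must fail while the guard lives. A standalone analogue with `std::sync::RwLock`:

```rust
use std::sync::RwLock;

fn main() {
    let total_consistency_lock = RwLock::new(());
    // Stands in for the guard returned by PersistenceNotifierGuard::notify_on_drop.
    let _read_guard = total_consistency_lock.read().unwrap();
    // With a reader outstanding, the write side cannot be acquired.
    assert!(total_consistency_lock.try_write().is_err());
}
```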
+
+               let mut channel_state = self.channel_state.lock().unwrap();
+               let per_peer_state = self.per_peer_state.read().unwrap();
+
+               match per_peer_state.get(&their_network_key) {
+                       None => return Err(APIError::ChannelUnavailable { err: format!("Not connected to node: {}", their_network_key) }),
+                       Some(peer_state_mutex) => {
+                               let mut peer_state = peer_state_mutex.lock().unwrap();
+                               let channel = {
                                        let outbound_scid_alias = self.create_and_insert_outbound_scid_alias();
-                                       let peer_state = peer_state.lock().unwrap();
                                        let their_features = &peer_state.latest_features;
                                        let config = if override_config.is_some() { override_config.as_ref().unwrap() } else { &self.default_configuration };
                                        match Channel::new_outbound(&self.fee_estimator, &self.keys_manager, their_network_key,
@@ -1728,92 +1540,91 @@ impl<M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelManager<M, T, K, F
                                                        return Err(e);
                                                },
                                        }
-                               },
-                               None => return Err(APIError::ChannelUnavailable { err: format!("Not connected to node: {}", their_network_key) }),
-                       }
-               };
-               let res = channel.get_open_channel(self.genesis_hash.clone());
-
-               let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
-               // We want to make sure the lock is actually acquired by PersistenceNotifierGuard.
-               debug_assert!(&self.total_consistency_lock.try_write().is_err());
+                               };
+                               let res = channel.get_open_channel(self.genesis_hash.clone());
 
-               let temporary_channel_id = channel.channel_id();
-               let mut channel_state = self.channel_state.lock().unwrap();
-               match channel_state.by_id.entry(temporary_channel_id) {
-                       hash_map::Entry::Occupied(_) => {
-                               if cfg!(fuzzing) {
-                                       return Err(APIError::APIMisuseError { err: "Fuzzy bad RNG".to_owned() });
-                               } else {
-                                       panic!("RNG is bad???");
+                               let temporary_channel_id = channel.channel_id();
+                               match peer_state.channel_by_id.entry(temporary_channel_id) {
+                                       hash_map::Entry::Occupied(_) => {
+                                               if cfg!(fuzzing) {
+                                                       return Err(APIError::APIMisuseError { err: "Fuzzy bad RNG".to_owned() });
+                                               } else {
+                                                       panic!("RNG is bad???");
+                                               }
+                                       },
+                                       hash_map::Entry::Vacant(entry) => { entry.insert(channel); }
                                }
+
+                               channel_state.pending_msg_events.push(events::MessageSendEvent::SendOpenChannel {
+                                       node_id: their_network_key,
+                                       msg: res,
+                               });
+                               Ok(temporary_channel_id)
                        },
-                       hash_map::Entry::Vacant(entry) => { entry.insert(channel); }
                }
-               channel_state.pending_msg_events.push(events::MessageSendEvent::SendOpenChannel {
-                       node_id: their_network_key,
-                       msg: res,
-               });
-               Ok(temporary_channel_id)
        }
 
-       fn list_channels_with_filter<Fn: FnMut(&(&[u8; 32], &Channel<<K::Target as KeysInterface>::Signer>)) -> bool>(&self, f: Fn) -> Vec<ChannelDetails> {
+       fn list_channels_with_filter<Fn: FnMut(&(&[u8; 32], &Channel<<K::Target as SignerProvider>::Signer>)) -> bool + Copy>(&self, f: Fn) -> Vec<ChannelDetails> {
                let mut res = Vec::new();
+               // Allocate our best estimate of the number of channels we have in the `res`
+               // Vec. Sadly the `short_to_chan_info` map doesn't cover channels without
+               // a scid or a scid alias, and the `id_to_peer` map shouldn't be used outside
+               // of the ChannelMonitor handling. Therefore reallocations may still occur,
+               // but they are unlikely, as the `short_to_chan_info` map often contains two
+               // entries for the same channel.
+               res.reserve(self.short_to_chan_info.read().unwrap().len());
                {
-                       let channel_state = self.channel_state.lock().unwrap();
                        let best_block_height = self.best_block.read().unwrap().height();
-                       res.reserve(channel_state.by_id.len());
-                       for (channel_id, channel) in channel_state.by_id.iter().filter(f) {
-                               let balance = channel.get_available_balances();
-                               let (to_remote_reserve_satoshis, to_self_reserve_satoshis) =
-                                       channel.get_holder_counterparty_selected_channel_reserve_satoshis();
-                               res.push(ChannelDetails {
-                                       channel_id: (*channel_id).clone(),
-                                       counterparty: ChannelCounterparty {
-                                               node_id: channel.get_counterparty_node_id(),
-                                               features: InitFeatures::empty(),
-                                               unspendable_punishment_reserve: to_remote_reserve_satoshis,
-                                               forwarding_info: channel.counterparty_forwarding_info(),
-                                               // Ensures that we have actually received the `htlc_minimum_msat` value
-                                               // from the counterparty through the `OpenChannel` or `AcceptChannel`
-                                               // message (as they are always the first message from the counterparty).
-                                               // Else `Channel::get_counterparty_htlc_minimum_msat` could return the
-                                               // default `0` value set by `Channel::new_outbound`.
-                                               outbound_htlc_minimum_msat: if channel.have_received_message() {
-                                                       Some(channel.get_counterparty_htlc_minimum_msat()) } else { None },
-                                               outbound_htlc_maximum_msat: channel.get_counterparty_htlc_maximum_msat(),
-                                       },
-                                       funding_txo: channel.get_funding_txo(),
-                                       // Note that accept_channel (or open_channel) is always the first message, so
-                                       // `have_received_message` indicates that type negotiation has completed.
-                                       channel_type: if channel.have_received_message() { Some(channel.get_channel_type().clone()) } else { None },
-                                       short_channel_id: channel.get_short_channel_id(),
-                                       outbound_scid_alias: if channel.is_usable() { Some(channel.outbound_scid_alias()) } else { None },
-                                       inbound_scid_alias: channel.latest_inbound_scid_alias(),
-                                       channel_value_satoshis: channel.get_value_satoshis(),
-                                       unspendable_punishment_reserve: to_self_reserve_satoshis,
-                                       balance_msat: balance.balance_msat,
-                                       inbound_capacity_msat: balance.inbound_capacity_msat,
-                                       outbound_capacity_msat: balance.outbound_capacity_msat,
-                                       next_outbound_htlc_limit_msat: balance.next_outbound_htlc_limit_msat,
-                                       user_channel_id: channel.get_user_id(),
-                                       confirmations_required: channel.minimum_depth(),
-                                       confirmations: Some(channel.get_funding_tx_confirmations(best_block_height)),
-                                       force_close_spend_delay: channel.get_counterparty_selected_contest_delay(),
-                                       is_outbound: channel.is_outbound(),
-                                       is_channel_ready: channel.is_usable(),
-                                       is_usable: channel.is_live(),
-                                       is_public: channel.should_announce(),
-                                       inbound_htlc_minimum_msat: Some(channel.get_holder_htlc_minimum_msat()),
-                                       inbound_htlc_maximum_msat: channel.get_holder_htlc_maximum_msat(),
-                                       config: Some(channel.config()),
-                               });
-                       }
-               }
-               let per_peer_state = self.per_peer_state.read().unwrap();
-               for chan in res.iter_mut() {
-                       if let Some(peer_state) = per_peer_state.get(&chan.counterparty.node_id) {
-                               chan.counterparty.features = peer_state.lock().unwrap().latest_features.clone();
+                       let per_peer_state = self.per_peer_state.read().unwrap();
+                       for (_cp_id, peer_state_mutex) in per_peer_state.iter() {
+                               let mut peer_state_lock = peer_state_mutex.lock().unwrap();
+                               let peer_state = &mut *peer_state_lock;
+                               for (channel_id, channel) in peer_state.channel_by_id.iter().filter(f) {
+                                       let balance = channel.get_available_balances();
+                                       let (to_remote_reserve_satoshis, to_self_reserve_satoshis) =
+                                               channel.get_holder_counterparty_selected_channel_reserve_satoshis();
+                                       res.push(ChannelDetails {
+                                               channel_id: (*channel_id).clone(),
+                                               counterparty: ChannelCounterparty {
+                                                       node_id: channel.get_counterparty_node_id(),
+                                                       features: peer_state.latest_features.clone(),
+                                                       unspendable_punishment_reserve: to_remote_reserve_satoshis,
+                                                       forwarding_info: channel.counterparty_forwarding_info(),
+                                                       // Ensures that we have actually received the `htlc_minimum_msat` value
+                                                       // from the counterparty through the `OpenChannel` or `AcceptChannel`
+                                                       // message (as they are always the first message from the counterparty).
+                                                       // Else `Channel::get_counterparty_htlc_minimum_msat` could return the
+                                                       // default `0` value set by `Channel::new_outbound`.
+                                                       outbound_htlc_minimum_msat: if channel.have_received_message() {
+                                                               Some(channel.get_counterparty_htlc_minimum_msat()) } else { None },
+                                                       outbound_htlc_maximum_msat: channel.get_counterparty_htlc_maximum_msat(),
+                                               },
+                                               funding_txo: channel.get_funding_txo(),
+                                               // Note that accept_channel (or open_channel) is always the first message, so
+                                               // `have_received_message` indicates that type negotiation has completed.
+                                               channel_type: if channel.have_received_message() { Some(channel.get_channel_type().clone()) } else { None },
+                                               short_channel_id: channel.get_short_channel_id(),
+                                               outbound_scid_alias: if channel.is_usable() { Some(channel.outbound_scid_alias()) } else { None },
+                                               inbound_scid_alias: channel.latest_inbound_scid_alias(),
+                                               channel_value_satoshis: channel.get_value_satoshis(),
+                                               unspendable_punishment_reserve: to_self_reserve_satoshis,
+                                               balance_msat: balance.balance_msat,
+                                               inbound_capacity_msat: balance.inbound_capacity_msat,
+                                               outbound_capacity_msat: balance.outbound_capacity_msat,
+                                               next_outbound_htlc_limit_msat: balance.next_outbound_htlc_limit_msat,
+                                               user_channel_id: channel.get_user_id(),
+                                               confirmations_required: channel.minimum_depth(),
+                                               confirmations: Some(channel.get_funding_tx_confirmations(best_block_height)),
+                                               force_close_spend_delay: channel.get_counterparty_selected_contest_delay(),
+                                               is_outbound: channel.is_outbound(),
+                                               is_channel_ready: channel.is_usable(),
+                                               is_usable: channel.is_live(),
+                                               is_public: channel.should_announce(),
+                                               inbound_htlc_minimum_msat: Some(channel.get_holder_htlc_minimum_msat()),
+                                               inbound_htlc_maximum_msat: channel.get_holder_htlc_maximum_msat(),
+                                               config: Some(channel.config()),
+                                       });
+                               }
                        }
                }
                res
@@ -1841,7 +1652,7 @@ impl<M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelManager<M, T, K, F
        }
 
        /// Helper function that issues the channel close events
-       fn issue_channel_close_events(&self, channel: &Channel<<K::Target as KeysInterface>::Signer>, closure_reason: ClosureReason) {
+       fn issue_channel_close_events(&self, channel: &Channel<<K::Target as SignerProvider>::Signer>, closure_reason: ClosureReason) {
                let mut pending_events_lock = self.pending_events.lock().unwrap();
                match channel.unbroadcasted_funding() {
                        Some(transaction) => {
@@ -1863,52 +1674,49 @@ impl<M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelManager<M, T, K, F
                let result: Result<(), _> = loop {
                        let mut channel_state_lock = self.channel_state.lock().unwrap();
                        let channel_state = &mut *channel_state_lock;
-                       match channel_state.by_id.entry(channel_id.clone()) {
-                               hash_map::Entry::Occupied(mut chan_entry) => {
-                                       if *counterparty_node_id != chan_entry.get().get_counterparty_node_id(){
-                                               return Err(APIError::APIMisuseError { err: "The passed counterparty_node_id doesn't match the channel's counterparty node_id".to_owned() });
-                                       }
-                                       let (shutdown_msg, monitor_update, htlcs) = {
-                                               let per_peer_state = self.per_peer_state.read().unwrap();
-                                               match per_peer_state.get(&counterparty_node_id) {
-                                                       Some(peer_state) => {
-                                                               let peer_state = peer_state.lock().unwrap();
-                                                               let their_features = &peer_state.latest_features;
-                                                               chan_entry.get_mut().get_shutdown(&self.keys_manager, their_features, target_feerate_sats_per_1000_weight)?
-                                                       },
-                                                       None => return Err(APIError::ChannelUnavailable { err: format!("Not connected to node: {}", counterparty_node_id) }),
+                       let per_peer_state = self.per_peer_state.read().unwrap();
+                       if let Some(peer_state_mutex) = per_peer_state.get(counterparty_node_id) {
+                               let mut peer_state_lock = peer_state_mutex.lock().unwrap();
+                               let peer_state = &mut *peer_state_lock;
+                               match peer_state.channel_by_id.entry(channel_id.clone()) {
+                                       hash_map::Entry::Occupied(mut chan_entry) => {
+                                               if *counterparty_node_id != chan_entry.get().get_counterparty_node_id(){
+                                                       return Err(APIError::APIMisuseError { err: "The passed counterparty_node_id doesn't match the channel's counterparty node_id".to_owned() });
                                                }
-                                       };
-                                       failed_htlcs = htlcs;
-
-                                       // Update the monitor with the shutdown script if necessary.
-                                       if let Some(monitor_update) = monitor_update {
-                                               let update_res = self.chain_monitor.update_channel(chan_entry.get().get_funding_txo().unwrap(), monitor_update);
-                                               let (result, is_permanent) =
-                                                       handle_monitor_update_res!(self, update_res, chan_entry.get_mut(), RAACommitmentOrder::CommitmentFirst, chan_entry.key(), NO_UPDATE);
-                                               if is_permanent {
-                                                       remove_channel!(self, chan_entry);
-                                                       break result;
+                                               let (shutdown_msg, monitor_update, htlcs) = chan_entry.get_mut().get_shutdown(&self.keys_manager, &peer_state.latest_features, target_feerate_sats_per_1000_weight)?;
+                                               failed_htlcs = htlcs;
+
+                                               // Update the monitor with the shutdown script if necessary.
+                                               if let Some(monitor_update) = monitor_update {
+                                                       let update_res = self.chain_monitor.update_channel(chan_entry.get().get_funding_txo().unwrap(), monitor_update);
+                                                       let (result, is_permanent) =
+                                                               handle_monitor_update_res!(self, update_res, chan_entry.get_mut(), RAACommitmentOrder::CommitmentFirst, chan_entry.key(), NO_UPDATE);
+                                                       if is_permanent {
+                                                               remove_channel!(self, chan_entry);
+                                                               break result;
+                                                       }
                                                }
-                                       }
 
-                                       channel_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown {
-                                               node_id: *counterparty_node_id,
-                                               msg: shutdown_msg
-                                       });
+                                               channel_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown {
+                                                       node_id: *counterparty_node_id,
+                                                       msg: shutdown_msg
+                                               });
 
-                                       if chan_entry.get().is_shutdown() {
-                                               let channel = remove_channel!(self, chan_entry);
-                                               if let Ok(channel_update) = self.get_channel_update_for_broadcast(&channel) {
-                                                       channel_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
-                                                               msg: channel_update
-                                                       });
+                                               if chan_entry.get().is_shutdown() {
+                                                       let channel = remove_channel!(self, chan_entry);
+                                                       if let Ok(channel_update) = self.get_channel_update_for_broadcast(&channel) {
+                                                               channel_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
+                                                                       msg: channel_update
+                                                               });
+                                                       }
+                                                       self.issue_channel_close_events(&channel, ClosureReason::HolderForceClosed);
                                                }
-                                               self.issue_channel_close_events(&channel, ClosureReason::HolderForceClosed);
-                                       }
-                                       break Ok(());
-                               },
-                               hash_map::Entry::Vacant(_) => return Err(APIError::ChannelUnavailable{err: "No such channel".to_owned()})
+                                               break Ok(());
+                                       },
+                                       hash_map::Entry::Vacant(_) => return Err(APIError::ChannelUnavailable { err: "No such channel".to_owned() })
+                               }
+                       } else {
+                               return Err(APIError::ChannelUnavailable { err: format!("Not connected to node: {}", counterparty_node_id) });
                        }
                };
 
@@ -1990,20 +1798,25 @@ impl<M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelManager<M, T, K, F
        fn force_close_channel_with_peer(&self, channel_id: &[u8; 32], peer_node_id: &PublicKey, peer_msg: Option<&String>, broadcast: bool)
        -> Result<PublicKey, APIError> {
                let mut chan = {
-                       let mut channel_state_lock = self.channel_state.lock().unwrap();
-                       let channel_state = &mut *channel_state_lock;
-                       if let hash_map::Entry::Occupied(chan) = channel_state.by_id.entry(channel_id.clone()) {
-                               if chan.get().get_counterparty_node_id() != *peer_node_id {
-                                       return Err(APIError::ChannelUnavailable{err: "No such channel".to_owned()});
-                               }
-                               if let Some(peer_msg) = peer_msg {
-                                       self.issue_channel_close_events(chan.get(),ClosureReason::CounterpartyForceClosed { peer_msg: peer_msg.to_string() });
+                       let per_peer_state = self.per_peer_state.read().unwrap();
+                       if let Some(peer_state_mutex) = per_peer_state.get(peer_node_id) {
+                               let mut peer_state_lock = peer_state_mutex.lock().unwrap();
+                               let peer_state = &mut *peer_state_lock;
+                               if let hash_map::Entry::Occupied(chan) = peer_state.channel_by_id.entry(channel_id.clone()) {
+                                       if chan.get().get_counterparty_node_id() != *peer_node_id {
+                                               return Err(APIError::ChannelUnavailable{err: "No such channel".to_owned()});
+                                       }
+                                       if let Some(peer_msg) = peer_msg {
+                                               self.issue_channel_close_events(chan.get(),ClosureReason::CounterpartyForceClosed { peer_msg: peer_msg.to_string() });
+                                       } else {
+                                               self.issue_channel_close_events(chan.get(), ClosureReason::HolderForceClosed);
+                                       }
+                                       remove_channel!(self, chan)
                                } else {
-                                       self.issue_channel_close_events(chan.get(),ClosureReason::HolderForceClosed);
+                                       return Err(APIError::ChannelUnavailable{err: "No such channel".to_owned()});
                                }
-                               remove_channel!(self, chan)
                        } else {
-                               return Err(APIError::ChannelUnavailable{err: "No such channel".to_owned()});
+                               return Err(APIError::APIMisuseError{ err: format!("Can't find a peer with a node_id matching the passed counterparty_node_id {}", peer_node_id) });
                        }
                };
                log_error!(self.logger, "Force-closing channel {}", log_bytes!(channel_id[..]));
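
Note the deliberate split in error types above: passing a `peer_node_id` we have no state for is now surfaced as `APIMisuseError`, while a known peer that simply lacks the given channel still yields `ChannelUnavailable`. A caller-side sketch (`force_close` is a placeholder name, not necessarily the public wrapper's real signature; only the `APIError` variants are taken from this patch):

    // Hypothetical caller distinguishing the two failure modes.
    match manager.force_close(&channel_id, &peer_node_id) {
        Ok(_) => {},
        Err(APIError::APIMisuseError { err }) => eprintln!("unknown peer: {}", err),
        Err(APIError::ChannelUnavailable { err }) => eprintln!("unknown channel: {}", err),
        Err(e) => eprintln!("other failure: {:?}", e),
    }
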
@@ -2089,10 +1902,13 @@ impl<M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelManager<M, T, K, F
                // Also, ensure that, in the case of an unknown preimage for the received payment hash, our
                // payment logic has enough time to fail the HTLC backward before our onchain logic triggers a
                // channel closure (see HTLC_FAIL_BACK_BUFFER rationale).
-               if (hop_data.outgoing_cltv_value as u64) <= self.best_block.read().unwrap().height() as u64 + HTLC_FAIL_BACK_BUFFER as u64 + 1  {
+               let current_height: u32 = self.best_block.read().unwrap().height();
+               if (hop_data.outgoing_cltv_value as u64) <= current_height as u64 + HTLC_FAIL_BACK_BUFFER as u64 + 1 {
+                       let mut err_data = Vec::with_capacity(12);
+                       err_data.extend_from_slice(&amt_msat.to_be_bytes());
+                       err_data.extend_from_slice(&current_height.to_be_bytes());
                        return Err(ReceiveError {
-                               err_code: 17,
-                               err_data: Vec::new(),
+                               err_code: 0x4000 | 15, err_data,
                                msg: "The final CLTV expiry is too soon to handle",
                        });
                }
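
The new failure code `0x4000 | 15` is BOLT 4's `PERM|15` (`incorrect_or_unknown_payment_details`); returning it instead of `17` (`final_expiry_too_soon`) avoids confirming to a probing sender that its HTLC reached the final node, and its failure data carries the received amount plus the current height. The 12-byte layout built above, restated as a standalone helper:

    // incorrect_or_unknown_payment_details failure data (BOLT 4):
    // 8-byte big-endian htlc amount followed by 4-byte big-endian height.
    fn build_err_data(amt_msat: u64, current_height: u32) -> Vec<u8> {
        let mut err_data = Vec::with_capacity(12);
        err_data.extend_from_slice(&amt_msat.to_be_bytes());
        err_data.extend_from_slice(&current_height.to_be_bytes());
        debug_assert_eq!(err_data.len(), 12);
        err_data
    }

    // build_err_data(100_000, 750_000)
    //   == [0, 0, 0, 0, 0, 1, 134, 160, 0, 11, 113, 176]
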
@@ -2200,7 +2016,8 @@ impl<M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelManager<M, T, K, F
                                        return PendingHTLCStatus::Fail(HTLCFailureMsg::Relay(msgs::UpdateFailHTLC {
                                                channel_id: msg.channel_id,
                                                htlc_id: msg.htlc_id,
-                                               reason: onion_utils::build_first_hop_failure_packet(&shared_secret, $err_code, $data),
+                                               reason: HTLCFailReason::reason($err_code, $data.to_vec())
+                                                       .get_encrypted_failure_packet(&shared_secret, &None),
                                        }));
                                }
                        }
@@ -2265,10 +2082,9 @@ impl<M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelManager<M, T, K, F
                        // with a short_channel_id of 0. This is important as various things later assume
                        // short_channel_id is non-0 in any ::Forward.
                        if let &PendingHTLCRouting::Forward { ref short_channel_id, .. } = routing {
-                               if let Some((err, code, chan_update)) = loop {
+                               if let Some((err, mut code, chan_update)) = loop {
                                        let id_option = self.short_to_chan_info.read().unwrap().get(&short_channel_id).cloned();
-                                       let mut channel_state = self.channel_state.lock().unwrap();
-                                       let forwarding_id_opt = match id_option {
+                                       let forwarding_chan_info_opt = match id_option {
                                                None => { // unknown_next_peer
                                                        // Note that this is likely a timing oracle for detecting whether an scid is a
                                                        // phantom or an intercept.
@@ -2281,13 +2097,20 @@ impl<M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelManager<M, T, K, F
                                                                break Some(("Don't have available channel for forwarding as requested.", 0x4000 | 10, None));
                                                        }
                                                },
-                                               Some((_cp_id, chan_id)) => Some(chan_id.clone()),
+                                               Some((cp_id, id)) => Some((cp_id.clone(), id.clone())),
                                        };
-                                       let chan_update_opt = if let Some(forwarding_id) = forwarding_id_opt {
-                                               let chan = match channel_state.by_id.get_mut(&forwarding_id) {
+                                       let chan_update_opt = if let Some((counterparty_node_id, forwarding_id)) = forwarding_chan_info_opt {
+                                               let per_peer_state = self.per_peer_state.read().unwrap();
+                                               let peer_state_mutex = match per_peer_state.get(&counterparty_node_id) {
+                                                       None => break Some(("Don't have available channel for forwarding as requested.", 0x4000 | 10, None)),
+                                                       Some(peer_state_mutex) => peer_state_mutex,
+                                               };
+                                               let mut peer_state_lock = peer_state_mutex.lock().unwrap();
+                                               let peer_state = &mut *peer_state_lock;
+                                               let chan = match peer_state.channel_by_id.get_mut(&forwarding_id) {
                                                        None => {
-                                                               // Channel was removed. The short_to_chan_info and by_id maps have
-                                                               // no consistency guarantees.
+                                                               // Channel was removed. The short_to_chan_info and channel_by_id maps
+                                                               // have no consistency guarantees.
                                                                break Some(("Don't have available channel for forwarding as requested.", 0x4000 | 10, None));
                                                        },
                                                        Some(chan) => chan
@@ -2322,10 +2145,13 @@ impl<M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelManager<M, T, K, F
                                                }
                                                chan_update_opt
                                        } else {
-                                               if (msg.cltv_expiry as u64) < (*outgoing_cltv_value) as u64 + MIN_CLTV_EXPIRY_DELTA as u64 { // incorrect_cltv_expiry
+                                               if (msg.cltv_expiry as u64) < (*outgoing_cltv_value) as u64 + MIN_CLTV_EXPIRY_DELTA as u64 {
+                                                       // We really should set `incorrect_cltv_expiry` here but as we're not
+                                                       // forwarding over a real channel we can't generate a channel_update
+                                                       // for it. Instead we just return a generic temporary_node_failure.
                                                        break Some((
                                                                "Forwarding node has tampered with the intended HTLC values or origin node has an obsolete cltv_expiry_delta",
-                                                               0x1000 | 13, None,
+                                                               0x2000 | 2, None,
                                                        ));
                                                }
                                                None
@@ -2371,6 +2197,12 @@ impl<M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelManager<M, T, K, F
                                                (chan_update.serialized_length() as u16 + 2).write(&mut res).expect("Writes cannot fail");
                                                msgs::ChannelUpdate::TYPE.write(&mut res).expect("Writes cannot fail");
                                                chan_update.write(&mut res).expect("Writes cannot fail");
+                                       } else if code & 0x1000 == 0x1000 {
+                                               // If we're trying to return an error that requires a `channel_update` but
+                                               // we're forwarding to a phantom or intercept "channel" (i.e. cannot
+                                               // generate an update), just use the generic "temporary_node_failure"
+                                               // instead.
+                                               code = 0x2000 | 2;
                                        }
                                        return_err!(err, code, &res.0[..]);
                                }
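
This fallback restated compactly: any failure code with the BOLT 4 `UPDATE` flag set promises an attached `channel_update`, so when the hop is a phantom or intercept "channel" that cannot produce one, the code is downgraded. A self-contained restatement (the constants are the spec's flag values):

    // BOLT 4 failure-code flag bits.
    const NODE: u16 = 0x2000;
    const UPDATE: u16 = 0x1000;

    // Mirrors the branch above: UPDATE-flagged errors must carry a
    // channel_update; without one, fall back to temporary_node_failure.
    fn downgrade_if_no_update(code: u16, have_channel_update: bool) -> u16 {
        if code & UPDATE == UPDATE && !have_channel_update {
            NODE | 2 // temporary_node_failure
        } else {
            code
        }
    }
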
@@ -2384,8 +2216,8 @@ impl<M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelManager<M, T, K, F
        /// public, and thus should be called whenever the result is going to be passed out in a
        /// [`MessageSendEvent::BroadcastChannelUpdate`] event.
        ///
-       /// May be called with channel_state already locked!
-       fn get_channel_update_for_broadcast(&self, chan: &Channel<<K::Target as KeysInterface>::Signer>) -> Result<msgs::ChannelUpdate, LightningError> {
+       /// May be called with peer_state already locked!
+       fn get_channel_update_for_broadcast(&self, chan: &Channel<<K::Target as SignerProvider>::Signer>) -> Result<msgs::ChannelUpdate, LightningError> {
                if !chan.should_announce() {
                        return Err(LightningError {
                                err: "Cannot broadcast a channel_update for a private channel".to_owned(),
@@ -2403,8 +2235,8 @@ impl<M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelManager<M, T, K, F
        /// is public (only returning an Err if the channel does not yet have an assigned short_id),
        /// and thus MUST NOT be called unless the recipient of the resulting message has already
        /// provided evidence that they know about the existence of the channel.
-       /// May be called with channel_state already locked!
-       fn get_channel_update_for_unicast(&self, chan: &Channel<<K::Target as KeysInterface>::Signer>) -> Result<msgs::ChannelUpdate, LightningError> {
+       /// May be called with peer_state already locked!
+       fn get_channel_update_for_unicast(&self, chan: &Channel<<K::Target as SignerProvider>::Signer>) -> Result<msgs::ChannelUpdate, LightningError> {
                log_trace!(self.logger, "Attempting to generate channel update for channel {}", log_bytes!(chan.channel_id()));
                let short_channel_id = match chan.get_short_channel_id().or(chan.latest_inbound_scid_alias()) {
                        None => return Err(LightningError{err: "Channel not yet established".to_owned(), action: msgs::ErrorAction::IgnoreError}),
@@ -2413,7 +2245,7 @@ impl<M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelManager<M, T, K, F
 
                self.get_channel_update_for_onion(short_channel_id, chan)
        }
-       fn get_channel_update_for_onion(&self, short_channel_id: u64, chan: &Channel<<K::Target as KeysInterface>::Signer>) -> Result<msgs::ChannelUpdate, LightningError> {
+       fn get_channel_update_for_onion(&self, short_channel_id: u64, chan: &Channel<<K::Target as SignerProvider>::Signer>) -> Result<msgs::ChannelUpdate, LightningError> {
                log_trace!(self.logger, "Generating channel update for channel {}", log_bytes!(chan.channel_id()));
                let were_node_one = PublicKey::from_secret_key(&self.secp_ctx, &self.our_network_key).serialize()[..] < chan.get_counterparty_node_id().serialize()[..];
 
@@ -2456,74 +2288,77 @@ impl<M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelManager<M, T, K, F
                let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
 
                let err: Result<(), _> = loop {
-                       let id = match self.short_to_chan_info.read().unwrap().get(&path.first().unwrap().short_channel_id) {
+                       let (counterparty_node_id, id) = match self.short_to_chan_info.read().unwrap().get(&path.first().unwrap().short_channel_id) {
                                None => return Err(APIError::ChannelUnavailable{err: "No channel available with first hop!".to_owned()}),
-                               Some((_cp_id, chan_id)) => chan_id.clone(),
+                               Some((cp_id, chan_id)) => (cp_id.clone(), chan_id.clone()),
                        };
 
                        let mut channel_lock = self.channel_state.lock().unwrap();
                        let channel_state = &mut *channel_lock;
-                       if let hash_map::Entry::Occupied(mut chan) = channel_state.by_id.entry(id) {
-                               match {
-                                       if chan.get().get_counterparty_node_id() != path.first().unwrap().pubkey {
-                                               return Err(APIError::InvalidRoute{err: "Node ID mismatch on first hop!"});
-                                       }
-                                       if !chan.get().is_live() {
-                                               return Err(APIError::ChannelUnavailable{err: "Peer for first hop currently disconnected/pending monitor update!".to_owned()});
-                                       }
-                                       break_chan_entry!(self, chan.get_mut().send_htlc_and_commit(
-                                               htlc_msat, payment_hash.clone(), htlc_cltv, HTLCSource::OutboundRoute {
-                                                       path: path.clone(),
-                                                       session_priv: session_priv.clone(),
-                                                       first_hop_htlc_msat: htlc_msat,
-                                                       payment_id,
-                                                       payment_secret: payment_secret.clone(),
-                                                       payment_params: payment_params.clone(),
-                                               }, onion_packet, &self.logger),
-                                               chan)
-                               } {
-                                       Some((update_add, commitment_signed, monitor_update)) => {
-                                               let update_err = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), monitor_update);
-                                               let chan_id = chan.get().channel_id();
-                                               match (update_err,
-                                                       handle_monitor_update_res!(self, update_err, chan,
-                                                               RAACommitmentOrder::CommitmentFirst, false, true))
-                                               {
-                                                       (ChannelMonitorUpdateStatus::PermanentFailure, Err(e)) => break Err(e),
-                                                       (ChannelMonitorUpdateStatus::Completed, Ok(())) => {},
-                                                       (ChannelMonitorUpdateStatus::InProgress, Err(_)) => {
-                                                               // Note that MonitorUpdateInProgress here indicates (per function
-                                                               // docs) that we will resend the commitment update once monitor
-                                                               // updating completes. Therefore, we must return an error
-                                                               // indicating that it is unsafe to retry the payment wholesale,
-                                                               // which we do in the send_payment check for
-                                                               // MonitorUpdateInProgress, below.
-                                                               return Err(APIError::MonitorUpdateInProgress);
-                                                       },
-                                                       _ => unreachable!(),
+                       let per_peer_state = self.per_peer_state.read().unwrap();
+                       if let Some(peer_state_mutex) = per_peer_state.get(&counterparty_node_id) {
+                               let mut peer_state_lock = peer_state_mutex.lock().unwrap();
+                               let peer_state = &mut *peer_state_lock;
+                               if let hash_map::Entry::Occupied(mut chan) = peer_state.channel_by_id.entry(id) {
+                                       match {
+                                               if !chan.get().is_live() {
+                                                       return Err(APIError::ChannelUnavailable{err: "Peer for first hop currently disconnected/pending monitor update!".to_owned()});
                                                }
+                                               break_chan_entry!(self, chan.get_mut().send_htlc_and_commit(
+                                                       htlc_msat, payment_hash.clone(), htlc_cltv, HTLCSource::OutboundRoute {
+                                                               path: path.clone(),
+                                                               session_priv: session_priv.clone(),
+                                                               first_hop_htlc_msat: htlc_msat,
+                                                               payment_id,
+                                                               payment_secret: payment_secret.clone(),
+                                                               payment_params: payment_params.clone(),
+                                                       }, onion_packet, &self.logger),
+                                                       chan)
+                                       } {
+                                               Some((update_add, commitment_signed, monitor_update)) => {
+                                                       let update_err = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), monitor_update);
+                                                       let chan_id = chan.get().channel_id();
+                                                       match (update_err,
+                                                               handle_monitor_update_res!(self, update_err, chan,
+                                                                       RAACommitmentOrder::CommitmentFirst, false, true))
+                                                       {
+                                                               (ChannelMonitorUpdateStatus::PermanentFailure, Err(e)) => break Err(e),
+                                                               (ChannelMonitorUpdateStatus::Completed, Ok(())) => {},
+                                                               (ChannelMonitorUpdateStatus::InProgress, Err(_)) => {
+                                                                       // Note that MonitorUpdateInProgress here indicates (per function
+                                                                       // docs) that we will resend the commitment update once monitor
+                                                                       // updating completes. Therefore, we must return an error
+                                                                       // indicating that it is unsafe to retry the payment wholesale,
+                                                                       // which we do in the send_payment check for
+                                                                       // MonitorUpdateInProgress, below.
+                                                                       return Err(APIError::MonitorUpdateInProgress);
+                                                               },
+                                                               _ => unreachable!(),
+                                                       }
 
-                                               log_debug!(self.logger, "Sending payment along path resulted in a commitment_signed for channel {}", log_bytes!(chan_id));
-                                               channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
-                                                       node_id: path.first().unwrap().pubkey,
-                                                       updates: msgs::CommitmentUpdate {
-                                                               update_add_htlcs: vec![update_add],
-                                                               update_fulfill_htlcs: Vec::new(),
-                                                               update_fail_htlcs: Vec::new(),
-                                                               update_fail_malformed_htlcs: Vec::new(),
-                                                               update_fee: None,
-                                                               commitment_signed,
-                                                       },
-                                               });
-                                       },
-                                       None => { },
+                                                       log_debug!(self.logger, "Sending payment along path resulted in a commitment_signed for channel {}", log_bytes!(chan_id));
+                                                       channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
+                                                               node_id: path.first().unwrap().pubkey,
+                                                               updates: msgs::CommitmentUpdate {
+                                                                       update_add_htlcs: vec![update_add],
+                                                                       update_fulfill_htlcs: Vec::new(),
+                                                                       update_fail_htlcs: Vec::new(),
+                                                                       update_fail_malformed_htlcs: Vec::new(),
+                                                                       update_fee: None,
+                                                                       commitment_signed,
+                                                               },
+                                                       });
+                                               },
+                                               None => { },
+                                       }
+                               } else {
+                                       // The channel was likely removed after we fetched the id from the
+                                       // `short_to_chan_info` map, but before we successfully locked the
+                                       // `channel_by_id` map.
+                                       // This can occur as no consistency guarantees exist between the two maps.
+                                       return Err(APIError::ChannelUnavailable{err: "No channel available with first hop!".to_owned()});
                                }
-                       } else {
-                               // The channel was likely removed after we fetched the id from the
-                               // `short_to_chan_info` map, but before we successfully locked the `by_id` map.
-                               // This can occur as no consistency guarantees exists between the two maps.
-                               return Err(APIError::ChannelUnavailable{err: "No channel available with first hop!".to_owned()});
-                       }
+                       } else { return Err(APIError::InvalidRoute { err: "No peer matching the path's first hop found!" }) }
                        return Ok(());
                };
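
Note the lock order in this hunk: `channel_state` is taken first (it still owns `pending_msg_events` at this stage of the refactor), then the `per_peer_state` read lock — and `funding_transaction_generated_intern` below acquires them in the same order. A minimal sketch of why one fixed order matters, assuming nothing beyond std:

    use std::sync::{Mutex, RwLock};

    struct Mgr {
        channel_state: Mutex<Vec<u32>>, // legacy shared state
        per_peer_state: RwLock<()>,     // per-peer channel maps
    }

    impl Mgr {
        // Every call site acquires channel_state before per_peer_state;
        // taking them in the opposite order on another thread would
        // permit an AB/BA deadlock.
        fn locked_op(&self) {
            let mut msgs = self.channel_state.lock().unwrap();
            let _peers = self.per_peer_state.read().unwrap();
            msgs.push(0); // work performed under both locks
        }
    }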
 
@@ -2585,142 +2420,27 @@ impl<M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelManager<M, T, K, F
        /// [`Event::PaymentSent`]: events::Event::PaymentSent
        /// [`PeerManager::process_events`]: crate::ln::peer_handler::PeerManager::process_events
        pub fn send_payment(&self, route: &Route, payment_hash: PaymentHash, payment_secret: &Option<PaymentSecret>, payment_id: PaymentId) -> Result<(), PaymentSendFailure> {
-               let onion_session_privs = self.add_new_pending_payment(payment_hash, *payment_secret, payment_id, route)?;
-               self.send_payment_internal(route, payment_hash, payment_secret, None, payment_id, None, onion_session_privs)
+               let best_block_height = self.best_block.read().unwrap().height();
+               self.pending_outbound_payments
+                       .send_payment_with_route(route, payment_hash, payment_secret, payment_id, &self.keys_manager, best_block_height,
+                               |path, payment_params, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv|
+                               self.send_payment_along_path(path, payment_params, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv))
        }
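
Each public `send_*` entry point now reduces to the same shape: snapshot the best block height, then hand `pending_outbound_payments` a closure that performs the actual per-path send while the module keeps the retry and session-key bookkeeping. A stripped-down sketch of that inversion (all names illustrative):

    // Bookkeeping side drives the manager-supplied closure once per path.
    struct Outbounds;

    impl Outbounds {
        fn send_with_route<F>(&self, paths: &[&str], send_path: F) -> Result<(), String>
        where
            F: Fn(&str) -> Result<(), String>,
        {
            for path in paths {
                // The real module also records session keys and partial failures.
                send_path(path)?;
            }
            Ok(())
        }
    }

    // usage sketch: outbounds.send_with_route(&["hop-a", "hop-b"], |p| send_along(p))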
 
        #[cfg(test)]
-       pub(crate) fn test_add_new_pending_payment(&self, payment_hash: PaymentHash, payment_secret: Option<PaymentSecret>, payment_id: PaymentId, route: &Route) -> Result<Vec<[u8; 32]>, PaymentSendFailure> {
-               self.add_new_pending_payment(payment_hash, payment_secret, payment_id, route)
+       fn test_send_payment_internal(&self, route: &Route, payment_hash: PaymentHash, payment_secret: &Option<PaymentSecret>, keysend_preimage: Option<PaymentPreimage>, payment_id: PaymentId, recv_value_msat: Option<u64>, onion_session_privs: Vec<[u8; 32]>) -> Result<(), PaymentSendFailure> {
+               let best_block_height = self.best_block.read().unwrap().height();
+               self.pending_outbound_payments.test_send_payment_internal(route, payment_hash, payment_secret, keysend_preimage, payment_id, recv_value_msat, onion_session_privs, &self.keys_manager, best_block_height,
+                       |path, payment_params, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv|
+                       self.send_payment_along_path(path, payment_params, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv))
        }
 
-       fn add_new_pending_payment(&self, payment_hash: PaymentHash, payment_secret: Option<PaymentSecret>, payment_id: PaymentId, route: &Route) -> Result<Vec<[u8; 32]>, PaymentSendFailure> {
-               let mut onion_session_privs = Vec::with_capacity(route.paths.len());
-               for _ in 0..route.paths.len() {
-                       onion_session_privs.push(self.keys_manager.get_secure_random_bytes());
-               }
-
-               let mut pending_outbounds = self.pending_outbound_payments.lock().unwrap();
-               match pending_outbounds.entry(payment_id) {
-                       hash_map::Entry::Occupied(_) => Err(PaymentSendFailure::DuplicatePayment),
-                       hash_map::Entry::Vacant(entry) => {
-                               let payment = entry.insert(PendingOutboundPayment::Retryable {
-                                       session_privs: HashSet::new(),
-                                       pending_amt_msat: 0,
-                                       pending_fee_msat: Some(0),
-                                       payment_hash,
-                                       payment_secret,
-                                       starting_block_height: self.best_block.read().unwrap().height(),
-                                       total_msat: route.get_total_amount(),
-                               });
-
-                               for (path, session_priv_bytes) in route.paths.iter().zip(onion_session_privs.iter()) {
-                                       assert!(payment.insert(*session_priv_bytes, path));
-                               }
-
-                               Ok(onion_session_privs)
-                       },
-               }
+       #[cfg(test)]
+       pub(crate) fn test_add_new_pending_payment(&self, payment_hash: PaymentHash, payment_secret: Option<PaymentSecret>, payment_id: PaymentId, route: &Route) -> Result<Vec<[u8; 32]>, PaymentSendFailure> {
+               let best_block_height = self.best_block.read().unwrap().height();
+               self.pending_outbound_payments.test_add_new_pending_payment(payment_hash, payment_secret, payment_id, route, &self.keys_manager, best_block_height)
        }
 
-       fn send_payment_internal(&self, route: &Route, payment_hash: PaymentHash, payment_secret: &Option<PaymentSecret>, keysend_preimage: Option<PaymentPreimage>, payment_id: PaymentId, recv_value_msat: Option<u64>, onion_session_privs: Vec<[u8; 32]>) -> Result<(), PaymentSendFailure> {
-               if route.paths.len() < 1 {
-                       return Err(PaymentSendFailure::ParameterError(APIError::InvalidRoute{err: "There must be at least one path to send over"}));
-               }
-               if payment_secret.is_none() && route.paths.len() > 1 {
-                       return Err(PaymentSendFailure::ParameterError(APIError::APIMisuseError{err: "Payment secret is required for multi-path payments".to_string()}));
-               }
-               let mut total_value = 0;
-               let our_node_id = self.get_our_node_id();
-               let mut path_errs = Vec::with_capacity(route.paths.len());
-               'path_check: for path in route.paths.iter() {
-                       if path.len() < 1 || path.len() > 20 {
-                               path_errs.push(Err(APIError::InvalidRoute{err: "Path didn't go anywhere/had bogus size"}));
-                               continue 'path_check;
-                       }
-                       for (idx, hop) in path.iter().enumerate() {
-                               if idx != path.len() - 1 && hop.pubkey == our_node_id {
-                                       path_errs.push(Err(APIError::InvalidRoute{err: "Path went through us but wasn't a simple rebalance loop to us"}));
-                                       continue 'path_check;
-                               }
-                       }
-                       total_value += path.last().unwrap().fee_msat;
-                       path_errs.push(Ok(()));
-               }
-               if path_errs.iter().any(|e| e.is_err()) {
-                       return Err(PaymentSendFailure::PathParameterError(path_errs));
-               }
-               if let Some(amt_msat) = recv_value_msat {
-                       debug_assert!(amt_msat >= total_value);
-                       total_value = amt_msat;
-               }
-
-               let cur_height = self.best_block.read().unwrap().height() + 1;
-               let mut results = Vec::new();
-               debug_assert_eq!(route.paths.len(), onion_session_privs.len());
-               for (path, session_priv) in route.paths.iter().zip(onion_session_privs.into_iter()) {
-                       let mut path_res = self.send_payment_along_path(&path, &route.payment_params, &payment_hash, payment_secret, total_value, cur_height, payment_id, &keysend_preimage, session_priv);
-                       match path_res {
-                               Ok(_) => {},
-                               Err(APIError::MonitorUpdateInProgress) => {
-                                       // While a MonitorUpdateInProgress is an Err(_), the payment is still
-                                       // considered "in flight" and we shouldn't remove it from the
-                                       // PendingOutboundPayment set.
-                               },
-                               Err(_) => {
-                                       let mut pending_outbounds = self.pending_outbound_payments.lock().unwrap();
-                                       if let Some(payment) = pending_outbounds.get_mut(&payment_id) {
-                                               let removed = payment.remove(&session_priv, Some(path));
-                                               debug_assert!(removed, "This can't happen as the payment has an entry for this path added by callers");
-                                       } else {
-                                               debug_assert!(false, "This can't happen as the payment was added by callers");
-                                               path_res = Err(APIError::APIMisuseError { err: "Internal error: payment disappeared during processing. Please report this bug!".to_owned() });
-                                       }
-                               }
-                       }
-                       results.push(path_res);
-               }
-               let mut has_ok = false;
-               let mut has_err = false;
-               let mut pending_amt_unsent = 0;
-               let mut max_unsent_cltv_delta = 0;
-               for (res, path) in results.iter().zip(route.paths.iter()) {
-                       if res.is_ok() { has_ok = true; }
-                       if res.is_err() { has_err = true; }
-                       if let &Err(APIError::MonitorUpdateInProgress) = res {
-                               // MonitorUpdateInProgress is inherently unsafe to retry, so we call it a
-                               // PartialFailure.
-                               has_err = true;
-                               has_ok = true;
-                       } else if res.is_err() {
-                               pending_amt_unsent += path.last().unwrap().fee_msat;
-                               max_unsent_cltv_delta = cmp::max(max_unsent_cltv_delta, path.last().unwrap().cltv_expiry_delta);
-                       }
-               }
-               if has_err && has_ok {
-                       Err(PaymentSendFailure::PartialFailure {
-                               results,
-                               payment_id,
-                               failed_paths_retry: if pending_amt_unsent != 0 {
-                                       if let Some(payment_params) = &route.payment_params {
-                                               Some(RouteParameters {
-                                                       payment_params: payment_params.clone(),
-                                                       final_value_msat: pending_amt_unsent,
-                                                       final_cltv_expiry_delta: max_unsent_cltv_delta,
-                                               })
-                                       } else { None }
-                               } else { None },
-                       })
-               } else if has_err {
-                       // If we failed to send any paths, we should remove the new PaymentId from the
-                       // `pending_outbound_payments` map, as the user isn't expected to `abandon_payment`.
-                       let removed = self.pending_outbound_payments.lock().unwrap().remove(&payment_id).is_some();
-                       debug_assert!(removed, "We should always have a pending payment to remove here");
-                       Err(PaymentSendFailure::AllFailedResendSafe(results.drain(..).map(|r| r.unwrap_err()).collect()))
-               } else {
-                       Ok(())
-               }
-       }
 
        /// Retries a payment along the given [`Route`].
        ///
@@ -2733,94 +2453,36 @@ impl<M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelManager<M, T, K, F
        /// [`send_payment`]: [`ChannelManager::send_payment`]
        /// [`abandon_payment`]: [`ChannelManager::abandon_payment`]
        pub fn retry_payment(&self, route: &Route, payment_id: PaymentId) -> Result<(), PaymentSendFailure> {
-               const RETRY_OVERFLOW_PERCENTAGE: u64 = 10;
-               for path in route.paths.iter() {
-                       if path.len() == 0 {
-                               return Err(PaymentSendFailure::ParameterError(APIError::APIMisuseError {
-                                       err: "length-0 path in route".to_string()
-                               }))
-                       }
-               }
-
-               let mut onion_session_privs = Vec::with_capacity(route.paths.len());
-               for _ in 0..route.paths.len() {
-                       onion_session_privs.push(self.keys_manager.get_secure_random_bytes());
-               }
-
-               let (total_msat, payment_hash, payment_secret) = {
-                       let mut outbounds = self.pending_outbound_payments.lock().unwrap();
-                       match outbounds.get_mut(&payment_id) {
-                               Some(payment) => {
-                                       let res = match payment {
-                                               PendingOutboundPayment::Retryable {
-                                                       total_msat, payment_hash, payment_secret, pending_amt_msat, ..
-                                               } => {
-                                                       let retry_amt_msat: u64 = route.paths.iter().map(|path| path.last().unwrap().fee_msat).sum();
-                                                       if retry_amt_msat + *pending_amt_msat > *total_msat * (100 + RETRY_OVERFLOW_PERCENTAGE) / 100 {
-                                                               return Err(PaymentSendFailure::ParameterError(APIError::APIMisuseError {
-                                                                       err: format!("retry_amt_msat of {} will put pending_amt_msat (currently: {}) more than 10% over total_payment_amt_msat of {}", retry_amt_msat, pending_amt_msat, total_msat).to_string()
-                                                               }))
-                                                       }
-                                                       (*total_msat, *payment_hash, *payment_secret)
-                                               },
-                                               PendingOutboundPayment::Legacy { .. } => {
-                                                       return Err(PaymentSendFailure::ParameterError(APIError::APIMisuseError {
-                                                               err: "Unable to retry payments that were initially sent on LDK versions prior to 0.0.102".to_string()
-                                                       }))
-                                               },
-                                               PendingOutboundPayment::Fulfilled { .. } => {
-                                                       return Err(PaymentSendFailure::ParameterError(APIError::APIMisuseError {
-                                                               err: "Payment already completed".to_owned()
-                                                       }));
-                                               },
-                                               PendingOutboundPayment::Abandoned { .. } => {
-                                                       return Err(PaymentSendFailure::ParameterError(APIError::APIMisuseError {
-                                                               err: "Payment already abandoned (with some HTLCs still pending)".to_owned()
-                                                       }));
-                                               },
-                                       };
-                                       for (path, session_priv_bytes) in route.paths.iter().zip(onion_session_privs.iter()) {
-                                               assert!(payment.insert(*session_priv_bytes, path));
-                                       }
-                                       res
-                               },
-                               None =>
-                                       return Err(PaymentSendFailure::ParameterError(APIError::APIMisuseError {
-                                               err: format!("Payment with ID {} not found", log_bytes!(payment_id.0)),
-                                       })),
-                       }
-               };
-               self.send_payment_internal(route, payment_hash, &payment_secret, None, payment_id, Some(total_msat), onion_session_privs)
+               let best_block_height = self.best_block.read().unwrap().height();
+               self.pending_outbound_payments.retry_payment_with_route(route, payment_id, &self.keys_manager, best_block_height,
+                       |path, payment_params, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv|
+                       self.send_payment_along_path(path, payment_params, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv))
        }
 
        /// Signals that no further retries for the given payment will occur.
        ///
-       /// After this method returns, any future calls to [`retry_payment`] for the given `payment_id`
-       /// will fail with [`PaymentSendFailure::ParameterError`]. If no such event has been generated,
-       /// an [`Event::PaymentFailed`] event will be generated as soon as there are no remaining
-       /// pending HTLCs for this payment.
+       /// After this method returns, no future calls to [`retry_payment`] for the given `payment_id`
+       /// are allowed. If no [`Event::PaymentFailed`] event had been generated before, one will be
+       /// generated as soon as there are no remaining pending HTLCs for this payment.
        ///
        /// Note that calling this method does *not* prevent a payment from succeeding. You must still
        /// wait until you receive either a [`Event::PaymentFailed`] or [`Event::PaymentSent`] event to
        /// determine the ultimate status of a payment.
        ///
+       /// If an [`Event::PaymentFailed`] event is generated and we restart without this
+       /// [`ChannelManager`] having been persisted, the payment may still be in the pending state
+       /// upon restart. This allows further calls to [`retry_payment`] (and will require a second
+       /// call to [`abandon_payment`] to mark the payment as failed again). Otherwise, future calls to
+       /// [`retry_payment`] will fail with [`PaymentSendFailure::ParameterError`].
+       ///
+       /// [`abandon_payment`]: Self::abandon_payment
        /// [`retry_payment`]: Self::retry_payment
        /// [`Event::PaymentFailed`]: events::Event::PaymentFailed
        /// [`Event::PaymentSent`]: events::Event::PaymentSent
        pub fn abandon_payment(&self, payment_id: PaymentId) {
                let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
-
-               let mut outbounds = self.pending_outbound_payments.lock().unwrap();
-               if let hash_map::Entry::Occupied(mut payment) = outbounds.entry(payment_id) {
-                       if let Ok(()) = payment.get_mut().mark_abandoned() {
-                               if payment.get().remaining_parts() == 0 {
-                                       self.pending_events.lock().unwrap().push(events::Event::PaymentFailed {
-                                               payment_id,
-                                               payment_hash: payment.get().payment_hash().expect("PendingOutboundPayments::RetriesExceeded always has a payment hash set"),
-                                       });
-                                       payment.remove();
-                               }
-                       }
+               if let Some(payment_failed_ev) = self.pending_outbound_payments.abandon_payment(payment_id) {
+                       self.pending_events.lock().unwrap().push(payment_failed_ev);
                }
        }
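
`abandon_payment` now queues whatever `PaymentFailed` event the outbound-payments module hands back instead of assembling it while holding the outbounds lock. The shape, sketched with simplified types:

    use std::collections::HashMap;

    enum Event { PaymentFailed { payment_id: u64 } }

    // Returns the event (if any) rather than queueing it, so only the
    // caller ever touches the pending-events queue.
    fn abandon(pending: &mut HashMap<u64, usize>, payment_id: u64) -> Option<Event> {
        match pending.get(&payment_id) {
            Some(&remaining_htlcs) if remaining_htlcs == 0 => {
                pending.remove(&payment_id);
                Some(Event::PaymentFailed { payment_id })
            }
            _ => None, // unknown id, or HTLCs still in flight: no event yet
        }
    }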
 
@@ -2840,103 +2502,85 @@ impl<M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelManager<M, T, K, F
        ///
        /// [`send_payment`]: Self::send_payment
        pub fn send_spontaneous_payment(&self, route: &Route, payment_preimage: Option<PaymentPreimage>, payment_id: PaymentId) -> Result<PaymentHash, PaymentSendFailure> {
-               let preimage = match payment_preimage {
-                       Some(p) => p,
-                       None => PaymentPreimage(self.keys_manager.get_secure_random_bytes()),
-               };
-               let payment_hash = PaymentHash(Sha256::hash(&preimage.0).into_inner());
-               let onion_session_privs = self.add_new_pending_payment(payment_hash, None, payment_id, &route)?;
-
-               match self.send_payment_internal(route, payment_hash, &None, Some(preimage), payment_id, None, onion_session_privs) {
-                       Ok(()) => Ok(payment_hash),
-                       Err(e) => Err(e)
-               }
+               let best_block_height = self.best_block.read().unwrap().height();
+               self.pending_outbound_payments.send_spontaneous_payment(route, payment_preimage, payment_id, &self.keys_manager, best_block_height,
+                       |path, payment_params, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv|
+                       self.send_payment_along_path(path, payment_params, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv))
        }
 
        /// Send a payment that is probing the given route for liquidity. We calculate the
        /// [`PaymentHash`] of probes based on a static secret and a random [`PaymentId`], which allows
        /// us to easily discern them from real payments.
        pub fn send_probe(&self, hops: Vec<RouteHop>) -> Result<(PaymentHash, PaymentId), PaymentSendFailure> {
-               let payment_id = PaymentId(self.keys_manager.get_secure_random_bytes());
-
-               let payment_hash = self.probing_cookie_from_id(&payment_id);
-
-               if hops.len() < 2 {
-                       return Err(PaymentSendFailure::ParameterError(APIError::APIMisuseError {
-                               err: "No need probing a path with less than two hops".to_string()
-                       }))
-               }
-
-               let route = Route { paths: vec![hops], payment_params: None };
-               let onion_session_privs = self.add_new_pending_payment(payment_hash, None, payment_id, &route)?;
-
-               match self.send_payment_internal(&route, payment_hash, &None, None, payment_id, None, onion_session_privs) {
-                       Ok(()) => Ok((payment_hash, payment_id)),
-                       Err(e) => Err(e)
-               }
-       }
+               let best_block_height = self.best_block.read().unwrap().height();
+               self.pending_outbound_payments.send_probe(hops, self.probing_cookie_secret, &self.keys_manager, best_block_height,
+                       |path, payment_params, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv|
+                       self.send_payment_along_path(path, payment_params, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv))
+       }
 
        /// Returns whether a payment with the given [`PaymentHash`] and [`PaymentId`] is, in fact, a
        /// payment probe.
+       #[cfg(test)]
        pub(crate) fn payment_is_probe(&self, payment_hash: &PaymentHash, payment_id: &PaymentId) -> bool {
-               let target_payment_hash = self.probing_cookie_from_id(payment_id);
-               target_payment_hash == *payment_hash
-       }
-
-       /// Returns the 'probing cookie' for the given [`PaymentId`].
-       fn probing_cookie_from_id(&self, payment_id: &PaymentId) -> PaymentHash {
-               let mut preimage = [0u8; 64];
-               preimage[..32].copy_from_slice(&self.probing_cookie_secret);
-               preimage[32..].copy_from_slice(&payment_id.0);
-               PaymentHash(Sha256::hash(&preimage).into_inner())
+               outbound_payment::payment_is_probe(payment_hash, payment_id, self.probing_cookie_secret)
        }
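
The probing-cookie derivation itself is unchanged by the move into `outbound_payment`; the removed helper above hashes a fixed 64-byte preimage of cookie secret followed by payment id. Mirrored as a standalone function (same algorithm, using the `bitcoin::hashes` API the file already relies on):

    use bitcoin::hashes::{Hash, sha256::Hash as Sha256};

    // PaymentHash preimage: probing_cookie_secret (32 bytes) || payment_id (32 bytes).
    fn probing_cookie(probing_cookie_secret: &[u8; 32], payment_id: &[u8; 32]) -> [u8; 32] {
        let mut preimage = [0u8; 64];
        preimage[..32].copy_from_slice(probing_cookie_secret);
        preimage[32..].copy_from_slice(payment_id);
        Sha256::hash(&preimage).into_inner()
    }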
 
        /// Handles the generation of a funding transaction, optionally (for tests) with a function
        /// which checks the correctness of the funding transaction given the associated channel.
-       fn funding_transaction_generated_intern<FundingOutput: Fn(&Channel<<K::Target as KeysInterface>::Signer>, &Transaction) -> Result<OutPoint, APIError>>(
-               &self, temporary_channel_id: &[u8; 32], _counterparty_node_id: &PublicKey, funding_transaction: Transaction, find_funding_output: FundingOutput
+       fn funding_transaction_generated_intern<FundingOutput: Fn(&Channel<<K::Target as SignerProvider>::Signer>, &Transaction) -> Result<OutPoint, APIError>>(
+               &self, temporary_channel_id: &[u8; 32], counterparty_node_id: &PublicKey, funding_transaction: Transaction, find_funding_output: FundingOutput
        ) -> Result<(), APIError> {
-               let (chan, msg) = {
-                       let (res, chan) = match self.channel_state.lock().unwrap().by_id.remove(temporary_channel_id) {
-                               Some(mut chan) => {
-                                       let funding_txo = find_funding_output(&chan, &funding_transaction)?;
-
-                                       (chan.get_outbound_funding_created(funding_transaction, funding_txo, &self.logger)
-                                               .map_err(|e| if let ChannelError::Close(msg) = e {
-                                                       MsgHandleErrInternal::from_finish_shutdown(msg, chan.channel_id(), chan.get_user_id(), chan.force_shutdown(true), None)
-                                               } else { unreachable!(); })
-                                       , chan)
-                               },
-                               None => { return Err(APIError::ChannelUnavailable { err: "No such channel".to_owned() }) },
+               let mut channel_state = self.channel_state.lock().unwrap();
+               let per_peer_state = self.per_peer_state.read().unwrap();
+               if let Some(peer_state_mutex) = per_peer_state.get(counterparty_node_id) {
+                       let mut peer_state_lock = peer_state_mutex.lock().unwrap();
+                       let peer_state = &mut *peer_state_lock;
+                       let (chan, msg) = {
+                               let (res, chan) = {
+                                       match peer_state.channel_by_id.remove(temporary_channel_id) {
+                                               Some(mut chan) => {
+                                                       let funding_txo = find_funding_output(&chan, &funding_transaction)?;
+
+                                                       (chan.get_outbound_funding_created(funding_transaction, funding_txo, &self.logger)
+                                                               .map_err(|e| if let ChannelError::Close(msg) = e {
+                                                                       MsgHandleErrInternal::from_finish_shutdown(msg, chan.channel_id(), chan.get_user_id(), chan.force_shutdown(true), None)
+                                                               } else { unreachable!(); })
+                                                       , chan)
+                                               },
+                                               None => { return Err(APIError::ChannelUnavailable { err: "No such channel".to_owned() }) },
+                                       }
+                               };
+                               match handle_error!(self, res, chan.get_counterparty_node_id()) {
+                                       Ok(funding_msg) => {
+                                               (chan, funding_msg)
+                                       },
+                                       Err(_) => { return Err(APIError::ChannelUnavailable {
+                                               err: "Error deriving keys or signing initial commitment transactions - either our RNG or our counterparty's RNG is broken or the Signer refused to sign".to_owned()
+                                       }) },
+                               }
                        };
-                       match handle_error!(self, res, chan.get_counterparty_node_id()) {
-                               Ok(funding_msg) => {
-                                       (chan, funding_msg)
-                               },
-                               Err(_) => { return Err(APIError::ChannelUnavailable {
-                                       err: "Error deriving keys or signing initial commitment transactions - either our RNG or our counterparty's RNG is broken or the Signer refused to sign".to_owned()
-                               }) },
-                       }
-               };
 
-               let mut channel_state = self.channel_state.lock().unwrap();
-               channel_state.pending_msg_events.push(events::MessageSendEvent::SendFundingCreated {
-                       node_id: chan.get_counterparty_node_id(),
-                       msg,
-               });
-               match channel_state.by_id.entry(chan.channel_id()) {
-                       hash_map::Entry::Occupied(_) => {
-                               panic!("Generated duplicate funding txid?");
-                       },
-                       hash_map::Entry::Vacant(e) => {
-                               let mut id_to_peer = self.id_to_peer.lock().unwrap();
-                               if id_to_peer.insert(chan.channel_id(), chan.get_counterparty_node_id()).is_some() {
-                                       panic!("id_to_peer map already contained funding txid, which shouldn't be possible");
+                       channel_state.pending_msg_events.push(events::MessageSendEvent::SendFundingCreated {
+                               node_id: chan.get_counterparty_node_id(),
+                               msg,
+                       });
+                       mem::drop(channel_state);
+                       match peer_state.channel_by_id.entry(chan.channel_id()) {
+                               hash_map::Entry::Occupied(_) => {
+                                       panic!("Generated duplicate funding txid?");
+                               },
+                               hash_map::Entry::Vacant(e) => {
+                                       let mut id_to_peer = self.id_to_peer.lock().unwrap();
+                                       if id_to_peer.insert(chan.channel_id(), chan.get_counterparty_node_id()).is_some() {
+                                               panic!("id_to_peer map already contained funding txid, which shouldn't be possible");
+                                       }
+                                       e.insert(chan);
                                }
-                               e.insert(chan);
                        }
+                       Ok(())
+               } else {
+                       return Err(APIError::APIMisuseError { err: format!("Can't find a peer with a node_id matching the passed counterparty_node_id {}", counterparty_node_id) })
                }
-               Ok(())
        }
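
The funding flow above shows the locking pattern this refactor is built around: take the `per_peer_state` read lock, find the counterparty's entry, then lock that peer's own `Mutex` — so work on one peer no longer serializes all peers. A minimal sketch of the shape, with simplified stand-in types (`PeerState` and `with_peer` here are illustrative, not LDK's API):

    use std::collections::HashMap;
    use std::sync::{Mutex, RwLock};

    // Simplified stand-in for the real per-peer state.
    struct PeerState { channel_by_id: HashMap<[u8; 32], u64> }

    struct Manager { per_peer_state: RwLock<HashMap<[u8; 33], Mutex<PeerState>>> }

    impl Manager {
        fn with_peer<R>(&self, node_id: &[u8; 33], f: impl FnOnce(&mut PeerState) -> R) -> Option<R> {
            // Outer read lock first (it only blocks peer add/remove), then the
            // per-peer mutex; channels of other peers remain accessible.
            let per_peer_state = self.per_peer_state.read().unwrap();
            per_peer_state.get(node_id).map(|peer| f(&mut *peer.lock().unwrap()))
        }
    }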
 
        #[cfg(test)]
@@ -3061,31 +2705,33 @@ impl<M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelManager<M, T, K, F
                {
                        let mut channel_state_lock = self.channel_state.lock().unwrap();
                        let channel_state = &mut *channel_state_lock;
-                       for channel_id in channel_ids {
-                               let channel_counterparty_node_id = channel_state.by_id.get(channel_id)
-                                       .ok_or(APIError::ChannelUnavailable {
-                                               err: format!("Channel with ID {} was not found", log_bytes!(*channel_id)),
-                                       })?
-                                       .get_counterparty_node_id();
-                               if channel_counterparty_node_id != *counterparty_node_id {
-                                       return Err(APIError::APIMisuseError {
-                                               err: "counterparty node id mismatch".to_owned(),
-                                       });
-                               }
-                       }
-                       for channel_id in channel_ids {
-                               let channel = channel_state.by_id.get_mut(channel_id).unwrap();
-                               if !channel.update_config(config) {
-                                       continue;
+                       let per_peer_state = self.per_peer_state.read().unwrap();
+                       if let Some(peer_state_mutex) = per_peer_state.get(counterparty_node_id) {
+                               let mut peer_state_lock = peer_state_mutex.lock().unwrap();
+                               let peer_state = &mut *peer_state_lock;
+                               for channel_id in channel_ids {
+                                       if !peer_state.channel_by_id.contains_key(channel_id) {
+                                               return Err(APIError::ChannelUnavailable {
+                                                       err: format!("Channel with ID {} was not found", log_bytes!(*channel_id)),
+                                               });
+                                       }
                                }
-                               if let Ok(msg) = self.get_channel_update_for_broadcast(channel) {
-                                       channel_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate { msg });
-                               } else if let Ok(msg) = self.get_channel_update_for_unicast(channel) {
-                                       channel_state.pending_msg_events.push(events::MessageSendEvent::SendChannelUpdate {
-                                               node_id: channel.get_counterparty_node_id(),
-                                               msg,
-                                       });
+                               for channel_id in channel_ids {
+                                       let channel = peer_state.channel_by_id.get_mut(channel_id).unwrap();
+                                       if !channel.update_config(config) {
+                                               continue;
+                                       }
+                                       if let Ok(msg) = self.get_channel_update_for_broadcast(channel) {
+                                               channel_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate { msg });
+                                       } else if let Ok(msg) = self.get_channel_update_for_unicast(channel) {
+                                               channel_state.pending_msg_events.push(events::MessageSendEvent::SendChannelUpdate {
+                                                       node_id: channel.get_counterparty_node_id(),
+                                                       msg,
+                                               });
+                                       }
                                }
+                       } else {
+                               return Err(APIError::APIMisuseError { err: format!("Can't find a peer with a node_id matching the passed counterparty_node_id {}", counterparty_node_id) });
                        }
                }
                Ok(())
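
Note the two-pass shape introduced above: every `channel_id` is validated against `channel_by_id` before any channel's config is mutated, making the call all-or-nothing. The same pattern in isolation (a generic sketch; names hypothetical):

    use std::collections::HashMap;
    use std::hash::Hash;

    /// Validate-then-mutate: either every key is updated or the map is untouched.
    fn update_all_or_nothing<K: Hash + Eq, V>(
        map: &mut HashMap<K, V>, keys: &[K], mut update: impl FnMut(&mut V),
    ) -> Result<(), &'static str> {
        if keys.iter().any(|k| !map.contains_key(k)) {
            return Err("unknown key"); // fail before touching anything
        }
        for k in keys {
            update(map.get_mut(k).expect("checked above"));
        }
        Ok(())
    }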
@@ -3113,21 +2759,30 @@ impl<M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelManager<M, T, K, F
        /// [`HTLCIntercepted`]: events::Event::HTLCIntercepted
        // TODO: when we move to deciding the best outbound channel at forward time, only take
        // `next_node_id` and not `next_hop_channel_id`
-       pub fn forward_intercepted_htlc(&self, intercept_id: InterceptId, next_hop_channel_id: &[u8; 32], _next_node_id: PublicKey, amt_to_forward_msat: u64) -> Result<(), APIError> {
+       pub fn forward_intercepted_htlc(&self, intercept_id: InterceptId, next_hop_channel_id: &[u8; 32], next_node_id: PublicKey, amt_to_forward_msat: u64) -> Result<(), APIError> {
                let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
 
-               let next_hop_scid = match self.channel_state.lock().unwrap().by_id.get(next_hop_channel_id) {
-                       Some(chan) => {
-                               if !chan.is_usable() {
-                                       return Err(APIError::ChannelUnavailable {
-                                               err: format!("Channel with id {} not fully established", log_bytes!(*next_hop_channel_id))
+               let next_hop_scid = {
+                       let peer_state_lock = self.per_peer_state.read().unwrap();
+                       if let Some(peer_state_mutex) = peer_state_lock.get(&next_node_id) {
+                               let mut peer_state_lock = peer_state_mutex.lock().unwrap();
+                               let peer_state = &mut *peer_state_lock;
+                               match peer_state.channel_by_id.get(next_hop_channel_id) {
+                                       Some(chan) => {
+                                               if !chan.is_usable() {
+                                                       return Err(APIError::ChannelUnavailable {
+                                                               err: format!("Channel with id {} not fully established", log_bytes!(*next_hop_channel_id))
+                                                       })
+                                               }
+                                               chan.get_short_channel_id().unwrap_or(chan.outbound_scid_alias())
+                                       },
+                                       None => return Err(APIError::ChannelUnavailable {
+                                               err: format!("Channel with id {} not found", log_bytes!(*next_hop_channel_id))
                                        })
                                }
-                               chan.get_short_channel_id().unwrap_or(chan.outbound_scid_alias())
-                       },
-                       None => return Err(APIError::ChannelUnavailable {
-                               err: format!("Channel with id {} not found", log_bytes!(*next_hop_channel_id))
-                       })
+                       } else {
+                               return Err(APIError::APIMisuseError { err: format!("Can't find a peer with a node_id matching the passed next_node_id {}", next_node_id) });
+                       }
                };
 
                let payment = self.pending_intercepted_htlcs.lock().unwrap().remove(&intercept_id)
@@ -3197,7 +2852,6 @@ impl<M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelManager<M, T, K, F
                let mut new_events = Vec::new();
                let mut failed_forwards = Vec::new();
                let mut phantom_receives: Vec<(u64, OutPoint, u128, Vec<(PendingHTLCInfo, u64)>)> = Vec::new();
-               let mut handle_errors = Vec::new();
                {
                        let mut forward_htlcs = HashMap::new();
                        mem::swap(&mut forward_htlcs, &mut self.forward_htlcs.lock().unwrap());
@@ -3298,23 +2952,27 @@ impl<M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelManager<M, T, K, F
                                                        }
                                                }
                                        }
-                                       let forward_chan_id = match self.short_to_chan_info.read().unwrap().get(&short_chan_id) {
-                                               Some((_cp_id, chan_id)) => chan_id.clone(),
+                                       let (counterparty_node_id, forward_chan_id) = match self.short_to_chan_info.read().unwrap().get(&short_chan_id) {
+                                               Some((cp_id, chan_id)) => (cp_id.clone(), chan_id.clone()),
                                                None => {
                                                        forwarding_channel_not_found!();
                                                        continue;
                                                }
                                        };
-                                       let mut channel_state_lock = self.channel_state.lock().unwrap();
-                                       let channel_state = &mut *channel_state_lock;
-                                       match channel_state.by_id.entry(forward_chan_id) {
+                                       let per_peer_state = self.per_peer_state.read().unwrap();
+                                       let peer_state_mutex = match per_peer_state.get(&counterparty_node_id) {
+                                               Some(peer_state_mutex) => peer_state_mutex,
+                                               None => {
+                                                       forwarding_channel_not_found!();
+                                                       continue;
+                                               }
+                                       };
+                                       let mut peer_state_lock = peer_state_mutex.lock().unwrap();
+                                       let peer_state = &mut *peer_state_lock;
+                                       match peer_state.channel_by_id.entry(forward_chan_id) {
                                                hash_map::Entry::Vacant(_) => {
                                                        forwarding_channel_not_found!();
                                                        continue;
                                                },
                                                hash_map::Entry::Occupied(mut chan) => {
-                                                       let mut add_htlc_msgs = Vec::new();
-                                                       let mut fail_htlc_msgs = Vec::new();
                                                        for forward_info in pending_forwards.drain(..) {
                                                                match forward_info {
                                                                        HTLCForwardInfo::AddHTLC(PendingAddHTLCInfo {
@@ -3333,34 +2991,21 @@ impl<M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelManager<M, T, K, F
                                                                                        // Phantom payments are only PendingHTLCRouting::Receive.
                                                                                        phantom_shared_secret: None,
                                                                                });
-                                                                               match chan.get_mut().send_htlc(outgoing_amt_msat, payment_hash, outgoing_cltv_value, htlc_source.clone(), onion_packet, &self.logger) {
-                                                                                       Err(e) => {
-                                                                                               if let ChannelError::Ignore(msg) = e {
-                                                                                                       log_trace!(self.logger, "Failed to forward HTLC with payment_hash {}: {}", log_bytes!(payment_hash.0), msg);
-                                                                                               } else {
-                                                                                                       panic!("Stated return value requirements in send_htlc() were not met");
-                                                                                               }
-                                                                                               let (failure_code, data) = self.get_htlc_temp_fail_err_and_data(0x1000|7, short_chan_id, chan.get());
-                                                                                               failed_forwards.push((htlc_source, payment_hash,
-                                                                                                       HTLCFailReason::reason(failure_code, data),
-                                                                                                       HTLCDestination::NextHopChannel { node_id: Some(chan.get().get_counterparty_node_id()), channel_id: forward_chan_id }
-                                                                                               ));
-                                                                                               continue;
-                                                                                       },
-                                                                                       Ok(update_add) => {
-                                                                                               match update_add {
-                                                                                                       Some(msg) => { add_htlc_msgs.push(msg); },
-                                                                                                       None => {
-                                                                                                               // Nothing to do here...we're waiting on a remote
-                                                                                                               // revoke_and_ack before we can add anymore HTLCs. The Channel
-                                                                                                               // will automatically handle building the update_add_htlc and
-                                                                                                               // commitment_signed messages when we can.
-                                                                                                               // TODO: Do some kind of timer to set the channel as !is_live()
-                                                                                                               // as we don't really want others relying on us relaying through
-                                                                                                               // this channel currently :/.
-                                                                                                       }
-                                                                                               }
+                                                                               if let Err(e) = chan.get_mut().queue_add_htlc(outgoing_amt_msat,
+                                                                                       payment_hash, outgoing_cltv_value, htlc_source.clone(),
+                                                                                       onion_packet, &self.logger)
+                                                                               {
+                                                                                       if let ChannelError::Ignore(msg) = e {
+                                                                                               log_trace!(self.logger, "Failed to forward HTLC with payment_hash {}: {}", log_bytes!(payment_hash.0), msg);
+                                                                                       } else {
+                                                                                               panic!("Stated return value requirements in queue_add_htlc() were not met");
                                                                                        }
+                                                                                       let (failure_code, data) = self.get_htlc_temp_fail_err_and_data(0x1000|7, short_chan_id, chan.get());
+                                                                                       failed_forwards.push((htlc_source, payment_hash,
+                                                                                               HTLCFailReason::reason(failure_code, data),
+                                                                                               HTLCDestination::NextHopChannel { node_id: Some(chan.get().get_counterparty_node_id()), channel_id: forward_chan_id }
+                                                                                       ));
+                                                                                       continue;
                                                                                }
                                                                        },
                                                                        HTLCForwardInfo::AddHTLC { .. } => {
@@ -3368,77 +3013,22 @@ impl<M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelManager<M, T, K, F
                                                                        },
                                                                        HTLCForwardInfo::FailHTLC { htlc_id, err_packet } => {
                                                                                log_trace!(self.logger, "Failing HTLC back to channel with short id {} (backward HTLC ID {}) after delay", short_chan_id, htlc_id);
-                                                                               match chan.get_mut().get_update_fail_htlc(htlc_id, err_packet, &self.logger) {
-                                                                                       Err(e) => {
-                                                                                               if let ChannelError::Ignore(msg) = e {
-                                                                                                       log_trace!(self.logger, "Failed to fail HTLC with ID {} backwards to short_id {}: {}", htlc_id, short_chan_id, msg);
-                                                                                               } else {
-                                                                                                       panic!("Stated return value requirements in get_update_fail_htlc() were not met");
-                                                                                               }
-                                                                                               // fail-backs are best-effort, we probably already have one
-                                                                                               // pending, and if not that's OK, if not, the channel is on
-                                                                                               // the chain and sending the HTLC-Timeout is their problem.
-                                                                                               continue;
-                                                                                       },
-                                                                                       Ok(Some(msg)) => { fail_htlc_msgs.push(msg); },
-                                                                                       Ok(None) => {
-                                                                                               // Nothing to do here...we're waiting on a remote
-                                                                                               // revoke_and_ack before we can update the commitment
-                                                                                               // transaction. The Channel will automatically handle
-                                                                                               // building the update_fail_htlc and commitment_signed
-                                                                                               // messages when we can.
-                                                                                               // We don't need any kind of timer here as they should fail
-                                                                                               // the channel onto the chain if they can't get our
-                                                                                               // update_fail_htlc in time, it's not our problem.
+                                                                               if let Err(e) = chan.get_mut().queue_fail_htlc(
+                                                                                       htlc_id, err_packet, &self.logger
+                                                                               ) {
+                                                                                       if let ChannelError::Ignore(msg) = e {
+                                                                                               log_trace!(self.logger, "Failed to fail HTLC with ID {} backwards to short_id {}: {}", htlc_id, short_chan_id, msg);
+                                                                                       } else {
+                                                                                               panic!("Stated return value requirements in queue_fail_htlc() were not met");
                                                                                        }
+                                                                                       // fail-backs are best-effort: we probably already have one
+                                                                                       // pending, and if not that's OK; if the channel is on the
+                                                                                       // chain, sending the HTLC-Timeout is their problem.
+                                                                                       continue;
                                                                                }
                                                                        },
                                                                }
                                                        }
-
-                                                       if !add_htlc_msgs.is_empty() || !fail_htlc_msgs.is_empty() {
-                                                               let (commitment_msg, monitor_update) = match chan.get_mut().send_commitment(&self.logger) {
-                                                                       Ok(res) => res,
-                                                                       Err(e) => {
-                                                                               // We surely failed send_commitment due to bad keys, in that case
-                                                                               // close channel and then send error message to peer.
-                                                                               let counterparty_node_id = chan.get().get_counterparty_node_id();
-                                                                               let err: Result<(), _>  = match e {
-                                                                                       ChannelError::Ignore(_) | ChannelError::Warn(_) => {
-                                                                                               panic!("Stated return value requirements in send_commitment() were not met");
-                                                                                       }
-                                                                                       ChannelError::Close(msg) => {
-                                                                                               log_trace!(self.logger, "Closing channel {} due to Close-required error: {}", log_bytes!(chan.key()[..]), msg);
-                                                                                               let mut channel = remove_channel!(self, chan);
-                                                                                               // ChannelClosed event is generated by handle_error for us.
-                                                                                               Err(MsgHandleErrInternal::from_finish_shutdown(msg, channel.channel_id(), channel.get_user_id(), channel.force_shutdown(true), self.get_channel_update_for_broadcast(&channel).ok()))
-                                                                                       },
-                                                                               };
-                                                                               handle_errors.push((counterparty_node_id, err));
-                                                                               continue;
-                                                                       }
-                                                               };
-                                                               match self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), monitor_update) {
-                                                                       ChannelMonitorUpdateStatus::Completed => {},
-                                                                       e => {
-                                                                               handle_errors.push((chan.get().get_counterparty_node_id(), handle_monitor_update_res!(self, e, chan, RAACommitmentOrder::CommitmentFirst, false, true)));
-                                                                               continue;
-                                                                       }
-                                                               }
-                                                               log_debug!(self.logger, "Forwarding HTLCs resulted in a commitment update with {} HTLCs added and {} HTLCs failed for channel {}",
-                                                                       add_htlc_msgs.len(), fail_htlc_msgs.len(), log_bytes!(chan.get().channel_id()));
-                                                               channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
-                                                                       node_id: chan.get().get_counterparty_node_id(),
-                                                                       updates: msgs::CommitmentUpdate {
-                                                                               update_add_htlcs: add_htlc_msgs,
-                                                                               update_fulfill_htlcs: Vec::new(),
-                                                                               update_fail_htlcs: fail_htlc_msgs,
-                                                                               update_fail_malformed_htlcs: Vec::new(),
-                                                                               update_fee: None,
-                                                                               commitment_signed: commitment_msg,
-                                                                       },
-                                                               });
-                                                       }
                                                }
                                        }
                                } else {
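
The net effect of this hunk: `queue_add_htlc` and `queue_fail_htlc` only stage updates in the channel's holding cell, and the per-channel commitment building, monitor update, and `UpdateHTLCs` message that used to live here are produced later in one batch when the holding cells are freed. A toy model of that queue-then-flush split (all names hypothetical, not LDK's API):

    // Hypothetical sketch of the holding-cell pattern.
    enum Update { Add { amt_msat: u64 }, Fail { htlc_id: u64 } }

    #[derive(Default)]
    struct ToyChannel { holding_cell: Vec<Update> }

    impl ToyChannel {
        // Queueing records intent only; no commitment is signed here.
        fn queue_add_htlc(&mut self, amt_msat: u64) {
            self.holding_cell.push(Update::Add { amt_msat });
        }
        fn queue_fail_htlc(&mut self, htlc_id: u64) {
            self.holding_cell.push(Update::Fail { htlc_id });
        }
        // One flush later converts everything queued into a single commitment
        // update, amortizing signing and monitor persistence across HTLCs.
        fn free_holding_cell(&mut self) -> Vec<Update> {
            std::mem::take(&mut self.holding_cell)
        }
    }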
@@ -3503,7 +3093,7 @@ impl<M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelManager<M, T, K, F
 
                                                                macro_rules! check_total_value {
                                                                        ($payment_data: expr, $payment_preimage: expr) => {{
-                                                                               let mut payment_received_generated = false;
+                                                                               let mut payment_claimable_generated = false;
                                                                                let purpose = || {
                                                                                        events::PaymentPurpose::InvoicePayment {
                                                                                                payment_preimage: $payment_preimage,
@@ -3554,14 +3144,14 @@ impl<M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelManager<M, T, K, F
                                                                                                via_channel_id: Some(prev_channel_id),
                                                                                                via_user_channel_id: Some(prev_user_channel_id),
                                                                                        });
-                                                                                       payment_received_generated = true;
+                                                                                       payment_claimable_generated = true;
                                                                                } else {
                                                                                        // Nothing to do - we haven't reached the total
                                                                                        // payment value yet, wait until we receive more
                                                                                        // MPP parts.
                                                                                        htlcs.push(claimable_htlc);
                                                                                }
-                                                                               payment_received_generated
+                                                                               payment_claimable_generated
                                                                        }}
                                                                }
 
@@ -3629,8 +3219,8 @@ impl<M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelManager<M, T, K, F
                                                                                                log_bytes!(payment_hash.0), payment_data.total_msat, inbound_payment.get().min_value_msat.unwrap());
                                                                                        fail_htlc!(claimable_htlc, payment_hash);
                                                                                } else {
-                                                                                       let payment_received_generated = check_total_value!(payment_data, inbound_payment.get().payment_preimage);
-                                                                                       if payment_received_generated {
+                                                                                       let payment_claimable_generated = check_total_value!(payment_data, inbound_payment.get().payment_preimage);
+                                                                                       if payment_claimable_generated {
                                                                                                inbound_payment.remove_entry();
                                                                                        }
                                                                                }
@@ -3651,9 +3241,11 @@ impl<M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelManager<M, T, K, F
                }
                self.forward_htlcs(&mut phantom_receives);
 
-               for (counterparty_node_id, err) in handle_errors.drain(..) {
-                       let _ = handle_error!(self, err, counterparty_node_id);
-               }
+               // Freeing the holding cell here is relatively redundant - in practice we'll do it when we
+               // next get a `get_and_clear_pending_msg_events` call, but some tests rely on it, and it's
+               // nice to do the work now if we can rather than while we're trying to get messages in the
+               // network stack.
+               self.check_free_holding_cells();
 
                if new_events.is_empty() { return }
                let mut events = self.pending_events.lock().unwrap();
@@ -3691,59 +3283,24 @@ impl<M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelManager<M, T, K, F
                self.process_background_events();
        }
 
-       fn update_channel_fee(&self, pending_msg_events: &mut Vec<events::MessageSendEvent>, chan_id: &[u8; 32], chan: &mut Channel<<K::Target as KeysInterface>::Signer>, new_feerate: u32) -> (bool, NotifyOption, Result<(), MsgHandleErrInternal>) {
-               if !chan.is_outbound() { return (true, NotifyOption::SkipPersist, Ok(())); }
+       fn update_channel_fee(&self, chan_id: &[u8; 32], chan: &mut Channel<<K::Target as SignerProvider>::Signer>, new_feerate: u32) -> NotifyOption {
+               if !chan.is_outbound() { return NotifyOption::SkipPersist; }
                // If the feerate has decreased by less than half, don't bother
                if new_feerate <= chan.get_feerate() && new_feerate * 2 > chan.get_feerate() {
                        log_trace!(self.logger, "Channel {} does not qualify for a feerate change from {} to {}.",
                                log_bytes!(chan_id[..]), chan.get_feerate(), new_feerate);
-                       return (true, NotifyOption::SkipPersist, Ok(()));
+                       return NotifyOption::SkipPersist;
                }
                if !chan.is_live() {
                        log_trace!(self.logger, "Channel {} does not qualify for a feerate change from {} to {} as it cannot currently be updated (probably the peer is disconnected).",
                                log_bytes!(chan_id[..]), chan.get_feerate(), new_feerate);
-                       return (true, NotifyOption::SkipPersist, Ok(()));
+                       return NotifyOption::SkipPersist;
                }
                log_trace!(self.logger, "Channel {} qualifies for a feerate change from {} to {}.",
                        log_bytes!(chan_id[..]), chan.get_feerate(), new_feerate);
 
-               let mut retain_channel = true;
-               let res = match chan.send_update_fee_and_commit(new_feerate, &self.logger) {
-                       Ok(res) => Ok(res),
-                       Err(e) => {
-                               let (drop, res) = convert_chan_err!(self, e, chan, chan_id);
-                               if drop { retain_channel = false; }
-                               Err(res)
-                       }
-               };
-               let ret_err = match res {
-                       Ok(Some((update_fee, commitment_signed, monitor_update))) => {
-                               match self.chain_monitor.update_channel(chan.get_funding_txo().unwrap(), monitor_update) {
-                                       ChannelMonitorUpdateStatus::Completed => {
-                                               pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
-                                                       node_id: chan.get_counterparty_node_id(),
-                                                       updates: msgs::CommitmentUpdate {
-                                                               update_add_htlcs: Vec::new(),
-                                                               update_fulfill_htlcs: Vec::new(),
-                                                               update_fail_htlcs: Vec::new(),
-                                                               update_fail_malformed_htlcs: Vec::new(),
-                                                               update_fee: Some(update_fee),
-                                                               commitment_signed,
-                                                       },
-                                               });
-                                               Ok(())
-                                       },
-                                       e => {
-                                               let (res, drop) = handle_monitor_update_res!(self, e, chan, RAACommitmentOrder::CommitmentFirst, chan_id, COMMITMENT_UPDATE_ONLY);
-                                               if drop { retain_channel = false; }
-                                               res
-                                       }
-                               }
-                       },
-                       Ok(None) => Ok(()),
-                       Err(e) => Err(e),
-               };
-               (retain_channel, NotifyOption::DoPersist, ret_err)
+               chan.queue_update_fee(new_feerate, &self.logger);
+               NotifyOption::DoPersist
        }
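
The early-return guard above skips an update only when the feerate fell by less than half; increases and drops of 50% or more still queue an `update_fee`. The boundary behavior, as a test-style sketch (not from the source):

    /// Mirrors the guard: skip when `new <= old` but `new * 2 > old`.
    fn skip_fee_update(old: u32, new: u32) -> bool {
        new <= old && new * 2 > old
    }

    fn main() {
        assert!(skip_fee_update(1000, 600));   // fell by less than half: skip
        assert!(!skip_fee_update(1000, 500));  // fell by exactly half: update
        assert!(!skip_fee_update(1000, 1500)); // any increase: update
        assert!(skip_fee_update(1000, 1000));  // unchanged: skip
    }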
 
        #[cfg(fuzzing)]
@@ -3757,64 +3314,20 @@ impl<M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelManager<M, T, K, F
 
                        let new_feerate = self.fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::Normal);
 
-                       let mut handle_errors = Vec::new();
-                       {
-                               let mut channel_state_lock = self.channel_state.lock().unwrap();
-                               let channel_state = &mut *channel_state_lock;
-                               let pending_msg_events = &mut channel_state.pending_msg_events;
-                               channel_state.by_id.retain(|chan_id, chan| {
-                                       let (retain_channel, chan_needs_persist, err) = self.update_channel_fee(pending_msg_events, chan_id, chan, new_feerate);
+                       let per_peer_state = self.per_peer_state.read().unwrap();
+                       for (_cp_id, peer_state_mutex) in per_peer_state.iter() {
+                               let mut peer_state_lock = peer_state_mutex.lock().unwrap();
+                               let peer_state = &mut *peer_state_lock;
+                               for (chan_id, chan) in peer_state.channel_by_id.iter_mut() {
+                                       let chan_needs_persist = self.update_channel_fee(chan_id, chan, new_feerate);
                                        if chan_needs_persist == NotifyOption::DoPersist { should_persist = NotifyOption::DoPersist; }
-                                       if err.is_err() {
-                                               handle_errors.push(err);
-                                       }
-                                       retain_channel
-                               });
+                               }
                        }
 
                        should_persist
                });
        }
 
-       fn remove_stale_resolved_payments(&self) {
-               // If an outbound payment was completed, and no pending HTLCs remain, we should remove it
-               // from the map. However, if we did that immediately when the last payment HTLC is claimed,
-               // this could race the user making a duplicate send_payment call and our idempotency
-               // guarantees would be violated. Instead, we wait a few timer ticks to do the actual
-               // removal. This should be more than sufficient to ensure the idempotency of any
-               // `send_payment` calls that were made at the same time the `PaymentSent` event was being
-               // processed.
-               let mut pending_outbound_payments = self.pending_outbound_payments.lock().unwrap();
-               let pending_events = self.pending_events.lock().unwrap();
-               pending_outbound_payments.retain(|payment_id, payment| {
-                       if let PendingOutboundPayment::Fulfilled { session_privs, timer_ticks_without_htlcs, .. } = payment {
-                               let mut no_remaining_entries = session_privs.is_empty();
-                               if no_remaining_entries {
-                                       for ev in pending_events.iter() {
-                                               match ev {
-                                                       events::Event::PaymentSent { payment_id: Some(ev_payment_id), .. } |
-                                                       events::Event::PaymentPathSuccessful { payment_id: ev_payment_id, .. } |
-                                                       events::Event::PaymentPathFailed { payment_id: Some(ev_payment_id), .. } => {
-                                                               if payment_id == ev_payment_id {
-                                                                       no_remaining_entries = false;
-                                                                       break;
-                                                               }
-                                                       },
-                                                       _ => {},
-                                               }
-                                       }
-                               }
-                               if no_remaining_entries {
-                                       *timer_ticks_without_htlcs += 1;
-                                       *timer_ticks_without_htlcs <= IDEMPOTENCY_TIMEOUT_TICKS
-                               } else {
-                                       *timer_ticks_without_htlcs = 0;
-                                       true
-                               }
-                       } else { true }
-               });
-       }
-
        /// Performs actions which should happen on startup and roughly once per minute thereafter.
        ///
        /// This currently includes:
@@ -3834,57 +3347,57 @@ impl<M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelManager<M, T, K, F
 
                        let new_feerate = self.fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::Normal);
 
-                       let mut handle_errors = Vec::new();
+                       let mut handle_errors: Vec<(Result<(), _>, _)> = Vec::new();
                        let mut timed_out_mpp_htlcs = Vec::new();
                        {
                                let mut channel_state_lock = self.channel_state.lock().unwrap();
                                let channel_state = &mut *channel_state_lock;
                                let pending_msg_events = &mut channel_state.pending_msg_events;
-                               channel_state.by_id.retain(|chan_id, chan| {
-                                       let counterparty_node_id = chan.get_counterparty_node_id();
-                                       let (retain_channel, chan_needs_persist, err) = self.update_channel_fee(pending_msg_events, chan_id, chan, new_feerate);
-                                       if chan_needs_persist == NotifyOption::DoPersist { should_persist = NotifyOption::DoPersist; }
-                                       if err.is_err() {
-                                               handle_errors.push((err, counterparty_node_id));
-                                       }
-                                       if !retain_channel { return false; }
-
-                                       if let Err(e) = chan.timer_check_closing_negotiation_progress() {
-                                               let (needs_close, err) = convert_chan_err!(self, e, chan, chan_id);
-                                               handle_errors.push((Err(err), chan.get_counterparty_node_id()));
-                                               if needs_close { return false; }
-                                       }
+                               let per_peer_state = self.per_peer_state.read().unwrap();
+                               for (counterparty_node_id, peer_state_mutex) in per_peer_state.iter() {
+                                       let mut peer_state_lock = peer_state_mutex.lock().unwrap();
+                                       let peer_state = &mut *peer_state_lock;
+                                       peer_state.channel_by_id.retain(|chan_id, chan| {
+                                               let chan_needs_persist = self.update_channel_fee(chan_id, chan, new_feerate);
+                                               if chan_needs_persist == NotifyOption::DoPersist { should_persist = NotifyOption::DoPersist; }
+
+                                               if let Err(e) = chan.timer_check_closing_negotiation_progress() {
+                                                       let (needs_close, err) = convert_chan_err!(self, e, chan, chan_id);
+                                                       handle_errors.push((Err(err), *counterparty_node_id));
+                                                       if needs_close { return false; }
+                                               }
 
-                                       match chan.channel_update_status() {
-                                               ChannelUpdateStatus::Enabled if !chan.is_live() => chan.set_channel_update_status(ChannelUpdateStatus::DisabledStaged),
-                                               ChannelUpdateStatus::Disabled if chan.is_live() => chan.set_channel_update_status(ChannelUpdateStatus::EnabledStaged),
-                                               ChannelUpdateStatus::DisabledStaged if chan.is_live() => chan.set_channel_update_status(ChannelUpdateStatus::Enabled),
-                                               ChannelUpdateStatus::EnabledStaged if !chan.is_live() => chan.set_channel_update_status(ChannelUpdateStatus::Disabled),
-                                               ChannelUpdateStatus::DisabledStaged if !chan.is_live() => {
-                                                       if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
-                                                               pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
-                                                                       msg: update
-                                                               });
-                                                       }
-                                                       should_persist = NotifyOption::DoPersist;
-                                                       chan.set_channel_update_status(ChannelUpdateStatus::Disabled);
-                                               },
-                                               ChannelUpdateStatus::EnabledStaged if chan.is_live() => {
-                                                       if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
-                                                               pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
-                                                                       msg: update
-                                                               });
-                                                       }
-                                                       should_persist = NotifyOption::DoPersist;
-                                                       chan.set_channel_update_status(ChannelUpdateStatus::Enabled);
-                                               },
-                                               _ => {},
-                                       }
+                                               match chan.channel_update_status() {
+                                                       ChannelUpdateStatus::Enabled if !chan.is_live() => chan.set_channel_update_status(ChannelUpdateStatus::DisabledStaged),
+                                                       ChannelUpdateStatus::Disabled if chan.is_live() => chan.set_channel_update_status(ChannelUpdateStatus::EnabledStaged),
+                                                       ChannelUpdateStatus::DisabledStaged if chan.is_live() => chan.set_channel_update_status(ChannelUpdateStatus::Enabled),
+                                                       ChannelUpdateStatus::EnabledStaged if !chan.is_live() => chan.set_channel_update_status(ChannelUpdateStatus::Disabled),
+                                                       ChannelUpdateStatus::DisabledStaged if !chan.is_live() => {
+                                                               if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
+                                                                       pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
+                                                                               msg: update
+                                                                       });
+                                                               }
+                                                               should_persist = NotifyOption::DoPersist;
+                                                               chan.set_channel_update_status(ChannelUpdateStatus::Disabled);
+                                                       },
+                                                       ChannelUpdateStatus::EnabledStaged if chan.is_live() => {
+                                                               if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
+                                                                       pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
+                                                                               msg: update
+                                                                       });
+                                                               }
+                                                               should_persist = NotifyOption::DoPersist;
+                                                               chan.set_channel_update_status(ChannelUpdateStatus::Enabled);
+                                                       },
+                                                       _ => {},
+                                               }
 
-                                       chan.maybe_expire_prev_config();
+                                               chan.maybe_expire_prev_config();
 
-                                       true
-                               });
+                                               true
+                                       });
+                               }
                        }
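
The `channel_update_status` match above implements a two-tick debounce: a channel must be observed dead (or live) on two consecutive once-per-minute ticks before a disabling (or enabling) `channel_update` is broadcast, so brief disconnects don't flap gossip. The state machine in isolation (enum mirrors the variants above; broadcasting reduced to a bool):

    #[derive(Clone, Copy)]
    enum Status { Enabled, EnabledStaged, Disabled, DisabledStaged }

    /// One timer tick: next status, plus whether to broadcast a channel_update.
    fn tick(status: Status, is_live: bool) -> (Status, bool) {
        match (status, is_live) {
            (Status::Enabled, false) => (Status::DisabledStaged, false),
            (Status::Disabled, true) => (Status::EnabledStaged, false),
            (Status::DisabledStaged, true) => (Status::Enabled, false),  // recovered in time
            (Status::EnabledStaged, false) => (Status::Disabled, false), // flapped back down
            (Status::DisabledStaged, false) => (Status::Disabled, true), // dead two ticks: announce
            (Status::EnabledStaged, true) => (Status::Enabled, true),    // live two ticks: announce
            (s, _) => (s, false),
        }
    }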
 
                        self.claimable_payments.lock().unwrap().claimable_htlcs.retain(|payment_hash, (_, htlcs)| {
@@ -3920,7 +3433,14 @@ impl<M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelManager<M, T, K, F
                                let _ = handle_error!(self, err, counterparty_node_id);
                        }
 
-                       self.remove_stale_resolved_payments();
+                       self.pending_outbound_payments.remove_stale_resolved_payments(&self.pending_events);
+
+                       // Technically we don't need to do this here, but if we have holding cell entries in a
+                       // channel that need freeing, it's better to do that here and block a background task
+                       // than block the message queueing pipeline.
+                       if self.check_free_holding_cells() {
+                               should_persist = NotifyOption::DoPersist;
+                       }
 
                        should_persist
                });
@@ -3960,7 +3480,7 @@ impl<M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelManager<M, T, K, F
        ///
        /// This is for failures on the channel on which the HTLC was *received*, not failures
        /// forwarding
-       fn get_htlc_inbound_temp_fail_err_and_data(&self, desired_err_code: u16, chan: &Channel<<K::Target as KeysInterface>::Signer>) -> (u16, Vec<u8>) {
+       fn get_htlc_inbound_temp_fail_err_and_data(&self, desired_err_code: u16, chan: &Channel<<K::Target as SignerProvider>::Signer>) -> (u16, Vec<u8>) {
                // We can't be sure what SCID was used when relaying inbound towards us, so we have to
                // guess somewhat. If its a public channel, we figure best to just use the real SCID (as
                // we're not leaking that we have a channel with the counterparty), otherwise we try to use
@@ -3980,7 +3500,7 @@ impl<M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelManager<M, T, K, F
 
        /// Gets an HTLC onion failure code and error data for an `UPDATE` error, given the error code
        /// that we want to return and a channel.
-       fn get_htlc_temp_fail_err_and_data(&self, desired_err_code: u16, scid: u64, chan: &Channel<<K::Target as KeysInterface>::Signer>) -> (u16, Vec<u8>) {
+       fn get_htlc_temp_fail_err_and_data(&self, desired_err_code: u16, scid: u64, chan: &Channel<<K::Target as SignerProvider>::Signer>) -> (u16, Vec<u8>) {
                debug_assert_eq!(desired_err_code & 0x1000, 0x1000);
                if let Ok(upd) = self.get_channel_update_for_onion(scid, chan) {
                        let mut enc = VecWriter(Vec::with_capacity(upd.serialized_length() + 6));
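
Here the failure data for an `UPDATE`-class error (any code with the 0x1000 bit set, per BOLT 4) is built by length-prefixing the node's current `channel_update`, so the sender can apply it before retrying. A hedged sketch of the general byte layout (the real code also writes the update's message type after the length; serialization is stubbed and the helper name is hypothetical):

    /// UPDATE failure data per BOLT 4: a big-endian u16 length, then the
    /// serialized channel_update the sender should apply before retrying.
    fn update_failure_data(failure_code: u16, channel_update: &[u8]) -> (u16, Vec<u8>) {
        assert_eq!(failure_code & 0x1000, 0x1000, "not an UPDATE-class code");
        let mut data = Vec::with_capacity(2 + channel_update.len());
        data.extend_from_slice(&(channel_update.len() as u16).to_be_bytes());
        data.extend_from_slice(channel_update);
        (failure_code, data)
    }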
@@ -4009,13 +3529,19 @@ impl<M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelManager<M, T, K, F
                &self, mut htlcs_to_fail: Vec<(HTLCSource, PaymentHash)>, channel_id: [u8; 32],
                counterparty_node_id: &PublicKey
        ) {
-               let (failure_code, onion_failure_data) =
-                       match self.channel_state.lock().unwrap().by_id.entry(channel_id) {
-                               hash_map::Entry::Occupied(chan_entry) => {
-                                       self.get_htlc_inbound_temp_fail_err_and_data(0x1000|7, &chan_entry.get())
-                               },
-                               hash_map::Entry::Vacant(_) => (0x4000|10, Vec::new())
-                       };
+               let (failure_code, onion_failure_data) = {
+                       let per_peer_state = self.per_peer_state.read().unwrap();
+                       if let Some(peer_state_mutex) = per_peer_state.get(counterparty_node_id) {
+                               let mut peer_state_lock = peer_state_mutex.lock().unwrap();
+                               let peer_state = &mut *peer_state_lock;
+                               match peer_state.channel_by_id.entry(channel_id) {
+                                       hash_map::Entry::Occupied(chan_entry) => {
+                                               self.get_htlc_inbound_temp_fail_err_and_data(0x1000|7, &chan_entry.get())
+                                       },
+                                       hash_map::Entry::Vacant(_) => (0x4000|10, Vec::new())
+                               }
+                       } else { (0x4000|10, Vec::new()) }
+               };
 
                for (htlc_src, payment_hash) in htlcs_to_fail.drain(..) {
                        let reason = HTLCFailReason::reason(failure_code, onion_failure_data.clone());
@@ -4027,13 +3553,15 @@ impl<M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelManager<M, T, K, F
        /// Fails an HTLC backwards to the sender of it to us.
        /// Note that we do not assume that channels corresponding to failed HTLCs are still available.
        fn fail_htlc_backwards_internal(&self, source: &HTLCSource, payment_hash: &PaymentHash, onion_error: &HTLCFailReason, destination: HTLCDestination) {
-               #[cfg(debug_assertions)]
+               #[cfg(all(debug_assertions, feature = "std"))]
                {
-                       // Ensure that the `channel_state` lock is not held when calling this function.
+                       // Ensure that neither the `channel_state` lock nor any peer state channel storage
+                       // lock is held when calling this function.
                        // This ensures that future code doesn't introduce a lock_order requirement for
-                       // `forward_htlcs` to be locked after the `channel_state` lock, which calling this
-                       // function with the `channel_state` locked would.
+                       // `forward_htlcs` to be locked after the `channel_state` and `per_peer_state` locks,
+                       // which calling this function with those locks acquired would.
                        assert!(self.channel_state.try_lock().is_ok());
+                       assert!(self.per_peer_state.try_write().is_ok());
                }
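
The `try_lock()`/`try_write()` asserts above exploit the fact that `std` locks are non-reentrant: if the current thread already held the lock, the non-blocking acquire would fail and the assert would fire. The idiom in isolation, assuming nothing beyond `std` (note it can also trip if another thread happens to hold the lock, which is acceptable in debug/test builds):

use std::sync::Mutex;

// Debug-only check that `lock` is not currently held (by us or anyone else).
// Acquiring and immediately dropping the guard is side-effect free.
fn debug_assert_lock_free<T>(lock: &Mutex<T>) {
	#[cfg(debug_assertions)]
	assert!(lock.try_lock().is_ok());
	#[cfg(not(debug_assertions))]
	let _ = lock; // no-op in release builds
}
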
 
                //TODO: There is a timing attack here where if a node fails an HTLC back to us they can
@@ -4046,152 +3574,11 @@ impl<M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelManager<M, T, K, F
                // being fully configured. See the docs for `ChannelManagerReadArgs` for more.
                match source {
                        HTLCSource::OutboundRoute { ref path, ref session_priv, ref payment_id, ref payment_params, .. } => {
-                               let mut session_priv_bytes = [0; 32];
-                               session_priv_bytes.copy_from_slice(&session_priv[..]);
-                               let mut outbounds = self.pending_outbound_payments.lock().unwrap();
-                               let mut all_paths_failed = false;
-                               let mut full_failure_ev = None;
-                               if let hash_map::Entry::Occupied(mut payment) = outbounds.entry(*payment_id) {
-                                       if !payment.get_mut().remove(&session_priv_bytes, Some(&path)) {
-                                               log_trace!(self.logger, "Received duplicative fail for HTLC with payment_hash {}", log_bytes!(payment_hash.0));
-                                               return;
-                                       }
-                                       if payment.get().is_fulfilled() {
-                                               log_trace!(self.logger, "Received failure of HTLC with payment_hash {} after payment completion", log_bytes!(payment_hash.0));
-                                               return;
-                                       }
-                                       if payment.get().remaining_parts() == 0 {
-                                               all_paths_failed = true;
-                                               if payment.get().abandoned() {
-                                                       full_failure_ev = Some(events::Event::PaymentFailed {
-                                                               payment_id: *payment_id,
-                                                               payment_hash: payment.get().payment_hash().expect("PendingOutboundPayments::RetriesExceeded always has a payment hash set"),
-                                                       });
-                                                       payment.remove();
-                                               }
-                                       }
-                               } else {
-                                       log_trace!(self.logger, "Received duplicative fail for HTLC with payment_hash {}", log_bytes!(payment_hash.0));
-                                       return;
-                               }
-                               let mut retry = if let Some(payment_params_data) = payment_params {
-                                       let path_last_hop = path.last().expect("Outbound payments must have had a valid path");
-                                       Some(RouteParameters {
-                                               payment_params: payment_params_data.clone(),
-                                               final_value_msat: path_last_hop.fee_msat,
-                                               final_cltv_expiry_delta: path_last_hop.cltv_expiry_delta,
-                                       })
-                               } else { None };
-                               log_trace!(self.logger, "Failing outbound payment HTLC with payment_hash {}", log_bytes!(payment_hash.0));
-
-                               let path_failure = match &onion_error {
-                                       &HTLCFailReason::LightningError { ref err } => {
-#[cfg(test)]
-                                               let (network_update, short_channel_id, payment_retryable, onion_error_code, onion_error_data) = onion_utils::process_onion_failure(&self.secp_ctx, &self.logger, &source, err.data.clone());
-#[cfg(not(test))]
-                                               let (network_update, short_channel_id, payment_retryable, _, _) = onion_utils::process_onion_failure(&self.secp_ctx, &self.logger, &source, err.data.clone());
-
-                                               if self.payment_is_probe(payment_hash, &payment_id) {
-                                                       if !payment_retryable {
-                                                               events::Event::ProbeSuccessful {
-                                                                       payment_id: *payment_id,
-                                                                       payment_hash: payment_hash.clone(),
-                                                                       path: path.clone(),
-                                                               }
-                                                       } else {
-                                                               events::Event::ProbeFailed {
-                                                                       payment_id: *payment_id,
-                                                                       payment_hash: payment_hash.clone(),
-                                                                       path: path.clone(),
-                                                                       short_channel_id,
-                                                               }
-                                                       }
-                                               } else {
-                                                       // TODO: If we decided to blame ourselves (or one of our channels) in
-                                                       // process_onion_failure we should close that channel as it implies our
-                                                       // next-hop is needlessly blaming us!
-                                                       if let Some(scid) = short_channel_id {
-                                                               retry.as_mut().map(|r| r.payment_params.previously_failed_channels.push(scid));
-                                                       }
-                                                       events::Event::PaymentPathFailed {
-                                                               payment_id: Some(*payment_id),
-                                                               payment_hash: payment_hash.clone(),
-                                                               payment_failed_permanently: !payment_retryable,
-                                                               network_update,
-                                                               all_paths_failed,
-                                                               path: path.clone(),
-                                                               short_channel_id,
-                                                               retry,
-                                                               #[cfg(test)]
-                                                               error_code: onion_error_code,
-                                                               #[cfg(test)]
-                                                               error_data: onion_error_data
-                                                       }
-                                               }
-                                       },
-                                       &HTLCFailReason::Reason {
-#[cfg(test)]
-                                                       ref failure_code,
-#[cfg(test)]
-                                                       ref data,
-                                                       .. } => {
-                                               // we get a fail_malformed_htlc from the first hop
-                                               // TODO: We'd like to generate a NetworkUpdate for temporary
-                                               // failures here, but that would be insufficient as find_route
-                                               // generally ignores its view of our own channels as we provide them via
-                                               // ChannelDetails.
-                                               // TODO: For non-temporary failures, we really should be closing the
-                                               // channel here as we apparently can't relay through them anyway.
-                                               let scid = path.first().unwrap().short_channel_id;
-                                               retry.as_mut().map(|r| r.payment_params.previously_failed_channels.push(scid));
-
-                                               if self.payment_is_probe(payment_hash, &payment_id) {
-                                                       events::Event::ProbeFailed {
-                                                               payment_id: *payment_id,
-                                                               payment_hash: payment_hash.clone(),
-                                                               path: path.clone(),
-                                                               short_channel_id: Some(scid),
-                                                       }
-                                               } else {
-                                                       events::Event::PaymentPathFailed {
-                                                               payment_id: Some(*payment_id),
-                                                               payment_hash: payment_hash.clone(),
-                                                               payment_failed_permanently: false,
-                                                               network_update: None,
-                                                               all_paths_failed,
-                                                               path: path.clone(),
-                                                               short_channel_id: Some(scid),
-                                                               retry,
-#[cfg(test)]
-                                                               error_code: Some(*failure_code),
-#[cfg(test)]
-                                                               error_data: Some(data.clone()),
-                                                       }
-                                               }
-                                       }
-                               };
-                               let mut pending_events = self.pending_events.lock().unwrap();
-                               pending_events.push(path_failure);
-                               if let Some(ev) = full_failure_ev { pending_events.push(ev); }
+                               self.pending_outbound_payments.fail_htlc(source, payment_hash, onion_error, path, session_priv, payment_id, payment_params, self.probing_cookie_secret, &self.secp_ctx, &self.pending_events, &self.logger);
                        },
                        HTLCSource::PreviousHopData(HTLCPreviousHopData { ref short_channel_id, ref htlc_id, ref incoming_packet_shared_secret, ref phantom_shared_secret, ref outpoint }) => {
-                               let err_packet = match onion_error {
-                                       HTLCFailReason::Reason { ref failure_code, ref data } => {
-                                               log_trace!(self.logger, "Failing HTLC with payment_hash {} backwards from us with code {}", log_bytes!(payment_hash.0), failure_code);
-                                               if let Some(phantom_ss) = phantom_shared_secret {
-                                                       let phantom_packet = onion_utils::build_failure_packet(phantom_ss, *failure_code, &data[..]).encode();
-                                                       let encrypted_phantom_packet = onion_utils::encrypt_failure_packet(phantom_ss, &phantom_packet);
-                                                       onion_utils::encrypt_failure_packet(incoming_packet_shared_secret, &encrypted_phantom_packet.data[..])
-                                               } else {
-                                                       let packet = onion_utils::build_failure_packet(incoming_packet_shared_secret, *failure_code, &data[..]).encode();
-                                                       onion_utils::encrypt_failure_packet(incoming_packet_shared_secret, &packet)
-                                               }
-                                       },
-                                       HTLCFailReason::LightningError { err } => {
-                                               log_trace!(self.logger, "Failing HTLC with payment_hash {} backwards with pre-built LightningError", log_bytes!(payment_hash.0));
-                                               onion_utils::encrypt_failure_packet(incoming_packet_shared_secret, &err.data)
-                                       }
-                               };
+                               log_trace!(self.logger, "Failing HTLC with payment_hash {} backwards from us with {:?}", log_bytes!(payment_hash.0), onion_error);
+                               let err_packet = onion_error.get_encrypted_failure_packet(incoming_packet_shared_secret, phantom_shared_secret);
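
`get_encrypted_failure_packet` replaces the removed match directly above; judging from those removed arms, the helper builds the BOLT 4 failure packet and wraps it in one encryption layer per hop, with an extra inner layer when a phantom-node shared secret is present. A rough reconstruction (sketch only; the real implementation now lives in `onion_utils` and its exact signatures may differ):

// Reconstructed from the removed match arms above; not the crate's actual code.
impl HTLCFailReason {
	fn get_encrypted_failure_packet(
		&self, incoming_packet_shared_secret: &[u8; 32],
		phantom_shared_secret: &Option<[u8; 32]>,
	) -> msgs::OnionErrorPacket {
		match self {
			Self::Reason { failure_code, data } => {
				if let Some(phantom_ss) = phantom_shared_secret {
					// Build and encrypt at the (virtual) phantom hop first...
					let packet = onion_utils::build_failure_packet(phantom_ss, *failure_code, &data[..]).encode();
					let inner = onion_utils::encrypt_failure_packet(phantom_ss, &packet);
					// ...then add the real incoming hop's layer on top.
					onion_utils::encrypt_failure_packet(incoming_packet_shared_secret, &inner.data[..])
				} else {
					let packet = onion_utils::build_failure_packet(incoming_packet_shared_secret, *failure_code, &data[..]).encode();
					onion_utils::encrypt_failure_packet(incoming_packet_shared_secret, &packet)
				}
			},
			Self::LightningError { err } => {
				// Already a peer-built error packet; just wrap it in our layer.
				onion_utils::encrypt_failure_packet(incoming_packet_shared_secret, &err.data)
			},
		}
	}
}
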
 
                                let mut forward_event = None;
                                let mut forward_htlcs = self.forward_htlcs.lock().unwrap();
@@ -4238,7 +3625,6 @@ impl<M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelManager<M, T, K, F
        /// [`process_pending_events`]: EventsProvider::process_pending_events
        /// [`create_inbound_payment`]: Self::create_inbound_payment
        /// [`create_inbound_payment_for_hash`]: Self::create_inbound_payment_for_hash
-       /// [`get_and_clear_pending_msg_events`]: MessageSendEventsProvider::get_and_clear_pending_msg_events
        pub fn claim_funds(&self, payment_preimage: PaymentPreimage) {
                let payment_hash = PaymentHash(Sha256::hash(&payment_preimage.0).into_inner());
 
@@ -4271,10 +3657,14 @@ impl<M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelManager<M, T, K, F
                };
                debug_assert!(!sources.is_empty());
 
-               // If we are claiming an MPP payment, we have to take special care to ensure that each
-               // channel exists before claiming all of the payments (inside one lock).
-               // Note that channel existance is sufficient as we should always get a monitor update
-               // which will take care of the real HTLC claim enforcement.
+               // If we are claiming an MPP payment, we check that all channels which contain a claimable
+               // HTLC still exist. While this isn't guaranteed to remain true if a channel closes while
+               // we're claiming (or even after we claim, before the commitment update dance completes),
+                       // it should be a relatively rare race, and we'd rather just reject the payment than
+                       // claim HTLCs that would require us to go on-chain (and lose the on-chain fee to do so).
+               //
+               // Note that we'll still always get our funds - as long as the generated
+               // `ChannelMonitorUpdate` makes it out to the relevant monitor we can claim on-chain.
                //
                // If we find an HTLC which we would need to claim but for which we do not have a
                // channel, we will fail all parts of the MPP payment. While we could wait and see if
@@ -4286,19 +3676,27 @@ impl<M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelManager<M, T, K, F
                let mut expected_amt_msat = None;
                let mut valid_mpp = true;
                let mut errs = Vec::new();
-               let mut claimed_any_htlcs = false;
-               let mut channel_state_lock = self.channel_state.lock().unwrap();
-               let channel_state = &mut *channel_state_lock;
+               let mut channel_state = Some(self.channel_state.lock().unwrap());
+               let mut per_peer_state = Some(self.per_peer_state.read().unwrap());
                for htlc in sources.iter() {
-                       let chan_id = match self.short_to_chan_info.read().unwrap().get(&htlc.prev_hop.short_channel_id) {
-                               Some((_cp_id, chan_id)) => chan_id.clone(),
+                       let (counterparty_node_id, chan_id) = match self.short_to_chan_info.read().unwrap().get(&htlc.prev_hop.short_channel_id) {
+                               Some((cp_id, chan_id)) => (cp_id.clone(), chan_id.clone()),
                                None => {
                                        valid_mpp = false;
                                        break;
                                }
                        };
 
-                       if let None = channel_state.by_id.get(&chan_id) {
+                       if let None = per_peer_state.as_ref().unwrap().get(&counterparty_node_id) {
+                               valid_mpp = false;
+                               break;
+                       }
+
+                       let peer_state_mutex = per_peer_state.as_ref().unwrap().get(&counterparty_node_id).unwrap();
+                       let mut peer_state_lock = peer_state_mutex.lock().unwrap();
+                       let peer_state = &mut *peer_state_lock;
+
+                       if let None = peer_state.channel_by_id.get(&chan_id) {
                                valid_mpp = false;
                                break;
                        }
@@ -4309,6 +3707,7 @@ impl<M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelManager<M, T, K, F
                                valid_mpp = false;
                                break;
                        }
+
                        expected_amt_msat = Some(htlc.total_msat);
                        if let OnionPayload::Spontaneous(_) = &htlc.onion_payload {
                                // We don't currently support MPP for spontaneous payments, so just check
@@ -4325,12 +3724,14 @@ impl<M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelManager<M, T, K, F
                }
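
The loop above also enforces that every MPP part agrees on the payment's `total_msat` (and that spontaneous payments never arrive as multiple parts). Reduced to a toy predicate, the consistency rule looks roughly like this (illustrative only, not the crate's code):

// Toy version of the MPP consistency rule: all parts must advertise the
// same total_msat, and the sum of part values should reach that total.
fn mpp_parts_consistent(parts: &[(u64, u64)] /* (value_msat, total_msat) */) -> bool {
	let mut expected_total = None;
	let mut claimable = 0u64;
	for &(value_msat, total_msat) in parts {
		if *expected_total.get_or_insert(total_msat) != total_msat { return false; }
		claimable += value_msat;
	}
	expected_total == Some(claimable)
}
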
                if sources.is_empty() || expected_amt_msat.is_none() {
                        mem::drop(channel_state);
+                       mem::drop(per_peer_state);
                        self.claimable_payments.lock().unwrap().pending_claiming_payments.remove(&payment_hash);
                        log_info!(self.logger, "Attempted to claim an incomplete payment which no longer had any available HTLCs!");
                        return;
                }
                if claimable_amt_msat != expected_amt_msat.unwrap() {
                        mem::drop(channel_state);
+                       mem::drop(per_peer_state);
                        self.claimable_payments.lock().unwrap().pending_claiming_payments.remove(&payment_hash);
                        log_info!(self.logger, "Attempted to claim an incomplete payment, expected {} msat, had {} available to claim.",
                                expected_amt_msat.unwrap(), claimable_amt_msat);
@@ -4338,27 +3739,22 @@ impl<M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelManager<M, T, K, F
                }
                if valid_mpp {
                        for htlc in sources.drain(..) {
-                               match self.claim_funds_from_hop(&mut channel_state_lock, htlc.prev_hop, payment_preimage) {
-                                       ClaimFundsFromHop::MonitorUpdateFail(pk, err, _) => {
-                                               if let msgs::ErrorAction::IgnoreError = err.err.action {
-                                                       // We got a temporary failure updating monitor, but will claim the
-                                                       // HTLC when the monitor updating is restored (or on chain).
-                                                       log_error!(self.logger, "Temporary failure claiming HTLC, treating as success: {}", err.err.err);
-                                                       claimed_any_htlcs = true;
-                                               } else { errs.push((pk, err)); }
-                                       },
-                                       ClaimFundsFromHop::PrevHopForceClosed => unreachable!("We already checked for channel existence, we can't fail here!"),
-                                       ClaimFundsFromHop::DuplicateClaim => {
-                                               // While we should never get here in most cases, if we do, it likely
-                                               // indicates that the HTLC was timed out some time ago and is no longer
-                                               // available to be claimed. Thus, it does not make sense to set
-                                               // `claimed_any_htlcs`.
-                                       },
-                                       ClaimFundsFromHop::Success(_) => claimed_any_htlcs = true,
+                               if channel_state.is_none() { channel_state = Some(self.channel_state.lock().unwrap()); }
+                               if per_peer_state.is_none() { per_peer_state = Some(self.per_peer_state.read().unwrap()); }
+                               if let Err((pk, err)) = self.claim_funds_from_hop(channel_state.take().unwrap(), per_peer_state.take().unwrap(),
+                                       htlc.prev_hop, payment_preimage,
+                                       |_| Some(MonitorUpdateCompletionAction::PaymentClaimed { payment_hash }))
+                               {
+                                       if let msgs::ErrorAction::IgnoreError = err.err.action {
+                                               // We got a temporary failure updating monitor, but will claim the
+                                               // HTLC when the monitor updating is restored (or on chain).
+                                               log_error!(self.logger, "Temporary failure claiming HTLC, treating as success: {}", err.err.err);
+                                       } else { errs.push((pk, err)); }
                                }
                        }
                }
-               mem::drop(channel_state_lock);
+               mem::drop(channel_state);
+               mem::drop(per_peer_state);
                if !valid_mpp {
                        for htlc in sources.drain(..) {
                                let mut htlc_msat_height_data = htlc.value.to_be_bytes().to_vec();
@@ -4368,14 +3764,7 @@ impl<M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelManager<M, T, K, F
                                let receiver = HTLCDestination::FailedPayment { payment_hash };
                                self.fail_htlc_backwards_internal(&source, &payment_hash, &reason, receiver);
                        }
-               }
-
-               let ClaimingPayment { amount_msat, payment_purpose: purpose, receiver_node_id } =
-                       self.claimable_payments.lock().unwrap().pending_claiming_payments.remove(&payment_hash).unwrap();
-               if claimed_any_htlcs {
-                       self.pending_events.lock().unwrap().push(events::Event::PaymentClaimed {
-                               payment_hash, purpose, amount_msat, receiver_node_id: Some(receiver_node_id),
-                       });
+                       self.claimable_payments.lock().unwrap().pending_claiming_payments.remove(&payment_hash);
                }
 
                // Now we can handle any errors which were generated.
@@ -4385,194 +3774,166 @@ impl<M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelManager<M, T, K, F
                }
        }
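
The `channel_state.take()` / `per_peer_state.take()` dance above is what the commit title refers to: `claim_funds_from_hop` now consumes the guards so it can drop them at the right moment, and the caller only re-acquires them if another loop iteration actually needs them. The pattern in isolation (a minimal sketch, not tied to any real lock in the crate):

use std::sync::Mutex;

fn process(items: Vec<u32>, lock: &Mutex<()>) {
	let mut guard = Some(lock.lock().unwrap());
	for item in items {
		// Re-take the lock only if the previous iteration handed it away.
		if guard.is_none() { guard = Some(lock.lock().unwrap()); }
		if item % 2 == 0 {
			// A callee that must release the lock before slow work (e.g. a
			// monitor update) takes the guard by value and drops it itself.
			consume_guard(guard.take().unwrap());
		}
	}
}

fn consume_guard(guard: std::sync::MutexGuard<()>) {
	drop(guard); // ...then do the slow work without holding the lock
}
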
 
-       fn claim_funds_from_hop(&self, channel_state_lock: &mut MutexGuard<ChannelHolder<<K::Target as KeysInterface>::Signer>>, prev_hop: HTLCPreviousHopData, payment_preimage: PaymentPreimage) -> ClaimFundsFromHop {
+       fn claim_funds_from_hop<ComplFunc: FnOnce(Option<u64>) -> Option<MonitorUpdateCompletionAction>>(&self,
+               mut channel_state_lock: MutexGuard<ChannelHolder>,
+               per_peer_state_lock: RwLockReadGuard<HashMap<PublicKey, Mutex<PeerState<<K::Target as SignerProvider>::Signer>>>>,
+               prev_hop: HTLCPreviousHopData, payment_preimage: PaymentPreimage, completion_action: ComplFunc)
+       -> Result<(), (PublicKey, MsgHandleErrInternal)> {
                //TODO: Delay the claimed_funds relaying just like we do outbound relay!
 
                let chan_id = prev_hop.outpoint.to_channel_id();
-               let channel_state = &mut **channel_state_lock;
-               if let hash_map::Entry::Occupied(mut chan) = channel_state.by_id.entry(chan_id) {
-                       match chan.get_mut().get_update_fulfill_htlc_and_commit(prev_hop.htlc_id, payment_preimage, &self.logger) {
-                               Ok(msgs_monitor_option) => {
-                                       if let UpdateFulfillCommitFetch::NewClaim { msgs, htlc_value_msat, monitor_update } = msgs_monitor_option {
+               let channel_state = &mut *channel_state_lock;
+
+               let counterparty_node_id_opt = match self.short_to_chan_info.read().unwrap().get(&prev_hop.short_channel_id) {
+                       Some((cp_id, _dup_chan_id)) => Some(cp_id.clone()),
+                       None => None
+               };
+
+               let (found_channel, mut peer_state_opt) = if counterparty_node_id_opt.is_some() && per_peer_state_lock.get(&counterparty_node_id_opt.unwrap()).is_some() {
+                       let peer_mutex = per_peer_state_lock.get(&counterparty_node_id_opt.unwrap()).unwrap();
+                       let peer_state = peer_mutex.lock().unwrap();
+                       let found_channel = peer_state.channel_by_id.contains_key(&chan_id);
+                       (found_channel, Some(peer_state))
+               } else { (false, None) };
+
+               if found_channel {
+                       if let hash_map::Entry::Occupied(mut chan) = peer_state_opt.as_mut().unwrap().channel_by_id.entry(chan_id) {
+                               let counterparty_node_id = chan.get().get_counterparty_node_id();
+                               match chan.get_mut().get_update_fulfill_htlc_and_commit(prev_hop.htlc_id, payment_preimage, &self.logger) {
+                                       Ok(msgs_monitor_option) => {
+                                               if let UpdateFulfillCommitFetch::NewClaim { msgs, htlc_value_msat, monitor_update } = msgs_monitor_option {
+                                                       match self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), monitor_update) {
+                                                               ChannelMonitorUpdateStatus::Completed => {},
+                                                               e => {
+                                                                       log_given_level!(self.logger, if e == ChannelMonitorUpdateStatus::PermanentFailure { Level::Error } else { Level::Debug },
+                                                                               "Failed to update channel monitor with preimage {:?}: {:?}",
+                                                                               payment_preimage, e);
+                                                                       let err = handle_monitor_update_res!(self, e, chan, RAACommitmentOrder::CommitmentFirst, false, msgs.is_some()).unwrap_err();
+                                                                       mem::drop(channel_state_lock);
+                                                                       mem::drop(peer_state_opt);
+                                                                       mem::drop(per_peer_state_lock);
+                                                                       self.handle_monitor_update_completion_actions(completion_action(Some(htlc_value_msat)));
+                                                                       return Err((counterparty_node_id, err));
+                                                               }
+                                                       }
+                                                       if let Some((msg, commitment_signed)) = msgs {
+                                                               log_debug!(self.logger, "Claiming funds for HTLC with preimage {} resulted in a commitment_signed for channel {}",
+                                                                       log_bytes!(payment_preimage.0), log_bytes!(chan.get().channel_id()));
+                                                               channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
+                                                                       node_id: chan.get().get_counterparty_node_id(),
+                                                                       updates: msgs::CommitmentUpdate {
+                                                                               update_add_htlcs: Vec::new(),
+                                                                               update_fulfill_htlcs: vec![msg],
+                                                                               update_fail_htlcs: Vec::new(),
+                                                                               update_fail_malformed_htlcs: Vec::new(),
+                                                                               update_fee: None,
+                                                                               commitment_signed,
+                                                                       }
+                                                               });
+                                                       }
+                                                       mem::drop(channel_state_lock);
+                                                       mem::drop(peer_state_opt);
+                                                       mem::drop(per_peer_state_lock);
+                                                       self.handle_monitor_update_completion_actions(completion_action(Some(htlc_value_msat)));
+                                                       Ok(())
+                                               } else {
+                                                       Ok(())
+                                               }
+                                       },
+                                       Err((e, monitor_update)) => {
                                                match self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), monitor_update) {
                                                        ChannelMonitorUpdateStatus::Completed => {},
                                                        e => {
-                                                               log_given_level!(self.logger, if e == ChannelMonitorUpdateStatus::PermanentFailure { Level::Error } else { Level::Debug },
-                                                                       "Failed to update channel monitor with preimage {:?}: {:?}",
+                                                               // TODO: This needs to be handled somehow - if we receive a monitor update
+                                                               // with a preimage we *must* somehow manage to propagate it to the upstream
+                                                               // channel, or we must have an ability to receive the same update and try
+                                                               // again on restart.
+                                                               log_given_level!(self.logger, if e == ChannelMonitorUpdateStatus::PermanentFailure { Level::Error } else { Level::Info },
+                                                                       "Failed to update channel monitor with preimage {:?} immediately prior to force-close: {:?}",
                                                                        payment_preimage, e);
-                                                               return ClaimFundsFromHop::MonitorUpdateFail(
-                                                                       chan.get().get_counterparty_node_id(),
-                                                                       handle_monitor_update_res!(self, e, chan, RAACommitmentOrder::CommitmentFirst, false, msgs.is_some()).unwrap_err(),
-                                                                       Some(htlc_value_msat)
-                                                               );
-                                                       }
+                                                       },
                                                }
-                                               if let Some((msg, commitment_signed)) = msgs {
-                                                       log_debug!(self.logger, "Claiming funds for HTLC with preimage {} resulted in a commitment_signed for channel {}",
-                                                               log_bytes!(payment_preimage.0), log_bytes!(chan.get().channel_id()));
-                                                       channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
-                                                               node_id: chan.get().get_counterparty_node_id(),
-                                                               updates: msgs::CommitmentUpdate {
-                                                                       update_add_htlcs: Vec::new(),
-                                                                       update_fulfill_htlcs: vec![msg],
-                                                                       update_fail_htlcs: Vec::new(),
-                                                                       update_fail_malformed_htlcs: Vec::new(),
-                                                                       update_fee: None,
-                                                                       commitment_signed,
-                                                               }
-                                                       });
+                                               let (drop, res) = convert_chan_err!(self, e, chan.get_mut(), &chan_id);
+                                               if drop {
+                                                       chan.remove_entry();
                                                }
-                                               return ClaimFundsFromHop::Success(htlc_value_msat);
-                                       } else {
-                                               return ClaimFundsFromHop::DuplicateClaim;
-                                       }
-                               },
-                               Err((e, monitor_update)) => {
-                                       match self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), monitor_update) {
-                                               ChannelMonitorUpdateStatus::Completed => {},
-                                               e => {
-                                                       log_given_level!(self.logger, if e == ChannelMonitorUpdateStatus::PermanentFailure { Level::Error } else { Level::Info },
-                                                               "Failed to update channel monitor with preimage {:?} immediately prior to force-close: {:?}",
-                                                               payment_preimage, e);
-                                               },
-                                       }
-                                       let counterparty_node_id = chan.get().get_counterparty_node_id();
-                                       let (drop, res) = convert_chan_err!(self, e, chan.get_mut(), &chan_id);
-                                       if drop {
-                                               chan.remove_entry();
-                                       }
-                                       return ClaimFundsFromHop::MonitorUpdateFail(counterparty_node_id, res, None);
-                               },
+                                               mem::drop(channel_state_lock);
+                                               mem::drop(peer_state_opt);
+                                               mem::drop(per_peer_state_lock);
+                                               self.handle_monitor_update_completion_actions(completion_action(None));
+                                               Err((counterparty_node_id, res))
+                                       },
+                               }
+                       } else {
+                               // We've held the peer_state mutex since finding the channel and setting
+                               // found_channel to true, so the channel can't have been dropped.
+                               unreachable!()
                        }
-               } else { return ClaimFundsFromHop::PrevHopForceClosed }
-       }
-
-       fn finalize_claims(&self, mut sources: Vec<HTLCSource>) {
-               let mut outbounds = self.pending_outbound_payments.lock().unwrap();
-               let mut pending_events = self.pending_events.lock().unwrap();
-               for source in sources.drain(..) {
-                       if let HTLCSource::OutboundRoute { session_priv, payment_id, path, .. } = source {
-                               let mut session_priv_bytes = [0; 32];
-                               session_priv_bytes.copy_from_slice(&session_priv[..]);
-                               if let hash_map::Entry::Occupied(mut payment) = outbounds.entry(payment_id) {
-                                       assert!(payment.get().is_fulfilled());
-                                       if payment.get_mut().remove(&session_priv_bytes, None) {
-                                               pending_events.push(
-                                                       events::Event::PaymentPathSuccessful {
-                                                               payment_id,
-                                                               payment_hash: payment.get().payment_hash(),
-                                                               path,
-                                                       }
-                                               );
-                                       }
-                               }
+               } else {
+                       let preimage_update = ChannelMonitorUpdate {
+                               update_id: CLOSED_CHANNEL_UPDATE_ID,
+                               updates: vec![ChannelMonitorUpdateStep::PaymentPreimage {
+                                       payment_preimage,
+                               }],
+                       };
+                       // We update the ChannelMonitor on the backward link, after
+                       // receiving an `update_fulfill_htlc` from the forward link.
+                       let update_res = self.chain_monitor.update_channel(prev_hop.outpoint, preimage_update);
+                       if update_res != ChannelMonitorUpdateStatus::Completed {
+                               // TODO: This needs to be handled somehow - if we receive a monitor update
+                               // with a preimage we *must* somehow manage to propagate it to the upstream
+                               // channel, or we must have an ability to receive the same event and try
+                               // again on restart.
+                               log_error!(self.logger, "Critical error: failed to update channel monitor with preimage {:?}: {:?}",
+                                       payment_preimage, update_res);
                        }
+                       mem::drop(channel_state_lock);
+                       mem::drop(peer_state_opt);
+                       mem::drop(per_peer_state_lock);
+                       // Note that we do process the completion action here. This totally could be a
+                       // duplicate claim, but we have no way of knowing without interrogating the
+                       // `ChannelMonitor` we've provided the above update to. Instead, note that `Event`s are
+                       // generally always allowed to be duplicative (and it's specifically noted in
+                       // `PaymentForwarded`).
+                       self.handle_monitor_update_completion_actions(completion_action(None));
+                       Ok(())
                }
        }
 
-       fn claim_funds_internal(&self, mut channel_state_lock: MutexGuard<ChannelHolder<<K::Target as KeysInterface>::Signer>>, source: HTLCSource, payment_preimage: PaymentPreimage, forwarded_htlc_value_msat: Option<u64>, from_onchain: bool, next_channel_id: [u8; 32]) {
+       fn finalize_claims(&self, sources: Vec<HTLCSource>) {
+               self.pending_outbound_payments.finalize_claims(sources, &self.pending_events);
+       }
+
+       fn claim_funds_internal(&self, channel_state_lock: MutexGuard<ChannelHolder>, source: HTLCSource, payment_preimage: PaymentPreimage, forwarded_htlc_value_msat: Option<u64>, from_onchain: bool, next_channel_id: [u8; 32]) {
                match source {
                        HTLCSource::OutboundRoute { session_priv, payment_id, path, .. } => {
                                mem::drop(channel_state_lock);
-                               let mut session_priv_bytes = [0; 32];
-                               session_priv_bytes.copy_from_slice(&session_priv[..]);
-                               let mut outbounds = self.pending_outbound_payments.lock().unwrap();
-                               if let hash_map::Entry::Occupied(mut payment) = outbounds.entry(payment_id) {
-                                       let mut pending_events = self.pending_events.lock().unwrap();
-                                       if !payment.get().is_fulfilled() {
-                                               let payment_hash = PaymentHash(Sha256::hash(&payment_preimage.0).into_inner());
-                                               let fee_paid_msat = payment.get().get_pending_fee_msat();
-                                               pending_events.push(
-                                                       events::Event::PaymentSent {
-                                                               payment_id: Some(payment_id),
-                                                               payment_preimage,
-                                                               payment_hash,
-                                                               fee_paid_msat,
-                                                       }
-                                               );
-                                               payment.get_mut().mark_fulfilled();
-                                       }
-
-                                       if from_onchain {
-                                               // We currently immediately remove HTLCs which were fulfilled on-chain.
-                                               // This could potentially lead to removing a pending payment too early,
-                                               // with a reorg of one block causing us to re-add the fulfilled payment on
-                                               // restart.
-                                               // TODO: We should have a second monitor event that informs us of payments
-                                               // irrevocably fulfilled.
-                                               if payment.get_mut().remove(&session_priv_bytes, Some(&path)) {
-                                                       let payment_hash = Some(PaymentHash(Sha256::hash(&payment_preimage.0).into_inner()));
-                                                       pending_events.push(
-                                                               events::Event::PaymentPathSuccessful {
-                                                                       payment_id,
-                                                                       payment_hash,
-                                                                       path,
-                                                               }
-                                                       );
-                                               }
-                                       }
-                               } else {
-                                       log_trace!(self.logger, "Received duplicative fulfill for HTLC with payment_preimage {}", log_bytes!(payment_preimage.0));
-                               }
+                               self.pending_outbound_payments.claim_htlc(payment_id, payment_preimage, session_priv, path, from_onchain, &self.pending_events, &self.logger);
                        },
                        HTLCSource::PreviousHopData(hop_data) => {
                                let prev_outpoint = hop_data.outpoint;
-                               let res = self.claim_funds_from_hop(&mut channel_state_lock, hop_data, payment_preimage);
-                               let claimed_htlc = if let ClaimFundsFromHop::DuplicateClaim = res { false } else { true };
-                               let htlc_claim_value_msat = match res {
-                                       ClaimFundsFromHop::MonitorUpdateFail(_, _, amt_opt) => amt_opt,
-                                       ClaimFundsFromHop::Success(amt) => Some(amt),
-                                       _ => None,
-                               };
-                               if let ClaimFundsFromHop::PrevHopForceClosed = res {
-                                       let preimage_update = ChannelMonitorUpdate {
-                                               update_id: CLOSED_CHANNEL_UPDATE_ID,
-                                               updates: vec![ChannelMonitorUpdateStep::PaymentPreimage {
-                                                       payment_preimage: payment_preimage.clone(),
-                                               }],
-                                       };
-                                       // We update the ChannelMonitor on the backward link, after
-                                       // receiving an offchain preimage event from the forward link (the
-                                       // event being update_fulfill_htlc).
-                                       let update_res = self.chain_monitor.update_channel(prev_outpoint, preimage_update);
-                                       if update_res != ChannelMonitorUpdateStatus::Completed {
-                                               // TODO: This needs to be handled somehow - if we receive a monitor update
-                                               // with a preimage we *must* somehow manage to propagate it to the upstream
-                                               // channel, or we must have an ability to receive the same event and try
-                                               // again on restart.
-                                               log_error!(self.logger, "Critical error: failed to update channel monitor with preimage {:?}: {:?}",
-                                                       payment_preimage, update_res);
-                                       }
-                                       // Note that we do *not* set `claimed_htlc` to false here. In fact, this
-                                       // totally could be a duplicate claim, but we have no way of knowing
-                                       // without interrogating the `ChannelMonitor` we've provided the above
-                                       // update to. Instead, we simply document in `PaymentForwarded` that this
-                                       // can happen.
-                               }
-                               mem::drop(channel_state_lock);
-                               if let ClaimFundsFromHop::MonitorUpdateFail(pk, err, _) = res {
+                               let res = self.claim_funds_from_hop(channel_state_lock, self.per_peer_state.read().unwrap(), hop_data, payment_preimage,
+                                       |htlc_claim_value_msat| {
+                                               if let Some(forwarded_htlc_value) = forwarded_htlc_value_msat {
+                                                       let fee_earned_msat = if let Some(claimed_htlc_value) = htlc_claim_value_msat {
+                                                               Some(claimed_htlc_value - forwarded_htlc_value)
+                                                       } else { None };
+
+                                                       let prev_channel_id = Some(prev_outpoint.to_channel_id());
+                                                       let next_channel_id = Some(next_channel_id);
+
+                                                       Some(MonitorUpdateCompletionAction::EmitEvent { event: events::Event::PaymentForwarded {
+                                                               fee_earned_msat,
+                                                               claim_from_onchain_tx: from_onchain,
+                                                               prev_channel_id,
+                                                               next_channel_id,
+                                                       }})
+                                               } else { None }
+                                       });
+                               if let Err((pk, err)) = res {
                                        let result: Result<(), _> = Err(err);
                                        let _ = handle_error!(self, result, pk);
                                }
-
-                               if claimed_htlc {
-                                       if let Some(forwarded_htlc_value) = forwarded_htlc_value_msat {
-                                               let fee_earned_msat = if let Some(claimed_htlc_value) = htlc_claim_value_msat {
-                                                       Some(claimed_htlc_value - forwarded_htlc_value)
-                                               } else { None };
-
-                                               let mut pending_events = self.pending_events.lock().unwrap();
-                                               let prev_channel_id = Some(prev_outpoint.to_channel_id());
-                                               let next_channel_id = Some(next_channel_id);
-
-                                               pending_events.push(events::Event::PaymentForwarded {
-                                                       fee_earned_msat,
-                                                       claim_from_onchain_tx: from_onchain,
-                                                       prev_channel_id,
-                                                       next_channel_id,
-                                               });
-                                       }
-                               }
                        },
                }
        }
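
For reference, the `fee_earned_msat` computed in the completion action above is just the difference between the HTLC value claimed on the inbound edge and the value forwarded on the outbound edge; a concrete example:

fn main() {
	let claimed_htlc_value_msat: u64 = 1_000_100;   // inbound HTLC we claimed
	let forwarded_htlc_value_msat: u64 = 1_000_000; // amount forwarded downstream
	let fee_earned_msat = claimed_htlc_value_msat - forwarded_htlc_value_msat;
	assert_eq!(fee_earned_msat, 100); // 100 msat routing fee on this hop
}
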
@@ -4582,10 +3943,28 @@ impl<M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelManager<M, T, K, F
                self.our_network_pubkey.clone()
        }
 
+       fn handle_monitor_update_completion_actions<I: IntoIterator<Item=MonitorUpdateCompletionAction>>(&self, actions: I) {
+               for action in actions.into_iter() {
+                       match action {
+                               MonitorUpdateCompletionAction::PaymentClaimed { payment_hash } => {
+                                       let payment = self.claimable_payments.lock().unwrap().pending_claiming_payments.remove(&payment_hash);
+                                       if let Some(ClaimingPayment { amount_msat, payment_purpose: purpose, receiver_node_id }) = payment {
+                                               self.pending_events.lock().unwrap().push(events::Event::PaymentClaimed {
+                                                       payment_hash, purpose, amount_msat, receiver_node_id: Some(receiver_node_id),
+                                               });
+                                       }
+                               },
+                               MonitorUpdateCompletionAction::EmitEvent { event } => {
+                                       self.pending_events.lock().unwrap().push(event);
+                               },
+                       }
+               }
+       }
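
`MonitorUpdateCompletionAction` itself is introduced in an earlier part of this diff; judging only from the two arms handled here, its shape is roughly the following (a hypothetical reconstruction with stand-in field types, not the crate's definition):

// Stand-ins so the sketch compiles on its own; the crate uses its real types.
struct PaymentHash(pub [u8; 32]);
enum Event { PaymentClaimed { payment_hash: PaymentHash } /* , ... */ }

enum MonitorUpdateCompletionAction {
	/// Emit an Event::PaymentClaimed for this payment once its monitor update completes.
	PaymentClaimed { payment_hash: PaymentHash },
	/// Queue an arbitrary event once the monitor update completes.
	EmitEvent { event: Event },
}
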
+
        /// Handles a channel reentering a functional state, either due to reconnect or a monitor
        /// update completion.
        fn handle_channel_resumption(&self, pending_msg_events: &mut Vec<MessageSendEvent>,
-               channel: &mut Channel<<K::Target as KeysInterface>::Signer>, raa: Option<msgs::RevokeAndACK>,
+               channel: &mut Channel<<K::Target as SignerProvider>::Signer>, raa: Option<msgs::RevokeAndACK>,
                commitment_update: Option<msgs::CommitmentUpdate>, order: RAACommitmentOrder,
                pending_forwards: Vec<(PendingHTLCInfo, u64)>, funding_broadcastable: Option<Transaction>,
                channel_ready: Option<msgs::ChannelReady>, announcement_sigs: Option<msgs::AnnouncementSignatures>)
@@ -4645,22 +4024,41 @@ impl<M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelManager<M, T, K, F
                htlc_forwards
        }
 
-       fn channel_monitor_updated(&self, funding_txo: &OutPoint, highest_applied_update_id: u64) {
+       fn channel_monitor_updated(&self, funding_txo: &OutPoint, highest_applied_update_id: u64, counterparty_node_id: Option<&PublicKey>) {
                let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
 
                let htlc_forwards;
                let (mut pending_failures, finalized_claims, counterparty_node_id) = {
                        let mut channel_lock = self.channel_state.lock().unwrap();
                        let channel_state = &mut *channel_lock;
-                       let mut channel = match channel_state.by_id.entry(funding_txo.to_channel_id()) {
-                               hash_map::Entry::Occupied(chan) => chan,
-                               hash_map::Entry::Vacant(_) => return,
+                       let counterparty_node_id = match counterparty_node_id {
+                               Some(cp_id) => cp_id.clone(),
+                               None => {
+                                       // TODO: Once we can rely on the counterparty_node_id from the
+                                       // monitor event, this and the id_to_peer map should be removed.
+                                       let id_to_peer = self.id_to_peer.lock().unwrap();
+                                       match id_to_peer.get(&funding_txo.to_channel_id()) {
+                                               Some(cp_id) => cp_id.clone(),
+                                               None => return,
+                                       }
+                               }
+                       };
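
// Standalone sketch of the fallback above (std-only stand-in types): when a
// monitor event doesn't carry the counterparty's node id, it is recovered
// from the transitional channel-id -> node-id index.
use std::collections::HashMap;
use std::sync::Mutex;

fn resolve_counterparty(
	provided: Option<[u8; 33]>, channel_id: [u8; 32],
	id_to_peer: &Mutex<HashMap<[u8; 32], [u8; 33]>>,
) -> Option<[u8; 33]> {
	match provided {
		Some(cp_id) => Some(cp_id),
		// Fallback lookup; `None` here means the caller bails out early.
		None => id_to_peer.lock().unwrap().get(&channel_id).copied(),
	}
}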
+                       let per_peer_state = self.per_peer_state.read().unwrap();
+                       let mut peer_state_lock;
+                       let mut channel = {
+                               if let Some(peer_state_mutex) = per_peer_state.get(&counterparty_node_id) {
+                                       peer_state_lock = peer_state_mutex.lock().unwrap();
+                                       let peer_state = &mut *peer_state_lock;
+                                       match peer_state.channel_by_id.entry(funding_txo.to_channel_id()) {
+                                               hash_map::Entry::Occupied(chan) => chan,
+                                               hash_map::Entry::Vacant(_) => return,
+                                       }
+                               } else { return }
                        };
                        if !channel.get().is_awaiting_monitor_update() || channel.get().get_latest_monitor_update_id() != highest_applied_update_id {
                                return;
                        }
 
-                       let counterparty_node_id = channel.get().get_counterparty_node_id();
                        let updates = channel.get_mut().monitor_updating_restored(&self.logger, self.get_our_node_id(), self.genesis_hash, self.best_block.read().unwrap().height());
                        let channel_update = if updates.channel_ready.is_some() && channel.get().is_usable() {
                                // We only send a channel_update in the case where we are just now sending a
@@ -4739,36 +4137,43 @@ impl<M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelManager<M, T, K, F
 
                let mut channel_state_lock = self.channel_state.lock().unwrap();
                let channel_state = &mut *channel_state_lock;
-               match channel_state.by_id.entry(temporary_channel_id.clone()) {
-                       hash_map::Entry::Occupied(mut channel) => {
-                               if !channel.get().inbound_is_awaiting_accept() {
-                                       return Err(APIError::APIMisuseError { err: "The channel isn't currently awaiting to be accepted.".to_owned() });
-                               }
-                               if *counterparty_node_id != channel.get().get_counterparty_node_id() {
-                                       return Err(APIError::APIMisuseError { err: "The passed counterparty_node_id doesn't match the channel's counterparty node_id".to_owned() });
-                               }
-                               if accept_0conf {
-                                       channel.get_mut().set_0conf();
-                               } else if channel.get().get_channel_type().requires_zero_conf() {
-                                       let send_msg_err_event = events::MessageSendEvent::HandleError {
+               let per_peer_state = self.per_peer_state.read().unwrap();
+               if let Some(peer_state_mutex) = per_peer_state.get(counterparty_node_id) {
+                       let mut peer_state_lock = peer_state_mutex.lock().unwrap();
+                       let peer_state = &mut *peer_state_lock;
+                       match peer_state.channel_by_id.entry(temporary_channel_id.clone()) {
+                               hash_map::Entry::Occupied(mut channel) => {
+                                       if !channel.get().inbound_is_awaiting_accept() {
+                                               return Err(APIError::APIMisuseError { err: "The channel isn't currently awaiting to be accepted.".to_owned() });
+                                       }
+                                       if *counterparty_node_id != channel.get().get_counterparty_node_id() {
+                                               return Err(APIError::APIMisuseError { err: "The passed counterparty_node_id doesn't match the channel's counterparty node_id".to_owned() });
+                                       }
+                                       if accept_0conf {
+                                               channel.get_mut().set_0conf();
+                                       } else if channel.get().get_channel_type().requires_zero_conf() {
+                                               let send_msg_err_event = events::MessageSendEvent::HandleError {
+                                                       node_id: channel.get().get_counterparty_node_id(),
+                                                       action: msgs::ErrorAction::SendErrorMessage{
+                                                               msg: msgs::ErrorMessage { channel_id: temporary_channel_id.clone(), data: "No zero confirmation channels accepted".to_owned(), }
+                                                       }
+                                               };
+                                               channel_state.pending_msg_events.push(send_msg_err_event);
+                                               let _ = remove_channel!(self, channel);
+                                               return Err(APIError::APIMisuseError { err: "Please use accept_inbound_channel_from_trusted_peer_0conf to accept channels with zero confirmations.".to_owned() });
+                                       }
+
+                                       channel_state.pending_msg_events.push(events::MessageSendEvent::SendAcceptChannel {
                                                node_id: channel.get().get_counterparty_node_id(),
-                                               action: msgs::ErrorAction::SendErrorMessage{
-                                                       msg: msgs::ErrorMessage { channel_id: temporary_channel_id.clone(), data: "No zero confirmation channels accepted".to_owned(), }
-                                               }
-                                       };
-                                       channel_state.pending_msg_events.push(send_msg_err_event);
-                                       let _ = remove_channel!(self, channel);
-                                       return Err(APIError::APIMisuseError { err: "Please use accept_inbound_channel_from_trusted_peer_0conf to accept channels with zero confirmations.".to_owned() });
+                                               msg: channel.get_mut().accept_inbound_channel(user_channel_id),
+                                       });
+                               }
+                               hash_map::Entry::Vacant(_) => {
+                                       return Err(APIError::ChannelUnavailable { err: "Can't accept a channel that doesn't exist".to_owned() });
                                }
-
-                               channel_state.pending_msg_events.push(events::MessageSendEvent::SendAcceptChannel {
-                                       node_id: channel.get().get_counterparty_node_id(),
-                                       msg: channel.get_mut().accept_inbound_channel(user_channel_id),
-                               });
-                       }
-                       hash_map::Entry::Vacant(_) => {
-                               return Err(APIError::ChannelUnavailable { err: "Can't accept a channel that doesn't exist".to_owned() });
                        }
+               } else {
+                       return Err(APIError::APIMisuseError { err: format!("Can't find a peer with a node_id matching the passed counterparty_node_id {}", counterparty_node_id) });
                }
                Ok(())
        }
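
// Sketch of the two-level lookup these handlers now share, with simplified
// std-only types: an outer RwLock map of peers, each peer guarding its own
// channel map behind a Mutex. Unknown peers error out before any channel
// state is touched.
use std::collections::HashMap;
use std::sync::{Mutex, RwLock};

type NodeId = [u8; 33];
type ChannelId = [u8; 32];

struct SketchChannel;
struct SketchPeerState { channel_by_id: HashMap<ChannelId, SketchChannel> }
struct SketchMgr { per_peer_state: RwLock<HashMap<NodeId, Mutex<SketchPeerState>>> }

impl SketchMgr {
	fn with_channel<R>(
		&self, node_id: &NodeId, channel_id: &ChannelId,
		f: impl FnOnce(&mut SketchChannel) -> R,
	) -> Result<R, &'static str> {
		let per_peer_state = self.per_peer_state.read().unwrap();
		// First level: find the peer.
		let peer_state_mutex = per_peer_state.get(node_id)
			.ok_or("no peer with the passed counterparty_node_id")?;
		let mut peer_state = peer_state_mutex.lock().unwrap();
		// Second level: find the channel under this peer only.
		let chan = peer_state.channel_by_id.get_mut(channel_id)
			.ok_or("no matching channel for this peer")?;
		Ok(f(chan))
	}
}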
@@ -4799,52 +4204,64 @@ impl<M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelManager<M, T, K, F
                };
                let mut channel_state_lock = self.channel_state.lock().unwrap();
                let channel_state = &mut *channel_state_lock;
-               match channel_state.by_id.entry(channel.channel_id()) {
-                       hash_map::Entry::Occupied(_) => {
-                               self.outbound_scid_aliases.lock().unwrap().remove(&outbound_scid_alias);
-                               return Err(MsgHandleErrInternal::send_err_msg_no_close("temporary_channel_id collision!".to_owned(), msg.temporary_channel_id.clone()))
-                       },
-                       hash_map::Entry::Vacant(entry) => {
-                               if !self.default_configuration.manually_accept_inbound_channels {
-                                       if channel.get_channel_type().requires_zero_conf() {
-                                               return Err(MsgHandleErrInternal::send_err_msg_no_close("No zero confirmation channels accepted".to_owned(), msg.temporary_channel_id.clone()));
-                                       }
-                                       channel_state.pending_msg_events.push(events::MessageSendEvent::SendAcceptChannel {
-                                               node_id: counterparty_node_id.clone(),
-                                               msg: channel.accept_inbound_channel(user_channel_id),
-                                       });
-                               } else {
-                                       let mut pending_events = self.pending_events.lock().unwrap();
-                                       pending_events.push(
-                                               events::Event::OpenChannelRequest {
-                                                       temporary_channel_id: msg.temporary_channel_id.clone(),
-                                                       counterparty_node_id: counterparty_node_id.clone(),
-                                                       funding_satoshis: msg.funding_satoshis,
-                                                       push_msat: msg.push_msat,
-                                                       channel_type: channel.get_channel_type().clone(),
+               let per_peer_state = self.per_peer_state.read().unwrap();
+               if let Some(peer_state_mutex) = per_peer_state.get(counterparty_node_id) {
+                       let mut peer_state_lock = peer_state_mutex.lock().unwrap();
+                       let peer_state = &mut *peer_state_lock;
+                       match peer_state.channel_by_id.entry(channel.channel_id()) {
+                               hash_map::Entry::Occupied(_) => {
+                                       self.outbound_scid_aliases.lock().unwrap().remove(&outbound_scid_alias);
+                                       return Err(MsgHandleErrInternal::send_err_msg_no_close("temporary_channel_id collision for the same peer!".to_owned(), msg.temporary_channel_id.clone()))
+                               },
+                               hash_map::Entry::Vacant(entry) => {
+                                       if !self.default_configuration.manually_accept_inbound_channels {
+                                               if channel.get_channel_type().requires_zero_conf() {
+                                                       return Err(MsgHandleErrInternal::send_err_msg_no_close("No zero confirmation channels accepted".to_owned(), msg.temporary_channel_id.clone()));
                                                }
-                                       );
-                               }
+                                               channel_state.pending_msg_events.push(events::MessageSendEvent::SendAcceptChannel {
+                                                       node_id: counterparty_node_id.clone(),
+                                                       msg: channel.accept_inbound_channel(user_channel_id),
+                                               });
+                                       } else {
+                                               let mut pending_events = self.pending_events.lock().unwrap();
+                                               pending_events.push(
+                                                       events::Event::OpenChannelRequest {
+                                                               temporary_channel_id: msg.temporary_channel_id.clone(),
+                                                               counterparty_node_id: counterparty_node_id.clone(),
+                                                               funding_satoshis: msg.funding_satoshis,
+                                                               push_msat: msg.push_msat,
+                                                               channel_type: channel.get_channel_type().clone(),
+                                                       }
+                                               );
+                                       }
 
-                               entry.insert(channel);
+                                       entry.insert(channel);
+                               }
                        }
+               } else {
+                       return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer with a node_id matching the passed counterparty_node_id {}", counterparty_node_id), msg.temporary_channel_id.clone()))
                }
                Ok(())
        }
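
// Sketch of the manual-accept gate above (names simplified): with manual
// acceptance off, the node replies immediately, rejecting zero-conf
// requests; with it on, the decision is deferred to the user via an
// OpenChannelRequest-style event.
enum OpenChannelResponse { Accept, Reject(&'static str), AskUser }

fn on_open_channel(manually_accept: bool, requires_zero_conf: bool) -> OpenChannelResponse {
	if !manually_accept {
		if requires_zero_conf {
			return OpenChannelResponse::Reject("No zero confirmation channels accepted");
		}
		OpenChannelResponse::Accept
	} else {
		OpenChannelResponse::AskUser
	}
}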
 
        fn internal_accept_channel(&self, counterparty_node_id: &PublicKey, their_features: InitFeatures, msg: &msgs::AcceptChannel) -> Result<(), MsgHandleErrInternal> {
                let (value, output_script, user_id) = {
-                       let mut channel_lock = self.channel_state.lock().unwrap();
-                       let channel_state = &mut *channel_lock;
-                       match channel_state.by_id.entry(msg.temporary_channel_id) {
-                               hash_map::Entry::Occupied(mut chan) => {
-                                       if chan.get().get_counterparty_node_id() != *counterparty_node_id {
-                                               return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.temporary_channel_id));
-                                       }
-                                       try_chan_entry!(self, chan.get_mut().accept_channel(&msg, &self.default_configuration.channel_handshake_limits, &their_features), chan);
-                                       (chan.get().get_value_satoshis(), chan.get().get_funding_redeemscript().to_v0_p2wsh(), chan.get().get_user_id())
-                               },
-                               hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.temporary_channel_id))
+                       let per_peer_state = self.per_peer_state.read().unwrap();
+                       if let Some(peer_state_mutex) = per_peer_state.get(counterparty_node_id) {
+                               let mut peer_state_lock = peer_state_mutex.lock().unwrap();
+                               let peer_state = &mut *peer_state_lock;
+                               match peer_state.channel_by_id.entry(msg.temporary_channel_id) {
+                                       hash_map::Entry::Occupied(mut chan) => {
+                                               if chan.get().get_counterparty_node_id() != *counterparty_node_id {
+                                                       return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.temporary_channel_id));
+                                               }
+                                               try_chan_entry!(self, chan.get_mut().accept_channel(&msg, &self.default_configuration.channel_handshake_limits, &their_features), chan);
+                                               (chan.get().get_value_satoshis(), chan.get().get_funding_redeemscript().to_v0_p2wsh(), chan.get().get_user_id())
+                                       },
+                                       hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.temporary_channel_id))
+                               }
+                       } else {
+                               return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer with a node_id matching the passed counterparty_node_id {}", counterparty_node_id), msg.temporary_channel_id))
                        }
                };
                let mut pending_events = self.pending_events.lock().unwrap();
@@ -4859,21 +4276,28 @@ impl<M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelManager<M, T, K, F
        }
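
// Sketch of the lock discipline this handler follows: copy what's needed out
// while the per-peer lock is held, drop it, and only then touch other state
// (here, the pending-events queue). Simplified std-only types.
use std::sync::Mutex;

struct EventSketch { state: Mutex<u64>, pending_events: Mutex<Vec<String>> }

impl EventSketch {
	fn handle(&self) {
		let value = {
			let state = self.state.lock().unwrap();
			*state // copy out while locked
		}; // state lock dropped here
		// No channel lock is held while pushing the event.
		self.pending_events.lock().unwrap().push(format!("value={}", value));
	}
}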
 
        fn internal_funding_created(&self, counterparty_node_id: &PublicKey, msg: &msgs::FundingCreated) -> Result<(), MsgHandleErrInternal> {
+               let mut channel_state_lock = self.channel_state.lock().unwrap();
+               let channel_state = &mut *channel_state_lock;
+               let per_peer_state = self.per_peer_state.read().unwrap();
                let ((funding_msg, monitor, mut channel_ready), mut chan) = {
                        let best_block = *self.best_block.read().unwrap();
-                       let mut channel_lock = self.channel_state.lock().unwrap();
-                       let channel_state = &mut *channel_lock;
-                       match channel_state.by_id.entry(msg.temporary_channel_id.clone()) {
-                               hash_map::Entry::Occupied(mut chan) => {
-                                       if chan.get().get_counterparty_node_id() != *counterparty_node_id {
-                                               return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.temporary_channel_id));
-                                       }
-                                       (try_chan_entry!(self, chan.get_mut().funding_created(msg, best_block, &self.keys_manager, &self.logger), chan), chan.remove())
-                               },
-                               hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.temporary_channel_id))
+                       if let Some(peer_state_mutex) = per_peer_state.get(counterparty_node_id) {
+                               let mut peer_state_lock = peer_state_mutex.lock().unwrap();
+                               let peer_state = &mut *peer_state_lock;
+                               match peer_state.channel_by_id.entry(msg.temporary_channel_id) {
+                                       hash_map::Entry::Occupied(mut chan) => {
+                                               if chan.get().get_counterparty_node_id() != *counterparty_node_id {
+                                                       return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.temporary_channel_id));
+                                               }
+                                               (try_chan_entry!(self, chan.get_mut().funding_created(msg, best_block, &self.keys_manager, &self.logger), chan), chan.remove())
+                                       },
+                                       hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.temporary_channel_id))
+                               }
+                       } else {
+                               return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer with a node_id matching the passed counterparty_node_id {}", counterparty_node_id), msg.temporary_channel_id))
                        }
                };
-               // Because we have exclusive ownership of the channel here we can release the channel_state
+               // Because we have exclusive ownership of the channel here, we can release the peer_state
                // lock before watch_channel
                match self.chain_monitor.watch_channel(monitor.get_funding_txo().0, monitor) {
                        ChannelMonitorUpdateStatus::Completed => {},
@@ -4898,9 +4322,13 @@ impl<M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelManager<M, T, K, F
                                channel_ready = None; // Don't send the channel_ready now
                        },
                }
-               let mut channel_state_lock = self.channel_state.lock().unwrap();
-               let channel_state = &mut *channel_state_lock;
-               match channel_state.by_id.entry(funding_msg.channel_id) {
+               // It's safe to unwrap here: we've held the `per_peer_state` read lock ever since
+               // checking that the peer exists, so its entry can't have been removed, even though
+               // the inner `PeerState` may hold no channels after the channel removal above.
+               let peer_state_mutex = per_peer_state.get(counterparty_node_id).unwrap();
+               let mut peer_state_lock = peer_state_mutex.lock().unwrap();
+               let peer_state = &mut *peer_state_lock;
+               match peer_state.channel_by_id.entry(funding_msg.channel_id) {
                        hash_map::Entry::Occupied(_) => {
                                return Err(MsgHandleErrInternal::send_err_msg_no_close("Already had channel with the new channel_id".to_owned(), funding_msg.channel_id))
                        },
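
// Sketch of why the `get(...).unwrap()` above can't fail (std-only stand-in
// types): the same `read()` guard on the outer lock is held across both
// lookups, so no writer can remove the peer entry in between, even while the
// peer's own Mutex is released and re-taken.
use std::collections::HashMap;
use std::sync::{Mutex, RwLock};

fn move_channel(
	per_peer: &RwLock<HashMap<u8, Mutex<HashMap<u32, String>>>>,
	peer: u8, old_id: u32, new_id: u32,
) -> Result<(), &'static str> {
	let guard = per_peer.read().unwrap(); // held until the end of the fn
	let chan = {
		let mut chans = guard.get(&peer).ok_or("unknown peer")?.lock().unwrap();
		chans.remove(&old_id).ok_or("unknown channel")?
		// the peer's Mutex is released here; the read guard is not
	};
	// Safe: `guard` was never dropped, so the peer entry still exists even
	// if its channel map is momentarily empty.
	let mut chans = guard.get(&peer).unwrap().lock().unwrap();
	chans.insert(new_id, chan);
	Ok(())
}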
@@ -4934,36 +4362,43 @@ impl<M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelManager<M, T, K, F
                        let best_block = *self.best_block.read().unwrap();
                        let mut channel_lock = self.channel_state.lock().unwrap();
                        let channel_state = &mut *channel_lock;
-                       match channel_state.by_id.entry(msg.channel_id) {
-                               hash_map::Entry::Occupied(mut chan) => {
-                                       if chan.get().get_counterparty_node_id() != *counterparty_node_id {
-                                               return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id));
-                                       }
-                                       let (monitor, funding_tx, channel_ready) = match chan.get_mut().funding_signed(&msg, best_block, &self.keys_manager, &self.logger) {
-                                               Ok(update) => update,
-                                               Err(e) => try_chan_entry!(self, Err(e), chan),
-                                       };
-                                       match self.chain_monitor.watch_channel(chan.get().get_funding_txo().unwrap(), monitor) {
-                                               ChannelMonitorUpdateStatus::Completed => {},
-                                               e => {
-                                                       let mut res = handle_monitor_update_res!(self, e, chan, RAACommitmentOrder::RevokeAndACKFirst, channel_ready.is_some(), OPTIONALLY_RESEND_FUNDING_LOCKED);
-                                                       if let Err(MsgHandleErrInternal { ref mut shutdown_finish, .. }) = res {
-                                                               // We weren't able to watch the channel to begin with, so no updates should be made on
-                                                               // it. Previously, full_stack_target found an (unreachable) panic when the
-                                                               // monitor update contained within `shutdown_finish` was applied.
-                                                               if let Some((ref mut shutdown_finish, _)) = shutdown_finish {
-                                                                       shutdown_finish.0.take();
+                       let per_peer_state = self.per_peer_state.read().unwrap();
+                       if let Some(peer_state_mutex) = per_peer_state.get(counterparty_node_id) {
+                               let mut peer_state_lock = peer_state_mutex.lock().unwrap();
+                               let peer_state = &mut *peer_state_lock;
+                               match peer_state.channel_by_id.entry(msg.channel_id) {
+                                       hash_map::Entry::Occupied(mut chan) => {
+                                               if chan.get().get_counterparty_node_id() != *counterparty_node_id {
+                                                       return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id));
+                                               }
+                                               let (monitor, funding_tx, channel_ready) = match chan.get_mut().funding_signed(&msg, best_block, &self.keys_manager, &self.logger) {
+                                                       Ok(update) => update,
+                                                       Err(e) => try_chan_entry!(self, Err(e), chan),
+                                               };
+                                               match self.chain_monitor.watch_channel(chan.get().get_funding_txo().unwrap(), monitor) {
+                                                       ChannelMonitorUpdateStatus::Completed => {},
+                                                       e => {
+                                                               let mut res = handle_monitor_update_res!(self, e, chan, RAACommitmentOrder::RevokeAndACKFirst, channel_ready.is_some(), OPTIONALLY_RESEND_FUNDING_LOCKED);
+                                                               if let Err(MsgHandleErrInternal { ref mut shutdown_finish, .. }) = res {
+                                                                       // We weren't able to watch the channel to begin with, so no updates should be made on
+                                                                       // it. Previously, full_stack_target found an (unreachable) panic when the
+                                                                       // monitor update contained within `shutdown_finish` was applied.
+                                                                       if let Some((ref mut shutdown_finish, _)) = shutdown_finish {
+                                                                               shutdown_finish.0.take();
+                                                                       }
                                                                }
-                                                       }
-                                                       return res
-                                               },
-                                       }
-                                       if let Some(msg) = channel_ready {
-                                               send_channel_ready!(self, channel_state.pending_msg_events, chan.get(), msg);
-                                       }
-                                       funding_tx
-                               },
-                               hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id))
+                                                               return res
+                                                       },
+                                               }
+                                               if let Some(msg) = channel_ready {
+                                                       send_channel_ready!(self, channel_state.pending_msg_events, chan.get(), msg);
+                                               }
+                                               funding_tx
+                                       },
+                                       hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id))
+                               }
+                       } else {
+                               return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer with a node_id matching the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
                        }
                };
                log_info!(self.logger, "Broadcasting funding transaction with txid {}", funding_tx.txid());
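
// Sketch of the ordering invariant in the funding path (variant names are
// illustrative stand-ins, not the exact LDK enum): the channel monitor must
// be registered with the chain watcher before the funding transaction is
// broadcast, so any old-state broadcast is already being watched.
enum WatchStatus { Completed, InProgress, PermanentFailure }

fn finish_funding(
	watch: impl FnOnce() -> WatchStatus, broadcast: impl FnOnce(),
) -> Result<(), &'static str> {
	match watch() {
		WatchStatus::Completed => {},
		// Pause here; broadcasting resumes once the monitor update completes.
		WatchStatus::InProgress => return Ok(()),
		WatchStatus::PermanentFailure => return Err("failed to watch channel"),
	}
	broadcast(); // only after the monitor is in place
	Ok(())
}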
@@ -4974,39 +4409,46 @@ impl<M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelManager<M, T, K, F
        fn internal_channel_ready(&self, counterparty_node_id: &PublicKey, msg: &msgs::ChannelReady) -> Result<(), MsgHandleErrInternal> {
                let mut channel_state_lock = self.channel_state.lock().unwrap();
                let channel_state = &mut *channel_state_lock;
-               match channel_state.by_id.entry(msg.channel_id) {
-                       hash_map::Entry::Occupied(mut chan) => {
-                               if chan.get().get_counterparty_node_id() != *counterparty_node_id {
-                                       return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id));
-                               }
-                               let announcement_sigs_opt = try_chan_entry!(self, chan.get_mut().channel_ready(&msg, self.get_our_node_id(),
-                                       self.genesis_hash.clone(), &self.best_block.read().unwrap(), &self.logger), chan);
-                               if let Some(announcement_sigs) = announcement_sigs_opt {
-                                       log_trace!(self.logger, "Sending announcement_signatures for channel {}", log_bytes!(chan.get().channel_id()));
-                                       channel_state.pending_msg_events.push(events::MessageSendEvent::SendAnnouncementSignatures {
-                                               node_id: counterparty_node_id.clone(),
-                                               msg: announcement_sigs,
-                                       });
-                               } else if chan.get().is_usable() {
-                                       // If we're sending an announcement_signatures, we'll send the (public)
-                                       // channel_update after sending a channel_announcement when we receive our
-                                       // counterparty's announcement_signatures. Thus, we only bother to send a
-                                       // channel_update here if the channel is not public, i.e. we're not sending an
-                                       // announcement_signatures.
-                                       log_trace!(self.logger, "Sending private initial channel_update for our counterparty on channel {}", log_bytes!(chan.get().channel_id()));
-                                       if let Ok(msg) = self.get_channel_update_for_unicast(chan.get()) {
-                                               channel_state.pending_msg_events.push(events::MessageSendEvent::SendChannelUpdate {
+               let per_peer_state = self.per_peer_state.read().unwrap();
+               if let Some(peer_state_mutex) = per_peer_state.get(counterparty_node_id) {
+                       let mut peer_state_lock = peer_state_mutex.lock().unwrap();
+                       let peer_state = &mut *peer_state_lock;
+                       match peer_state.channel_by_id.entry(msg.channel_id) {
+                               hash_map::Entry::Occupied(mut chan) => {
+                                       if chan.get().get_counterparty_node_id() != *counterparty_node_id {
+                                               return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id));
+                                       }
+                                       let announcement_sigs_opt = try_chan_entry!(self, chan.get_mut().channel_ready(&msg, self.get_our_node_id(),
+                                               self.genesis_hash.clone(), &self.best_block.read().unwrap(), &self.logger), chan);
+                                       if let Some(announcement_sigs) = announcement_sigs_opt {
+                                               log_trace!(self.logger, "Sending announcement_signatures for channel {}", log_bytes!(chan.get().channel_id()));
+                                               channel_state.pending_msg_events.push(events::MessageSendEvent::SendAnnouncementSignatures {
                                                        node_id: counterparty_node_id.clone(),
-                                                       msg,
+                                                       msg: announcement_sigs,
                                                });
+                                       } else if chan.get().is_usable() {
+                                               // If we're sending an announcement_signatures, we'll send the (public)
+                                               // channel_update after sending a channel_announcement when we receive our
+                                               // counterparty's announcement_signatures. Thus, we only bother to send a
+                                               // channel_update here if the channel is not public, i.e. we're not sending an
+                                               // announcement_signatures.
+                                               log_trace!(self.logger, "Sending private initial channel_update for our counterparty on channel {}", log_bytes!(chan.get().channel_id()));
+                                               if let Ok(msg) = self.get_channel_update_for_unicast(chan.get()) {
+                                                       channel_state.pending_msg_events.push(events::MessageSendEvent::SendChannelUpdate {
+                                                               node_id: counterparty_node_id.clone(),
+                                                               msg,
+                                                       });
+                                               }
                                        }
-                               }
 
-                               emit_channel_ready_event!(self, chan.get_mut());
+                                       emit_channel_ready_event!(self, chan.get_mut());
 
-                               Ok(())
-                       },
-                       hash_map::Entry::Vacant(_) => Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id))
+                                       Ok(())
+                               },
+                               hash_map::Entry::Vacant(_) => Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id))
+                       }
+               } else {
+                       Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer with a node_id matching the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
                }
        }
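
// Sketch of the branch above, simplified: on channel_ready, a public channel
// defers its channel_update until the announcement_signatures exchange
// completes, while a usable private channel gets an immediate unicast
// channel_update.
enum ReadyMsg { AnnouncementSignatures, ChannelUpdate }

fn msg_on_channel_ready(have_announcement_sigs: bool, is_usable: bool) -> Option<ReadyMsg> {
	if have_announcement_sigs {
		Some(ReadyMsg::AnnouncementSignatures) // channel_update follows later
	} else if is_usable {
		Some(ReadyMsg::ChannelUpdate) // private path: send it now
	} else {
		None
	}
}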
 
@@ -5015,43 +4457,49 @@ impl<M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelManager<M, T, K, F
                let result: Result<(), _> = loop {
                        let mut channel_state_lock = self.channel_state.lock().unwrap();
                        let channel_state = &mut *channel_state_lock;
+                       let per_peer_state = self.per_peer_state.read().unwrap();
+                       if let Some(peer_state_mutex) = per_peer_state.get(counterparty_node_id) {
+                               let mut peer_state_lock = peer_state_mutex.lock().unwrap();
+                               let peer_state = &mut *peer_state_lock;
+                               match peer_state.channel_by_id.entry(msg.channel_id.clone()) {
+                                       hash_map::Entry::Occupied(mut chan_entry) => {
+                                               if chan_entry.get().get_counterparty_node_id() != *counterparty_node_id {
+                                                       return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id));
+                                               }
 
-                       match channel_state.by_id.entry(msg.channel_id.clone()) {
-                               hash_map::Entry::Occupied(mut chan_entry) => {
-                                       if chan_entry.get().get_counterparty_node_id() != *counterparty_node_id {
-                                               return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id));
-                                       }
-
-                                       if !chan_entry.get().received_shutdown() {
-                                               log_info!(self.logger, "Received a shutdown message from our counterparty for channel {}{}.",
-                                                       log_bytes!(msg.channel_id),
-                                                       if chan_entry.get().sent_shutdown() { " after we initiated shutdown" } else { "" });
-                                       }
+                                               if !chan_entry.get().received_shutdown() {
+                                                       log_info!(self.logger, "Received a shutdown message from our counterparty for channel {}{}.",
+                                                               log_bytes!(msg.channel_id),
+                                                               if chan_entry.get().sent_shutdown() { " after we initiated shutdown" } else { "" });
+                                               }
 
-                                       let (shutdown, monitor_update, htlcs) = try_chan_entry!(self, chan_entry.get_mut().shutdown(&self.keys_manager, &their_features, &msg), chan_entry);
-                                       dropped_htlcs = htlcs;
-
-                                       // Update the monitor with the shutdown script if necessary.
-                                       if let Some(monitor_update) = monitor_update {
-                                               let update_res = self.chain_monitor.update_channel(chan_entry.get().get_funding_txo().unwrap(), monitor_update);
-                                               let (result, is_permanent) =
-                                                       handle_monitor_update_res!(self, update_res, chan_entry.get_mut(), RAACommitmentOrder::CommitmentFirst, chan_entry.key(), NO_UPDATE);
-                                               if is_permanent {
-                                                       remove_channel!(self, chan_entry);
-                                                       break result;
+                                               let (shutdown, monitor_update, htlcs) = try_chan_entry!(self, chan_entry.get_mut().shutdown(&self.keys_manager, &their_features, &msg), chan_entry);
+                                               dropped_htlcs = htlcs;
+
+                                               // Update the monitor with the shutdown script if necessary.
+                                               if let Some(monitor_update) = monitor_update {
+                                                       let update_res = self.chain_monitor.update_channel(chan_entry.get().get_funding_txo().unwrap(), monitor_update);
+                                                       let (result, is_permanent) =
+                                                               handle_monitor_update_res!(self, update_res, chan_entry.get_mut(), RAACommitmentOrder::CommitmentFirst, chan_entry.key(), NO_UPDATE);
+                                                       if is_permanent {
+                                                               remove_channel!(self, chan_entry);
+                                                               break result;
+                                                       }
                                                }
-                                       }
 
-                                       if let Some(msg) = shutdown {
-                                               channel_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown {
-                                                       node_id: *counterparty_node_id,
-                                                       msg,
-                                               });
-                                       }
+                                               if let Some(msg) = shutdown {
+                                                       channel_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown {
+                                                               node_id: *counterparty_node_id,
+                                                               msg,
+                                                       });
+                                               }
 
-                                       break Ok(());
-                               },
-                               hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id))
+                                               break Ok(());
+                                       },
+                                       hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id))
+                               }
+                       } else {
+                               return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer with a node_id matching the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
                        }
                };
                for htlc_source in dropped_htlcs.drain(..) {
@@ -5068,28 +4516,35 @@ impl<M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelManager<M, T, K, F
                let (tx, chan_option) = {
                        let mut channel_state_lock = self.channel_state.lock().unwrap();
                        let channel_state = &mut *channel_state_lock;
-                       match channel_state.by_id.entry(msg.channel_id.clone()) {
-                               hash_map::Entry::Occupied(mut chan_entry) => {
-                                       if chan_entry.get().get_counterparty_node_id() != *counterparty_node_id {
-                                               return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id));
-                                       }
-                                       let (closing_signed, tx) = try_chan_entry!(self, chan_entry.get_mut().closing_signed(&self.fee_estimator, &msg), chan_entry);
-                                       if let Some(msg) = closing_signed {
-                                               channel_state.pending_msg_events.push(events::MessageSendEvent::SendClosingSigned {
-                                                       node_id: counterparty_node_id.clone(),
-                                                       msg,
-                                               });
-                                       }
-                                       if tx.is_some() {
-                                               // We're done with this channel, we've got a signed closing transaction and
-                                               // will send the closing_signed back to the remote peer upon return. This
-                                               // also implies there are no pending HTLCs left on the channel, so we can
-                                               // fully delete it from tracking (the channel monitor is still around to
-                                               // watch for old state broadcasts)!
-                                               (tx, Some(remove_channel!(self, chan_entry)))
-                                       } else { (tx, None) }
-                               },
-                               hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id))
+                       let per_peer_state = self.per_peer_state.read().unwrap();
+                       if let Some(peer_state_mutex) = per_peer_state.get(counterparty_node_id) {
+                               let mut peer_state_lock = peer_state_mutex.lock().unwrap();
+                               let peer_state = &mut *peer_state_lock;
+                               match peer_state.channel_by_id.entry(msg.channel_id.clone()) {
+                                       hash_map::Entry::Occupied(mut chan_entry) => {
+                                               if chan_entry.get().get_counterparty_node_id() != *counterparty_node_id {
+                                                       return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id));
+                                               }
+                                               let (closing_signed, tx) = try_chan_entry!(self, chan_entry.get_mut().closing_signed(&self.fee_estimator, &msg), chan_entry);
+                                               if let Some(msg) = closing_signed {
+                                                       channel_state.pending_msg_events.push(events::MessageSendEvent::SendClosingSigned {
+                                                               node_id: counterparty_node_id.clone(),
+                                                               msg,
+                                                       });
+                                               }
+                                               if tx.is_some() {
+                                                       // We're done with this channel, we've got a signed closing transaction and
+                                                       // will send the closing_signed back to the remote peer upon return. This
+                                                       // also implies there are no pending HTLCs left on the channel, so we can
+                                                       // fully delete it from tracking (the channel monitor is still around to
+                                                       // watch for old state broadcasts)!
+                                                       (tx, Some(remove_channel!(self, chan_entry)))
+                                               } else { (tx, None) }
+                                       },
+                                       hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id))
+                               }
+                       } else {
+                               return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer with a node_id matching the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
                        }
                };
                if let Some(broadcast_tx) = tx {
@@ -5119,49 +4574,59 @@ impl<M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelManager<M, T, K, F
                //but we should prevent it anyway.
 
                let pending_forward_info = self.decode_update_add_htlc_onion(msg);
-               let mut channel_state_lock = self.channel_state.lock().unwrap();
-               let channel_state = &mut *channel_state_lock;
-
-               match channel_state.by_id.entry(msg.channel_id) {
-                       hash_map::Entry::Occupied(mut chan) => {
-                               if chan.get().get_counterparty_node_id() != *counterparty_node_id {
-                                       return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id));
-                               }
-
-                               let create_pending_htlc_status = |chan: &Channel<<K::Target as KeysInterface>::Signer>, pending_forward_info: PendingHTLCStatus, error_code: u16| {
-                                       // If the update_add is completely bogus, the call will Err and we will close,
-                                       // but if we've sent a shutdown and they haven't acknowledged it yet, we just
-                                       // want to reject the new HTLC and fail it backwards instead of forwarding.
-                                       match pending_forward_info {
-                                               PendingHTLCStatus::Forward(PendingHTLCInfo { ref incoming_shared_secret, .. }) => {
-                                                       let reason = if (error_code & 0x1000) != 0 {
-                                                               let (real_code, error_data) = self.get_htlc_inbound_temp_fail_err_and_data(error_code, chan);
-                                                               onion_utils::build_first_hop_failure_packet(incoming_shared_secret, real_code, &error_data)
-                                                       } else {
-                                                               onion_utils::build_first_hop_failure_packet(incoming_shared_secret, error_code, &[])
-                                                       };
-                                                       let msg = msgs::UpdateFailHTLC {
-                                                               channel_id: msg.channel_id,
-                                                               htlc_id: msg.htlc_id,
-                                                               reason
-                                                       };
-                                                       PendingHTLCStatus::Fail(HTLCFailureMsg::Relay(msg))
-                                               },
-                                               _ => pending_forward_info
+               let per_peer_state = self.per_peer_state.read().unwrap();
+               if let Some(peer_state_mutex) = per_peer_state.get(counterparty_node_id) {
+                       let mut peer_state_lock = peer_state_mutex.lock().unwrap();
+                       let peer_state = &mut *peer_state_lock;
+                       match peer_state.channel_by_id.entry(msg.channel_id) {
+                               hash_map::Entry::Occupied(mut chan) => {
+                                       if chan.get().get_counterparty_node_id() != *counterparty_node_id {
+                                               return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id));
                                        }
-                               };
-                               try_chan_entry!(self, chan.get_mut().update_add_htlc(&msg, pending_forward_info, create_pending_htlc_status, &self.logger), chan);
-                       },
-                       hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id))
+
+                                       let create_pending_htlc_status = |chan: &Channel<<K::Target as SignerProvider>::Signer>, pending_forward_info: PendingHTLCStatus, error_code: u16| {
+                                               // If the update_add is completely bogus, the call will Err and we will close,
+                                               // but if we've sent a shutdown and they haven't acknowledged it yet, we just
+                                               // want to reject the new HTLC and fail it backwards instead of forwarding.
+                                               match pending_forward_info {
+                                                       PendingHTLCStatus::Forward(PendingHTLCInfo { ref incoming_shared_secret, .. }) => {
+                                                               let reason = if (error_code & 0x1000) != 0 {
+                                                                       let (real_code, error_data) = self.get_htlc_inbound_temp_fail_err_and_data(error_code, chan);
+                                                                       HTLCFailReason::reason(real_code, error_data)
+                                                               } else {
+                                                                       HTLCFailReason::from_failure_code(error_code)
+                                                               }.get_encrypted_failure_packet(incoming_shared_secret, &None);
+                                                               let msg = msgs::UpdateFailHTLC {
+                                                                       channel_id: msg.channel_id,
+                                                                       htlc_id: msg.htlc_id,
+                                                                       reason
+                                                               };
+                                                               PendingHTLCStatus::Fail(HTLCFailureMsg::Relay(msg))
+                                                       },
+                                                       _ => pending_forward_info
+                                               }
+                                       };
+                                       try_chan_entry!(self, chan.get_mut().update_add_htlc(&msg, pending_forward_info, create_pending_htlc_status, &self.logger), chan);
+                               },
+                               hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id))
+                       }
+               } else {
+                       return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer with a node_id matching the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
                }
                Ok(())
        }
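
// Sketch of the failure-code flag test above. Per BOLT 4, onion failure
// codes carry flag bits in the top nibble; 0x1000 is UPDATE, meaning the
// failure data must include a channel_update, which is why extra error data
// is fetched before building the failure packet.
const BADONION: u16 = 0x8000; // also checked in update_fail_malformed_htlc below
const PERM: u16 = 0x4000;
const NODE: u16 = 0x2000;
const UPDATE: u16 = 0x1000;

fn needs_channel_update_data(error_code: u16) -> bool {
	(error_code & UPDATE) != 0
}

fn main() {
	assert!(needs_channel_update_data(UPDATE | 7)); // temporary_channel_failure
	assert!(!needs_channel_update_data(PERM | 10)); // unknown_next_peer
	let _ = (BADONION, NODE);
}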
 
        fn internal_update_fulfill_htlc(&self, counterparty_node_id: &PublicKey, msg: &msgs::UpdateFulfillHTLC) -> Result<(), MsgHandleErrInternal> {
-               let mut channel_lock = self.channel_state.lock().unwrap();
+               let channel_lock = self.channel_state.lock().unwrap();
                let (htlc_source, forwarded_htlc_value) = {
-                       let channel_state = &mut *channel_lock;
-                       match channel_state.by_id.entry(msg.channel_id) {
+                       let per_peer_state = self.per_peer_state.read().unwrap();
+                       let peer_state_mutex = match per_peer_state.get(counterparty_node_id) {
+                               Some(peer_state_mutex) => peer_state_mutex,
+                               None => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer with a node_id matching the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id)),
+                       };
+                       let mut peer_state_lock = peer_state_mutex.lock().unwrap();
+                       let peer_state = &mut *peer_state_lock;
+                       match peer_state.channel_by_id.entry(msg.channel_id) {
                                hash_map::Entry::Occupied(mut chan) => {
                                        if chan.get().get_counterparty_node_id() != *counterparty_node_id {
                                                return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id));
@@ -5176,83 +4641,100 @@ impl<M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelManager<M, T, K, F
        }
 
        fn internal_update_fail_htlc(&self, counterparty_node_id: &PublicKey, msg: &msgs::UpdateFailHTLC) -> Result<(), MsgHandleErrInternal> {
-               let mut channel_lock = self.channel_state.lock().unwrap();
-               let channel_state = &mut *channel_lock;
-               match channel_state.by_id.entry(msg.channel_id) {
-                       hash_map::Entry::Occupied(mut chan) => {
-                               if chan.get().get_counterparty_node_id() != *counterparty_node_id {
-                                       return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id));
-                               }
-                               try_chan_entry!(self, chan.get_mut().update_fail_htlc(&msg, HTLCFailReason::LightningError { err: msg.reason.clone() }), chan);
-                       },
-                       hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id))
+               let per_peer_state = self.per_peer_state.read().unwrap();
+               if let Some(peer_state_mutex) = per_peer_state.get(counterparty_node_id) {
+                       let mut peer_state_lock = peer_state_mutex.lock().unwrap();
+                       let peer_state = &mut *peer_state_lock;
+                       match peer_state.channel_by_id.entry(msg.channel_id) {
+                               hash_map::Entry::Occupied(mut chan) => {
+                                       if chan.get().get_counterparty_node_id() != *counterparty_node_id {
+                                               return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id));
+                                       }
+                                       try_chan_entry!(self, chan.get_mut().update_fail_htlc(&msg, HTLCFailReason::from_msg(msg)), chan);
+                               },
+                               hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id))
+                       }
+               } else {
+                       return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer with a node_id matching the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id));
                }
                Ok(())
        }
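
The same lookup shape repeats in each handler in this patch: read-lock the top-level per-peer map, then lock only the target peer's state. Below is a minimal sketch of that two-level scheme with illustrative stand-in types, not LDK's actual ones (the real per-peer state holds `Channel`s, not strings):

    use std::collections::HashMap;
    use std::sync::{Mutex, RwLock};

    type NodeId = [u8; 33];
    type ChannelId = [u8; 32];

    // Stand-in for the real per-peer state.
    struct PeerState { channel_by_id: HashMap<ChannelId, String> }

    struct Manager { per_peer_state: RwLock<HashMap<NodeId, Mutex<PeerState>>> }

    impl Manager {
        fn with_channel<R>(
            &self, node_id: &NodeId, chan_id: &ChannelId,
            f: impl FnOnce(&mut String) -> R,
        ) -> Result<R, &'static str> {
            // Read-lock the outer map: handlers for different peers run in
            // parallel, and only this peer's Mutex serializes access.
            let per_peer_state = self.per_peer_state.read().unwrap();
            let peer_state_mutex = per_peer_state.get(node_id).ok_or("no such peer")?;
            let mut peer_state = peer_state_mutex.lock().unwrap();
            let chan = peer_state.channel_by_id.get_mut(chan_id).ok_or("no such channel")?;
            Ok(f(chan))
        }
    }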
 
        fn internal_update_fail_malformed_htlc(&self, counterparty_node_id: &PublicKey, msg: &msgs::UpdateFailMalformedHTLC) -> Result<(), MsgHandleErrInternal> {
-               let mut channel_lock = self.channel_state.lock().unwrap();
-               let channel_state = &mut *channel_lock;
-               match channel_state.by_id.entry(msg.channel_id) {
-                       hash_map::Entry::Occupied(mut chan) => {
-                               if chan.get().get_counterparty_node_id() != *counterparty_node_id {
-                                       return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id));
-                               }
-                               if (msg.failure_code & 0x8000) == 0 {
-                                       let chan_err: ChannelError = ChannelError::Close("Got update_fail_malformed_htlc with BADONION not set".to_owned());
-                                       try_chan_entry!(self, Err(chan_err), chan);
-                               }
-                               try_chan_entry!(self, chan.get_mut().update_fail_malformed_htlc(&msg, HTLCFailReason::from_failure_code(msg.failure_code)), chan);
-                               Ok(())
-                       },
-                       hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id))
+               let per_peer_state = self.per_peer_state.read().unwrap();
+               if let Some(peer_state_mutex) = per_peer_state.get(counterparty_node_id) {
+                       let mut peer_state_lock = peer_state_mutex.lock().unwrap();
+                       let peer_state = &mut *peer_state_lock;
+                       match peer_state.channel_by_id.entry(msg.channel_id) {
+                               hash_map::Entry::Occupied(mut chan) => {
+                                       if chan.get().get_counterparty_node_id() != *counterparty_node_id {
+                                               return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id));
+                                       }
+                                       if (msg.failure_code & 0x8000) == 0 {
+                                               let chan_err: ChannelError = ChannelError::Close("Got update_fail_malformed_htlc with BADONION not set".to_owned());
+                                               try_chan_entry!(self, Err(chan_err), chan);
+                                       }
+                                       try_chan_entry!(self, chan.get_mut().update_fail_malformed_htlc(&msg, HTLCFailReason::reason(msg.failure_code, msg.sha256_of_onion.to_vec())), chan);
+                                       Ok(())
+                               },
+                               hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id))
+                       }
+               } else {
+                       return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer with a node_id matching the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
                }
        }
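
The `failure_code & 0x8000` test above is BOLT 4's BADONION bit: `update_fail_malformed_htlc` is only defined for onion-parsing failures, so a failure code without that bit set is a protocol violation worth closing the channel over. A hedged restatement of just that check:

    /// BOLT 4: the BADONION bit marks failure codes caused by an unparseable
    /// onion, the only codes update_fail_malformed_htlc may carry.
    const BADONION: u16 = 0x8000;

    fn validate_malformed_failure_code(failure_code: u16) -> Result<(), &'static str> {
        if failure_code & BADONION == 0 {
            // Mirrors the ChannelError::Close path above.
            return Err("got update_fail_malformed_htlc with BADONION not set");
        }
        Ok(())
    }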
 
        fn internal_commitment_signed(&self, counterparty_node_id: &PublicKey, msg: &msgs::CommitmentSigned) -> Result<(), MsgHandleErrInternal> {
                let mut channel_state_lock = self.channel_state.lock().unwrap();
                let channel_state = &mut *channel_state_lock;
-               match channel_state.by_id.entry(msg.channel_id) {
-                       hash_map::Entry::Occupied(mut chan) => {
-                               if chan.get().get_counterparty_node_id() != *counterparty_node_id {
-                                       return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id));
-                               }
-                               let (revoke_and_ack, commitment_signed, monitor_update) =
-                                       match chan.get_mut().commitment_signed(&msg, &self.logger) {
-                                               Err((None, e)) => try_chan_entry!(self, Err(e), chan),
-                                               Err((Some(update), e)) => {
-                                                       assert!(chan.get().is_awaiting_monitor_update());
-                                                       let _ = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), update);
-                                                       try_chan_entry!(self, Err(e), chan);
-                                                       unreachable!();
-                                               },
-                                               Ok(res) => res
-                                       };
-                               let update_res = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), monitor_update);
-                               if let Err(e) = handle_monitor_update_res!(self, update_res, chan, RAACommitmentOrder::RevokeAndACKFirst, true, commitment_signed.is_some()) {
-                                       return Err(e);
-                               }
+               let per_peer_state = self.per_peer_state.read().unwrap();
+               if let Some(peer_state_mutex) = per_peer_state.get(counterparty_node_id) {
+                       let mut peer_state_lock = peer_state_mutex.lock().unwrap();
+                       let peer_state = &mut *peer_state_lock;
+                       match peer_state.channel_by_id.entry(msg.channel_id) {
+                               hash_map::Entry::Occupied(mut chan) => {
+                                       if chan.get().get_counterparty_node_id() != *counterparty_node_id {
+                                               return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id));
+                                       }
+                                       let (revoke_and_ack, commitment_signed, monitor_update) =
+                                               match chan.get_mut().commitment_signed(&msg, &self.logger) {
+                                                       Err((None, e)) => try_chan_entry!(self, Err(e), chan),
+                                                       Err((Some(update), e)) => {
+                                                               assert!(chan.get().is_awaiting_monitor_update());
+                                                               let _ = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), update);
+                                                               try_chan_entry!(self, Err(e), chan);
+                                                               unreachable!();
+                                                       },
+                                                       Ok(res) => res
+                                               };
+                                       let update_res = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), monitor_update);
+                                       if let Err(e) = handle_monitor_update_res!(self, update_res, chan, RAACommitmentOrder::RevokeAndACKFirst, true, commitment_signed.is_some()) {
+                                               return Err(e);
+                                       }
 
-                               channel_state.pending_msg_events.push(events::MessageSendEvent::SendRevokeAndACK {
-                                       node_id: counterparty_node_id.clone(),
-                                       msg: revoke_and_ack,
-                               });
-                               if let Some(msg) = commitment_signed {
-                                       channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
+                                       channel_state.pending_msg_events.push(events::MessageSendEvent::SendRevokeAndACK {
                                                node_id: counterparty_node_id.clone(),
-                                               updates: msgs::CommitmentUpdate {
-                                                       update_add_htlcs: Vec::new(),
-                                                       update_fulfill_htlcs: Vec::new(),
-                                                       update_fail_htlcs: Vec::new(),
-                                                       update_fail_malformed_htlcs: Vec::new(),
-                                                       update_fee: None,
-                                                       commitment_signed: msg,
-                                               },
+                                               msg: revoke_and_ack,
                                        });
-                               }
-                               Ok(())
-                       },
-                       hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id))
+                                       if let Some(msg) = commitment_signed {
+                                               channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
+                                                       node_id: counterparty_node_id.clone(),
+                                                       updates: msgs::CommitmentUpdate {
+                                                               update_add_htlcs: Vec::new(),
+                                                               update_fulfill_htlcs: Vec::new(),
+                                                               update_fail_htlcs: Vec::new(),
+                                                               update_fail_malformed_htlcs: Vec::new(),
+                                                               update_fee: None,
+                                                               commitment_signed: msg,
+                                                       },
+                                               });
+                                       }
+                                       Ok(())
+                               },
+                               hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id))
+                       }
+               } else {
+                       return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer with a node_id matching the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
                }
        }
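
Note the lock order in `internal_commitment_signed`: `channel_state` (which still owns `pending_msg_events` at this stage of the refactor) is always taken before `per_peer_state`. A toy illustration of keeping that order consistent, with made-up types:

    use std::collections::HashMap;
    use std::sync::{Mutex, RwLock};

    struct ChannelState { pending_msg_events: Vec<String> }

    struct Mgr {
        channel_state: Mutex<ChannelState>,
        per_peer_state: RwLock<HashMap<u8, Mutex<()>>>,
    }

    impl Mgr {
        fn handle(&self, peer: u8) {
            // Acquire in the same order everywhere: channel_state, then
            // per_peer_state; mixing orders across call sites risks deadlock.
            let mut channel_state = self.channel_state.lock().unwrap();
            let per_peer_state = self.per_peer_state.read().unwrap();
            if let Some(peer_mutex) = per_peer_state.get(&peer) {
                let _peer_state = peer_mutex.lock().unwrap();
                channel_state.pending_msg_events.push(format!("event for peer {}", peer));
            }
        }
    }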
 
@@ -5353,47 +4835,54 @@ impl<M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelManager<M, T, K, F
                let res = loop {
                        let mut channel_state_lock = self.channel_state.lock().unwrap();
                        let channel_state = &mut *channel_state_lock;
-                       match channel_state.by_id.entry(msg.channel_id) {
-                               hash_map::Entry::Occupied(mut chan) => {
-                                       if chan.get().get_counterparty_node_id() != *counterparty_node_id {
-                                               break Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id));
-                                       }
-                                       let was_paused_for_mon_update = chan.get().is_awaiting_monitor_update();
-                                       let raa_updates = break_chan_entry!(self,
-                                               chan.get_mut().revoke_and_ack(&msg, &self.logger), chan);
-                                       htlcs_to_fail = raa_updates.holding_cell_failed_htlcs;
-                                       let update_res = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), raa_updates.monitor_update);
-                                       if was_paused_for_mon_update {
-                                               assert!(update_res != ChannelMonitorUpdateStatus::Completed);
-                                               assert!(raa_updates.commitment_update.is_none());
-                                               assert!(raa_updates.accepted_htlcs.is_empty());
-                                               assert!(raa_updates.failed_htlcs.is_empty());
-                                               assert!(raa_updates.finalized_claimed_htlcs.is_empty());
-                                               break Err(MsgHandleErrInternal::ignore_no_close("Existing pending monitor update prevented responses to RAA".to_owned()));
-                                       }
-                                       if update_res != ChannelMonitorUpdateStatus::Completed {
-                                               if let Err(e) = handle_monitor_update_res!(self, update_res, chan,
-                                                               RAACommitmentOrder::CommitmentFirst, false,
-                                                               raa_updates.commitment_update.is_some(), false,
-                                                               raa_updates.accepted_htlcs, raa_updates.failed_htlcs,
-                                                               raa_updates.finalized_claimed_htlcs) {
-                                                       break Err(e);
-                                               } else { unreachable!(); }
-                                       }
-                                       if let Some(updates) = raa_updates.commitment_update {
-                                               channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
-                                                       node_id: counterparty_node_id.clone(),
-                                                       updates,
-                                               });
-                                       }
-                                       break Ok((raa_updates.accepted_htlcs, raa_updates.failed_htlcs,
-                                                       raa_updates.finalized_claimed_htlcs,
-                                                       chan.get().get_short_channel_id()
-                                                               .unwrap_or(chan.get().outbound_scid_alias()),
-                                                       chan.get().get_funding_txo().unwrap(),
-                                                       chan.get().get_user_id()))
-                               },
-                               hash_map::Entry::Vacant(_) => break Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id))
+                       let per_peer_state = self.per_peer_state.read().unwrap();
+                       if let Some(peer_state_mutex) = per_peer_state.get(counterparty_node_id) {
+                               let mut peer_state_lock = peer_state_mutex.lock().unwrap();
+                               let peer_state = &mut *peer_state_lock;
+                               match peer_state.channel_by_id.entry(msg.channel_id) {
+                                       hash_map::Entry::Occupied(mut chan) => {
+                                               if chan.get().get_counterparty_node_id() != *counterparty_node_id {
+                                                       break Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id));
+                                               }
+                                               let was_paused_for_mon_update = chan.get().is_awaiting_monitor_update();
+                                               let raa_updates = break_chan_entry!(self,
+                                                       chan.get_mut().revoke_and_ack(&msg, &self.logger), chan);
+                                               htlcs_to_fail = raa_updates.holding_cell_failed_htlcs;
+                                               let update_res = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), raa_updates.monitor_update);
+                                               if was_paused_for_mon_update {
+                                                       assert!(update_res != ChannelMonitorUpdateStatus::Completed);
+                                                       assert!(raa_updates.commitment_update.is_none());
+                                                       assert!(raa_updates.accepted_htlcs.is_empty());
+                                                       assert!(raa_updates.failed_htlcs.is_empty());
+                                                       assert!(raa_updates.finalized_claimed_htlcs.is_empty());
+                                                       break Err(MsgHandleErrInternal::ignore_no_close("Existing pending monitor update prevented responses to RAA".to_owned()));
+                                               }
+                                               if update_res != ChannelMonitorUpdateStatus::Completed {
+                                                       if let Err(e) = handle_monitor_update_res!(self, update_res, chan,
+                                                                       RAACommitmentOrder::CommitmentFirst, false,
+                                                                       raa_updates.commitment_update.is_some(), false,
+                                                                       raa_updates.accepted_htlcs, raa_updates.failed_htlcs,
+                                                                       raa_updates.finalized_claimed_htlcs) {
+                                                               break Err(e);
+                                                       } else { unreachable!(); }
+                                               }
+                                               if let Some(updates) = raa_updates.commitment_update {
+                                                       channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
+                                                               node_id: counterparty_node_id.clone(),
+                                                               updates,
+                                                       });
+                                               }
+                                               break Ok((raa_updates.accepted_htlcs, raa_updates.failed_htlcs,
+                                                               raa_updates.finalized_claimed_htlcs,
+                                                               chan.get().get_short_channel_id()
+                                                                       .unwrap_or(chan.get().outbound_scid_alias()),
+                                                               chan.get().get_funding_txo().unwrap(),
+                                                               chan.get().get_user_id()))
+                                       },
+                                       hash_map::Entry::Vacant(_) => break Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id))
+                               }
+                       } else {
+                               break Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer with a node_id matching the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
                        }
                };
                self.fail_holding_cell_htlcs(htlcs_to_fail, msg.channel_id, counterparty_node_id);
@@ -5414,16 +4903,21 @@ impl<M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelManager<M, T, K, F
        }
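
The `let res = loop { ... break ... }` shape in the revoke-and-ack handler funnels every exit through one point, so the post-lock cleanup (`fail_holding_cell_htlcs`) runs on all paths. A minimal sketch of the idiom:

    fn handler_shape(fail: bool) -> Result<u32, &'static str> {
        let res = loop {
            // In the real handler the locks are scoped to this block and are
            // released as soon as a `break` leaves it.
            if fail {
                break Err("early exit without `return`");
            }
            break Ok(42);
        };
        // Cleanup that must not run under the locks happens here, on every path.
        res
    }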
 
        fn internal_update_fee(&self, counterparty_node_id: &PublicKey, msg: &msgs::UpdateFee) -> Result<(), MsgHandleErrInternal> {
-               let mut channel_lock = self.channel_state.lock().unwrap();
-               let channel_state = &mut *channel_lock;
-               match channel_state.by_id.entry(msg.channel_id) {
-                       hash_map::Entry::Occupied(mut chan) => {
-                               if chan.get().get_counterparty_node_id() != *counterparty_node_id {
-                                       return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id));
-                               }
-                               try_chan_entry!(self, chan.get_mut().update_fee(&self.fee_estimator, &msg, &self.logger), chan);
-                       },
-                       hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id))
+               let per_peer_state = self.per_peer_state.read().unwrap();
+               if let Some(peer_state_mutex) = per_peer_state.get(counterparty_node_id) {
+                       let mut peer_state_lock = peer_state_mutex.lock().unwrap();
+                       let peer_state = &mut *peer_state_lock;
+                       match peer_state.channel_by_id.entry(msg.channel_id) {
+                               hash_map::Entry::Occupied(mut chan) => {
+                                       if chan.get().get_counterparty_node_id() != *counterparty_node_id {
+                                               return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id));
+                                       }
+                                       try_chan_entry!(self, chan.get_mut().update_fee(&self.fee_estimator, &msg, &self.logger), chan);
+                               },
+                               hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id))
+                       }
+               } else {
+                       return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer with a node_id matching the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id));
                }
                Ok(())
        }
@@ -5431,61 +4925,72 @@ impl<M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelManager<M, T, K, F
        fn internal_announcement_signatures(&self, counterparty_node_id: &PublicKey, msg: &msgs::AnnouncementSignatures) -> Result<(), MsgHandleErrInternal> {
                let mut channel_state_lock = self.channel_state.lock().unwrap();
                let channel_state = &mut *channel_state_lock;
+               let per_peer_state = self.per_peer_state.read().unwrap();
+               if let Some(peer_state_mutex) = per_peer_state.get(counterparty_node_id) {
+                       let mut peer_state_lock = peer_state_mutex.lock().unwrap();
+                       let peer_state = &mut *peer_state_lock;
+                       match peer_state.channel_by_id.entry(msg.channel_id) {
+                               hash_map::Entry::Occupied(mut chan) => {
+                                       if chan.get().get_counterparty_node_id() != *counterparty_node_id {
+                                               return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id));
+                                       }
+                                       if !chan.get().is_usable() {
+                                               return Err(MsgHandleErrInternal::from_no_close(LightningError{err: "Got an announcement_signatures before we were ready for it".to_owned(), action: msgs::ErrorAction::IgnoreError}));
+                                       }
 
-               match channel_state.by_id.entry(msg.channel_id) {
-                       hash_map::Entry::Occupied(mut chan) => {
-                               if chan.get().get_counterparty_node_id() != *counterparty_node_id {
-                                       return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id));
-                               }
-                               if !chan.get().is_usable() {
-                                       return Err(MsgHandleErrInternal::from_no_close(LightningError{err: "Got an announcement_signatures before we were ready for it".to_owned(), action: msgs::ErrorAction::IgnoreError}));
-                               }
-
-                               channel_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelAnnouncement {
-                                       msg: try_chan_entry!(self, chan.get_mut().announcement_signatures(
-                                               self.get_our_node_id(), self.genesis_hash.clone(), self.best_block.read().unwrap().height(), msg), chan),
-                                       // Note that announcement_signatures fails if the channel cannot be announced,
-                                       // so get_channel_update_for_broadcast will never fail by the time we get here.
-                                       update_msg: self.get_channel_update_for_broadcast(chan.get()).unwrap(),
-                               });
-                       },
-                       hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id))
+                                       channel_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelAnnouncement {
+                                               msg: try_chan_entry!(self, chan.get_mut().announcement_signatures(
+                                                       self.get_our_node_id(), self.genesis_hash.clone(), self.best_block.read().unwrap().height(), msg), chan),
+                                               // Note that announcement_signatures fails if the channel cannot be announced,
+                                               // so get_channel_update_for_broadcast will never fail by the time we get here.
+                                               update_msg: self.get_channel_update_for_broadcast(chan.get()).unwrap(),
+                                       });
+                               },
+                               hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id))
+                       }
+               } else {
+                       return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer with a node_id matching the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id));
                }
                Ok(())
        }
 
        /// Returns DoPersist if anything changed, otherwise either SkipPersist or an Err.
        fn internal_channel_update(&self, counterparty_node_id: &PublicKey, msg: &msgs::ChannelUpdate) -> Result<NotifyOption, MsgHandleErrInternal> {
-               let chan_id = match self.short_to_chan_info.read().unwrap().get(&msg.contents.short_channel_id) {
-                       Some((_cp_id, chan_id)) => chan_id.clone(),
+               let (chan_counterparty_node_id, chan_id) = match self.short_to_chan_info.read().unwrap().get(&msg.contents.short_channel_id) {
+                       Some((cp_id, chan_id)) => (cp_id.clone(), chan_id.clone()),
                        None => {
                                // It's not a local channel
                                return Ok(NotifyOption::SkipPersist)
                        }
                };
-               let mut channel_state_lock = self.channel_state.lock().unwrap();
-               let channel_state = &mut *channel_state_lock;
-               match channel_state.by_id.entry(chan_id) {
-                       hash_map::Entry::Occupied(mut chan) => {
-                               if chan.get().get_counterparty_node_id() != *counterparty_node_id {
-                                       if chan.get().should_announce() {
-                                               // If the announcement is about a channel of ours which is public, some
-                                               // other peer may simply be forwarding all its gossip to us. Don't provide
-                                               // a scary-looking error message and return Ok instead.
+               let per_peer_state = self.per_peer_state.read().unwrap();
+               if let Some(peer_state_mutex) = per_peer_state.get(&chan_counterparty_node_id) {
+                       let mut peer_state_lock = peer_state_mutex.lock().unwrap();
+                       let peer_state = &mut *peer_state_lock;
+                       match peer_state.channel_by_id.entry(chan_id) {
+                               hash_map::Entry::Occupied(mut chan) => {
+                                       if chan.get().get_counterparty_node_id() != *counterparty_node_id {
+                                               if chan.get().should_announce() {
+                                                       // If the announcement is about a channel of ours which is public, some
+                                                       // other peer may simply be forwarding all its gossip to us. Don't provide
+                                                       // a scary-looking error message and return Ok instead.
+                                                       return Ok(NotifyOption::SkipPersist);
+                                               }
+                                               return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a channel_update for a channel from the wrong node - it shouldn't know about our private channels!".to_owned(), chan_id));
+                                       }
+                                       let were_node_one = self.get_our_node_id().serialize()[..] < chan.get().get_counterparty_node_id().serialize()[..];
+                                       let msg_from_node_one = msg.contents.flags & 1 == 0;
+                                       if were_node_one == msg_from_node_one {
                                                return Ok(NotifyOption::SkipPersist);
+                                       } else {
+                                               log_debug!(self.logger, "Received channel_update for channel {}.", log_bytes!(chan_id));
+                                               try_chan_entry!(self, chan.get_mut().channel_update(&msg), chan);
                                        }
-                                       return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a channel_update for a channel from the wrong node - it shouldn't know about our private channels!".to_owned(), chan_id));
-                               }
-                               let were_node_one = self.get_our_node_id().serialize()[..] < chan.get().get_counterparty_node_id().serialize()[..];
-                               let msg_from_node_one = msg.contents.flags & 1 == 0;
-                               if were_node_one == msg_from_node_one {
-                                       return Ok(NotifyOption::SkipPersist);
-                               } else {
-                                       log_debug!(self.logger, "Received channel_update for channel {}.", log_bytes!(chan_id));
-                                       try_chan_entry!(self, chan.get_mut().channel_update(&msg), chan);
-                               }
-                       },
-                       hash_map::Entry::Vacant(_) => return Ok(NotifyOption::SkipPersist)
+                               },
+                               hash_map::Entry::Vacant(_) => return Ok(NotifyOption::SkipPersist)
+                       }
+               } else {
+                       return Ok(NotifyOption::SkipPersist)
                }
                Ok(NotifyOption::DoPersist)
        }
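
The direction test above follows BOLT 7: the low bit of `channel_update.flags` says whether the update came from `node_1`, the lexicographically lesser of the two node ids, and an update for our own direction carries no news. Roughly, as a standalone function (names are illustrative):

    // `flags` is channel_update's flags byte; bit 0 clear means the update
    // came from node_1, the lexicographically lesser of the two node ids.
    fn is_counterpartys_update(our_id: &[u8; 33], their_id: &[u8; 33], flags: u8) -> bool {
        let were_node_one = our_id[..] < their_id[..];
        let msg_from_node_one = flags & 1 == 0;
        // If the update describes our own direction it tells us nothing new,
        // matching the SkipPersist branch above.
        were_node_one != msg_from_node_one
    }
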
@@ -5495,46 +5000,53 @@ impl<M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelManager<M, T, K, F
                let need_lnd_workaround = {
                        let mut channel_state_lock = self.channel_state.lock().unwrap();
                        let channel_state = &mut *channel_state_lock;
+                       let per_peer_state = self.per_peer_state.read().unwrap();
 
-                       match channel_state.by_id.entry(msg.channel_id) {
-                               hash_map::Entry::Occupied(mut chan) => {
-                                       if chan.get().get_counterparty_node_id() != *counterparty_node_id {
-                                               return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id));
-                                       }
-                                       // Currently, we expect all holding cell update_adds to be dropped on peer
-                                       // disconnect, so Channel's reestablish will never hand us any holding cell
-                                       // freed HTLCs to fail backwards. If in the future we no longer drop pending
-                                       // add-HTLCs on disconnect, we may be handed HTLCs to fail backwards here.
-                                       let responses = try_chan_entry!(self, chan.get_mut().channel_reestablish(
-                                               msg, &self.logger, self.our_network_pubkey.clone(), self.genesis_hash,
-                                               &*self.best_block.read().unwrap()), chan);
-                                       let mut channel_update = None;
-                                       if let Some(msg) = responses.shutdown_msg {
-                                               channel_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown {
-                                                       node_id: counterparty_node_id.clone(),
-                                                       msg,
-                                               });
-                                       } else if chan.get().is_usable() {
-                                               // If the channel is in a usable state (ie the channel is not being shut
-                                               // down), send a unicast channel_update to our counterparty to make sure
-                                               // they have the latest channel parameters.
-                                               if let Ok(msg) = self.get_channel_update_for_unicast(chan.get()) {
-                                                       channel_update = Some(events::MessageSendEvent::SendChannelUpdate {
-                                                               node_id: chan.get().get_counterparty_node_id(),
+                       if let Some(peer_state_mutex) = per_peer_state.get(counterparty_node_id) {
+                               let mut peer_state_lock = peer_state_mutex.lock().unwrap();
+                               let peer_state = &mut *peer_state_lock;
+                               match peer_state.channel_by_id.entry(msg.channel_id) {
+                                       hash_map::Entry::Occupied(mut chan) => {
+                                               if chan.get().get_counterparty_node_id() != *counterparty_node_id {
+                                                       return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id));
+                                               }
+                                               // Currently, we expect all holding cell update_adds to be dropped on peer
+                                               // disconnect, so Channel's reestablish will never hand us any holding cell
+                                               // freed HTLCs to fail backwards. If in the future we no longer drop pending
+                                               // add-HTLCs on disconnect, we may be handed HTLCs to fail backwards here.
+                                               let responses = try_chan_entry!(self, chan.get_mut().channel_reestablish(
+                                                       msg, &self.logger, self.our_network_pubkey.clone(), self.genesis_hash,
+                                                       &*self.best_block.read().unwrap()), chan);
+                                               let mut channel_update = None;
+                                               if let Some(msg) = responses.shutdown_msg {
+                                                       channel_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown {
+                                                               node_id: counterparty_node_id.clone(),
                                                                msg,
                                                        });
+                                               } else if chan.get().is_usable() {
+                                                       // If the channel is in a usable state (ie the channel is not being shut
+                                                       // down), send a unicast channel_update to our counterparty to make sure
+                                                       // they have the latest channel parameters.
+                                                       if let Ok(msg) = self.get_channel_update_for_unicast(chan.get()) {
+                                                               channel_update = Some(events::MessageSendEvent::SendChannelUpdate {
+                                                                       node_id: chan.get().get_counterparty_node_id(),
+                                                                       msg,
+                                                               });
+                                                       }
                                                }
-                                       }
-                                       let need_lnd_workaround = chan.get_mut().workaround_lnd_bug_4006.take();
-                                       htlc_forwards = self.handle_channel_resumption(
-                                               &mut channel_state.pending_msg_events, chan.get_mut(), responses.raa, responses.commitment_update, responses.order,
-                                               Vec::new(), None, responses.channel_ready, responses.announcement_sigs);
-                                       if let Some(upd) = channel_update {
-                                               channel_state.pending_msg_events.push(upd);
-                                       }
-                                       need_lnd_workaround
-                               },
-                               hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id))
+                                               let need_lnd_workaround = chan.get_mut().workaround_lnd_bug_4006.take();
+                                               htlc_forwards = self.handle_channel_resumption(
+                                                       &mut channel_state.pending_msg_events, chan.get_mut(), responses.raa, responses.commitment_update, responses.order,
+                                                       Vec::new(), None, responses.channel_ready, responses.announcement_sigs);
+                                               if let Some(upd) = channel_update {
+                                                       channel_state.pending_msg_events.push(upd);
+                                               }
+                                               need_lnd_workaround
+                                       },
+                                       hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id))
+                               }
+                       } else {
+                               return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer with a node_id matching the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id));
                        }
                };
 
@@ -5571,32 +5083,47 @@ impl<M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelManager<M, T, K, F
                                        MonitorEvent::UpdateFailed(funding_outpoint) => {
                                                let mut channel_lock = self.channel_state.lock().unwrap();
                                                let channel_state = &mut *channel_lock;
-                                               let by_id = &mut channel_state.by_id;
-                                               let pending_msg_events = &mut channel_state.pending_msg_events;
-                                               if let hash_map::Entry::Occupied(chan_entry) = by_id.entry(funding_outpoint.to_channel_id()) {
-                                                       let mut chan = remove_channel!(self, chan_entry);
-                                                       failed_channels.push(chan.force_shutdown(false));
-                                                       if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
-                                                               pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
-                                                                       msg: update
-                                                               });
+                                               let counterparty_node_id_opt = match counterparty_node_id {
+                                                       Some(cp_id) => Some(cp_id),
+                                                       None => {
+                                                               // TODO: Once we can rely on the counterparty_node_id from the
+                                                               // monitor event, this and the id_to_peer map should be removed.
+                                                               let id_to_peer = self.id_to_peer.lock().unwrap();
+                                                               id_to_peer.get(&funding_outpoint.to_channel_id()).cloned()
+                                                       }
+                                               };
+                                               if let Some(counterparty_node_id) = counterparty_node_id_opt {
+                                                       let per_peer_state = self.per_peer_state.read().unwrap();
+                                                       if let Some(peer_state_mutex) = per_peer_state.get(&counterparty_node_id) {
+                                                               let pending_msg_events = &mut channel_state.pending_msg_events;
+                                                               let mut peer_state_lock = peer_state_mutex.lock().unwrap();
+                                                               let peer_state = &mut *peer_state_lock;
+                                                               if let hash_map::Entry::Occupied(chan_entry) = peer_state.channel_by_id.entry(funding_outpoint.to_channel_id()) {
+                                                                       let mut chan = remove_channel!(self, chan_entry);
+                                                                       failed_channels.push(chan.force_shutdown(false));
+                                                                       if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
+                                                                               pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
+                                                                                       msg: update
+                                                                               });
+                                                                       }
+                                                                       let reason = if let MonitorEvent::UpdateFailed(_) = monitor_event {
+                                                                               ClosureReason::ProcessingError { err: "Failed to persist ChannelMonitor update during chain sync".to_string() }
+                                                                       } else {
+                                                                               ClosureReason::CommitmentTxConfirmed
+                                                                       };
+                                                                       self.issue_channel_close_events(&chan, reason);
+                                                                       pending_msg_events.push(events::MessageSendEvent::HandleError {
+                                                                               node_id: chan.get_counterparty_node_id(),
+                                                                               action: msgs::ErrorAction::SendErrorMessage {
+                                                                                       msg: msgs::ErrorMessage { channel_id: chan.channel_id(), data: "Channel force-closed".to_owned() }
+                                                                               },
+                                                                       });
+                                                               }
                                                        }
-                                                       let reason = if let MonitorEvent::UpdateFailed(_) = monitor_event {
-                                                               ClosureReason::ProcessingError { err: "Failed to persist ChannelMonitor update during chain sync".to_string() }
-                                                       } else {
-                                                               ClosureReason::CommitmentTxConfirmed
-                                                       };
-                                                       self.issue_channel_close_events(&chan, reason);
-                                                       pending_msg_events.push(events::MessageSendEvent::HandleError {
-                                                               node_id: chan.get_counterparty_node_id(),
-                                                               action: msgs::ErrorAction::SendErrorMessage {
-                                                                       msg: msgs::ErrorMessage { channel_id: chan.channel_id(), data: "Channel force-closed".to_owned() }
-                                                               },
-                                                       });
                                                }
                                        },
                                        MonitorEvent::Completed { funding_txo, monitor_update_id } => {
-                                               self.channel_monitor_updated(&funding_txo, monitor_update_id);
+                                               self.channel_monitor_updated(&funding_txo, monitor_update_id, counterparty_node_id.as_ref());
                                        },
                                }
                        }
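
The fallback above exists because older `ChannelMonitor`s may not record a counterparty; the `id_to_peer` map bridges that gap until the TODO can remove it. A sketch of the resolution step, with stand-in types:

    use std::collections::HashMap;
    use std::sync::Mutex;

    type ChannelId = [u8; 32];
    type NodeId = [u8; 33];

    // Prefer the id carried by the event; otherwise consult the map kept for
    // monitors created before the counterparty id was recorded.
    fn resolve_counterparty(
        from_event: Option<NodeId>,
        channel_id: &ChannelId,
        id_to_peer: &Mutex<HashMap<ChannelId, NodeId>>,
    ) -> Option<NodeId> {
        from_event.or_else(|| id_to_peer.lock().unwrap().get(channel_id).copied())
    }
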
@@ -5620,11 +5147,6 @@ impl<M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelManager<M, T, K, F
        /// Check the holding cell in each channel and free any pending HTLCs in them if possible.
        /// Returns whether there were any updates such as if pending HTLCs were freed or a monitor
        /// update was applied.
-       ///
-       /// This should only apply to HTLCs which were added to the holding cell because we were
-       /// waiting on a monitor update to finish. In that case, we don't want to free the holding cell
-       /// directly in `channel_monitor_updated` as it may introduce deadlocks calling back into user
-       /// code to inform them of a channel monitor update.
        fn check_free_holding_cells(&self) -> bool {
                let mut has_monitor_update = false;
                let mut failed_htlcs = Vec::new();
@@ -5632,45 +5154,49 @@ impl<M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelManager<M, T, K, F
                {
                        let mut channel_state_lock = self.channel_state.lock().unwrap();
                        let channel_state = &mut *channel_state_lock;
-                       let by_id = &mut channel_state.by_id;
                        let pending_msg_events = &mut channel_state.pending_msg_events;
+                       let per_peer_state = self.per_peer_state.read().unwrap();
 
-                       by_id.retain(|channel_id, chan| {
-                               match chan.maybe_free_holding_cell_htlcs(&self.logger) {
-                                       Ok((commitment_opt, holding_cell_failed_htlcs)) => {
-                                               if !holding_cell_failed_htlcs.is_empty() {
-                                                       failed_htlcs.push((
-                                                               holding_cell_failed_htlcs,
-                                                               *channel_id,
-                                                               chan.get_counterparty_node_id()
-                                                       ));
-                                               }
-                                               if let Some((commitment_update, monitor_update)) = commitment_opt {
-                                                       match self.chain_monitor.update_channel(chan.get_funding_txo().unwrap(), monitor_update) {
-                                                               ChannelMonitorUpdateStatus::Completed => {
-                                                                       pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
-                                                                               node_id: chan.get_counterparty_node_id(),
-                                                                               updates: commitment_update,
-                                                                       });
-                                                               },
-                                                               e => {
-                                                                       has_monitor_update = true;
-                                                                       let (res, close_channel) = handle_monitor_update_res!(self, e, chan, RAACommitmentOrder::CommitmentFirst, channel_id, COMMITMENT_UPDATE_ONLY);
-                                                                       handle_errors.push((chan.get_counterparty_node_id(), res));
-                                                                       if close_channel { return false; }
-                                                               },
+                       for (_cp_id, peer_state_mutex) in per_peer_state.iter() {
+                               let mut peer_state_lock = peer_state_mutex.lock().unwrap();
+                               let peer_state = &mut *peer_state_lock;
+                               peer_state.channel_by_id.retain(|channel_id, chan| {
+                                       match chan.maybe_free_holding_cell_htlcs(&self.logger) {
+                                               Ok((commitment_opt, holding_cell_failed_htlcs)) => {
+                                                       if !holding_cell_failed_htlcs.is_empty() {
+                                                               failed_htlcs.push((
+                                                                       holding_cell_failed_htlcs,
+                                                                       *channel_id,
+                                                                       chan.get_counterparty_node_id()
+                                                               ));
+                                                       }
+                                                       if let Some((commitment_update, monitor_update)) = commitment_opt {
+                                                               match self.chain_monitor.update_channel(chan.get_funding_txo().unwrap(), monitor_update) {
+                                                                       ChannelMonitorUpdateStatus::Completed => {
+                                                                               pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
+                                                                                       node_id: chan.get_counterparty_node_id(),
+                                                                                       updates: commitment_update,
+                                                                               });
+                                                                       },
+                                                                       e => {
+                                                                               has_monitor_update = true;
+                                                                               let (res, close_channel) = handle_monitor_update_res!(self, e, chan, RAACommitmentOrder::CommitmentFirst, channel_id, COMMITMENT_UPDATE_ONLY);
+                                                                               handle_errors.push((chan.get_counterparty_node_id(), res));
+                                                                               if close_channel { return false; }
+                                                                       },
+                                                               }
                                                        }
+                                                       true
+                                               },
+                                               Err(e) => {
+                                                       let (close_channel, res) = convert_chan_err!(self, e, chan, channel_id);
+                                                       handle_errors.push((chan.get_counterparty_node_id(), Err(res)));
+                                                       // ChannelClosed event is generated by handle_error for us
+                                                       !close_channel
                                                }
-                                               true
-                                       },
-                                       Err(e) => {
-                                               let (close_channel, res) = convert_chan_err!(self, e, chan, channel_id);
-                                               handle_errors.push((chan.get_counterparty_node_id(), Err(res)));
-                                               // ChannelClosed event is generated by handle_error for us
-                                               !close_channel
                                        }
-                               }
-                       });
+                               });
+                       }
                }
 
                let has_update = has_monitor_update || !failed_htlcs.is_empty() || !handle_errors.is_empty();
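
All of the hunks in this change follow the same locking discipline: take the `per_peer_state` read lock once, then take each peer's inner mutex while iterating its `channel_by_id` map, rather than re-acquiring `per_peer_state` for every channel. A minimal sketch of the pattern using standard-library types (the real maps are keyed by `PublicKey` and channel id, as elsewhere in this diff):

	use std::collections::HashMap;
	use std::sync::{Mutex, RwLock};

	// `per_peer_state` maps counterparty ids to per-peer state, each behind
	// its own Mutex; `f` stands in for the per-channel work in the hunks above.
	fn for_each_peer_state<K, P>(
		per_peer_state: &RwLock<HashMap<K, Mutex<P>>>, mut f: impl FnMut(&mut P),
	) {
		// Acquire the outer read lock exactly once...
		let per_peer_state = per_peer_state.read().unwrap();
		for (_cp_id, peer_state_mutex) in per_peer_state.iter() {
			// ...and each per-peer lock once, never retaking the outer lock.
			let mut peer_state_lock = peer_state_mutex.lock().unwrap();
			f(&mut *peer_state_lock);
		}
	}
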
@@ -5694,43 +5220,47 @@ impl<M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelManager<M, T, K, F
                {
                        let mut channel_state_lock = self.channel_state.lock().unwrap();
                        let channel_state = &mut *channel_state_lock;
-                       let by_id = &mut channel_state.by_id;
                        let pending_msg_events = &mut channel_state.pending_msg_events;
+                       let per_peer_state = self.per_peer_state.read().unwrap();
 
-                       by_id.retain(|channel_id, chan| {
-                               match chan.maybe_propose_closing_signed(&self.fee_estimator, &self.logger) {
-                                       Ok((msg_opt, tx_opt)) => {
-                                               if let Some(msg) = msg_opt {
-                                                       has_update = true;
-                                                       pending_msg_events.push(events::MessageSendEvent::SendClosingSigned {
-                                                               node_id: chan.get_counterparty_node_id(), msg,
-                                                       });
-                                               }
-                                               if let Some(tx) = tx_opt {
-                                                       // We're done with this channel. We got a closing_signed and sent back
-                                                       // a closing_signed with a closing transaction to broadcast.
-                                                       if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
-                                                               pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
-                                                                       msg: update
+                       for (_cp_id, peer_state_mutex) in per_peer_state.iter() {
+                               let mut peer_state_lock = peer_state_mutex.lock().unwrap();
+                               let peer_state = &mut *peer_state_lock;
+                               peer_state.channel_by_id.retain(|channel_id, chan| {
+                                       match chan.maybe_propose_closing_signed(&self.fee_estimator, &self.logger) {
+                                               Ok((msg_opt, tx_opt)) => {
+                                                       if let Some(msg) = msg_opt {
+                                                               has_update = true;
+                                                               pending_msg_events.push(events::MessageSendEvent::SendClosingSigned {
+                                                                       node_id: chan.get_counterparty_node_id(), msg,
                                                                });
                                                        }
+                                                       if let Some(tx) = tx_opt {
+                                                               // We're done with this channel. We got a closing_signed and sent back
+                                                               // a closing_signed with a closing transaction to broadcast.
+                                                               if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
+                                                                       pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
+                                                                               msg: update
+                                                                       });
+                                                               }
 
-                                                       self.issue_channel_close_events(chan, ClosureReason::CooperativeClosure);
+                                                               self.issue_channel_close_events(chan, ClosureReason::CooperativeClosure);
 
-                                                       log_info!(self.logger, "Broadcasting {}", log_tx!(tx));
-                                                       self.tx_broadcaster.broadcast_transaction(&tx);
-                                                       update_maps_on_chan_removal!(self, chan);
-                                                       false
-                                               } else { true }
-                                       },
-                                       Err(e) => {
-                                               has_update = true;
-                                               let (close_channel, res) = convert_chan_err!(self, e, chan, channel_id);
-                                               handle_errors.push((chan.get_counterparty_node_id(), Err(res)));
-                                               !close_channel
+                                                               log_info!(self.logger, "Broadcasting {}", log_tx!(tx));
+                                                               self.tx_broadcaster.broadcast_transaction(&tx);
+                                                               update_maps_on_chan_removal!(self, chan);
+                                                               false
+                                                       } else { true }
+                                               },
+                                               Err(e) => {
+                                                       has_update = true;
+                                                       let (close_channel, res) = convert_chan_err!(self, e, chan, channel_id);
+                                                       handle_errors.push((chan.get_counterparty_node_id(), Err(res)));
+                                                       !close_channel
+                                               }
                                        }
-                               }
-                       });
+                               });
+                       }
                }
 
                for (counterparty_node_id, err) in handle_errors.drain(..) {
@@ -5960,10 +5490,15 @@ impl<M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelManager<M, T, K, F
        pub fn compute_inflight_htlcs(&self) -> InFlightHtlcs {
                let mut inflight_htlcs = InFlightHtlcs::new();
 
-               for chan in self.channel_state.lock().unwrap().by_id.values() {
-                       for (htlc_source, _) in chan.inflight_htlc_sources() {
-                               if let HTLCSource::OutboundRoute { path, .. } = htlc_source {
-                                       inflight_htlcs.process_path(path, self.get_our_node_id());
+               let per_peer_state = self.per_peer_state.read().unwrap();
+               for (_cp_id, peer_state_mutex) in per_peer_state.iter() {
+                       let mut peer_state_lock = peer_state_mutex.lock().unwrap();
+                       let peer_state = &mut *peer_state_lock;
+                       for chan in peer_state.channel_by_id.values() {
+                               for (htlc_source, _) in chan.inflight_htlc_sources() {
+                                       if let HTLCSource::OutboundRoute { path, .. } = htlc_source {
+                                               inflight_htlcs.process_path(path, self.get_our_node_id());
+                                       }
                                }
                        }
                }
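
`compute_inflight_htlcs` snapshots outbound liquidity under the per-peer locks so that routing can score against it afterwards without holding any `ChannelManager` lock. A hedged sketch of a caller; `find_route` and its argument list here are illustrative assumptions, not the actual `Router` trait signature:

	// Hypothetical caller: take the snapshot, then route lock-free against it.
	let inflight_htlcs = channel_manager.compute_inflight_htlcs();
	// No ChannelManager locks are held at this point; the snapshot is a plain
	// value the router/scorer can consult for per-hop in-flight amounts.
	let route = router.find_route(&payer_pubkey, &route_params, inflight_htlcs)?;
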
@@ -5987,12 +5522,12 @@ impl<M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelManager<M, T, K, F
 
        #[cfg(test)]
        pub fn has_pending_payments(&self) -> bool {
-               !self.pending_outbound_payments.lock().unwrap().is_empty()
+               self.pending_outbound_payments.has_pending_payments()
        }
 
        #[cfg(test)]
        pub fn clear_pending_payments(&self) {
-               self.pending_outbound_payments.lock().unwrap().clear()
+               self.pending_outbound_payments.clear_pending_payments()
        }
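
Both test helpers now delegate to the new `ln::outbound_payment` module. Judging by these accessors and the direct field access in `Writeable::write` further down (`self.pending_outbound_payments.pending_outbound_payments.lock()`), the wrapper is approximately the following; a sketch inferred from this diff, not the actual definition:

	// Sketch only: names come from this diff; the real struct in
	// ln/outbound_payment.rs may carry additional state.
	pub(crate) struct OutboundPayments {
		pub(crate) pending_outbound_payments: Mutex<HashMap<PaymentId, PendingOutboundPayment>>,
	}

	impl OutboundPayments {
		pub(crate) fn has_pending_payments(&self) -> bool {
			!self.pending_outbound_payments.lock().unwrap().is_empty()
		}

		pub(crate) fn clear_pending_payments(&self) {
			self.pending_outbound_payments.lock().unwrap().clear()
		}
	}
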
 
        /// Processes any events asynchronously in the order they were generated since the last call
@@ -6029,12 +5564,14 @@ impl<M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelManager<M, T, K, F
        }
 }
 
-impl<M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> MessageSendEventsProvider for ChannelManager<M, T, K, F, L>
-       where M::Target: chain::Watch<<K::Target as KeysInterface>::Signer>,
-        T::Target: BroadcasterInterface,
-        K::Target: KeysInterface,
-        F::Target: FeeEstimator,
-                               L::Target: Logger,
+impl<M: Deref, T: Deref, K: Deref, F: Deref, R: Deref, L: Deref> MessageSendEventsProvider for ChannelManager<M, T, K, F, R, L>
+where
+       M::Target: chain::Watch<<K::Target as SignerProvider>::Signer>,
+       T::Target: BroadcasterInterface,
+       K::Target: KeysInterface,
+       F::Target: FeeEstimator,
+       R::Target: Router,
+       L::Target: Logger,
 {
        fn get_and_clear_pending_msg_events(&self) -> Vec<MessageSendEvent> {
                let events = RefCell::new(Vec::new());
@@ -6068,12 +5605,13 @@ impl<M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> MessageSendEventsProvider
        }
 }
 
-impl<M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> EventsProvider for ChannelManager<M, T, K, F, L>
+impl<M: Deref, T: Deref, K: Deref, F: Deref, R: Deref, L: Deref> EventsProvider for ChannelManager<M, T, K, F, R, L>
 where
-       M::Target: chain::Watch<<K::Target as KeysInterface>::Signer>,
+       M::Target: chain::Watch<<K::Target as SignerProvider>::Signer>,
        T::Target: BroadcasterInterface,
        K::Target: KeysInterface,
        F::Target: FeeEstimator,
+       R::Target: Router,
        L::Target: Logger,
 {
        /// Processes events that must be periodically handled.
@@ -6104,12 +5642,13 @@ where
        }
 }
 
-impl<M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> chain::Listen for ChannelManager<M, T, K, F, L>
+impl<M: Deref, T: Deref, K: Deref, F: Deref, R: Deref, L: Deref> chain::Listen for ChannelManager<M, T, K, F, R, L>
 where
-       M::Target: chain::Watch<<K::Target as KeysInterface>::Signer>,
+       M::Target: chain::Watch<<K::Target as SignerProvider>::Signer>,
        T::Target: BroadcasterInterface,
        K::Target: KeysInterface,
        F::Target: FeeEstimator,
+       R::Target: Router,
        L::Target: Logger,
 {
        fn filtered_block_connected(&self, header: &BlockHeader, txdata: &TransactionData, height: u32) {
@@ -6141,12 +5680,13 @@ where
        }
 }
 
-impl<M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> chain::Confirm for ChannelManager<M, T, K, F, L>
+impl<M: Deref, T: Deref, K: Deref, F: Deref, R: Deref, L: Deref> chain::Confirm for ChannelManager<M, T, K, F, R, L>
 where
-       M::Target: chain::Watch<<K::Target as KeysInterface>::Signer>,
+       M::Target: chain::Watch<<K::Target as SignerProvider>::Signer>,
        T::Target: BroadcasterInterface,
        K::Target: KeysInterface,
        F::Target: FeeEstimator,
+       R::Target: Router,
        L::Target: Logger,
 {
        fn transactions_confirmed(&self, header: &BlockHeader, txdata: &TransactionData, height: u32) {
@@ -6206,11 +5746,14 @@ where
        }
 
        fn get_relevant_txids(&self) -> Vec<(Txid, Option<BlockHash>)> {
-               let channel_state = self.channel_state.lock().unwrap();
-               let mut res = Vec::with_capacity(channel_state.by_id.len());
-               for chan in channel_state.by_id.values() {
-                       if let (Some(funding_txo), block_hash) = (chan.get_funding_txo(), chan.get_funding_tx_confirmed_in()) {
-                               res.push((funding_txo.txid, block_hash));
+               let mut res = Vec::with_capacity(self.short_to_chan_info.read().unwrap().len());
+               for (_cp_id, peer_state_mutex) in self.per_peer_state.read().unwrap().iter() {
+                       let mut peer_state_lock = peer_state_mutex.lock().unwrap();
+                       let peer_state = &mut *peer_state_lock;
+                       for chan in peer_state.channel_by_id.values() {
+                               if let (Some(funding_txo), block_hash) = (chan.get_funding_txo(), chan.get_funding_tx_confirmed_in()) {
+                                       res.push((funding_txo.txid, block_hash));
+                               }
                        }
                }
                res
@@ -6228,18 +5771,19 @@ where
        }
 }
 
-impl<M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelManager<M, T, K, F, L>
+impl<M: Deref, T: Deref, K: Deref, F: Deref, R: Deref, L: Deref> ChannelManager<M, T, K, F, R, L>
 where
-       M::Target: chain::Watch<<K::Target as KeysInterface>::Signer>,
+       M::Target: chain::Watch<<K::Target as SignerProvider>::Signer>,
        T::Target: BroadcasterInterface,
        K::Target: KeysInterface,
        F::Target: FeeEstimator,
+       R::Target: Router,
        L::Target: Logger,
 {
        /// Calls a function which handles an on-chain event (blocks dis/connected, transactions
        /// un/confirmed, etc) on each channel, handling any resulting errors or messages generated by
        /// the function.
-       fn do_chain_event<FN: Fn(&mut Channel<<K::Target as KeysInterface>::Signer>) -> Result<(Option<msgs::ChannelReady>, Vec<(HTLCSource, PaymentHash)>, Option<msgs::AnnouncementSignatures>), ClosureReason>>
+       fn do_chain_event<FN: Fn(&mut Channel<<K::Target as SignerProvider>::Signer>) -> Result<(Option<msgs::ChannelReady>, Vec<(HTLCSource, PaymentHash)>, Option<msgs::AnnouncementSignatures>), ClosureReason>>
                        (&self, height_opt: Option<u32>, f: FN) {
                // Note that we MUST NOT end up calling methods on self.chain_monitor here - we're called
                // during initialization prior to the chain_monitor being fully configured in some cases.
@@ -6251,86 +5795,91 @@ where
                        let mut channel_lock = self.channel_state.lock().unwrap();
                        let channel_state = &mut *channel_lock;
                        let pending_msg_events = &mut channel_state.pending_msg_events;
-                       channel_state.by_id.retain(|_, channel| {
-                               let res = f(channel);
-                               if let Ok((channel_ready_opt, mut timed_out_pending_htlcs, announcement_sigs)) = res {
-                                       for (source, payment_hash) in timed_out_pending_htlcs.drain(..) {
-                                               let (failure_code, data) = self.get_htlc_inbound_temp_fail_err_and_data(0x1000|14 /* expiry_too_soon */, &channel);
-                                               timed_out_htlcs.push((source, payment_hash, HTLCFailReason::reason(failure_code, data),
-                                                       HTLCDestination::NextHopChannel { node_id: Some(channel.get_counterparty_node_id()), channel_id: channel.channel_id() }));
-                                       }
-                                       if let Some(channel_ready) = channel_ready_opt {
-                                               send_channel_ready!(self, pending_msg_events, channel, channel_ready);
-                                               if channel.is_usable() {
-                                                       log_trace!(self.logger, "Sending channel_ready with private initial channel_update for our counterparty on channel {}", log_bytes!(channel.channel_id()));
-                                                       if let Ok(msg) = self.get_channel_update_for_unicast(channel) {
-                                                               pending_msg_events.push(events::MessageSendEvent::SendChannelUpdate {
-                                                                       node_id: channel.get_counterparty_node_id(),
-                                                                       msg,
-                                                               });
+                       let per_peer_state = self.per_peer_state.read().unwrap();
+                       for (_cp_id, peer_state_mutex) in per_peer_state.iter() {
+                               let mut peer_state_lock = peer_state_mutex.lock().unwrap();
+                               let peer_state = &mut *peer_state_lock;
+                               peer_state.channel_by_id.retain(|_, channel| {
+                                       let res = f(channel);
+                                       if let Ok((channel_ready_opt, mut timed_out_pending_htlcs, announcement_sigs)) = res {
+                                               for (source, payment_hash) in timed_out_pending_htlcs.drain(..) {
+                                                       let (failure_code, data) = self.get_htlc_inbound_temp_fail_err_and_data(0x1000|14 /* expiry_too_soon */, &channel);
+                                                       timed_out_htlcs.push((source, payment_hash, HTLCFailReason::reason(failure_code, data),
+                                                               HTLCDestination::NextHopChannel { node_id: Some(channel.get_counterparty_node_id()), channel_id: channel.channel_id() }));
+                                               }
+                                               if let Some(channel_ready) = channel_ready_opt {
+                                                       send_channel_ready!(self, pending_msg_events, channel, channel_ready);
+                                                       if channel.is_usable() {
+                                                               log_trace!(self.logger, "Sending channel_ready with private initial channel_update for our counterparty on channel {}", log_bytes!(channel.channel_id()));
+                                                               if let Ok(msg) = self.get_channel_update_for_unicast(channel) {
+                                                                       pending_msg_events.push(events::MessageSendEvent::SendChannelUpdate {
+                                                                               node_id: channel.get_counterparty_node_id(),
+                                                                               msg,
+                                                                       });
+                                                               }
+                                                       } else {
+                                                               log_trace!(self.logger, "Sending channel_ready WITHOUT channel_update for {}", log_bytes!(channel.channel_id()));
                                                        }
-                                               } else {
-                                                       log_trace!(self.logger, "Sending channel_ready WITHOUT channel_update for {}", log_bytes!(channel.channel_id()));
                                                }
-                                       }
 
-                                       emit_channel_ready_event!(self, channel);
+                                               emit_channel_ready_event!(self, channel);
 
-                                       if let Some(announcement_sigs) = announcement_sigs {
-                                               log_trace!(self.logger, "Sending announcement_signatures for channel {}", log_bytes!(channel.channel_id()));
-                                               pending_msg_events.push(events::MessageSendEvent::SendAnnouncementSignatures {
-                                                       node_id: channel.get_counterparty_node_id(),
-                                                       msg: announcement_sigs,
-                                               });
-                                               if let Some(height) = height_opt {
-                                                       if let Some(announcement) = channel.get_signed_channel_announcement(self.get_our_node_id(), self.genesis_hash, height) {
-                                                               pending_msg_events.push(events::MessageSendEvent::BroadcastChannelAnnouncement {
-                                                                       msg: announcement,
-                                                                       // Note that announcement_signatures fails if the channel cannot be announced,
-                                                                       // so get_channel_update_for_broadcast will never fail by the time we get here.
-                                                                       update_msg: self.get_channel_update_for_broadcast(channel).unwrap(),
-                                                               });
+                                               if let Some(announcement_sigs) = announcement_sigs {
+                                                       log_trace!(self.logger, "Sending announcement_signatures for channel {}", log_bytes!(channel.channel_id()));
+                                                       pending_msg_events.push(events::MessageSendEvent::SendAnnouncementSignatures {
+                                                               node_id: channel.get_counterparty_node_id(),
+                                                               msg: announcement_sigs,
+                                                       });
+                                                       if let Some(height) = height_opt {
+                                                               if let Some(announcement) = channel.get_signed_channel_announcement(self.get_our_node_id(), self.genesis_hash, height) {
+                                                                       pending_msg_events.push(events::MessageSendEvent::BroadcastChannelAnnouncement {
+                                                                               msg: announcement,
+                                                                               // Note that announcement_signatures fails if the channel cannot be announced,
+                                                                               // so get_channel_update_for_broadcast will never fail by the time we get here.
+                                                                               update_msg: self.get_channel_update_for_broadcast(channel).unwrap(),
+                                                                       });
+                                                               }
                                                        }
                                                }
-                                       }
-                                       if channel.is_our_channel_ready() {
-                                               if let Some(real_scid) = channel.get_short_channel_id() {
-                                                       // If we sent a 0conf channel_ready, and now have an SCID, we add it
-                                                       // to the short_to_chan_info map here. Note that we check whether we
-                                                       // can relay using the real SCID at relay-time (i.e.
-                                                       // enforce option_scid_alias then), and if the funding tx is ever
-                                                       // un-confirmed we force-close the channel, ensuring short_to_chan_info
-                                                       // is always consistent.
-                                                       let mut short_to_chan_info = self.short_to_chan_info.write().unwrap();
-                                                       let scid_insert = short_to_chan_info.insert(real_scid, (channel.get_counterparty_node_id(), channel.channel_id()));
-                                                       assert!(scid_insert.is_none() || scid_insert.unwrap() == (channel.get_counterparty_node_id(), channel.channel_id()),
-                                                               "SCIDs should never collide - ensure you weren't behind by a full {} blocks when creating channels",
-                                                               fake_scid::MAX_SCID_BLOCKS_FROM_NOW);
+                                               if channel.is_our_channel_ready() {
+                                                       if let Some(real_scid) = channel.get_short_channel_id() {
+                                                               // If we sent a 0conf channel_ready, and now have an SCID, we add it
+                                                               // to the short_to_chan_info map here. Note that we check whether we
+                                                               // can relay using the real SCID at relay-time (i.e.
+                                                               // enforce option_scid_alias then), and if the funding tx is ever
+                                                               // un-confirmed we force-close the channel, ensuring short_to_chan_info
+                                                               // is always consistent.
+                                                               let mut short_to_chan_info = self.short_to_chan_info.write().unwrap();
+                                                               let scid_insert = short_to_chan_info.insert(real_scid, (channel.get_counterparty_node_id(), channel.channel_id()));
+                                                               assert!(scid_insert.is_none() || scid_insert.unwrap() == (channel.get_counterparty_node_id(), channel.channel_id()),
+                                                                       "SCIDs should never collide - ensure you weren't behind by a full {} blocks when creating channels",
+                                                                       fake_scid::MAX_SCID_BLOCKS_FROM_NOW);
+                                                       }
                                                }
-                                       }
-                               } else if let Err(reason) = res {
-                                       update_maps_on_chan_removal!(self, channel);
-                                       // It looks like our counterparty went on-chain or the funding transaction was
-                                       // reorged out of the main chain. Close the channel.
-                                       failed_channels.push(channel.force_shutdown(true));
-                                       if let Ok(update) = self.get_channel_update_for_broadcast(&channel) {
-                                               pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
-                                                       msg: update
+                                       } else if let Err(reason) = res {
+                                               update_maps_on_chan_removal!(self, channel);
+                                       // It looks like our counterparty went on-chain or the funding transaction was
+                                       // reorged out of the main chain. Close the channel.
+                                               failed_channels.push(channel.force_shutdown(true));
+                                               if let Ok(update) = self.get_channel_update_for_broadcast(&channel) {
+                                                       pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
+                                                               msg: update
+                                                       });
+                                               }
+                                               let reason_message = format!("{}", reason);
+                                               self.issue_channel_close_events(channel, reason);
+                                               pending_msg_events.push(events::MessageSendEvent::HandleError {
+                                                       node_id: channel.get_counterparty_node_id(),
+                                                       action: msgs::ErrorAction::SendErrorMessage { msg: msgs::ErrorMessage {
+                                                               channel_id: channel.channel_id(),
+                                                               data: reason_message,
+                                                       } },
                                                });
+                                               return false;
                                        }
-                                       let reason_message = format!("{}", reason);
-                                       self.issue_channel_close_events(channel, reason);
-                                       pending_msg_events.push(events::MessageSendEvent::HandleError {
-                                               node_id: channel.get_counterparty_node_id(),
-                                               action: msgs::ErrorAction::SendErrorMessage { msg: msgs::ErrorMessage {
-                                                       channel_id: channel.channel_id(),
-                                                       data: reason_message,
-                                               } },
-                                       });
-                                       return false;
-                               }
-                               true
-                       });
+                                       true
+                               });
+                       }
                }
 
                if let Some(height) = height_opt {
@@ -6428,13 +5977,15 @@ where
        }
 }
 
-impl<M: Deref, T: Deref, K: Deref, F: Deref, L: Deref >
-       ChannelMessageHandler for ChannelManager<M, T, K, F, L>
-       where M::Target: chain::Watch<<K::Target as KeysInterface>::Signer>,
-        T::Target: BroadcasterInterface,
-        K::Target: KeysInterface,
-        F::Target: FeeEstimator,
-        L::Target: Logger,
+impl<M: Deref, T: Deref, K: Deref, F: Deref, R: Deref, L: Deref>
+       ChannelMessageHandler for ChannelManager<M, T, K, F, R, L>
+where
+       M::Target: chain::Watch<<K::Target as SignerProvider>::Signer>,
+       T::Target: BroadcasterInterface,
+       K::Target: KeysInterface,
+       F::Target: FeeEstimator,
+       R::Target: Router,
+       L::Target: Logger,
 {
        fn handle_open_channel(&self, counterparty_node_id: &PublicKey, their_features: InitFeatures, msg: &msgs::OpenChannel) {
                let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
@@ -6530,25 +6081,29 @@ impl<M: Deref, T: Deref, K: Deref, F: Deref, L: Deref >
                let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
                let mut failed_channels = Vec::new();
                let mut no_channels_remain = true;
+               let mut channel_state = self.channel_state.lock().unwrap();
+               let mut per_peer_state = self.per_peer_state.write().unwrap();
                {
-                       let mut channel_state_lock = self.channel_state.lock().unwrap();
-                       let channel_state = &mut *channel_state_lock;
                        let pending_msg_events = &mut channel_state.pending_msg_events;
                        log_debug!(self.logger, "Marking channels with {} disconnected and generating channel_updates. We believe we {} make future connections to this peer.",
                                log_pubkey!(counterparty_node_id), if no_connection_possible { "cannot" } else { "can" });
-                       channel_state.by_id.retain(|_, chan| {
-                               if chan.get_counterparty_node_id() == *counterparty_node_id {
-                                       chan.remove_uncommitted_htlcs_and_mark_paused(&self.logger);
-                                       if chan.is_shutdown() {
-                                               update_maps_on_chan_removal!(self, chan);
-                                               self.issue_channel_close_events(chan, ClosureReason::DisconnectedPeer);
-                                               return false;
-                                       } else {
-                                               no_channels_remain = false;
+                       if let Some(peer_state_mutex) = per_peer_state.get(counterparty_node_id) {
+                               let mut peer_state_lock = peer_state_mutex.lock().unwrap();
+                               let peer_state = &mut *peer_state_lock;
+                               peer_state.channel_by_id.retain(|_, chan| {
+                                       if chan.get_counterparty_node_id() == *counterparty_node_id {
+                                               chan.remove_uncommitted_htlcs_and_mark_paused(&self.logger);
+                                               if chan.is_shutdown() {
+                                                       update_maps_on_chan_removal!(self, chan);
+                                                       self.issue_channel_close_events(chan, ClosureReason::DisconnectedPeer);
+                                                       return false;
+                                               } else {
+                                                       no_channels_remain = false;
+                                               }
                                        }
-                               }
-                               true
-                       });
+                                       true
+                               });
+                       }
                        pending_msg_events.retain(|msg| {
                                match msg {
                                        &events::MessageSendEvent::SendAcceptChannel { ref node_id, .. } => node_id != counterparty_node_id,
@@ -6573,10 +6128,12 @@ impl<M: Deref, T: Deref, K: Deref, F: Deref, L: Deref >
                                        &events::MessageSendEvent::SendGossipTimestampFilter { .. } => false,
                                }
                        });
+                       mem::drop(channel_state);
                }
                if no_channels_remain {
-                       self.per_peer_state.write().unwrap().remove(counterparty_node_id);
+                       per_peer_state.remove(counterparty_node_id);
                }
+               mem::drop(per_peer_state);
 
                for failure in failed_channels.drain(..) {
                        self.finish_force_close_channel(failure);
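
The explicit `mem::drop` calls above pin the release order: the `channel_state` lock is dropped before the `per_peer_state` write lock is used to remove the peer entry, keeping the file-wide lock order intact while still doing the removal under a single write-lock acquisition. The same shape, reduced to standard-library types:

	use std::collections::HashMap;
	use std::mem;
	use std::sync::{Mutex, RwLock};

	fn disconnect_peer(pending_msgs: &Mutex<Vec<u32>>, peers: &RwLock<HashMap<u8, ()>>, peer: u8) {
		// Take both locks up front, in the fixed file-wide order.
		let mut msgs_lock = pending_msgs.lock().unwrap();
		let mut peers_lock = peers.write().unwrap();
		msgs_lock.retain(|_msg| true); // prune messages destined for `peer`
		// Release the first lock before mutating under the second...
		mem::drop(msgs_lock);
		peers_lock.remove(&peer);
		// ...and release the write lock last.
		mem::drop(peers_lock);
	}
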
@@ -6598,6 +6155,7 @@ impl<M: Deref, T: Deref, K: Deref, F: Deref, L: Deref >
                        match peer_state_lock.entry(counterparty_node_id.clone()) {
                                hash_map::Entry::Vacant(e) => {
                                        e.insert(Mutex::new(PeerState {
+                                               channel_by_id: HashMap::new(),
                                                latest_features: init_msg.features.clone(),
                                        }));
                                },
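
`PeerState` now owns the per-peer channel map. Inferred from this initializer and the `peer_channels` type in the deserialization path below, its shape is roughly as follows; the generic bound and any further fields are assumptions:

	// Approximate shape only; see the actual definition earlier in this file.
	struct PeerState<Signer: Sign> {
		/// All channels with this peer, keyed by the 32-byte channel id.
		channel_by_id: HashMap<[u8; 32], Channel<Signer>>,
		/// Features this peer advertised in its latest `init` message.
		latest_features: InitFeatures,
	}
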
@@ -6610,34 +6168,40 @@ impl<M: Deref, T: Deref, K: Deref, F: Deref, L: Deref >
                let mut channel_state_lock = self.channel_state.lock().unwrap();
                let channel_state = &mut *channel_state_lock;
                let pending_msg_events = &mut channel_state.pending_msg_events;
-               channel_state.by_id.retain(|_, chan| {
-                       let retain = if chan.get_counterparty_node_id() == *counterparty_node_id {
-                               if !chan.have_received_message() {
-                                       // If we created this (outbound) channel while we were disconnected from the
-                                       // peer we probably failed to send the open_channel message, which is now
-                                       // lost. We can't have had anything pending related to this channel, so we just
-                                       // drop it.
-                                       false
-                               } else {
-                                       pending_msg_events.push(events::MessageSendEvent::SendChannelReestablish {
-                                               node_id: chan.get_counterparty_node_id(),
-                                               msg: chan.get_channel_reestablish(&self.logger),
-                                       });
-                                       true
-                               }
-                       } else { true };
-                       if retain && chan.get_counterparty_node_id() != *counterparty_node_id {
-                               if let Some(msg) = chan.get_signed_channel_announcement(self.get_our_node_id(), self.genesis_hash.clone(), self.best_block.read().unwrap().height()) {
-                                       if let Ok(update_msg) = self.get_channel_update_for_broadcast(chan) {
-                                               pending_msg_events.push(events::MessageSendEvent::SendChannelAnnouncement {
-                                                       node_id: *counterparty_node_id,
-                                                       msg, update_msg,
+               let per_peer_state = self.per_peer_state.read().unwrap();
+
+               for (_cp_id, peer_state_mutex) in per_peer_state.iter() {
+                       let mut peer_state_lock = peer_state_mutex.lock().unwrap();
+                       let peer_state = &mut *peer_state_lock;
+                       peer_state.channel_by_id.retain(|_, chan| {
+                               let retain = if chan.get_counterparty_node_id() == *counterparty_node_id {
+                                       if !chan.have_received_message() {
+                                               // If we created this (outbound) channel while we were disconnected from the
+                                               // peer we probably failed to send the open_channel message, which is now
+                                               // lost. We can't have had anything pending related to this channel, so we just
+                                               // drop it.
+                                               false
+                                       } else {
+                                               pending_msg_events.push(events::MessageSendEvent::SendChannelReestablish {
+                                                       node_id: chan.get_counterparty_node_id(),
+                                                       msg: chan.get_channel_reestablish(&self.logger),
                                                });
+                                               true
+                                       }
+                               } else { true };
+                               if retain && chan.get_counterparty_node_id() != *counterparty_node_id {
+                                       if let Some(msg) = chan.get_signed_channel_announcement(self.get_our_node_id(), self.genesis_hash.clone(), self.best_block.read().unwrap().height()) {
+                                               if let Ok(update_msg) = self.get_channel_update_for_broadcast(chan) {
+                                                       pending_msg_events.push(events::MessageSendEvent::SendChannelAnnouncement {
+                                                               node_id: *counterparty_node_id,
+                                                               msg, update_msg,
+                                                       });
+                                               }
                                        }
                                }
-                       }
-                       retain
-               });
+                               retain
+                       });
+               }
                //TODO: Also re-broadcast announcement_signatures
                Ok(())
        }
@@ -6656,18 +6220,23 @@ impl<M: Deref, T: Deref, K: Deref, F: Deref, L: Deref >
                        {
                                // First check if we can advance the channel type and try again.
                                let mut channel_state = self.channel_state.lock().unwrap();
-                               if let Some(chan) = channel_state.by_id.get_mut(&msg.channel_id) {
-                                       if chan.get_counterparty_node_id() != *counterparty_node_id {
-                                               return;
-                                       }
-                                       if let Ok(msg) = chan.maybe_handle_error_without_close(self.genesis_hash) {
-                                               channel_state.pending_msg_events.push(events::MessageSendEvent::SendOpenChannel {
-                                                       node_id: *counterparty_node_id,
-                                                       msg,
-                                               });
-                                               return;
+                               let per_peer_state = self.per_peer_state.read().unwrap();
+                               if let Some(peer_state_mutex) = per_peer_state.get(counterparty_node_id) {
+                                       let mut peer_state_lock = peer_state_mutex.lock().unwrap();
+                                       let peer_state = &mut *peer_state_lock;
+                                       if let Some(chan) = peer_state.channel_by_id.get_mut(&msg.channel_id) {
+                                               if chan.get_counterparty_node_id() != *counterparty_node_id {
+                                                       return;
+                                               }
+                                               if let Ok(msg) = chan.maybe_handle_error_without_close(self.genesis_hash) {
+                                                       channel_state.pending_msg_events.push(events::MessageSendEvent::SendOpenChannel {
+                                                               node_id: *counterparty_node_id,
+                                                               msg,
+                                                       });
+                                                       return;
+                                               }
                                        }
-                               }
+                               } else { return; }
                        }
 
                        // Untrusted messages from the peer; we throw away the error if the id points to a non-existent channel
@@ -7091,16 +6660,6 @@ impl Writeable for HTLCSource {
        }
 }
 
-impl_writeable_tlv_based_enum!(HTLCFailReason,
-       (0, LightningError) => {
-               (0, err, required),
-       },
-       (1, Reason) => {
-               (0, failure_code, required),
-               (2, data, vec_type),
-       },
-;);
-
 impl_writeable_tlv_based!(PendingAddHTLCInfo, {
        (0, forward_info, required),
        (1, prev_user_channel_id, (default_value, 0)),
@@ -7125,36 +6684,14 @@ impl_writeable_tlv_based!(PendingInboundPayment, {
        (8, min_value_msat, required),
 });
 
-impl_writeable_tlv_based_enum_upgradable!(PendingOutboundPayment,
-       (0, Legacy) => {
-               (0, session_privs, required),
-       },
-       (1, Fulfilled) => {
-               (0, session_privs, required),
-               (1, payment_hash, option),
-               (3, timer_ticks_without_htlcs, (default_value, 0)),
-       },
-       (2, Retryable) => {
-               (0, session_privs, required),
-               (1, pending_fee_msat, option),
-               (2, payment_hash, required),
-               (4, payment_secret, option),
-               (6, total_msat, required),
-               (8, pending_amt_msat, required),
-               (10, starting_block_height, required),
-       },
-       (3, Abandoned) => {
-               (0, session_privs, required),
-               (2, payment_hash, required),
-       },
-);
-
-impl<M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> Writeable for ChannelManager<M, T, K, F, L>
-       where M::Target: chain::Watch<<K::Target as KeysInterface>::Signer>,
-        T::Target: BroadcasterInterface,
-        K::Target: KeysInterface,
-        F::Target: FeeEstimator,
-        L::Target: Logger,
+impl<M: Deref, T: Deref, K: Deref, F: Deref, R: Deref, L: Deref> Writeable for ChannelManager<M, T, K, F, R, L>
+where
+       M::Target: chain::Watch<<K::Target as SignerProvider>::Signer>,
+       T::Target: BroadcasterInterface,
+       K::Target: KeysInterface,
+       F::Target: FeeEstimator,
+       R::Target: Router,
+       L::Target: Logger,
 {
        fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
                let _consistency_lock = self.total_consistency_lock.write().unwrap();
@@ -7169,19 +6706,29 @@ impl<M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> Writeable for ChannelMana
                }
 
                {
-                       // Take `channel_state` lock temporarily to avoid creating a lock order that requires
-                       // that the `forward_htlcs` lock is taken after `channel_state`
-                       let channel_state = self.channel_state.lock().unwrap();
+                       let per_peer_state = self.per_peer_state.read().unwrap();
                        let mut unfunded_channels = 0;
-                       for (_, channel) in channel_state.by_id.iter() {
-                               if !channel.is_funding_initiated() {
-                                       unfunded_channels += 1;
+                       let mut number_of_channels = 0;
+                       for (_, peer_state_mutex) in per_peer_state.iter() {
+                               let mut peer_state_lock = peer_state_mutex.lock().unwrap();
+                               let peer_state = &mut *peer_state_lock;
+                               number_of_channels += peer_state.channel_by_id.len();
+                               for (_, channel) in peer_state.channel_by_id.iter() {
+                                       if !channel.is_funding_initiated() {
+                                               unfunded_channels += 1;
+                                       }
                                }
                        }
-                       ((channel_state.by_id.len() - unfunded_channels) as u64).write(writer)?;
-                       for (_, channel) in channel_state.by_id.iter() {
-                               if channel.is_funding_initiated() {
-                                       channel.write(writer)?;
+
+                       ((number_of_channels - unfunded_channels) as u64).write(writer)?;
+
+                       for (_, peer_state_mutex) in per_peer_state.iter() {
+                               let mut peer_state_lock = peer_state_mutex.lock().unwrap();
+                               let peer_state = &mut *peer_state_lock;
+                               for (_, channel) in peer_state.channel_by_id.iter() {
+                                       if channel.is_funding_initiated() {
+                                               channel.write(writer)?;
+                                       }
                                }
                        }
                }
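
Because channels now live in nested per-peer maps, `write` makes two passes: the funded-channel count must be serialized before any channel, so the first loop only counts and the second emits. A generic sketch of the same count-then-write constraint:

	use std::io::{self, Write};

	// The length prefix precedes the elements, so count across the nested
	// structure first, then walk it again to emit (mirrors the hunks above).
	fn write_funded<W: Write>(w: &mut W, peers: &[Vec<(bool, u64)>]) -> io::Result<()> {
		let funded = peers.iter().flatten().filter(|(is_funded, _)| *is_funded).count() as u64;
		w.write_all(&funded.to_be_bytes())?;
		for (is_funded, value) in peers.iter().flatten() {
			if *is_funded { w.write_all(&value.to_be_bytes())?; }
		}
		Ok(())
	}
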
@@ -7200,7 +6747,7 @@ impl<M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> Writeable for ChannelMana
 
                let pending_inbound_payments = self.pending_inbound_payments.lock().unwrap();
                let claimable_payments = self.claimable_payments.lock().unwrap();
-               let pending_outbound_payments = self.pending_outbound_payments.lock().unwrap();
+               let pending_outbound_payments = self.pending_outbound_payments.pending_outbound_payments.lock().unwrap();
 
                let mut htlc_purposes: Vec<&events::PaymentPurpose> = Vec::new();
                (claimable_payments.claimable_htlcs.len() as u64).write(writer)?;
@@ -7346,12 +6893,14 @@ impl<M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> Writeable for ChannelMana
 /// which you've already broadcasted the transaction.
 ///
 /// [`ChainMonitor`]: crate::chain::chainmonitor::ChainMonitor
-pub struct ChannelManagerReadArgs<'a, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
-       where M::Target: chain::Watch<<K::Target as KeysInterface>::Signer>,
-        T::Target: BroadcasterInterface,
-        K::Target: KeysInterface,
-        F::Target: FeeEstimator,
-        L::Target: Logger,
+pub struct ChannelManagerReadArgs<'a, M: Deref, T: Deref, K: Deref, F: Deref, R: Deref, L: Deref>
+where
+       M::Target: chain::Watch<<K::Target as SignerProvider>::Signer>,
+       T::Target: BroadcasterInterface,
+       K::Target: KeysInterface,
+       F::Target: FeeEstimator,
+       R::Target: Router,
+       L::Target: Logger,
 {
        /// The keys provider which will give us relevant keys. Some keys will be loaded during
        /// deserialization and KeysInterface::read_chan_signer will be used to read per-Channel
@@ -7373,6 +6922,11 @@ pub struct ChannelManagerReadArgs<'a, M: Deref, T: Deref, K: Deref, F: Deref, L:
        /// used to broadcast the latest local commitment transactions of channels which must be
        /// force-closed during deserialization.
        pub tx_broadcaster: T,
+       /// The router which will be used in the ChannelManager in the future for finding routes
+       /// on-the-fly for trampoline payments. Absent in private nodes that don't support forwarding.
+       ///
+       /// No calls to the router will be made during deserialization.
+       pub router: R,
        /// The Logger for use in the ChannelManager and which may be used to log information during
        /// deserialization.
        pub logger: L,
@@ -7392,24 +6946,26 @@ pub struct ChannelManagerReadArgs<'a, M: Deref, T: Deref, K: Deref, F: Deref, L:
        /// this struct.
        ///
        /// (C-not exported) because we have no HashMap bindings
-       pub channel_monitors: HashMap<OutPoint, &'a mut ChannelMonitor<<K::Target as KeysInterface>::Signer>>,
+       pub channel_monitors: HashMap<OutPoint, &'a mut ChannelMonitor<<K::Target as SignerProvider>::Signer>>,
 }
 
-impl<'a, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
-               ChannelManagerReadArgs<'a, M, T, K, F, L>
-       where M::Target: chain::Watch<<K::Target as KeysInterface>::Signer>,
-               T::Target: BroadcasterInterface,
-               K::Target: KeysInterface,
-               F::Target: FeeEstimator,
-               L::Target: Logger,
-       {
+impl<'a, M: Deref, T: Deref, K: Deref, F: Deref, R: Deref, L: Deref>
+               ChannelManagerReadArgs<'a, M, T, K, F, R, L>
+where
+       M::Target: chain::Watch<<K::Target as SignerProvider>::Signer>,
+       T::Target: BroadcasterInterface,
+       K::Target: KeysInterface,
+       F::Target: FeeEstimator,
+       R::Target: Router,
+       L::Target: Logger,
+{
        /// Simple utility function to create a ChannelManagerReadArgs which creates the monitor
        /// HashMap for you. This is primarily useful for C bindings where it is not practical to
        /// populate a HashMap directly from C.
-       pub fn new(keys_manager: K, fee_estimator: F, chain_monitor: M, tx_broadcaster: T, logger: L, default_config: UserConfig,
-                       mut channel_monitors: Vec<&'a mut ChannelMonitor<<K::Target as KeysInterface>::Signer>>) -> Self {
+       pub fn new(keys_manager: K, fee_estimator: F, chain_monitor: M, tx_broadcaster: T, router: R, logger: L, default_config: UserConfig,
+                       mut channel_monitors: Vec<&'a mut ChannelMonitor<<K::Target as SignerProvider>::Signer>>) -> Self {
                Self {
-                       keys_manager, fee_estimator, chain_monitor, tx_broadcaster, logger, default_config,
+                       keys_manager, fee_estimator, chain_monitor, tx_broadcaster, router, logger, default_config,
                        channel_monitors: channel_monitors.drain(..).map(|monitor| { (monitor.get_funding_txo().0, monitor) }).collect()
                }
        }
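
Every deserialization call site gains the new `router` argument. A sketch of the updated flow; `read_channel_monitors` and the variable names are illustrative, not real API:

	// Hypothetical call site for the updated constructor; only `router` is new.
	let mut channel_monitors = read_channel_monitors()?; // Vec<ChannelMonitor<_>>, illustrative
	let read_args = ChannelManagerReadArgs::new(
		keys_manager, fee_estimator, chain_monitor, tx_broadcaster,
		router, logger, default_config,
		channel_monitors.iter_mut().collect(),
	);
	let (block_hash, channel_manager) =
		<(BlockHash, ChannelManager<_, _, _, _, _, _>)>::read(&mut reader, read_args)?;
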
@@ -7417,29 +6973,33 @@ impl<'a, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
 
 // Implement ReadableArgs for an Arc'd ChannelManager to make it a bit easier to work with the
 // SimpleArcChannelManager type:
-impl<'a, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
-       ReadableArgs<ChannelManagerReadArgs<'a, M, T, K, F, L>> for (BlockHash, Arc<ChannelManager<M, T, K, F, L>>)
-       where M::Target: chain::Watch<<K::Target as KeysInterface>::Signer>,
-        T::Target: BroadcasterInterface,
-        K::Target: KeysInterface,
-        F::Target: FeeEstimator,
-        L::Target: Logger,
+impl<'a, M: Deref, T: Deref, K: Deref, F: Deref, R: Deref, L: Deref>
+       ReadableArgs<ChannelManagerReadArgs<'a, M, T, K, F, R, L>> for (BlockHash, Arc<ChannelManager<M, T, K, F, R, L>>)
+where
+       M::Target: chain::Watch<<K::Target as SignerProvider>::Signer>,
+       T::Target: BroadcasterInterface,
+       K::Target: KeysInterface,
+       F::Target: FeeEstimator,
+       R::Target: Router,
+       L::Target: Logger,
 {
-       fn read<R: io::Read>(reader: &mut R, args: ChannelManagerReadArgs<'a, M, T, K, F, L>) -> Result<Self, DecodeError> {
-               let (blockhash, chan_manager) = <(BlockHash, ChannelManager<M, T, K, F, L>)>::read(reader, args)?;
+       fn read<Reader: io::Read>(reader: &mut Reader, args: ChannelManagerReadArgs<'a, M, T, K, F, R, L>) -> Result<Self, DecodeError> {
+               let (blockhash, chan_manager) = <(BlockHash, ChannelManager<M, T, K, F, R, L>)>::read(reader, args)?;
                Ok((blockhash, Arc::new(chan_manager)))
        }
 }
 
-impl<'a, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
-       ReadableArgs<ChannelManagerReadArgs<'a, M, T, K, F, L>> for (BlockHash, ChannelManager<M, T, K, F, L>)
-       where M::Target: chain::Watch<<K::Target as KeysInterface>::Signer>,
-        T::Target: BroadcasterInterface,
-        K::Target: KeysInterface,
-        F::Target: FeeEstimator,
-        L::Target: Logger,
+impl<'a, M: Deref, T: Deref, K: Deref, F: Deref, R: Deref, L: Deref>
+       ReadableArgs<ChannelManagerReadArgs<'a, M, T, K, F, R, L>> for (BlockHash, ChannelManager<M, T, K, F, R, L>)
+where
+       M::Target: chain::Watch<<K::Target as SignerProvider>::Signer>,
+       T::Target: BroadcasterInterface,
+       K::Target: KeysInterface,
+       F::Target: FeeEstimator,
+       R::Target: Router,
+       L::Target: Logger,
 {
-       fn read<R: io::Read>(reader: &mut R, mut args: ChannelManagerReadArgs<'a, M, T, K, F, L>) -> Result<Self, DecodeError> {
+       fn read<Reader: io::Read>(reader: &mut Reader, mut args: ChannelManagerReadArgs<'a, M, T, K, F, R, L>) -> Result<Self, DecodeError> {
                let _ver = read_ver_prefix!(reader, SERIALIZATION_VERSION);
 
                let genesis_hash: BlockHash = Readable::read(reader)?;
@@ -7450,12 +7010,12 @@ impl<'a, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
 
                let channel_count: u64 = Readable::read(reader)?;
                let mut funding_txo_set = HashSet::with_capacity(cmp::min(channel_count as usize, 128));
-               let mut by_id = HashMap::with_capacity(cmp::min(channel_count as usize, 128));
+               let mut peer_channels: HashMap<PublicKey, HashMap<[u8; 32], Channel<<K::Target as SignerProvider>::Signer>>> = HashMap::with_capacity(cmp::min(channel_count as usize, 128));
                let mut id_to_peer = HashMap::with_capacity(cmp::min(channel_count as usize, 128));
                let mut short_to_chan_info = HashMap::with_capacity(cmp::min(channel_count as usize, 128));
                let mut channel_closures = Vec::new();
                for _ in 0..channel_count {
-                       let mut channel: Channel<<K::Target as KeysInterface>::Signer> = Channel::read(reader, (&args.keys_manager, best_block_height))?;
+                       let mut channel: Channel<<K::Target as SignerProvider>::Signer> = Channel::read(reader, (&args.keys_manager, best_block_height))?;
                        let funding_txo = channel.get_funding_txo().ok_or(DecodeError::InvalidValue)?;
                        funding_txo_set.insert(funding_txo.clone());
                        if let Some(ref mut monitor) = args.channel_monitors.get_mut(&funding_txo) {
@@ -7516,7 +7076,17 @@ impl<'a, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
                                        if channel.is_funding_initiated() {
                                                id_to_peer.insert(channel.channel_id(), channel.get_counterparty_node_id());
                                        }
-                                       by_id.insert(channel.channel_id(), channel);
+                                       match peer_channels.entry(channel.get_counterparty_node_id()) {
+                                               hash_map::Entry::Occupied(mut entry) => {
+                                                       let by_id_map = entry.get_mut();
+                                                       by_id_map.insert(channel.channel_id(), channel);
+                                               },
+                                               hash_map::Entry::Vacant(entry) => {
+                                                       let mut by_id_map = HashMap::new();
+                                                       by_id_map.insert(channel.channel_id(), channel);
+                                                       entry.insert(by_id_map);
+                                               }
+                                       }
                                }
                        } else if channel.is_awaiting_initial_mon_persist() {
                                // If we were persisted and shut down while the initial ChannelMonitor persistence
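The `Entry` match used above when regrouping channels by counterparty is the explicit form of a common idiom; an equivalent compact sketch (same behavior, shown for clarity only, not part of the patch):

    peer_channels
        .entry(channel.get_counterparty_node_id())
        .or_insert_with(HashMap::new)
        .insert(channel.channel_id(), channel);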
@@ -7571,10 +7141,11 @@ impl<'a, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
                }
 
                let peer_count: u64 = Readable::read(reader)?;
-               let mut per_peer_state = HashMap::with_capacity(cmp::min(peer_count as usize, MAX_ALLOC_SIZE/mem::size_of::<(PublicKey, Mutex<PeerState>)>()));
+               let mut per_peer_state = HashMap::with_capacity(cmp::min(peer_count as usize, MAX_ALLOC_SIZE/mem::size_of::<(PublicKey, Mutex<PeerState<<K::Target as SignerProvider>::Signer>>)>()));
                for _ in 0..peer_count {
                        let peer_pubkey = Readable::read(reader)?;
                        let peer_state = PeerState {
+                               channel_by_id: peer_channels.remove(&peer_pubkey).unwrap_or(HashMap::new()),
                                latest_features: Readable::read(reader)?,
                        };
                        per_peer_state.insert(peer_pubkey, Mutex::new(peer_state));
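Channels grouped into `peer_channels` during the channel-read loop are moved into their owning `PeerState` here; a peer serialized without channels simply gets an empty map. One stylistic aside on the line above, sketched as an alternative: `unwrap_or(HashMap::new())` builds the fallback eagerly while `unwrap_or_else` defers it, though an empty `HashMap` does not allocate, so either is cheap:

    channel_by_id: peer_channels.remove(&peer_pubkey).unwrap_or_else(HashMap::new),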
@@ -7667,7 +7238,7 @@ impl<'a, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
                        // We only rebuild the pending payments map if we were most recently serialized by
                        // 0.0.102+
                        for (_, monitor) in args.channel_monitors.iter() {
-                               if by_id.get(&monitor.get_funding_txo().0.to_channel_id()).is_none() {
+                               if id_to_peer.get(&monitor.get_funding_txo().0.to_channel_id()).is_none() {
                                        for (htlc_source, htlc) in monitor.get_pending_outbound_htlcs() {
                                                if let HTLCSource::OutboundRoute { payment_id, session_priv, path, payment_secret, .. } = htlc_source {
                                                        if path.is_empty() {
@@ -7702,17 +7273,19 @@ impl<'a, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
                                        }
                                        for (htlc_source, htlc) in monitor.get_all_current_outbound_htlcs() {
                                                if let HTLCSource::PreviousHopData(prev_hop_data) = htlc_source {
+                                                       let pending_forward_matches_htlc = |info: &PendingAddHTLCInfo| {
+                                                               info.prev_funding_outpoint == prev_hop_data.outpoint &&
+                                                                       info.prev_htlc_id == prev_hop_data.htlc_id
+                                                       };
                                                        // The ChannelMonitor is now responsible for this HTLC's
                                                        // failure/success and will let us know what its outcome is. If we
-                                                       // still have an entry for this HTLC in `forward_htlcs`, we were
-                                                       // apparently not persisted after the monitor was when forwarding
-                                                       // the payment.
+                                                       // still have an entry for this HTLC in `forward_htlcs` or
+                                                       // `pending_intercepted_htlcs`, we apparently were not persisted again
+                                                       // after the monitor was persisted when forwarding the payment.
                                                        forward_htlcs.retain(|_, forwards| {
                                                                forwards.retain(|forward| {
                                                                        if let HTLCForwardInfo::AddHTLC(htlc_info) = forward {
-                                                                               if htlc_info.prev_short_channel_id == prev_hop_data.short_channel_id &&
-                                                                                       htlc_info.prev_htlc_id == prev_hop_data.htlc_id
-                                                                               {
+                                                                               if pending_forward_matches_htlc(&htlc_info) {
                                                                                        log_info!(args.logger, "Removing pending to-forward HTLC with hash {} as it was forwarded to the closed channel {}",
                                                                                                log_bytes!(htlc.payment_hash.0), log_bytes!(monitor.get_funding_txo().0.to_channel_id()));
                                                                                        false
@@ -7720,7 +7293,19 @@ impl<'a, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
                                                                        } else { true }
                                                                });
                                                                !forwards.is_empty()
-                                                       })
+                                                       });
+                                                       pending_intercepted_htlcs.as_mut().unwrap().retain(|intercepted_id, htlc_info| {
+                                                               if pending_forward_matches_htlc(&htlc_info) {
+                                                                       log_info!(args.logger, "Removing pending intercepted HTLC with hash {} as it was forwarded to the closed channel {}",
+                                                                               log_bytes!(htlc.payment_hash.0), log_bytes!(monitor.get_funding_txo().0.to_channel_id()));
+                                                                       pending_events_read.retain(|event| {
+                                                                               if let Event::HTLCIntercepted { intercept_id: ev_id, .. } = event {
+                                                                                       intercepted_id != ev_id
+                                                                               } else { true }
+                                                                       });
+                                                                       false
+                                                               } else { true }
+                                                       });
                                                }
                                        }
                                }
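Hoisting the match into the `pending_forward_matches_htlc` closure lets a single predicate drive both pruning passes. A self-contained sketch of the shared-predicate `retain` pattern (toy types, not LDK's):

    use std::collections::HashMap;

    let matches = |v: &u32| *v == 42;
    let mut forwards: HashMap<u64, Vec<u32>> = HashMap::from([(1, vec![42, 7])]);
    let mut intercepted: HashMap<u64, u32> = HashMap::from([(9, 42), (10, 7)]);

    // Drop matching entries from the inner lists, then drop emptied outer entries.
    forwards.retain(|_, list| {
        list.retain(|v| !matches(v));
        !list.is_empty()
    });
    // The same predicate prunes the second map directly.
    intercepted.retain(|_, v| !matches(v));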
@@ -7801,28 +7386,32 @@ impl<'a, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
                }
 
                let mut outbound_scid_aliases = HashSet::new();
-               for (chan_id, chan) in by_id.iter_mut() {
-                       if chan.outbound_scid_alias() == 0 {
-                               let mut outbound_scid_alias;
-                               loop {
-                                       outbound_scid_alias = fake_scid::Namespace::OutboundAlias
-                                               .get_fake_scid(best_block_height, &genesis_hash, fake_scid_rand_bytes.as_ref().unwrap(), &args.keys_manager);
-                                       if outbound_scid_aliases.insert(outbound_scid_alias) { break; }
-                               }
-                               chan.set_outbound_scid_alias(outbound_scid_alias);
-                       } else if !outbound_scid_aliases.insert(chan.outbound_scid_alias()) {
-                               // Note that in rare cases its possible to hit this while reading an older
-                               // channel if we just happened to pick a colliding outbound alias above.
-                               log_error!(args.logger, "Got duplicate outbound SCID alias; {}", chan.outbound_scid_alias());
-                               return Err(DecodeError::InvalidValue);
-                       }
-                       if chan.is_usable() {
-                               if short_to_chan_info.insert(chan.outbound_scid_alias(), (chan.get_counterparty_node_id(), *chan_id)).is_some() {
+               for (_peer_node_id, peer_state_mutex) in per_peer_state.iter_mut() {
+                       let mut peer_state_lock = peer_state_mutex.lock().unwrap();
+                       let peer_state = &mut *peer_state_lock;
+                       for (chan_id, chan) in peer_state.channel_by_id.iter_mut() {
+                               if chan.outbound_scid_alias() == 0 {
+                                       let mut outbound_scid_alias;
+                                       loop {
+                                               outbound_scid_alias = fake_scid::Namespace::OutboundAlias
+                                                       .get_fake_scid(best_block_height, &genesis_hash, fake_scid_rand_bytes.as_ref().unwrap(), &args.keys_manager);
+                                               if outbound_scid_aliases.insert(outbound_scid_alias) { break; }
+                                       }
+                                       chan.set_outbound_scid_alias(outbound_scid_alias);
+                               } else if !outbound_scid_aliases.insert(chan.outbound_scid_alias()) {
                                        // Note that in rare cases it's possible to hit this while reading an older
                                        // channel if we just happened to pick a colliding outbound alias above.
                                        log_error!(args.logger, "Got duplicate outbound SCID alias; {}", chan.outbound_scid_alias());
                                        return Err(DecodeError::InvalidValue);
                                }
+                               if chan.is_usable() {
+                                       if short_to_chan_info.insert(chan.outbound_scid_alias(), (chan.get_counterparty_node_id(), *chan_id)).is_some() {
+                                               // Note that in rare cases it's possible to hit this while reading an older
+                                               // channel if we just happened to pick a colliding outbound alias above.
+                                               log_error!(args.logger, "Got duplicate outbound SCID alias; {}", chan.outbound_scid_alias());
+                                               return Err(DecodeError::InvalidValue);
+                                       }
+                               }
                        }
                }
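The alias loop re-rolls until `HashSet::insert` reports a fresh value; a collision with an alias already assigned at serialization time instead fails the whole read with `DecodeError::InvalidValue`. A reduced sketch of the re-roll pattern (the `draw` source stands in for the fake-SCID namespace):

    use std::collections::HashSet;

    fn pick_unique_alias(used: &mut HashSet<u64>, mut draw: impl FnMut() -> u64) -> u64 {
        loop {
            let candidate = draw();
            // `insert` returns false for a duplicate, so keep drawing until unique.
            if used.insert(candidate) { return candidate; }
        }
    }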
 
@@ -7859,8 +7448,13 @@ impl<'a, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
                                                // without the new monitor persisted - we'll end up right back here on
                                                // restart.
                                                let previous_channel_id = claimable_htlc.prev_hop.outpoint.to_channel_id();
-                                               if let Some(channel) = by_id.get_mut(&previous_channel_id) {
-                                                       channel.claim_htlc_while_disconnected_dropping_mon_update(claimable_htlc.prev_hop.htlc_id, payment_preimage, &args.logger);
+                                               if let Some(peer_node_id) = id_to_peer.get(&previous_channel_id) {
+                                                       let peer_state_mutex = per_peer_state.get(peer_node_id).unwrap();
+                                                       let mut peer_state_lock = peer_state_mutex.lock().unwrap();
+                                                       let peer_state = &mut *peer_state_lock;
+                                                       if let Some(channel) = peer_state.channel_by_id.get_mut(&previous_channel_id) {
+                                                               channel.claim_htlc_while_disconnected_dropping_mon_update(claimable_htlc.prev_hop.htlc_id, payment_preimage, &args.logger);
+                                                       }
                                                }
                                                if let Some(previous_hop_monitor) = args.channel_monitors.get(&claimable_htlc.prev_hop.outpoint) {
                                                        previous_hop_monitor.provide_payment_preimage(&payment_hash, &payment_preimage, &args.tx_broadcaster, &bounded_fee_estimator, &args.logger);
@@ -7881,16 +7475,16 @@ impl<'a, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
                        fee_estimator: bounded_fee_estimator,
                        chain_monitor: args.chain_monitor,
                        tx_broadcaster: args.tx_broadcaster,
+                       router: args.router,
 
                        best_block: RwLock::new(BestBlock::new(best_block_hash, best_block_height)),
 
                        channel_state: Mutex::new(ChannelHolder {
-                               by_id,
                                pending_msg_events: Vec::new(),
                        }),
                        inbound_payment_key: expanded_inbound_key,
                        pending_inbound_payments: Mutex::new(pending_inbound_payments),
-                       pending_outbound_payments: Mutex::new(pending_outbound_payments.unwrap()),
+                       pending_outbound_payments: OutboundPayments { pending_outbound_payments: Mutex::new(pending_outbound_payments.unwrap()) },
                        pending_intercepted_htlcs: Mutex::new(pending_intercepted_htlcs.unwrap()),
 
                        forward_htlcs: Mutex::new(forward_htlcs),
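`pending_outbound_payments` is now constructed through the new `OutboundPayments` wrapper rather than as a bare `Mutex`. From the constructor above, the wrapper's shape is roughly the following (field name from this diff; the inner map's exact key and value types are assumed):

    struct OutboundPayments {
        pending_outbound_payments: Mutex<HashMap<PaymentId, PendingOutboundPayment>>,
    }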
@@ -7908,7 +7502,7 @@ impl<'a, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
 
                        highest_seen_timestamp: AtomicUsize::new(highest_seen_timestamp as usize),
 
-                       per_peer_state: RwLock::new(per_peer_state),
+                       per_peer_state: FairRwLock::new(per_peer_state),
 
                        pending_events: Mutex::new(pending_events_read),
                        pending_background_events: Mutex::new(pending_background_events_read),
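Swapping `RwLock` for `FairRwLock` on `per_peer_state` keeps the frequent short read-lock takes from starving writers. A toy sketch of the writer-signaling idea (an illustration of the concept only, not LDK's `FairRwLock` implementation):

    use std::sync::{RwLock, RwLockReadGuard, RwLockWriteGuard};
    use std::sync::atomic::{AtomicUsize, Ordering};

    struct FairishRwLock<T> {
        lock: RwLock<T>,
        waiting_writers: AtomicUsize,
    }

    impl<T> FairishRwLock<T> {
        fn read(&self) -> RwLockReadGuard<'_, T> {
            // Back off while a writer is queued so it gets the lock promptly.
            while self.waiting_writers.load(Ordering::Acquire) != 0 {
                std::thread::yield_now();
            }
            self.lock.read().unwrap()
        }
        fn write(&self) -> RwLockWriteGuard<'_, T> {
            self.waiting_writers.fetch_add(1, Ordering::AcqRel);
            let guard = self.lock.write().unwrap();
            self.waiting_writers.fetch_sub(1, Ordering::AcqRel);
            guard
        }
    }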
@@ -7949,7 +7543,7 @@ mod tests {
        use crate::util::errors::APIError;
        use crate::util::events::{Event, HTLCDestination, MessageSendEvent, MessageSendEventsProvider, ClosureReason};
        use crate::util::test_utils;
-       use crate::chain::keysinterface::KeysInterface;
+       use crate::chain::keysinterface::{EntropySource, KeysInterface};
 
        #[test]
        fn test_notify_limits() {
@@ -8048,7 +7642,7 @@ mod tests {
                // Use the utility function send_payment_along_path to send the payment with MPP data which
                // indicates there are more HTLCs coming.
                let cur_height = CHAN_CONFIRM_DEPTH + 1; // route_payment calls send_payment, which adds 1 to the current height. So we do the same here to match.
-               let session_privs = nodes[0].node.add_new_pending_payment(our_payment_hash, Some(payment_secret), payment_id, &mpp_route).unwrap();
+               let session_privs = nodes[0].node.test_add_new_pending_payment(our_payment_hash, Some(payment_secret), payment_id, &mpp_route).unwrap();
                nodes[0].node.send_payment_along_path(&mpp_route.paths[0], &route.payment_params, &our_payment_hash, &Some(payment_secret), 200_000, cur_height, payment_id, &None, session_privs[0]).unwrap();
                check_added_monitors!(nodes[0], 1);
                let mut events = nodes[0].node.get_and_clear_pending_msg_events();
@@ -8266,7 +7860,7 @@ mod tests {
                        final_value_msat: 10_000,
                        final_cltv_expiry_delta: 40,
                };
-               let network_graph = nodes[0].network_graph;
+               let network_graph = nodes[0].network_graph.clone();
                let first_hops = nodes[0].node.list_usable_channels();
                let scorer = test_utils::TestScorer::with_penalty(0);
                let random_seed_bytes = chanmon_cfgs[1].keys_manager.get_secure_random_bytes();
@@ -8277,8 +7871,8 @@ mod tests {
 
                let test_preimage = PaymentPreimage([42; 32]);
                let mismatch_payment_hash = PaymentHash([43; 32]);
-               let session_privs = nodes[0].node.add_new_pending_payment(mismatch_payment_hash, None, PaymentId(mismatch_payment_hash.0), &route).unwrap();
-               nodes[0].node.send_payment_internal(&route, mismatch_payment_hash, &None, Some(test_preimage), PaymentId(mismatch_payment_hash.0), None, session_privs).unwrap();
+               let session_privs = nodes[0].node.test_add_new_pending_payment(mismatch_payment_hash, None, PaymentId(mismatch_payment_hash.0), &route).unwrap();
+               nodes[0].node.test_send_payment_internal(&route, mismatch_payment_hash, &None, Some(test_preimage), PaymentId(mismatch_payment_hash.0), None, session_privs).unwrap();
                check_added_monitors!(nodes[0], 1);
 
                let updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
@@ -8311,7 +7905,7 @@ mod tests {
                        final_value_msat: 10_000,
                        final_cltv_expiry_delta: 40,
                };
-               let network_graph = nodes[0].network_graph;
+               let network_graph = nodes[0].network_graph.clone();
                let first_hops = nodes[0].node.list_usable_channels();
                let scorer = test_utils::TestScorer::with_penalty(0);
                let random_seed_bytes = chanmon_cfgs[1].keys_manager.get_secure_random_bytes();
@@ -8323,8 +7917,8 @@ mod tests {
                let test_preimage = PaymentPreimage([42; 32]);
                let test_secret = PaymentSecret([43; 32]);
                let payment_hash = PaymentHash(Sha256::hash(&test_preimage.0).into_inner());
-               let session_privs = nodes[0].node.add_new_pending_payment(payment_hash, Some(test_secret), PaymentId(payment_hash.0), &route).unwrap();
-               nodes[0].node.send_payment_internal(&route, payment_hash, &Some(test_secret), Some(test_preimage), PaymentId(payment_hash.0), None, session_privs).unwrap();
+               let session_privs = nodes[0].node.test_add_new_pending_payment(payment_hash, Some(test_secret), PaymentId(payment_hash.0), &route).unwrap();
+               nodes[0].node.test_send_payment_internal(&route, payment_hash, &Some(test_secret), Some(test_preimage), PaymentId(payment_hash.0), None, session_privs).unwrap();
                check_added_monitors!(nodes[0], 1);
 
                let updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
@@ -8515,7 +8109,7 @@ mod tests {
 pub mod bench {
        use crate::chain::Listen;
        use crate::chain::chainmonitor::{ChainMonitor, Persist};
-       use crate::chain::keysinterface::{KeysManager, KeysInterface, InMemorySigner};
+       use crate::chain::keysinterface::{EntropySource, KeysManager, KeysInterface, InMemorySigner};
        use crate::ln::channelmanager::{self, BestBlock, ChainParameters, ChannelManager, PaymentHash, PaymentPreimage, PaymentId};
        use crate::ln::functional_test_utils::*;
        use crate::ln::msgs::{ChannelMessageHandler, Init};
@@ -8539,7 +8133,8 @@ pub mod bench {
                                &'a test_utils::TestBroadcaster, &'a test_utils::TestFeeEstimator,
                                &'a test_utils::TestLogger, &'a P>,
                        &'a test_utils::TestBroadcaster, &'a KeysManager,
-                       &'a test_utils::TestFeeEstimator, &'a test_utils::TestLogger>,
+                       &'a test_utils::TestFeeEstimator, &'a test_utils::TestRouter<'a>,
+                       &'a test_utils::TestLogger>,
        }
 
        #[cfg(test)]
@@ -8557,15 +8152,16 @@ pub mod bench {
 
                let tx_broadcaster = test_utils::TestBroadcaster{txn_broadcasted: Mutex::new(Vec::new()), blocks: Arc::new(Mutex::new(Vec::new()))};
                let fee_estimator = test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) };
+               let logger_a = test_utils::TestLogger::with_id("node a".to_owned());
+               let router = test_utils::TestRouter::new(Arc::new(NetworkGraph::new(genesis_hash, &logger_a)));
 
                let mut config: UserConfig = Default::default();
                config.channel_handshake_config.minimum_depth = 1;
 
-               let logger_a = test_utils::TestLogger::with_id("node a".to_owned());
                let chain_monitor_a = ChainMonitor::new(None, &tx_broadcaster, &logger_a, &fee_estimator, &persister_a);
                let seed_a = [1u8; 32];
                let keys_manager_a = KeysManager::new(&seed_a, 42, 42);
-               let node_a = ChannelManager::new(&fee_estimator, &chain_monitor_a, &tx_broadcaster, &logger_a, &keys_manager_a, config.clone(), ChainParameters {
+               let node_a = ChannelManager::new(&fee_estimator, &chain_monitor_a, &tx_broadcaster, &router, &logger_a, &keys_manager_a, config.clone(), ChainParameters {
                        network,
                        best_block: BestBlock::from_genesis(network),
                });
@@ -8575,7 +8171,7 @@ pub mod bench {
                let chain_monitor_b = ChainMonitor::new(None, &tx_broadcaster, &logger_a, &fee_estimator, &persister_b);
                let seed_b = [2u8; 32];
                let keys_manager_b = KeysManager::new(&seed_b, 42, 42);
-               let node_b = ChannelManager::new(&fee_estimator, &chain_monitor_b, &tx_broadcaster, &logger_b, &keys_manager_b, config.clone(), ChainParameters {
+               let node_b = ChannelManager::new(&fee_estimator, &chain_monitor_b, &tx_broadcaster, &router, &logger_b, &keys_manager_b, config.clone(), ChainParameters {
                        network,
                        best_block: BestBlock::from_genesis(network),
                });