Relicense as dual Apache-2.0 + MIT
[rust-lightning] lightning/src/ln/channelmanager.rs
index 709ddde2e641144f03424f85f83060e332f25fc9..a17eb1a610bc132f736dcf42438b3689d20bb3f0 100644
@@ -1,3 +1,12 @@
+// This file is Copyright its original authors, visible in version control
+// history.
+//
+// This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
+// or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
+// You may not use this file except in accordance with one or both of these
+// licenses.
+
 //! The top-level channel management and payment tracking stuff lives here.
 //!
 //! The ChannelManager is the main chunk of logic implementing the lightning protocol and is
@@ -34,7 +43,7 @@ use ln::features::{InitFeatures, NodeFeatures};
 use routing::router::{Route, RouteHop};
 use ln::msgs;
 use ln::onion_utils;
-use ln::msgs::{ChannelMessageHandler, DecodeError, LightningError};
+use ln::msgs::{ChannelMessageHandler, DecodeError, LightningError, OptionalField};
 use chain::keysinterface::{ChannelKeys, KeysInterface, KeysManager, InMemoryChannelKeys};
 use util::config::UserConfig;
 use util::{byte_utils, events};
@@ -51,6 +60,7 @@ use std::sync::atomic::{AtomicUsize, Ordering};
 use std::time::Duration;
 use std::marker::{Sync, Send};
 use std::ops::Deref;
+use bitcoin::hashes::hex::ToHex;
 
 // We hold various information about HTLC relay in the HTLC objects in Channel itself:
 //
@@ -192,14 +202,14 @@ struct MsgHandleErrInternal {
 }
 impl MsgHandleErrInternal {
        #[inline]
-       fn send_err_msg_no_close(err: &'static str, channel_id: [u8; 32]) -> Self {
+       fn send_err_msg_no_close(err: String, channel_id: [u8; 32]) -> Self {
                Self {
                        err: LightningError {
-                               err,
+                               err: err.clone(),
                                action: msgs::ErrorAction::SendErrorMessage {
                                        msg: msgs::ErrorMessage {
                                                channel_id,
-                                               data: err.to_string()
+                                               data: err
                                        },
                                },
                        },
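
This hunk (and many of the hunks below) switches the error-message types from `&'static str` to owned `String`, so call sites can build messages with `format!` and include runtime values; the `err.clone()` appears because the same string is stored both in the `LightningError` and in the outgoing wire `ErrorMessage`. A minimal sketch of the difference, not part of the patch and using hypothetical names:

    // Hypothetical sketch: why the signatures moved from &'static str to String.
    struct ErrMsg { err: String, data: String }

    fn err_static(err: &'static str) -> ErrMsg {
        // Only fixed literals fit here; no per-call details possible.
        ErrMsg { err: err.to_string(), data: err.to_string() }
    }

    fn err_owned(err: String) -> ErrMsg {
        // The owned String is used twice, hence the single clone().
        ErrMsg { err: err.clone(), data: err }
    }

    fn demo(channel_value_satoshis: u64) -> ErrMsg {
        err_owned(format!("Channel value must be at least 1000 satoshis. It was {}",
                          channel_value_satoshis))
    }
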
@@ -207,7 +217,7 @@ impl MsgHandleErrInternal {
                }
        }
        #[inline]
-       fn ignore_no_close(err: &'static str) -> Self {
+       fn ignore_no_close(err: String) -> Self {
                Self {
                        err: LightningError {
                                err,
@@ -221,14 +231,14 @@ impl MsgHandleErrInternal {
                Self { err, shutdown_finish: None }
        }
        #[inline]
-       fn from_finish_shutdown(err: &'static str, channel_id: [u8; 32], shutdown_res: ShutdownResult, channel_update: Option<msgs::ChannelUpdate>) -> Self {
+       fn from_finish_shutdown(err: String, channel_id: [u8; 32], shutdown_res: ShutdownResult, channel_update: Option<msgs::ChannelUpdate>) -> Self {
                Self {
                        err: LightningError {
-                               err,
+                               err: err.clone(),
                                action: msgs::ErrorAction::SendErrorMessage {
                                        msg: msgs::ErrorMessage {
                                                channel_id,
-                                               data: err.to_string()
+                                               data: err
                                        },
                                },
                        },
@@ -244,20 +254,20 @@ impl MsgHandleErrInternal {
                                        action: msgs::ErrorAction::IgnoreError,
                                },
                                ChannelError::Close(msg) => LightningError {
-                                       err: msg,
+                                       err: msg.clone(),
                                        action: msgs::ErrorAction::SendErrorMessage {
                                                msg: msgs::ErrorMessage {
                                                        channel_id,
-                                                       data: msg.to_string()
+                                                       data: msg
                                                },
                                        },
                                },
                                ChannelError::CloseDelayBroadcast(msg) => LightningError {
-                                       err: msg,
+                                       err: msg.clone(),
                                        action: msgs::ErrorAction::SendErrorMessage {
                                                msg: msgs::ErrorMessage {
                                                        channel_id,
-                                                       data: msg.to_string()
+                                                       data: msg
                                                },
                                        },
                                },
@@ -370,7 +380,7 @@ pub type SimpleRefChannelManager<'a, 'b, 'c, 'd, 'e, M, T, F, L> = ChannelManage
 /// SimpleArcChannelManager when you require a ChannelManager with a static lifetime, such as when
 /// you're using lightning-net-tokio.
 pub struct ChannelManager<ChanSigner: ChannelKeys, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
-       where M::Target: ManyChannelMonitor<ChanSigner>,
+       where M::Target: ManyChannelMonitor<Keys=ChanSigner>,
         T::Target: BroadcasterInterface,
         K::Target: KeysInterface<ChanKeySigner = ChanSigner>,
         F::Target: FeeEstimator,
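
The monitor bound changes because `ManyChannelMonitor` now exposes the signer as an associated type rather than a generic type parameter, so the constraint is written `M::Target: ManyChannelMonitor<Keys=ChanSigner>`. A minimal sketch of the two trait styles, with a hypothetical trait rather than the real definition:

    // Generic-parameter style: every user threads the parameter through.
    trait MonitorGeneric<K> { fn watch(&self, keys: K); }

    // Associated-type style: the implementation fixes the key type once, and
    // bounds name it with `Keys = ...`, as in `ManyChannelMonitor<Keys=ChanSigner>`.
    trait MonitorAssoc { type Keys; fn watch(&self, keys: Self::Keys); }

    fn use_generic<K, M: MonitorGeneric<K>>(m: &M, k: K) { m.watch(k); }
    fn use_assoc<M: MonitorAssoc<Keys = u32>>(m: &M) { m.watch(42); }
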
@@ -632,7 +642,7 @@ macro_rules! handle_monitor_err {
                                // splitting hairs we'd prefer to claim payments that were to us, but we haven't
                                // given up the preimage yet, so might as well just wait until the payment is
                                // retried, avoiding the on-chain fees.
-                               let res: Result<(), _> = Err(MsgHandleErrInternal::from_finish_shutdown("ChannelMonitor storage failure", channel_id, chan.force_shutdown(true), $self.get_channel_update(&chan).ok()));
+                               let res: Result<(), _> = Err(MsgHandleErrInternal::from_finish_shutdown("ChannelMonitor storage failure".to_owned(), channel_id, chan.force_shutdown(true), $self.get_channel_update(&chan).ok()));
                                res
                        },
                        ChannelMonitorUpdateErr::TemporaryFailure => {
@@ -655,7 +665,7 @@ macro_rules! handle_monitor_err {
                                        debug_assert!($action_type == RAACommitmentOrder::CommitmentFirst || !$resend_commitment);
                                }
                                $entry.get_mut().monitor_update_failed($resend_raa, $resend_commitment, $failed_forwards, $failed_fails);
-                               Err(MsgHandleErrInternal::from_chan_no_close(ChannelError::Ignore("Failed to update ChannelMonitor"), *$entry.key()))
+                               Err(MsgHandleErrInternal::from_chan_no_close(ChannelError::Ignore("Failed to update ChannelMonitor".to_owned()), *$entry.key()))
                        },
                }
        }
@@ -683,7 +693,7 @@ macro_rules! maybe_break_monitor_err {
 }
 
 impl<ChanSigner: ChannelKeys, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelManager<ChanSigner, M, T, K, F, L>
-       where M::Target: ManyChannelMonitor<ChanSigner>,
+       where M::Target: ManyChannelMonitor<Keys=ChanSigner>,
         T::Target: BroadcasterInterface,
         K::Target: KeysInterface<ChanKeySigner = ChanSigner>,
         F::Target: FeeEstimator,
@@ -707,10 +717,10 @@ impl<ChanSigner: ChannelKeys, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
        /// the ChannelManager as a listener to the BlockNotifier and call the BlockNotifier's
        /// `block_(dis)connected` methods, which will notify all registered listeners in one
        /// go.
-       pub fn new(network: Network, fee_est: F, monitor: M, tx_broadcaster: T, logger: L, keys_manager: K, config: UserConfig, current_blockchain_height: usize) -> Result<ChannelManager<ChanSigner, M, T, K, F, L>, secp256k1::Error> {
+       pub fn new(network: Network, fee_est: F, monitor: M, tx_broadcaster: T, logger: L, keys_manager: K, config: UserConfig, current_blockchain_height: usize) -> Self {
                let secp_ctx = Secp256k1::new();
 
-               let res = ChannelManager {
+               ChannelManager {
                        default_configuration: config.clone(),
                        genesis_hash: genesis_block(network).header.bitcoin_hash(),
                        fee_estimator: fee_est,
@@ -740,9 +750,7 @@ impl<ChanSigner: ChannelKeys, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
                        keys_manager,
 
                        logger,
-               };
-
-               Ok(res)
+               }
        }
 
        /// Creates a new outbound channel to the given remote node and with the given value.
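
In the `new()` hunk above, the constructor stops returning `Result<..., secp256k1::Error>`: its body never actually produced an error (it always ended in `Ok(res)`), so it now returns `Self` and callers drop their `?`/`unwrap`. A sketch of the call-site impact, with a hypothetical, heavily elided constructor:

    struct Manager;
    impl Manager {
        fn new_fallible() -> Result<Self, ()> { Ok(Manager) } // old shape
        fn new_infallible() -> Self { Manager }               // new shape
    }

    fn build() -> Result<Manager, ()> {
        let _old = Manager::new_fallible()?;  // previously needed `?` or `.unwrap()`
        Ok(Manager::new_infallible())         // now a plain constructor call
    }
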
@@ -759,19 +767,19 @@ impl<ChanSigner: ChannelKeys, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
        /// greater than channel_value_satoshis * 1k or channel_value_satoshis is < 1000.
        pub fn create_channel(&self, their_network_key: PublicKey, channel_value_satoshis: u64, push_msat: u64, user_id: u64, override_config: Option<UserConfig>) -> Result<(), APIError> {
                if channel_value_satoshis < 1000 {
-                       return Err(APIError::APIMisuseError { err: "channel_value must be at least 1000 satoshis" });
+                       return Err(APIError::APIMisuseError { err: format!("Channel value must be at least 1000 satoshis. It was {}", channel_value_satoshis) });
                }
 
                let config = if override_config.is_some() { override_config.as_ref().unwrap() } else { &self.default_configuration };
                let channel = Channel::new_outbound(&self.fee_estimator, &self.keys_manager, their_network_key, channel_value_satoshis, push_msat, user_id, config)?;
-               let res = channel.get_open_channel(self.genesis_hash.clone(), &self.fee_estimator);
+               let res = channel.get_open_channel(self.genesis_hash.clone());
 
                let _ = self.total_consistency_lock.read().unwrap();
                let mut channel_state = self.channel_state.lock().unwrap();
                match channel_state.by_id.entry(channel.channel_id()) {
                        hash_map::Entry::Occupied(_) => {
                                if cfg!(feature = "fuzztarget") {
-                                       return Err(APIError::APIMisuseError { err: "Fuzzy bad RNG" });
+                                       return Err(APIError::APIMisuseError { err: "Fuzzy bad RNG".to_owned() });
                                } else {
                                        panic!("RNG is bad???");
                                }
@@ -857,7 +865,7 @@ impl<ChanSigner: ChannelKeys, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
                                                (failed_htlcs, Some(chan_entry.remove_entry().1))
                                        } else { (failed_htlcs, None) }
                                },
-                               hash_map::Entry::Vacant(_) => return Err(APIError::ChannelUnavailable{err: "No such channel"})
+                               hash_map::Entry::Vacant(_) => return Err(APIError::ChannelUnavailable{err: "No such channel".to_owned()})
                        }
                };
                for htlc_source in failed_htlcs.drain(..) {
@@ -1187,7 +1195,8 @@ impl<ChanSigner: ChannelKeys, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
                                                        res.extend_from_slice(&byte_utils::be32_to_array(msg.cltv_expiry));
                                                }
                                                else if code == 0x1000 | 20 {
-                                                       res.extend_from_slice(&byte_utils::be16_to_array(chan_update.contents.flags));
+                                                       // TODO: underspecified, follow https://github.com/lightningnetwork/lightning-rfc/issues/791
+                                                       res.extend_from_slice(&byte_utils::be16_to_array(0));
                                                }
                                                res.extend_from_slice(&chan_update.encode_with_len()[..]);
                                        }
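
Why the flags bytes become zero here: for failure code 0x1000|20 (`channel_disabled`), BOLT 4 has the failing node prepend a 2-byte `flags` field before the length-prefixed `channel_update`, but what belongs in those two bytes is underspecified (the lightning-rfc issue 791 linked in the TODO), especially now that `channel_update` itself splits its old 16-bit flags field; the patch therefore writes zeroes. A rough sketch of the failure data being assembled, assuming that reading of BOLT 4:

    // Assumed channel_disabled (0x1000|20) failure data layout:
    //   [2 bytes: flags (underspecified, set to 0 here)]
    //   [2 bytes: len][len bytes: channel_update]
    fn channel_disabled_data(encoded_update_with_len: &[u8]) -> Vec<u8> {
        let mut res = Vec::with_capacity(2 + encoded_update_with_len.len());
        res.extend_from_slice(&0u16.to_be_bytes());
        res.extend_from_slice(encoded_update_with_len);
        res
    }
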
@@ -1203,7 +1212,7 @@ impl<ChanSigner: ChannelKeys, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
        /// May be called with channel_state already locked!
        fn get_channel_update(&self, chan: &Channel<ChanSigner>) -> Result<msgs::ChannelUpdate, LightningError> {
                let short_channel_id = match chan.get_short_channel_id() {
-                       None => return Err(LightningError{err: "Channel not yet established", action: msgs::ErrorAction::IgnoreError}),
+                       None => return Err(LightningError{err: "Channel not yet established".to_owned(), action: msgs::ErrorAction::IgnoreError}),
                        Some(id) => id,
                };
 
@@ -1213,9 +1222,10 @@ impl<ChanSigner: ChannelKeys, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
                        chain_hash: self.genesis_hash,
                        short_channel_id: short_channel_id,
                        timestamp: chan.get_update_time_counter(),
-                       flags: (!were_node_one) as u16 | ((!chan.is_live() as u16) << 1),
+                       flags: (!were_node_one) as u8 | ((!chan.is_live() as u8) << 1),
                        cltv_expiry_delta: CLTV_EXPIRY_DELTA,
                        htlc_minimum_msat: chan.get_our_htlc_minimum_msat(),
+                       htlc_maximum_msat: OptionalField::Present(chan.get_announced_htlc_max_msat()),
                        fee_base_msat: chan.get_our_fee_base_msat(&self.fee_estimator),
                        fee_proportional_millionths: chan.get_fee_proportional_millionths(),
                        excess_data: Vec::new(),
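
The `flags` field narrows from u16 to u8 because BOLT 7's `channel_update` splits the old 16-bit field into a one-byte `message_flags` and a one-byte `channel_flags`, and the optional `htlc_maximum_msat` (signalled by the `option_channel_htlc_max` bit in `message_flags`) is carried as an `OptionalField`. The bit meanings assumed in this sketch follow that reading of BOLT 7 and are not spelled out in the patch itself:

    // channel_flags (the u8 built above):
    //   bit 0: direction (0 if we are node_1, 1 if we are node_2)
    //   bit 1: disable   (1 when the channel is not currently usable)
    // message_flags:
    //   bit 0: option_channel_htlc_max (htlc_maximum_msat is present)
    const FLAG_DIRECTION: u8 = 1 << 0;
    const FLAG_DISABLE: u8 = 1 << 1;

    fn channel_flags(were_node_one: bool, is_live: bool) -> u8 {
        ((!were_node_one) as u8) * FLAG_DIRECTION | ((!is_live) as u8) * FLAG_DISABLE
    }
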
@@ -1248,7 +1258,7 @@ impl<ChanSigner: ChannelKeys, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
                let err: Result<(), _> = loop {
                        let mut channel_lock = self.channel_state.lock().unwrap();
                        let id = match channel_lock.short_to_id.get(&path.first().unwrap().short_channel_id) {
-                               None => return Err(APIError::ChannelUnavailable{err: "No channel available with first hop!"}),
+                               None => return Err(APIError::ChannelUnavailable{err: "No channel available with first hop!".to_owned()}),
                                Some(id) => id.clone(),
                        };
 
@@ -1259,7 +1269,7 @@ impl<ChanSigner: ChannelKeys, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
                                                return Err(APIError::RouteError{err: "Node ID mismatch on first hop!"});
                                        }
                                        if !chan.get().is_live() {
-                                               return Err(APIError::ChannelUnavailable{err: "Peer for first hop currently disconnected/pending monitor update!"});
+                                               return Err(APIError::ChannelUnavailable{err: "Peer for first hop currently disconnected/pending monitor update!".to_owned()});
                                        }
                                        break_chan_entry!(self, chan.get_mut().send_htlc_and_commit(htlc_msat, payment_hash.clone(), htlc_cltv, HTLCSource::OutboundRoute {
                                                path: path.clone(),
@@ -1821,6 +1831,44 @@ impl<ChanSigner: ChannelKeys, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
                } else { false }
        }
 
+       // Fail a list of HTLCs that were just freed from the holding cell. The HTLCs need to be
+       // failed backwards or, if they were one of our outgoing HTLCs, then their failure needs to
+       // be surfaced to the user.
+       fn fail_holding_cell_htlcs(&self, mut htlcs_to_fail: Vec<(HTLCSource, PaymentHash)>, channel_id: [u8; 32]) {
+               for (htlc_src, payment_hash) in htlcs_to_fail.drain(..) {
+                       match htlc_src {
+                               HTLCSource::PreviousHopData(HTLCPreviousHopData { .. }) => {
+                                       let (failure_code, onion_failure_data) =
+                                               match self.channel_state.lock().unwrap().by_id.entry(channel_id) {
+                                                       hash_map::Entry::Occupied(chan_entry) => {
+                                                               if let Ok(upd) = self.get_channel_update(&chan_entry.get()) {
+                                                                       (0x1000|7, upd.encode_with_len())
+                                                               } else {
+                                                                       (0x4000|10, Vec::new())
+                                                               }
+                                                       },
+                                                       hash_map::Entry::Vacant(_) => (0x4000|10, Vec::new())
+                                               };
+                                       let channel_state = self.channel_state.lock().unwrap();
+                                       self.fail_htlc_backwards_internal(channel_state,
+                                               htlc_src, &payment_hash, HTLCFailReason::Reason { failure_code, data: onion_failure_data});
+                               },
+                               HTLCSource::OutboundRoute { .. } => {
+                                       self.pending_events.lock().unwrap().push(
+                                               events::Event::PaymentFailed {
+                                                       payment_hash,
+                                                       rejected_by_dest: false,
+#[cfg(test)]
+                                                       error_code: None,
+#[cfg(test)]
+                                                       error_data: None,
+                                               }
+                                       )
+                               },
+                       };
+               }
+       }
+
        /// Fails an HTLC backwards to the sender of it to us.
        /// Note that while we take a channel_state lock as input, we do *not* assume consistency here.
        /// There are several callsites that do stupid things like loop over a list of payment_hashes
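
The new `fail_holding_cell_htlcs` helper fails HTLCs that were freed from a channel's holding cell: relayed HTLCs are failed backwards (with a `channel_update`-carrying code if one is available, otherwise `unknown_next_peer`), while our own outbound HTLCs surface a `PaymentFailed` event. The codes it uses, under the usual BOLT 4 flag convention (an assumption stated here, not taken from the patch):

    // Assumed BOLT 4 failure-code flag bits:
    const BADONION: u16 = 0x8000;
    const PERM: u16 = 0x4000;
    const NODE: u16 = 0x2000;
    const UPDATE: u16 = 0x1000;

    // Codes used by fail_holding_cell_htlcs:
    const TEMPORARY_CHANNEL_FAILURE: u16 = UPDATE | 7; // 0x1000|7, carries a channel_update
    const UNKNOWN_NEXT_PEER: u16 = PERM | 10;          // 0x4000|10, no usable update
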
@@ -2215,7 +2263,7 @@ impl<ChanSigner: ChannelKeys, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
 
        fn internal_open_channel(&self, their_node_id: &PublicKey, their_features: InitFeatures, msg: &msgs::OpenChannel) -> Result<(), MsgHandleErrInternal> {
                if msg.chain_hash != self.genesis_hash {
-                       return Err(MsgHandleErrInternal::send_err_msg_no_close("Unknown genesis block hash", msg.temporary_channel_id.clone()));
+                       return Err(MsgHandleErrInternal::send_err_msg_no_close("Unknown genesis block hash".to_owned(), msg.temporary_channel_id.clone()));
                }
 
                let channel = Channel::new_from_req(&self.fee_estimator, &self.keys_manager, their_node_id.clone(), their_features, msg, 0, &self.default_configuration)
@@ -2223,7 +2271,7 @@ impl<ChanSigner: ChannelKeys, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
                let mut channel_state_lock = self.channel_state.lock().unwrap();
                let channel_state = &mut *channel_state_lock;
                match channel_state.by_id.entry(channel.channel_id()) {
-                       hash_map::Entry::Occupied(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("temporary_channel_id collision!", msg.temporary_channel_id.clone())),
+                       hash_map::Entry::Occupied(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("temporary_channel_id collision!".to_owned(), msg.temporary_channel_id.clone())),
                        hash_map::Entry::Vacant(entry) => {
                                channel_state.pending_msg_events.push(events::MessageSendEvent::SendAcceptChannel {
                                        node_id: their_node_id.clone(),
@@ -2242,12 +2290,12 @@ impl<ChanSigner: ChannelKeys, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
                        match channel_state.by_id.entry(msg.temporary_channel_id) {
                                hash_map::Entry::Occupied(mut chan) => {
                                        if chan.get().get_their_node_id() != *their_node_id {
-                                               return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.temporary_channel_id));
+                                               return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.temporary_channel_id));
                                        }
                                        try_chan_entry!(self, chan.get_mut().accept_channel(&msg, &self.default_configuration, their_features), channel_state, chan);
                                        (chan.get().get_value_satoshis(), chan.get().get_funding_redeemscript().to_v0_p2wsh(), chan.get().get_user_id())
                                },
-                               hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.temporary_channel_id))
+                               hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.temporary_channel_id))
                        }
                };
                let mut pending_events = self.pending_events.lock().unwrap();
@@ -2267,23 +2315,23 @@ impl<ChanSigner: ChannelKeys, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
                        match channel_state.by_id.entry(msg.temporary_channel_id.clone()) {
                                hash_map::Entry::Occupied(mut chan) => {
                                        if chan.get().get_their_node_id() != *their_node_id {
-                                               return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.temporary_channel_id));
+                                               return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.temporary_channel_id));
                                        }
                                        (try_chan_entry!(self, chan.get_mut().funding_created(msg, &self.logger), channel_state, chan), chan.remove())
                                },
-                               hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.temporary_channel_id))
+                               hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.temporary_channel_id))
                        }
                };
                // Because we have exclusive ownership of the channel here we can release the channel_state
                // lock before add_monitor
-               if let Err(e) = self.monitor.add_monitor(monitor_update.get_funding_txo(), monitor_update) {
+               if let Err(e) = self.monitor.add_monitor(monitor_update.get_funding_txo().0, monitor_update) {
                        match e {
                                ChannelMonitorUpdateErr::PermanentFailure => {
                                        // Note that we reply with the new channel_id in error messages if we gave up on the
                                        // channel, not the temporary_channel_id. This is compatible with ourselves, but the
                                        // spec is somewhat ambiguous here. Not a huge deal since we'll send error messages for
                                        // any messages referencing a previously-closed channel anyway.
-                                       return Err(MsgHandleErrInternal::from_finish_shutdown("ChannelMonitor storage failure", funding_msg.channel_id, chan.force_shutdown(true), None));
+                                       return Err(MsgHandleErrInternal::from_finish_shutdown("ChannelMonitor storage failure".to_owned(), funding_msg.channel_id, chan.force_shutdown(true), None));
                                },
                                ChannelMonitorUpdateErr::TemporaryFailure => {
                                        // There's no problem signing a counterparty's funding transaction if our monitor
@@ -2298,7 +2346,7 @@ impl<ChanSigner: ChannelKeys, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
                let channel_state = &mut *channel_state_lock;
                match channel_state.by_id.entry(funding_msg.channel_id) {
                        hash_map::Entry::Occupied(_) => {
-                               return Err(MsgHandleErrInternal::send_err_msg_no_close("Already had channel with the new channel_id", funding_msg.channel_id))
+                               return Err(MsgHandleErrInternal::send_err_msg_no_close("Already had channel with the new channel_id".to_owned(), funding_msg.channel_id))
                        },
                        hash_map::Entry::Vacant(e) => {
                                channel_state.pending_msg_events.push(events::MessageSendEvent::SendFundingSigned {
@@ -2318,7 +2366,7 @@ impl<ChanSigner: ChannelKeys, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
                        match channel_state.by_id.entry(msg.channel_id) {
                                hash_map::Entry::Occupied(mut chan) => {
                                        if chan.get().get_their_node_id() != *their_node_id {
-                                               return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id));
+                                               return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id));
                                        }
                                        let monitor = match chan.get_mut().funding_signed(&msg, &self.logger) {
                                                Ok(update) => update,
@@ -2329,7 +2377,7 @@ impl<ChanSigner: ChannelKeys, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
                                        }
                                        (chan.get().get_funding_txo().unwrap(), chan.get().get_user_id())
                                },
-                               hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id))
+                               hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id))
                        }
                };
                let mut pending_events = self.pending_events.lock().unwrap();
@@ -2346,7 +2394,7 @@ impl<ChanSigner: ChannelKeys, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
                match channel_state.by_id.entry(msg.channel_id) {
                        hash_map::Entry::Occupied(mut chan) => {
                                if chan.get().get_their_node_id() != *their_node_id {
-                                       return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id));
+                                       return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id));
                                }
                                try_chan_entry!(self, chan.get_mut().funding_locked(&msg), channel_state, chan);
                                if let Some(announcement_sigs) = self.get_announcement_sigs(chan.get()) {
@@ -2367,7 +2415,7 @@ impl<ChanSigner: ChannelKeys, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
                                }
                                Ok(())
                        },
-                       hash_map::Entry::Vacant(_) => Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id))
+                       hash_map::Entry::Vacant(_) => Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id))
                }
        }
 
@@ -2379,7 +2427,7 @@ impl<ChanSigner: ChannelKeys, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
                        match channel_state.by_id.entry(msg.channel_id.clone()) {
                                hash_map::Entry::Occupied(mut chan_entry) => {
                                        if chan_entry.get().get_their_node_id() != *their_node_id {
-                                               return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id));
+                                               return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id));
                                        }
                                        let (shutdown, closing_signed, dropped_htlcs) = try_chan_entry!(self, chan_entry.get_mut().shutdown(&self.fee_estimator, &msg), channel_state, chan_entry);
                                        if let Some(msg) = shutdown {
@@ -2401,7 +2449,7 @@ impl<ChanSigner: ChannelKeys, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
                                                (dropped_htlcs, Some(chan_entry.remove_entry().1))
                                        } else { (dropped_htlcs, None) }
                                },
-                               hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id))
+                               hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id))
                        }
                };
                for htlc_source in dropped_htlcs.drain(..) {
@@ -2425,7 +2473,7 @@ impl<ChanSigner: ChannelKeys, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
                        match channel_state.by_id.entry(msg.channel_id.clone()) {
                                hash_map::Entry::Occupied(mut chan_entry) => {
                                        if chan_entry.get().get_their_node_id() != *their_node_id {
-                                               return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id));
+                                               return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id));
                                        }
                                        let (closing_signed, tx) = try_chan_entry!(self, chan_entry.get_mut().closing_signed(&self.fee_estimator, &msg), channel_state, chan_entry);
                                        if let Some(msg) = closing_signed {
@@ -2446,7 +2494,7 @@ impl<ChanSigner: ChannelKeys, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
                                                (tx, Some(chan_entry.remove_entry().1))
                                        } else { (tx, None) }
                                },
-                               hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id))
+                               hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id))
                        }
                };
                if let Some(broadcast_tx) = tx {
@@ -2474,49 +2522,57 @@ impl<ChanSigner: ChannelKeys, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
                //encrypted with the same key. It's not immediately obvious how to usefully exploit that,
                //but we should prevent it anyway.
 
-               let (mut pending_forward_info, mut channel_state_lock) = self.decode_update_add_htlc_onion(msg);
+               let (pending_forward_info, mut channel_state_lock) = self.decode_update_add_htlc_onion(msg);
                let channel_state = &mut *channel_state_lock;
 
                match channel_state.by_id.entry(msg.channel_id) {
                        hash_map::Entry::Occupied(mut chan) => {
                                if chan.get().get_their_node_id() != *their_node_id {
-                                       return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id));
+                                       return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id));
                                }
-                               if !chan.get().is_usable() {
+
+                               let create_pending_htlc_status = |chan: &Channel<ChanSigner>, pending_forward_info: PendingHTLCStatus, error_code: u16| {
+                                       // Ensure error_code has the UPDATE flag set, since by default we send a
+                                       // channel update along as part of failing the HTLC.
+                                       assert!((error_code & 0x1000) != 0);
                                        // If the update_add is completely bogus, the call will Err and we will close,
                                        // but if we've sent a shutdown and they haven't acknowledged it yet, we just
                                        // want to reject the new HTLC and fail it backwards instead of forwarding.
-                                       if let PendingHTLCStatus::Forward(PendingHTLCInfo { incoming_shared_secret, .. }) = pending_forward_info {
-                                               let chan_update = self.get_channel_update(chan.get());
-                                               pending_forward_info = PendingHTLCStatus::Fail(HTLCFailureMsg::Relay(msgs::UpdateFailHTLC {
-                                                       channel_id: msg.channel_id,
-                                                       htlc_id: msg.htlc_id,
-                                                       reason: if let Ok(update) = chan_update {
-                                                               // TODO: Note that |20 is defined as "channel FROM the processing
-                                                               // node has been disabled" (emphasis mine), which seems to imply
-                                                               // that we can't return |20 for an inbound channel being disabled.
-                                                               // This probably needs a spec update but should definitely be
-                                                               // allowed.
-                                                               onion_utils::build_first_hop_failure_packet(&incoming_shared_secret, 0x1000|20, &{
+                                       match pending_forward_info {
+                                               PendingHTLCStatus::Forward(PendingHTLCInfo { ref incoming_shared_secret, .. }) => {
+                                                       let reason = if let Ok(upd) = self.get_channel_update(chan) {
+                                                               onion_utils::build_first_hop_failure_packet(incoming_shared_secret, error_code, &{
                                                                        let mut res = Vec::with_capacity(8 + 128);
-                                                                       res.extend_from_slice(&byte_utils::be16_to_array(update.contents.flags));
-                                                                       res.extend_from_slice(&update.encode_with_len()[..]);
+                                                                       // TODO: underspecified, follow https://github.com/lightningnetwork/lightning-rfc/issues/791
+                                                                       res.extend_from_slice(&byte_utils::be16_to_array(0));
+                                                                       res.extend_from_slice(&upd.encode_with_len()[..]);
                                                                        res
                                                                }[..])
                                                        } else {
-                                                               // This can only happen if the channel isn't in the fully-funded
-                                                               // state yet, implying our counterparty is trying to route payments
-                                                               // over the channel back to themselves (cause no one else should
-                                                               // know the short_id is a lightning channel yet). We should have no
-                                                               // problem just calling this unknown_next_peer
-                                                               onion_utils::build_first_hop_failure_packet(&incoming_shared_secret, 0x4000|10, &[])
-                                                       },
-                                               }));
+                                                               // The only case where we'd be unable to
+                                                               // successfully get a channel update is if the
+                                                               // channel isn't in the fully-funded state yet,
+                                                               // implying our counterparty is trying to route
+                                                               // payments over the channel back to themselves
+                                                               // (cause no one else should know the short_id
+                                                               // is a lightning channel yet). We should have
+                                                               // no problem just calling this
+                                                               // unknown_next_peer (0x4000|10).
+                                                               onion_utils::build_first_hop_failure_packet(incoming_shared_secret, 0x4000|10, &[])
+                                                       };
+                                                       let msg = msgs::UpdateFailHTLC {
+                                                               channel_id: msg.channel_id,
+                                                               htlc_id: msg.htlc_id,
+                                                               reason
+                                                       };
+                                                       PendingHTLCStatus::Fail(HTLCFailureMsg::Relay(msg))
+                                               },
+                                               _ => pending_forward_info
                                        }
-                               }
-                               try_chan_entry!(self, chan.get_mut().update_add_htlc(&msg, pending_forward_info), channel_state, chan);
+                               };
+                               try_chan_entry!(self, chan.get_mut().update_add_htlc(&msg, pending_forward_info, create_pending_htlc_status, &self.logger), channel_state, chan);
                        },
-                       hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id))
+                       hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id))
                }
                Ok(())
        }
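
Rather than overwriting `pending_forward_info` up front when the channel is unusable, the handler now builds a `create_pending_htlc_status` closure and lets `Channel::update_add_htlc` decide whether (and with which `UPDATE`-flagged code) to turn the forward into a failure. A minimal sketch of that callback shape, with hypothetical simplified types rather than the real `Channel` API:

    enum Status { Forward(u64), Fail(u16) }

    struct Chan { usable: bool }

    impl Chan {
        // The channel decides whether to keep the forward or ask the caller-supplied
        // closure to convert it into a failure with an UPDATE-flagged error code.
        fn update_add_htlc<F: Fn(&Chan, Status, u16) -> Status>(
            &mut self, status: Status, make_fail: F,
        ) -> Status {
            if self.usable { status } else { make_fail(self, status, 0x1000 | 20) }
        }
    }
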
@@ -2528,11 +2584,11 @@ impl<ChanSigner: ChannelKeys, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
                        match channel_state.by_id.entry(msg.channel_id) {
                                hash_map::Entry::Occupied(mut chan) => {
                                        if chan.get().get_their_node_id() != *their_node_id {
-                                               return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id));
+                                               return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id));
                                        }
                                        try_chan_entry!(self, chan.get_mut().update_fulfill_htlc(&msg), channel_state, chan)
                                },
-                               hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id))
+                               hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id))
                        }
                };
                self.claim_funds_internal(channel_lock, htlc_source, msg.payment_preimage.clone());
@@ -2545,11 +2601,11 @@ impl<ChanSigner: ChannelKeys, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
                match channel_state.by_id.entry(msg.channel_id) {
                        hash_map::Entry::Occupied(mut chan) => {
                                if chan.get().get_their_node_id() != *their_node_id {
-                                       return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id));
+                                       return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id));
                                }
                                try_chan_entry!(self, chan.get_mut().update_fail_htlc(&msg, HTLCFailReason::LightningError { err: msg.reason.clone() }), channel_state, chan);
                        },
-                       hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id))
+                       hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id))
                }
                Ok(())
        }
@@ -2560,16 +2616,16 @@ impl<ChanSigner: ChannelKeys, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
                match channel_state.by_id.entry(msg.channel_id) {
                        hash_map::Entry::Occupied(mut chan) => {
                                if chan.get().get_their_node_id() != *their_node_id {
-                                       return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id));
+                                       return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id));
                                }
                                if (msg.failure_code & 0x8000) == 0 {
-                                       let chan_err: ChannelError = ChannelError::Close("Got update_fail_malformed_htlc with BADONION not set");
+                                       let chan_err: ChannelError = ChannelError::Close("Got update_fail_malformed_htlc with BADONION not set".to_owned());
                                        try_chan_entry!(self, Err(chan_err), channel_state, chan);
                                }
                                try_chan_entry!(self, chan.get_mut().update_fail_malformed_htlc(&msg, HTLCFailReason::Reason { failure_code: msg.failure_code, data: Vec::new() }), channel_state, chan);
                                Ok(())
                        },
-                       hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id))
+                       hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id))
                }
        }
 
@@ -2579,7 +2635,7 @@ impl<ChanSigner: ChannelKeys, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
                match channel_state.by_id.entry(msg.channel_id) {
                        hash_map::Entry::Occupied(mut chan) => {
                                if chan.get().get_their_node_id() != *their_node_id {
-                                       return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id));
+                                       return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id));
                                }
                                let (revoke_and_ack, commitment_signed, closing_signed, monitor_update) =
                                        match chan.get_mut().commitment_signed(&msg, &self.fee_estimator, &self.logger) {
@@ -2621,7 +2677,7 @@ impl<ChanSigner: ChannelKeys, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
                                }
                                Ok(())
                        },
-                       hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id))
+                       hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id))
                }
        }
 
@@ -2661,23 +2717,27 @@ impl<ChanSigner: ChannelKeys, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
        }
 
        fn internal_revoke_and_ack(&self, their_node_id: &PublicKey, msg: &msgs::RevokeAndACK) -> Result<(), MsgHandleErrInternal> {
-               let (pending_forwards, mut pending_failures, short_channel_id) = {
+               let mut htlcs_to_fail = Vec::new();
+               let res = loop {
                        let mut channel_state_lock = self.channel_state.lock().unwrap();
                        let channel_state = &mut *channel_state_lock;
                        match channel_state.by_id.entry(msg.channel_id) {
                                hash_map::Entry::Occupied(mut chan) => {
                                        if chan.get().get_their_node_id() != *their_node_id {
-                                               return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id));
+                                               break Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id));
                                        }
                                        let was_frozen_for_monitor = chan.get().is_awaiting_monitor_update();
-                                       let (commitment_update, pending_forwards, pending_failures, closing_signed, monitor_update) =
-                                               try_chan_entry!(self, chan.get_mut().revoke_and_ack(&msg, &self.fee_estimator, &self.logger), channel_state, chan);
+                                       let (commitment_update, pending_forwards, pending_failures, closing_signed, monitor_update, htlcs_to_fail_in) =
+                                               break_chan_entry!(self, chan.get_mut().revoke_and_ack(&msg, &self.fee_estimator, &self.logger), channel_state, chan);
+                                       htlcs_to_fail = htlcs_to_fail_in;
                                        if let Err(e) = self.monitor.update_monitor(chan.get().get_funding_txo().unwrap(), monitor_update) {
                                                if was_frozen_for_monitor {
                                                        assert!(commitment_update.is_none() && closing_signed.is_none() && pending_forwards.is_empty() && pending_failures.is_empty());
-                                                       return Err(MsgHandleErrInternal::ignore_no_close("Previous monitor update failure prevented responses to RAA"));
+                                                       break Err(MsgHandleErrInternal::ignore_no_close("Previous monitor update failure prevented responses to RAA".to_owned()));
                                                } else {
-                                                       return_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::CommitmentFirst, false, commitment_update.is_some(), pending_forwards, pending_failures);
+                                                       if let Err(e) = handle_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::CommitmentFirst, false, commitment_update.is_some(), pending_forwards, pending_failures) {
+                                                               break Err(e);
+                                                       } else { unreachable!(); }
                                                }
                                        }
                                        if let Some(updates) = commitment_update {
@@ -2692,17 +2752,22 @@ impl<ChanSigner: ChannelKeys, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
                                                        msg,
                                                });
                                        }
-                                       (pending_forwards, pending_failures, chan.get().get_short_channel_id().expect("RAA should only work on a short-id-available channel"))
+                                       break Ok((pending_forwards, pending_failures, chan.get().get_short_channel_id().expect("RAA should only work on a short-id-available channel")))
                                },
-                               hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id))
+                               hash_map::Entry::Vacant(_) => break Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id))
                        }
                };
-               for failure in pending_failures.drain(..) {
-                       self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), failure.0, &failure.1, failure.2);
+               self.fail_holding_cell_htlcs(htlcs_to_fail, msg.channel_id);
+               match res {
+                       Ok((pending_forwards, mut pending_failures, short_channel_id)) => {
+                               for failure in pending_failures.drain(..) {
+                                       self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), failure.0, &failure.1, failure.2);
+                               }
+                               self.forward_htlcs(&mut [(short_channel_id, pending_forwards)]);
+                               Ok(())
+                       },
+                       Err(e) => Err(e)
                }
-               self.forward_htlcs(&mut [(short_channel_id, pending_forwards)]);
-
-               Ok(())
        }
 
        fn internal_update_fee(&self, their_node_id: &PublicKey, msg: &msgs::UpdateFee) -> Result<(), MsgHandleErrInternal> {
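
`internal_revoke_and_ack` above is restructured around `let res = loop { ... break Ok(...) }`, with `break_chan_entry!` and `break Err(...)` replacing the early `return`s, so that `fail_holding_cell_htlcs` always runs after the channel lock is dropped, on the error paths as well as on success. A minimal sketch of that control-flow pattern, with placeholder names:

    // The "loop as a labelled block" pattern: every exit goes through `break`,
    // so the cleanup after the loop runs whichever branch was taken inside it.
    fn handle(msg_ok: bool) -> Result<(), &'static str> {
        let mut cleanup_items: Vec<u32> = Vec::new();
        let res = loop {
            if !msg_ok {
                break Err("failed while holding the lock");
            }
            cleanup_items.push(1);
            break Ok(());
        };
        // Runs regardless of which branch broke out of the loop above.
        for _item in cleanup_items.drain(..) { /* fail each freed item here */ }
        res
    }
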
@@ -2711,11 +2776,11 @@ impl<ChanSigner: ChannelKeys, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
                match channel_state.by_id.entry(msg.channel_id) {
                        hash_map::Entry::Occupied(mut chan) => {
                                if chan.get().get_their_node_id() != *their_node_id {
-                                       return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id));
+                                       return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id));
                                }
                                try_chan_entry!(self, chan.get_mut().update_fee(&self.fee_estimator, &msg), channel_state, chan);
                        },
-                       hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id))
+                       hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id))
                }
                Ok(())
        }
@@ -2727,10 +2792,10 @@ impl<ChanSigner: ChannelKeys, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
                match channel_state.by_id.entry(msg.channel_id) {
                        hash_map::Entry::Occupied(mut chan) => {
                                if chan.get().get_their_node_id() != *their_node_id {
-                                       return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id));
+                                       return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id));
                                }
                                if !chan.get().is_usable() {
-                                       return Err(MsgHandleErrInternal::from_no_close(LightningError{err: "Got an announcement_signatures before we were ready for it", action: msgs::ErrorAction::IgnoreError}));
+                                       return Err(MsgHandleErrInternal::from_no_close(LightningError{err: "Got an announcement_signatures before we were ready for it".to_owned(), action: msgs::ErrorAction::IgnoreError}));
                                }
 
                                let our_node_id = self.get_our_node_id();
@@ -2739,10 +2804,21 @@ impl<ChanSigner: ChannelKeys, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
 
                                let were_node_one = announcement.node_id_1 == our_node_id;
                                let msghash = hash_to_message!(&Sha256dHash::hash(&announcement.encode()[..])[..]);
-                               if self.secp_ctx.verify(&msghash, &msg.node_signature, if were_node_one { &announcement.node_id_2 } else { &announcement.node_id_1 }).is_err() ||
-                                               self.secp_ctx.verify(&msghash, &msg.bitcoin_signature, if were_node_one { &announcement.bitcoin_key_2 } else { &announcement.bitcoin_key_1 }).is_err() {
-                                       let chan_err: ChannelError = ChannelError::Close("Bad announcement_signatures node_signature");
-                                       try_chan_entry!(self, Err(chan_err), channel_state, chan);
+                               {
+                                       let their_node_key = if were_node_one { &announcement.node_id_2 } else { &announcement.node_id_1 };
+                                       let their_bitcoin_key = if were_node_one { &announcement.bitcoin_key_2 } else { &announcement.bitcoin_key_1 };
+                                       match (self.secp_ctx.verify(&msghash, &msg.node_signature, their_node_key),
+                                                  self.secp_ctx.verify(&msghash, &msg.bitcoin_signature, their_bitcoin_key)) {
+                                               (Err(e), _) => {
+                                                       let chan_err: ChannelError = ChannelError::Close(format!("Bad announcement_signatures. Failed to verify node_signature: {:?}. Maybe using different node_secret for transport and routing msg? UnsignedChannelAnnouncement used for verification is {:?}. their_node_key is {:?}", e, &announcement, their_node_key));
+                                                       try_chan_entry!(self, Err(chan_err), channel_state, chan);
+                                               },
+                                               (_, Err(e)) => {
+                                                       let chan_err: ChannelError = ChannelError::Close(format!("Bad announcement_signatures. Failed to verify bitcoin_signature: {:?}. UnsignedChannelAnnouncement used for verification is {:?}. their_bitcoin_key is ({:?})", e, &announcement, their_bitcoin_key));
+                                                       try_chan_entry!(self, Err(chan_err), channel_state, chan);
+                                               },
+                                               _ => {}
+                                       }
                                }
 
                                let our_node_sig = self.secp_ctx.sign(&msghash, &self.our_network_key);
@@ -2758,7 +2834,7 @@ impl<ChanSigner: ChannelKeys, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
                                        update_msg: self.get_channel_update(chan.get()).unwrap(), // can only fail if we're not in a ready state
                                });
                        },
-                       hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id))
+                       hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id))
                }
                Ok(())
        }
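
The rewritten verification above reports which of the two announcement signatures failed rather than collapsing both checks into a single is_err(). A self-contained sketch of that pattern (a toy function, not the real ChannelManager code), assuming the rust-secp256k1 API of this era where verify() takes a message, signature, and public key:

    use secp256k1::{Message, PublicKey, Secp256k1, Signature, VerifyOnly};

    // Verify both signatures over the same hash and say which one failed.
    fn check_announcement_sigs(secp_ctx: &Secp256k1<VerifyOnly>, msghash: &Message,
            node_sig: &Signature, node_key: &PublicKey,
            bitcoin_sig: &Signature, bitcoin_key: &PublicKey) -> Result<(), String> {
        match (secp_ctx.verify(msghash, node_sig, node_key),
               secp_ctx.verify(msghash, bitcoin_sig, bitcoin_key)) {
            (Err(e), _) => Err(format!("Failed to verify node_signature: {:?}", e)),
            (_, Err(e)) => Err(format!("Failed to verify bitcoin_signature: {:?}", e)),
            _ => Ok(()),
        }
    }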
@@ -2770,8 +2846,12 @@ impl<ChanSigner: ChannelKeys, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
                match channel_state.by_id.entry(msg.channel_id) {
                        hash_map::Entry::Occupied(mut chan) => {
                                if chan.get().get_their_node_id() != *their_node_id {
-                                       return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id));
+                                       return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id));
                                }
+                               // Currently, we expect all holding cell update_adds to be dropped on peer
+                               // disconnect, so Channel's reestablish will never hand us any holding cell
+                               // freed HTLCs to fail backwards. If in the future we no longer drop pending
+                               // add-HTLCs on disconnect, we may be handed HTLCs to fail backwards here.
                                let (funding_locked, revoke_and_ack, commitment_update, monitor_update_opt, mut order, shutdown) =
                                        try_chan_entry!(self, chan.get_mut().channel_reestablish(msg, &self.logger), channel_state, chan);
                                if let Some(monitor_update) = monitor_update_opt {
@@ -2829,7 +2909,7 @@ impl<ChanSigner: ChannelKeys, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
                                }
                                Ok(())
                        },
-                       hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id))
+                       hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id))
                }
        }
 
@@ -2838,7 +2918,7 @@ impl<ChanSigner: ChannelKeys, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
        /// PeerManager::process_events afterwards.
        /// Note: This API is likely to change!
        #[doc(hidden)]
-       pub fn update_fee(&self, channel_id: [u8;32], feerate_per_kw: u64) -> Result<(), APIError> {
+       pub fn update_fee(&self, channel_id: [u8;32], feerate_per_kw: u32) -> Result<(), APIError> {
                let _ = self.total_consistency_lock.read().unwrap();
                let their_node_id;
                let err: Result<(), _> = loop {
@@ -2846,16 +2926,16 @@ impl<ChanSigner: ChannelKeys, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
                        let channel_state = &mut *channel_state_lock;
 
                        match channel_state.by_id.entry(channel_id) {
-                               hash_map::Entry::Vacant(_) => return Err(APIError::APIMisuseError{err: "Failed to find corresponding channel"}),
+                               hash_map::Entry::Vacant(_) => return Err(APIError::APIMisuseError{err: format!("Failed to find corresponding channel for id {}", channel_id.to_hex())}),
                                hash_map::Entry::Occupied(mut chan) => {
                                        if !chan.get().is_outbound() {
-                                               return Err(APIError::APIMisuseError{err: "update_fee cannot be sent for an inbound channel"});
+                                               return Err(APIError::APIMisuseError{err: "update_fee cannot be sent for an inbound channel".to_owned()});
                                        }
                                        if chan.get().is_awaiting_monitor_update() {
                                                return Err(APIError::MonitorUpdateFailed);
                                        }
                                        if !chan.get().is_live() {
-                                               return Err(APIError::ChannelUnavailable{err: "Channel is either not yet fully established or peer is currently disconnected"});
+                                               return Err(APIError::ChannelUnavailable{err: "Channel is either not yet fully established or peer is currently disconnected".to_owned()});
                                        }
                                        their_node_id = chan.get().get_their_node_id();
                                        if let Some((update_fee, commitment_signed, monitor_update)) =
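
Callers of the public update_fee API are affected twice by this hunk: feerate_per_kw is now a u32, and the APIError variants carry owned Strings. A hedged usage sketch, where `channel_manager` and `channel_id` are assumed to already exist in the caller's context and the feerate value is arbitrary:

    use lightning::util::errors::APIError;

    // Request a feerate update on an outbound, live channel and surface the
    // String-typed error details on failure.
    match channel_manager.update_fee(channel_id, 253u32) {
        Ok(()) => {},
        Err(APIError::APIMisuseError { err }) => println!("misuse: {}", err),
        Err(APIError::ChannelUnavailable { err }) => println!("unavailable: {}", err),
        Err(e) => println!("update_fee failed: {:?}", e),
    }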
@@ -2889,7 +2969,7 @@ impl<ChanSigner: ChannelKeys, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
 }
 
 impl<ChanSigner: ChannelKeys, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> events::MessageSendEventsProvider for ChannelManager<ChanSigner, M, T, K, F, L>
-       where M::Target: ManyChannelMonitor<ChanSigner>,
+       where M::Target: ManyChannelMonitor<Keys=ChanSigner>,
         T::Target: BroadcasterInterface,
         K::Target: KeysInterface<ChanKeySigner = ChanSigner>,
         F::Target: FeeEstimator,
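
This and the remaining where-clause hunks below all make the same change: ManyChannelMonitor now exposes its signer through an associated type, so the bound is written Keys=ChanSigner instead of passing ChanSigner as a generic parameter. A toy, self-contained illustration of bounding a Deref target by an associated type (the trait and types here are invented, not the real ones):

    use std::ops::Deref;

    trait MonitorLike {
        type Keys;
        fn note_keys(&self, _keys: &Self::Keys) {}
    }

    struct DummyKeys;
    struct DummyMonitor;
    impl MonitorLike for DummyMonitor { type Keys = DummyKeys; }

    // The bound names the associated type explicitly, mirroring
    // `M::Target: ManyChannelMonitor<Keys=ChanSigner>` above.
    fn uses_monitor<M: Deref>(mon: M) where M::Target: MonitorLike<Keys = DummyKeys> {
        mon.note_keys(&DummyKeys);
    }

    fn main() { uses_monitor(&DummyMonitor); }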
@@ -2920,7 +3000,7 @@ impl<ChanSigner: ChannelKeys, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
 }
 
 impl<ChanSigner: ChannelKeys, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> events::EventsProvider for ChannelManager<ChanSigner, M, T, K, F, L>
-       where M::Target: ManyChannelMonitor<ChanSigner>,
+       where M::Target: ManyChannelMonitor<Keys=ChanSigner>,
         T::Target: BroadcasterInterface,
         K::Target: KeysInterface<ChanKeySigner = ChanSigner>,
         F::Target: FeeEstimator,
@@ -2952,13 +3032,13 @@ impl<ChanSigner: ChannelKeys, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
 
 impl<ChanSigner: ChannelKeys, M: Deref + Sync + Send, T: Deref + Sync + Send, K: Deref + Sync + Send, F: Deref + Sync + Send, L: Deref + Sync + Send>
        ChainListener for ChannelManager<ChanSigner, M, T, K, F, L>
-       where M::Target: ManyChannelMonitor<ChanSigner>,
+       where M::Target: ManyChannelMonitor<Keys=ChanSigner>,
         T::Target: BroadcasterInterface,
         K::Target: KeysInterface<ChanKeySigner = ChanSigner>,
         F::Target: FeeEstimator,
                                L::Target: Logger,
 {
-       fn block_connected(&self, header: &BlockHeader, height: u32, txn_matched: &[&Transaction], indexes_of_txn_matched: &[u32]) {
+       fn block_connected(&self, header: &BlockHeader, height: u32, txn_matched: &[&Transaction], indexes_of_txn_matched: &[usize]) {
                let header_hash = header.bitcoin_hash();
                log_trace!(self.logger, "Block {} at height {} connected with {} txn matched", header_hash, height, txn_matched.len());
                let _ = self.total_consistency_lock.read().unwrap();
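
block_connected now takes the positions of the matched transactions within the block as &[usize] rather than &[u32]. A small self-contained sketch of adapting indices from an older integration (the u32 source here is made up):

    // Convert u32 positions (older interface) into the usize slice the
    // listener now expects.
    let old_style: Vec<u32> = vec![1, 4, 7];
    let indexes_of_txn_matched: Vec<usize> = old_style.iter().map(|&i| i as usize).collect();
    assert_eq!(indexes_of_txn_matched, vec![1usize, 4, 7]);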
@@ -3120,7 +3200,7 @@ impl<ChanSigner: ChannelKeys, M: Deref + Sync + Send, T: Deref + Sync + Send, K:
 
 impl<ChanSigner: ChannelKeys, M: Deref + Sync + Send, T: Deref + Sync + Send, K: Deref + Sync + Send, F: Deref + Sync + Send, L: Deref + Sync + Send>
        ChannelMessageHandler for ChannelManager<ChanSigner, M, T, K, F, L>
-       where M::Target: ManyChannelMonitor<ChanSigner>,
+       where M::Target: ManyChannelMonitor<Keys=ChanSigner>,
         T::Target: BroadcasterInterface,
         K::Target: KeysInterface<ChanKeySigner = ChanSigner>,
         F::Target: FeeEstimator,
@@ -3238,6 +3318,10 @@ impl<ChanSigner: ChannelKeys, M: Deref + Sync + Send, T: Deref + Sync + Send, K:
                                log_debug!(self.logger, "Marking channels with {} disconnected and generating channel_updates", log_pubkey!(their_node_id));
                                channel_state.by_id.retain(|_, chan| {
                                        if chan.get_their_node_id() == *their_node_id {
+                                               // Note that currently on channel reestablish we assert that there are no
+                                               // holding cell add-HTLCs, so if in the future we stop removing uncommitted HTLCs
+                                               // on peer disconnect here, there will need to be corresponding changes in
+                                               // reestablish logic.
                                                let failed_adds = chan.remove_uncommitted_htlcs_and_mark_paused(&self.logger);
                                                chan.to_disabled_marked();
                                                if !failed_adds.is_empty() {
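
The new comment above records an invariant shared with the channel_reestablish path: uncommitted HTLCs are removed here on disconnect, so reestablish never has to hand back holding-cell adds to fail. The per-peer cleanup itself walks the channel map with HashMap::retain, deciding entry by entry whether to keep the channel; a toy, self-contained demo of that retain pattern (string placeholders, not the real decision logic):

    use std::collections::HashMap;

    // Walk the map and keep only entries the closure approves of.
    let mut by_id: HashMap<[u8; 32], &str> = HashMap::new();
    by_id.insert([0u8; 32], "keep");
    by_id.insert([1u8; 32], "drop");
    by_id.retain(|_, status| *status == "keep");
    assert_eq!(by_id.len(), 1);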
@@ -3559,7 +3643,7 @@ impl Readable for HTLCForwardInfo {
 }
 
 impl<ChanSigner: ChannelKeys + Writeable, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> Writeable for ChannelManager<ChanSigner, M, T, K, F, L>
-       where M::Target: ManyChannelMonitor<ChanSigner>,
+       where M::Target: ManyChannelMonitor<Keys=ChanSigner>,
         T::Target: BroadcasterInterface,
         K::Target: KeysInterface<ChanKeySigner = ChanSigner>,
         F::Target: FeeEstimator,
@@ -3643,7 +3727,7 @@ impl<ChanSigner: ChannelKeys + Writeable, M: Deref, T: Deref, K: Deref, F: Deref
 /// 6) Disconnect/connect blocks on the ChannelManager.
 /// 7) Register the new ChannelManager with your ChainWatchInterface.
 pub struct ChannelManagerReadArgs<'a, ChanSigner: 'a + ChannelKeys, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
-       where M::Target: ManyChannelMonitor<ChanSigner>,
+       where M::Target: ManyChannelMonitor<Keys=ChanSigner>,
         T::Target: BroadcasterInterface,
         K::Target: KeysInterface<ChanKeySigner = ChanSigner>,
         F::Target: FeeEstimator,
@@ -3693,7 +3777,7 @@ pub struct ChannelManagerReadArgs<'a, ChanSigner: 'a + ChannelKeys, M: Deref, T:
 // SimpleArcChannelManager type:
 impl<'a, ChanSigner: ChannelKeys + Readable, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
        ReadableArgs<ChannelManagerReadArgs<'a, ChanSigner, M, T, K, F, L>> for (BlockHash, Arc<ChannelManager<ChanSigner, M, T, K, F, L>>)
-       where M::Target: ManyChannelMonitor<ChanSigner>,
+       where M::Target: ManyChannelMonitor<Keys=ChanSigner>,
         T::Target: BroadcasterInterface,
         K::Target: KeysInterface<ChanKeySigner = ChanSigner>,
         F::Target: FeeEstimator,
@@ -3707,7 +3791,7 @@ impl<'a, ChanSigner: ChannelKeys + Readable, M: Deref, T: Deref, K: Deref, F: De
 
 impl<'a, ChanSigner: ChannelKeys + Readable, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
        ReadableArgs<ChannelManagerReadArgs<'a, ChanSigner, M, T, K, F, L>> for (BlockHash, ChannelManager<ChanSigner, M, T, K, F, L>)
-       where M::Target: ManyChannelMonitor<ChanSigner>,
+       where M::Target: ManyChannelMonitor<Keys=ChanSigner>,
         T::Target: BroadcasterInterface,
         K::Target: KeysInterface<ChanKeySigner = ChanSigner>,
         F::Target: FeeEstimator,