Implement `VecReadWrapper` for `MaybeReadable`
[rust-lightning] lightning/src/ln/channelmanager.rs
index ad9a7e2955494cd53a95a63542a22d42539c7cc7..38e2172ed409c0eb684c5b0b1290820c47c23c49 100644
@@ -37,7 +37,7 @@ use bitcoin::secp256k1;
 
 use chain;
 use chain::{Confirm, Watch, BestBlock};
-use chain::chaininterface::{BroadcasterInterface, FeeEstimator};
+use chain::chaininterface::{BroadcasterInterface, ConfirmationTarget, FeeEstimator};
 use chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, ChannelMonitorUpdateStep, ChannelMonitorUpdateErr, HTLC_FAIL_BACK_BUFFER, CLTV_CLAIM_BUFFER, LATENCY_GRACE_PERIOD_BLOCKS, ANTI_REORG_DELAY, MonitorEvent, CLOSED_CHANNEL_UPDATE_ID};
 use chain::transaction::{OutPoint, TransactionData};
 // Since this struct is returned in `list_channels` methods, expose it here in case users want to
@@ -60,17 +60,17 @@ use util::chacha20::{ChaCha20, ChaChaReader};
 use util::logger::{Logger, Level};
 use util::errors::APIError;
 
+use io;
 use prelude::*;
 use core::{cmp, mem};
 use core::cell::RefCell;
-use std::io::{Cursor, Read};
+use io::{Cursor, Read};
 use sync::{Arc, Condvar, Mutex, MutexGuard, RwLock, RwLockReadGuard};
 use core::sync::atomic::{AtomicUsize, Ordering};
 use core::time::Duration;
 #[cfg(any(test, feature = "allow_wallclock_use"))]
 use std::time::Instant;
 use core::ops::Deref;
-use bitcoin::hashes::hex::ToHex;
 
 // We hold various information about HTLC relay in the HTLC objects in Channel itself:
 //
@@ -490,6 +490,8 @@ pub struct ChannelManager<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref,
        /// Because adding or removing an entry is rare, we usually take an outer read lock and then
        /// operate on the inner value freely. Sadly, this prevents parallel operation when opening a
        /// new channel.
+       ///
+       /// If the `channel_state` lock is also held, `channel_state` must be acquired prior to `per_peer_state`.
        per_peer_state: RwLock<HashMap<PublicKey, Mutex<PeerState>>>,
 
        pending_events: Mutex<Vec<events::Event>>,
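A note on the lock-order comment above: the invariant only matters when both locks are held at once, and it is easy to violate from a new call site. The following stand-alone sketch (simplified stand-in fields, not the real `ChannelManager`) shows the acquisition order the comment requires:

use std::collections::HashMap;
use std::sync::{Mutex, RwLock};

struct PeerState { latest_features: u64 }

struct Manager {
    // Per the comment above: when both locks are held, this one is taken first.
    channel_state: Mutex<HashMap<[u8; 32], u64>>,
    // Only taken on its own, or after channel_state is already held.
    per_peer_state: RwLock<HashMap<[u8; 33], Mutex<PeerState>>>,
}

impl Manager {
    fn with_both_locks(&self) {
        let _channels = self.channel_state.lock().unwrap();
        let _peers = self.per_peer_state.read().unwrap();
        // Acquiring the two locks in the opposite order elsewhere risks a deadlock.
    }
}

fn main() {
    let mgr = Manager {
        channel_state: Mutex::new(HashMap::new()),
        per_peer_state: RwLock::new(HashMap::new()),
    };
    mgr.with_both_locks();
}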
@@ -870,6 +872,18 @@ macro_rules! try_chan_entry {
        }
 }
 
+macro_rules! remove_channel {
+       ($channel_state: expr, $entry: expr) => {
+               {
+                       let channel = $entry.remove_entry().1;
+                       if let Some(short_id) = channel.get_short_channel_id() {
+                               $channel_state.short_to_id.remove(&short_id);
+                       }
+                       channel
+               }
+       }
+}
+
 macro_rules! handle_monitor_err {
        ($self: ident, $err: expr, $channel_state: expr, $entry: expr, $action_type: path, $resend_raa: expr, $resend_commitment: expr) => {
                handle_monitor_err!($self, $err, $channel_state, $entry, $action_type, $resend_raa, $resend_commitment, Vec::new(), Vec::new())
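The new `remove_channel!` macro exists so every removal path cleans up the `short_to_id` index together with the `by_id` entry. A rough stand-alone equivalent of what the macro does (simplified types, not the real channel maps):

use std::collections::HashMap;

// Simplified stand-ins for the real channel maps.
struct Channel { short_channel_id: Option<u64> }

struct ChannelHolder {
    by_id: HashMap<[u8; 32], Channel>,
    short_to_id: HashMap<u64, [u8; 32]>,
}

// Mirrors the macro's shape: remove the channel and its short-id index entry in
// one place so the two maps cannot drift out of sync.
fn remove_channel(holder: &mut ChannelHolder, channel_id: &[u8; 32]) -> Option<Channel> {
    let channel = holder.by_id.remove(channel_id)?;
    if let Some(short_id) = channel.short_channel_id {
        holder.short_to_id.remove(&short_id);
    }
    Some(channel)
}

fn main() {
    let mut holder = ChannelHolder { by_id: HashMap::new(), short_to_id: HashMap::new() };
    holder.by_id.insert([0; 32], Channel { short_channel_id: Some(42) });
    holder.short_to_id.insert(42, [0; 32]);
    assert!(remove_channel(&mut holder, &[0; 32]).is_some());
    assert!(holder.short_to_id.is_empty());
}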
@@ -1164,8 +1178,18 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                        return Err(APIError::APIMisuseError { err: format!("Channel value must be at least 1000 satoshis. It was {}", channel_value_satoshis) });
                }
 
-               let config = if override_config.is_some() { override_config.as_ref().unwrap() } else { &self.default_configuration };
-               let channel = Channel::new_outbound(&self.fee_estimator, &self.keys_manager, their_network_key, channel_value_satoshis, push_msat, user_id, config)?;
+               let channel = {
+                       let per_peer_state = self.per_peer_state.read().unwrap();
+                       match per_peer_state.get(&their_network_key) {
+                               Some(peer_state) => {
+                                       let peer_state = peer_state.lock().unwrap();
+                                       let their_features = &peer_state.latest_features;
+                                       let config = if override_config.is_some() { override_config.as_ref().unwrap() } else { &self.default_configuration };
+                                       Channel::new_outbound(&self.fee_estimator, &self.keys_manager, their_network_key, their_features, channel_value_satoshis, push_msat, user_id, config)?
+                               },
+                               None => return Err(APIError::ChannelUnavailable { err: format!("Not connected to node: {}", their_network_key) }),
+                       }
+               };
                let res = channel.get_open_channel(self.genesis_hash.clone());
 
                let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
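`create_channel` now reads `per_peer_state` before building the channel, so opening a channel to a peer we are not connected to fails immediately with `ChannelUnavailable`, and the peer's negotiated features can be passed to `Channel::new_outbound`. A simplified sketch of that gate (illustrative names, not the real API):

use std::collections::HashMap;
use std::sync::{Mutex, RwLock};

#[derive(Debug)]
enum ApiError { ChannelUnavailable(String) }

struct PeerState { latest_features: u32 }

fn new_outbound_channel(
    peers: &RwLock<HashMap<String, Mutex<PeerState>>>,
    peer_id: &str,
) -> Result<u32, ApiError> {
    let per_peer_state = peers.read().unwrap();
    match per_peer_state.get(peer_id) {
        Some(peer_state) => {
            // The peer's negotiated features feed into the new channel's parameters.
            let peer_state = peer_state.lock().unwrap();
            Ok(peer_state.latest_features)
        },
        None => Err(ApiError::ChannelUnavailable(format!("Not connected to node: {}", peer_id))),
    }
}

fn main() {
    let peers = RwLock::new(HashMap::new());
    assert!(new_outbound_channel(&peers, "02abc").is_err());
    peers.write().unwrap().insert("02abc".to_owned(), Mutex::new(PeerState { latest_features: 1 }));
    assert!(new_outbound_channel(&peers, "02abc").is_ok());
}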
@@ -1259,40 +1283,61 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
        pub fn close_channel(&self, channel_id: &[u8; 32]) -> Result<(), APIError> {
                let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
 
-               let (mut failed_htlcs, chan_option) = {
+               let counterparty_node_id;
+               let mut failed_htlcs: Vec<(HTLCSource, PaymentHash)>;
+               let result: Result<(), _> = loop {
                        let mut channel_state_lock = self.channel_state.lock().unwrap();
                        let channel_state = &mut *channel_state_lock;
                        match channel_state.by_id.entry(channel_id.clone()) {
                                hash_map::Entry::Occupied(mut chan_entry) => {
-                                       let (shutdown_msg, failed_htlcs) = chan_entry.get_mut().get_shutdown()?;
+                                       counterparty_node_id = chan_entry.get().get_counterparty_node_id();
+                                       let per_peer_state = self.per_peer_state.read().unwrap();
+                                       let (shutdown_msg, monitor_update, htlcs) = match per_peer_state.get(&counterparty_node_id) {
+                                               Some(peer_state) => {
+                                                       let peer_state = peer_state.lock().unwrap();
+                                                       let their_features = &peer_state.latest_features;
+                                                       chan_entry.get_mut().get_shutdown(&self.keys_manager, their_features)?
+                                               },
+                                               None => return Err(APIError::ChannelUnavailable { err: format!("Not connected to node: {}", counterparty_node_id) }),
+                                       };
+                                       failed_htlcs = htlcs;
+
+                                       // Update the monitor with the shutdown script if necessary.
+                                       if let Some(monitor_update) = monitor_update {
+                                               if let Err(e) = self.chain_monitor.update_channel(chan_entry.get().get_funding_txo().unwrap(), monitor_update) {
+                                                       let (result, is_permanent) =
+                                                               handle_monitor_err!(self, e, channel_state.short_to_id, chan_entry.get_mut(), RAACommitmentOrder::CommitmentFirst, false, false, Vec::new(), Vec::new(), chan_entry.key());
+                                                       if is_permanent {
+                                                               remove_channel!(channel_state, chan_entry);
+                                                               break result;
+                                                       }
+                                               }
+                                       }
+
                                        channel_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown {
-                                               node_id: chan_entry.get().get_counterparty_node_id(),
+                                               node_id: counterparty_node_id,
                                                msg: shutdown_msg
                                        });
+
                                        if chan_entry.get().is_shutdown() {
-                                               if let Some(short_id) = chan_entry.get().get_short_channel_id() {
-                                                       channel_state.short_to_id.remove(&short_id);
+                                               let channel = remove_channel!(channel_state, chan_entry);
+                                               if let Ok(channel_update) = self.get_channel_update_for_broadcast(&channel) {
+                                                       channel_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
+                                                               msg: channel_update
+                                                       });
                                                }
-                                               (failed_htlcs, Some(chan_entry.remove_entry().1))
-                                       } else { (failed_htlcs, None) }
+                                       }
+                                       break Ok(());
                                },
                                hash_map::Entry::Vacant(_) => return Err(APIError::ChannelUnavailable{err: "No such channel".to_owned()})
                        }
                };
+
                for htlc_source in failed_htlcs.drain(..) {
                        self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_source.0, &htlc_source.1, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() });
                }
-               let chan_update = if let Some(chan) = chan_option {
-                       self.get_channel_update_for_broadcast(&chan).ok()
-               } else { None };
-
-               if let Some(update) = chan_update {
-                       let mut channel_state = self.channel_state.lock().unwrap();
-                       channel_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
-                               msg: update
-                       });
-               }
 
+               let _ = handle_error!(self, result, counterparty_node_id);
                Ok(())
        }
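Both the reworked `close_channel` here and `internal_shutdown` below use a `loop { ... break value; }` block purely as an early-exit scope: the `channel_state` lock is taken inside the block, a `Result` is carried out via `break`, and the failed HTLCs plus `handle_error!` are processed only after the lock has been dropped. A minimal stand-alone sketch of that pattern (illustrative types only):

use std::sync::Mutex;

struct State { pending: Vec<u32> }

fn close(state_lock: &Mutex<State>) -> Result<(), String> {
    let mut failed: Vec<u32> = Vec::new();
    // The loop never actually iterates: it is only an early-exit scope that can
    // `break` out with a Result while the lock is still held.
    let result: Result<(), String> = loop {
        let mut state = state_lock.lock().unwrap();
        if state.pending.is_empty() {
            break Err("nothing to close".to_owned());
        }
        failed.append(&mut state.pending);
        break Ok(());
    }; // the MutexGuard has been dropped by the time we get here
    // Post-processing happens without the lock, mirroring how failed HTLCs and
    // handle_error! run after channel_state has been released.
    for id in failed.drain(..) {
        println!("failing HTLC {}", id);
    }
    result
}

fn main() {
    let state = Mutex::new(State { pending: vec![1, 2, 3] });
    assert!(close(&state).is_ok());
    assert!(close(&state).is_err());
}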
 
@@ -1432,8 +1477,8 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
 
                let mut chacha = ChaCha20::new(&rho, &[0u8; 8]);
                let mut chacha_stream = ChaChaReader { chacha: &mut chacha, read: Cursor::new(&msg.onion_routing_packet.hop_data[..]) };
-               let (next_hop_data, next_hop_hmac) = {
-                       match msgs::OnionHopData::read(&mut chacha_stream) {
+               let (next_hop_data, next_hop_hmac): (msgs::OnionHopData, _) = {
+                       match <msgs::OnionHopData as Readable>::read(&mut chacha_stream) {
                                Err(err) => {
                                        let error_code = match err {
                                                msgs::DecodeError::UnknownVersion => 0x4000 | 1, // unknown realm byte
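The change to `<msgs::OnionHopData as Readable>::read` is plain fully-qualified syntax; presumably the bare `msgs::OnionHopData::read(...)` call becomes ambiguous once more than one `read`-providing trait (e.g. the `MaybeReadable` family this patch series touches) is implemented and in scope. A toy illustration of that ambiguity and its resolution (hypothetical local traits, not the actual lightning `Readable`/`MaybeReadable` definitions):

use std::io::{Cursor, Read};

struct HopData(u8);

trait Readable: Sized {
    fn read<R: Read>(r: &mut R) -> Result<Self, std::io::Error>;
}

trait MaybeReadable: Sized {
    fn read<R: Read>(r: &mut R) -> Result<Option<Self>, std::io::Error>;
}

impl Readable for HopData {
    fn read<R: Read>(r: &mut R) -> Result<Self, std::io::Error> {
        let mut byte = [0u8; 1];
        r.read_exact(&mut byte)?;
        Ok(HopData(byte[0]))
    }
}

impl MaybeReadable for HopData {
    fn read<R: Read>(r: &mut R) -> Result<Option<Self>, std::io::Error> {
        Ok(Some(<HopData as Readable>::read(r)?))
    }
}

fn main() -> Result<(), std::io::Error> {
    let mut cursor = Cursor::new(vec![7u8]);
    // With both traits in scope, a bare `HopData::read(&mut cursor)` would be
    // ambiguous; fully-qualified syntax names the trait explicitly.
    let hop = <HopData as Readable>::read(&mut cursor)?;
    assert_eq!(hop.0, 7);
    Ok(())
}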
@@ -1857,6 +1902,9 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                        // for now more than 10 paths likely carries too much one-path failure.
                        return Err(PaymentSendFailure::ParameterError(APIError::RouteError{err: "Sending over more than 10 paths is not currently supported"}));
                }
+               if payment_secret.is_none() && route.paths.len() > 1 {
+                       return Err(PaymentSendFailure::ParameterError(APIError::APIMisuseError{err: "Payment secret is required for multi-path payments".to_string()}));
+               }
                let mut total_value = 0;
                let our_node_id = self.get_our_node_id();
                let mut path_errs = Vec::with_capacity(route.paths.len());
@@ -1911,9 +1959,13 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
        /// would be able to guess -- otherwise, an intermediate node may claim the payment and it will
        /// never reach the recipient.
        ///
+       /// See [`send_payment`] documentation for more details on the return value of this function.
+       ///
        /// Similar to regular payments, you MUST NOT reuse a `payment_preimage` value. See
        /// [`send_payment`] for more information about the risks of duplicate preimage usage.
        ///
+       /// Note that `route` must have exactly one path.
+       ///
        /// [`send_payment`]: Self::send_payment
        pub fn send_spontaneous_payment(&self, route: &Route, payment_preimage: Option<PaymentPreimage>) -> Result<PaymentHash, PaymentSendFailure> {
                let preimage = match payment_preimage {
@@ -2508,46 +2560,151 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                self.process_background_events();
        }
 
-       /// If a peer is disconnected we mark any channels with that peer as 'disabled'.
-       /// After some time, if channels are still disabled we need to broadcast a ChannelUpdate
-       /// to inform the network about the uselessness of these channels.
+       fn update_channel_fee(&self, short_to_id: &mut HashMap<u64, [u8; 32]>, pending_msg_events: &mut Vec<events::MessageSendEvent>, chan_id: &[u8; 32], chan: &mut Channel<Signer>, new_feerate: u32) -> (bool, NotifyOption, Result<(), MsgHandleErrInternal>) {
+               if !chan.is_outbound() { return (true, NotifyOption::SkipPersist, Ok(())); }
+               // If the feerate has decreased by less than half, don't bother
+               if new_feerate <= chan.get_feerate() && new_feerate * 2 > chan.get_feerate() {
+                       log_trace!(self.logger, "Channel {} does not qualify for a feerate change from {} to {}.",
+                               log_bytes!(chan_id[..]), chan.get_feerate(), new_feerate);
+                       return (true, NotifyOption::SkipPersist, Ok(()));
+               }
+               if !chan.is_live() {
+                       log_trace!(self.logger, "Channel {} does not qualify for a feerate change from {} to {} as it cannot currently be updated (probably the peer is disconnected).",
+                               log_bytes!(chan_id[..]), chan.get_feerate(), new_feerate);
+                       return (true, NotifyOption::SkipPersist, Ok(()));
+               }
+               log_trace!(self.logger, "Channel {} qualifies for a feerate change from {} to {}.",
+                       log_bytes!(chan_id[..]), chan.get_feerate(), new_feerate);
+
+               let mut retain_channel = true;
+               let res = match chan.send_update_fee_and_commit(new_feerate, &self.logger) {
+                       Ok(res) => Ok(res),
+                       Err(e) => {
+                               let (drop, res) = convert_chan_err!(self, e, short_to_id, chan, chan_id);
+                               if drop { retain_channel = false; }
+                               Err(res)
+                       }
+               };
+               let ret_err = match res {
+                       Ok(Some((update_fee, commitment_signed, monitor_update))) => {
+                               if let Err(e) = self.chain_monitor.update_channel(chan.get_funding_txo().unwrap(), monitor_update) {
+                                       let (res, drop) = handle_monitor_err!(self, e, short_to_id, chan, RAACommitmentOrder::CommitmentFirst, false, true, Vec::new(), Vec::new(), chan_id);
+                                       if drop { retain_channel = false; }
+                                       res
+                               } else {
+                                       pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
+                                               node_id: chan.get_counterparty_node_id(),
+                                               updates: msgs::CommitmentUpdate {
+                                                       update_add_htlcs: Vec::new(),
+                                                       update_fulfill_htlcs: Vec::new(),
+                                                       update_fail_htlcs: Vec::new(),
+                                                       update_fail_malformed_htlcs: Vec::new(),
+                                                       update_fee: Some(update_fee),
+                                                       commitment_signed,
+                                               },
+                                       });
+                                       Ok(())
+                               }
+                       },
+                       Ok(None) => Ok(()),
+                       Err(e) => Err(e),
+               };
+               (retain_channel, NotifyOption::DoPersist, ret_err)
+       }
+
+       #[cfg(fuzzing)]
+       /// In chanmon_consistency we sometimes want to perform the channel fee updates normally done
+       /// in timer_tick_occurred, but we can't generate the disabled-channel updates, as the fuzzer
+       /// considers those a fuzz failure (they usually indicate a channel force-close, which is
+       /// exactly what it wants to detect). Thus, we expose this variant here for its benefit.
+       pub fn maybe_update_chan_fees(&self) {
+               PersistenceNotifierGuard::optionally_notify(&self.total_consistency_lock, &self.persistence_notifier, || {
+                       let mut should_persist = NotifyOption::SkipPersist;
+
+                       let new_feerate = self.fee_estimator.get_est_sat_per_1000_weight(ConfirmationTarget::Normal);
+
+                       let mut handle_errors = Vec::new();
+                       {
+                               let mut channel_state_lock = self.channel_state.lock().unwrap();
+                               let channel_state = &mut *channel_state_lock;
+                               let pending_msg_events = &mut channel_state.pending_msg_events;
+                               let short_to_id = &mut channel_state.short_to_id;
+                               channel_state.by_id.retain(|chan_id, chan| {
+                                       let (retain_channel, chan_needs_persist, err) = self.update_channel_fee(short_to_id, pending_msg_events, chan_id, chan, new_feerate);
+                                       if chan_needs_persist == NotifyOption::DoPersist { should_persist = NotifyOption::DoPersist; }
+                                       if err.is_err() {
+                                               handle_errors.push(err);
+                                       }
+                                       retain_channel
+                               });
+                       }
+
+                       should_persist
+               });
+       }
+
+       /// Performs actions which should happen on startup and roughly once per minute thereafter.
        ///
-       /// This method handles all the details, and must be called roughly once per minute.
+       /// This currently includes:
+       ///  * Updating our outbound channels' feerates to match the current on-chain fee estimates,
+       ///  * Broadcasting `ChannelUpdate` messages if we've been disconnected from our peer for more
+       ///    than a minute, informing the network that they should no longer attempt to route over
+       ///    the channel.
        ///
-       /// Note that in some rare cases this may generate a `chain::Watch::update_channel` call.
+       /// Note that this may cause reentrancy through `chain::Watch::update_channel` calls or feerate
+       /// estimate fetches.
        pub fn timer_tick_occurred(&self) {
                PersistenceNotifierGuard::optionally_notify(&self.total_consistency_lock, &self.persistence_notifier, || {
                        let mut should_persist = NotifyOption::SkipPersist;
                        if self.process_background_events() { should_persist = NotifyOption::DoPersist; }
 
-                       let mut channel_state_lock = self.channel_state.lock().unwrap();
-                       let channel_state = &mut *channel_state_lock;
-                       for (_, chan) in channel_state.by_id.iter_mut() {
-                               match chan.channel_update_status() {
-                                       ChannelUpdateStatus::Enabled if !chan.is_live() => chan.set_channel_update_status(ChannelUpdateStatus::DisabledStaged),
-                                       ChannelUpdateStatus::Disabled if chan.is_live() => chan.set_channel_update_status(ChannelUpdateStatus::EnabledStaged),
-                                       ChannelUpdateStatus::DisabledStaged if chan.is_live() => chan.set_channel_update_status(ChannelUpdateStatus::Enabled),
-                                       ChannelUpdateStatus::EnabledStaged if !chan.is_live() => chan.set_channel_update_status(ChannelUpdateStatus::Disabled),
-                                       ChannelUpdateStatus::DisabledStaged if !chan.is_live() => {
-                                               if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
-                                                       channel_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
-                                                               msg: update
-                                                       });
-                                               }
-                                               should_persist = NotifyOption::DoPersist;
-                                               chan.set_channel_update_status(ChannelUpdateStatus::Disabled);
-                                       },
-                                       ChannelUpdateStatus::EnabledStaged if chan.is_live() => {
-                                               if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
-                                                       channel_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
-                                                               msg: update
-                                                       });
-                                               }
-                                               should_persist = NotifyOption::DoPersist;
-                                               chan.set_channel_update_status(ChannelUpdateStatus::Enabled);
-                                       },
-                                       _ => {},
-                               }
+                       let new_feerate = self.fee_estimator.get_est_sat_per_1000_weight(ConfirmationTarget::Normal);
+
+                       let mut handle_errors = Vec::new();
+                       {
+                               let mut channel_state_lock = self.channel_state.lock().unwrap();
+                               let channel_state = &mut *channel_state_lock;
+                               let pending_msg_events = &mut channel_state.pending_msg_events;
+                               let short_to_id = &mut channel_state.short_to_id;
+                               channel_state.by_id.retain(|chan_id, chan| {
+                                       match chan.channel_update_status() {
+                                               ChannelUpdateStatus::Enabled if !chan.is_live() => chan.set_channel_update_status(ChannelUpdateStatus::DisabledStaged),
+                                               ChannelUpdateStatus::Disabled if chan.is_live() => chan.set_channel_update_status(ChannelUpdateStatus::EnabledStaged),
+                                               ChannelUpdateStatus::DisabledStaged if chan.is_live() => chan.set_channel_update_status(ChannelUpdateStatus::Enabled),
+                                               ChannelUpdateStatus::EnabledStaged if !chan.is_live() => chan.set_channel_update_status(ChannelUpdateStatus::Disabled),
+                                               ChannelUpdateStatus::DisabledStaged if !chan.is_live() => {
+                                                       if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
+                                                               pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
+                                                                       msg: update
+                                                               });
+                                                       }
+                                                       should_persist = NotifyOption::DoPersist;
+                                                       chan.set_channel_update_status(ChannelUpdateStatus::Disabled);
+                                               },
+                                               ChannelUpdateStatus::EnabledStaged if chan.is_live() => {
+                                                       if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
+                                                               pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
+                                                                       msg: update
+                                                               });
+                                                       }
+                                                       should_persist = NotifyOption::DoPersist;
+                                                       chan.set_channel_update_status(ChannelUpdateStatus::Enabled);
+                                               },
+                                               _ => {},
+                                       }
+
+                                       let counterparty_node_id = chan.get_counterparty_node_id();
+                                       let (retain_channel, chan_needs_persist, err) = self.update_channel_fee(short_to_id, pending_msg_events, chan_id, chan, new_feerate);
+                                       if chan_needs_persist == NotifyOption::DoPersist { should_persist = NotifyOption::DoPersist; }
+                                       if err.is_err() {
+                                               handle_errors.push((err, counterparty_node_id));
+                                       }
+                                       retain_channel
+                               });
+                       }
+
+                       for (err, counterparty_node_id) in handle_errors.drain(..) {
+                               let _ = handle_error!(self, err, counterparty_node_id);
                        }
 
                        should_persist
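The gating at the top of `update_channel_fee` means an outbound channel only sees an `update_fee` when the estimate rose, or fell to half (or less) of the current feerate; the real code additionally skips non-outbound and non-live channels. A tiny sketch of that predicate with a few sample values (hypothetical helper mirroring the condition above):

// Mirrors the gate in update_channel_fee: `new_feerate <= chan.get_feerate() &&
// new_feerate * 2 > chan.get_feerate()` is the "skip the update" case.
fn feerate_update_needed(current: u32, new: u32) -> bool {
    !(new <= current && new * 2 > current)
}

fn main() {
    assert!(feerate_update_needed(1000, 1200));  // increase: send update_fee
    assert!(!feerate_update_needed(1000, 800));  // dropped by less than half: skip
    assert!(feerate_update_needed(1000, 500));   // dropped to half: send update_fee
    assert!(feerate_update_needed(1000, 400));   // dropped by more than half: send update_fee
}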
@@ -3026,7 +3183,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                        return Err(MsgHandleErrInternal::send_err_msg_no_close("Unknown genesis block hash".to_owned(), msg.temporary_channel_id.clone()));
                }
 
-               let channel = Channel::new_from_req(&self.fee_estimator, &self.keys_manager, counterparty_node_id.clone(), their_features, msg, 0, &self.default_configuration)
+               let channel = Channel::new_from_req(&self.fee_estimator, &self.keys_manager, counterparty_node_id.clone(), &their_features, msg, 0, &self.default_configuration)
                        .map_err(|e| MsgHandleErrInternal::from_chan_no_close(e, msg.temporary_channel_id))?;
                let mut channel_state_lock = self.channel_state.lock().unwrap();
                let channel_state = &mut *channel_state_lock;
@@ -3052,7 +3209,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                        if chan.get().get_counterparty_node_id() != *counterparty_node_id {
                                                return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.temporary_channel_id));
                                        }
-                                       try_chan_entry!(self, chan.get_mut().accept_channel(&msg, &self.default_configuration, their_features), channel_state, chan);
+                                       try_chan_entry!(self, chan.get_mut().accept_channel(&msg, &self.default_configuration, &their_features), channel_state, chan);
                                        (chan.get().get_value_satoshis(), chan.get().get_funding_redeemscript().to_v0_p2wsh(), chan.get().get_user_id())
                                },
                                hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.temporary_channel_id))
@@ -3189,7 +3346,8 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
        }
 
        fn internal_shutdown(&self, counterparty_node_id: &PublicKey, their_features: &InitFeatures, msg: &msgs::Shutdown) -> Result<(), MsgHandleErrInternal> {
-               let (mut dropped_htlcs, chan_option) = {
+               let mut dropped_htlcs: Vec<(HTLCSource, PaymentHash)>;
+               let result: Result<(), _> = loop {
                        let mut channel_state_lock = self.channel_state.lock().unwrap();
                        let channel_state = &mut *channel_state_lock;
 
@@ -3198,25 +3356,37 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                        if chan_entry.get().get_counterparty_node_id() != *counterparty_node_id {
                                                return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id));
                                        }
-                                       let (shutdown, closing_signed, dropped_htlcs) = try_chan_entry!(self, chan_entry.get_mut().shutdown(&self.fee_estimator, &their_features, &msg), channel_state, chan_entry);
+
+                                       let (shutdown, closing_signed, monitor_update, htlcs) = try_chan_entry!(self, chan_entry.get_mut().shutdown(&self.fee_estimator, &self.keys_manager, &their_features, &msg), channel_state, chan_entry);
+                                       dropped_htlcs = htlcs;
+
+                                       // Update the monitor with the shutdown script if necessary.
+                                       if let Some(monitor_update) = monitor_update {
+                                               if let Err(e) = self.chain_monitor.update_channel(chan_entry.get().get_funding_txo().unwrap(), monitor_update) {
+                                                       let (result, is_permanent) =
+                                                               handle_monitor_err!(self, e, channel_state.short_to_id, chan_entry.get_mut(), RAACommitmentOrder::CommitmentFirst, false, false, Vec::new(), Vec::new(), chan_entry.key());
+                                                       if is_permanent {
+                                                               remove_channel!(channel_state, chan_entry);
+                                                               break result;
+                                                       }
+                                               }
+                                       }
+
                                        if let Some(msg) = shutdown {
                                                channel_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown {
-                                                       node_id: counterparty_node_id.clone(),
+                                                       node_id: *counterparty_node_id,
                                                        msg,
                                                });
                                        }
                                        if let Some(msg) = closing_signed {
+                                               // TODO: Do not send this if the monitor update failed.
                                                channel_state.pending_msg_events.push(events::MessageSendEvent::SendClosingSigned {
-                                                       node_id: counterparty_node_id.clone(),
+                                                       node_id: *counterparty_node_id,
                                                        msg,
                                                });
                                        }
-                                       if chan_entry.get().is_shutdown() {
-                                               if let Some(short_id) = chan_entry.get().get_short_channel_id() {
-                                                       channel_state.short_to_id.remove(&short_id);
-                                               }
-                                               (dropped_htlcs, Some(chan_entry.remove_entry().1))
-                                       } else { (dropped_htlcs, None) }
+
+                                       break Ok(());
                                },
                                hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id))
                        }
@@ -3224,14 +3394,8 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                for htlc_source in dropped_htlcs.drain(..) {
                        self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_source.0, &htlc_source.1, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() });
                }
-               if let Some(chan) = chan_option {
-                       if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
-                               let mut channel_state = self.channel_state.lock().unwrap();
-                               channel_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
-                                       msg: update
-                               });
-                       }
-               }
+
+               let _ = handle_error!(self, result, *counterparty_node_id);
                Ok(())
        }
 
@@ -3668,62 +3832,6 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                Ok(())
        }
 
-       /// Begin Update fee process. Allowed only on an outbound channel.
-       /// If successful, will generate a UpdateHTLCs event, so you should probably poll
-       /// PeerManager::process_events afterwards.
-       /// Note: This API is likely to change!
-       /// (C-not exported) Cause its doc(hidden) anyway
-       #[doc(hidden)]
-       pub fn update_fee(&self, channel_id: [u8;32], feerate_per_kw: u32) -> Result<(), APIError> {
-               let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
-               let counterparty_node_id;
-               let err: Result<(), _> = loop {
-                       let mut channel_state_lock = self.channel_state.lock().unwrap();
-                       let channel_state = &mut *channel_state_lock;
-
-                       match channel_state.by_id.entry(channel_id) {
-                               hash_map::Entry::Vacant(_) => return Err(APIError::APIMisuseError{err: format!("Failed to find corresponding channel for id {}", channel_id.to_hex())}),
-                               hash_map::Entry::Occupied(mut chan) => {
-                                       if !chan.get().is_outbound() {
-                                               return Err(APIError::APIMisuseError{err: "update_fee cannot be sent for an inbound channel".to_owned()});
-                                       }
-                                       if chan.get().is_awaiting_monitor_update() {
-                                               return Err(APIError::MonitorUpdateFailed);
-                                       }
-                                       if !chan.get().is_live() {
-                                               return Err(APIError::ChannelUnavailable{err: "Channel is either not yet fully established or peer is currently disconnected".to_owned()});
-                                       }
-                                       counterparty_node_id = chan.get().get_counterparty_node_id();
-                                       if let Some((update_fee, commitment_signed, monitor_update)) =
-                                                       break_chan_entry!(self, chan.get_mut().send_update_fee_and_commit(feerate_per_kw, &self.logger), channel_state, chan)
-                                       {
-                                               if let Err(_e) = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), monitor_update) {
-                                                       unimplemented!();
-                                               }
-                                               log_debug!(self.logger, "Updating fee resulted in a commitment_signed for channel {}", log_bytes!(chan.get().channel_id()));
-                                               channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
-                                                       node_id: chan.get().get_counterparty_node_id(),
-                                                       updates: msgs::CommitmentUpdate {
-                                                               update_add_htlcs: Vec::new(),
-                                                               update_fulfill_htlcs: Vec::new(),
-                                                               update_fail_htlcs: Vec::new(),
-                                                               update_fail_malformed_htlcs: Vec::new(),
-                                                               update_fee: Some(update_fee),
-                                                               commitment_signed,
-                                                       },
-                                               });
-                                       }
-                               },
-                       }
-                       return Ok(())
-               };
-
-               match handle_error!(self, err, counterparty_node_id) {
-                       Ok(_) => unreachable!(),
-                       Err(e) => { Err(APIError::APIMisuseError { err: e.err })}
-               }
-       }
-
        /// Process pending events from the `chain::Watch`, returning whether any events were processed.
        fn process_pending_monitor_events(&self) -> bool {
                let mut failed_channels = Vec::new();
@@ -4029,7 +4137,7 @@ where
                                result = NotifyOption::DoPersist;
                        }
 
-                       let mut pending_events = std::mem::replace(&mut *self.pending_events.lock().unwrap(), vec![]);
+                       let mut pending_events = mem::replace(&mut *self.pending_events.lock().unwrap(), vec![]);
                        if !pending_events.is_empty() {
                                result = NotifyOption::DoPersist;
                        }
@@ -4649,7 +4757,7 @@ impl_writeable_tlv_based!(HTLCPreviousHopData, {
 });
 
 impl Writeable for ClaimableHTLC {
-       fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ::std::io::Error> {
+       fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
                let payment_data = match &self.onion_payload {
                        OnionPayload::Invoice(data) => Some(data.clone()),
                        _ => None,
@@ -4753,7 +4861,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> Writeable f
         F::Target: FeeEstimator,
         L::Target: Logger,
 {
-       fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ::std::io::Error> {
+       fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
                let _consistency_lock = self.total_consistency_lock.write().unwrap();
 
                write_ver_prefix!(writer, SERIALIZATION_VERSION, MIN_SERIALIZATION_VERSION);
@@ -4949,7 +5057,7 @@ impl<'a, Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
         F::Target: FeeEstimator,
         L::Target: Logger,
 {
-       fn read<R: ::std::io::Read>(reader: &mut R, args: ChannelManagerReadArgs<'a, Signer, M, T, K, F, L>) -> Result<Self, DecodeError> {
+       fn read<R: io::Read>(reader: &mut R, args: ChannelManagerReadArgs<'a, Signer, M, T, K, F, L>) -> Result<Self, DecodeError> {
                let (blockhash, chan_manager) = <(BlockHash, ChannelManager<Signer, M, T, K, F, L>)>::read(reader, args)?;
                Ok((blockhash, Arc::new(chan_manager)))
        }
@@ -4963,7 +5071,7 @@ impl<'a, Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
         F::Target: FeeEstimator,
         L::Target: Logger,
 {
-       fn read<R: ::std::io::Read>(reader: &mut R, mut args: ChannelManagerReadArgs<'a, Signer, M, T, K, F, L>) -> Result<Self, DecodeError> {
+       fn read<R: io::Read>(reader: &mut R, mut args: ChannelManagerReadArgs<'a, Signer, M, T, K, F, L>) -> Result<Self, DecodeError> {
                let _ver = read_ver_prefix!(reader, SERIALIZATION_VERSION);
 
                let genesis_hash: BlockHash = Readable::read(reader)?;
@@ -4999,6 +5107,10 @@ impl<'a, Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
                                                channel.get_cur_counterparty_commitment_transaction_number() > monitor.get_cur_counterparty_commitment_number() ||
                                                channel.get_latest_monitor_update_id() < monitor.get_latest_update_id() {
                                        // But if the channel is behind of the monitor, close the channel:
+                                       log_error!(args.logger, "A ChannelManager is stale compared to the current ChannelMonitor!");
+                                       log_error!(args.logger, " The channel will be force-closed and the latest commitment transaction from the ChannelMonitor broadcast.");
+                                       log_error!(args.logger, " The ChannelMonitor for channel {} is at update_id {} but the ChannelManager is at update_id {}.",
+                                               log_bytes!(channel.channel_id()), monitor.get_latest_update_id(), channel.get_latest_monitor_update_id());
                                        let (_, mut new_failed_htlcs) = channel.force_shutdown(true);
                                        failed_htlcs.append(&mut new_failed_htlcs);
                                        monitor.broadcast_latest_holder_commitment_txn(&args.tx_broadcaster, &args.logger);
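The new `log_error!` lines fire when, on deserialization, a channel's monitor has applied updates the `ChannelManager` copy never saw; in that case the channel is force-closed and the monitor's latest commitment transaction is broadcast. The core of that staleness test is an update-counter comparison, roughly as below (illustrative structs; the real check also compares commitment transaction numbers):

// Illustrative only: the real check also compares commitment transaction numbers
// on both sides before deciding the manager copy is stale.
struct ChannelView { latest_monitor_update_id: u64 }
struct MonitorView { latest_update_id: u64 }

fn manager_is_stale(chan: &ChannelView, monitor: &MonitorView) -> bool {
    // The monitor has applied updates the manager never recorded, so the manager
    // must force-close and let the monitor broadcast the latest commitment tx.
    chan.latest_monitor_update_id < monitor.latest_update_id
}

fn main() {
    let chan = ChannelView { latest_monitor_update_id: 7 };
    let monitor = MonitorView { latest_update_id: 9 };
    assert!(manager_is_stale(&chan, &monitor));
}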
@@ -5153,23 +5265,26 @@ impl<'a, Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
 mod tests {
        use bitcoin::hashes::Hash;
        use bitcoin::hashes::sha256::Hash as Sha256;
-       use core::sync::atomic::{AtomicBool, Ordering};
        use core::time::Duration;
        use ln::{PaymentPreimage, PaymentHash, PaymentSecret};
-       use ln::channelmanager::PersistenceNotifier;
+       use ln::channelmanager::PaymentSendFailure;
        use ln::features::{InitFeatures, InvoiceFeatures};
        use ln::functional_test_utils::*;
        use ln::msgs;
        use ln::msgs::ChannelMessageHandler;
        use routing::router::{get_keysend_route, get_route};
+       use util::errors::APIError;
        use util::events::{Event, MessageSendEvent, MessageSendEventsProvider};
        use util::test_utils;
-       use std::sync::Arc;
-       use std::thread;
 
        #[cfg(feature = "std")]
        #[test]
        fn test_wait_timeout() {
+               use ln::channelmanager::PersistenceNotifier;
+               use sync::Arc;
+               use core::sync::atomic::{AtomicBool, Ordering};
+               use std::thread;
+
                let persistence_notifier = Arc::new(PersistenceNotifier::new());
                let thread_notifier = Arc::clone(&persistence_notifier);
 
@@ -5219,6 +5334,12 @@ mod tests {
                let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
                let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
 
+               // All nodes start with a persistable update pending as `create_network` connects each node
+               // with all other nodes to make most tests simpler.
+               assert!(nodes[0].node.await_persistable_update_timeout(Duration::from_millis(1)));
+               assert!(nodes[1].node.await_persistable_update_timeout(Duration::from_millis(1)));
+               assert!(nodes[2].node.await_persistable_update_timeout(Duration::from_millis(1)));
+
                let mut chan = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
 
                // We check that the channel info nodes have doesn't change too early, even though we try
@@ -5540,6 +5661,39 @@ mod tests {
 
                nodes[1].logger.assert_log_contains("lightning::ln::channelmanager".to_string(), "We don't support MPP keysend payments".to_string(), 1);
        }
+
+       #[test]
+       fn test_multi_hop_missing_secret() {
+               let chanmon_cfgs = create_chanmon_cfgs(4);
+               let node_cfgs = create_node_cfgs(4, &chanmon_cfgs);
+               let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[None, None, None, None]);
+               let nodes = create_network(4, &node_cfgs, &node_chanmgrs);
+
+               let chan_1_id = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()).0.contents.short_channel_id;
+               let chan_2_id = create_announced_chan_between_nodes(&nodes, 0, 2, InitFeatures::known(), InitFeatures::known()).0.contents.short_channel_id;
+               let chan_3_id = create_announced_chan_between_nodes(&nodes, 1, 3, InitFeatures::known(), InitFeatures::known()).0.contents.short_channel_id;
+               let chan_4_id = create_announced_chan_between_nodes(&nodes, 2, 3, InitFeatures::known(), InitFeatures::known()).0.contents.short_channel_id;
+               let logger = test_utils::TestLogger::new();
+
+               // Marshall an MPP route.
+               let (_, payment_hash, _) = get_payment_preimage_hash!(&nodes[3]);
+               let net_graph_msg_handler = &nodes[0].net_graph_msg_handler;
+               let mut route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[3].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &[], 100000, TEST_FINAL_CLTV, &logger).unwrap();
+               let path = route.paths[0].clone();
+               route.paths.push(path);
+               route.paths[0][0].pubkey = nodes[1].node.get_our_node_id();
+               route.paths[0][0].short_channel_id = chan_1_id;
+               route.paths[0][1].short_channel_id = chan_3_id;
+               route.paths[1][0].pubkey = nodes[2].node.get_our_node_id();
+               route.paths[1][0].short_channel_id = chan_2_id;
+               route.paths[1][1].short_channel_id = chan_4_id;
+
+               match nodes[0].node.send_payment(&route, payment_hash, &None).unwrap_err() {
+                       PaymentSendFailure::ParameterError(APIError::APIMisuseError { ref err }) => {
+                               assert!(regex::Regex::new(r"Payment secret is required for multi-path payments").unwrap().is_match(err))
+                       },
+                       _ => panic!("unexpected error")
+               }
+       }
 }
 
 #[cfg(all(any(test, feature = "_test_utils"), feature = "unstable"))]
@@ -5551,7 +5705,7 @@ pub mod bench {
        use ln::channelmanager::{BestBlock, ChainParameters, ChannelManager, PaymentHash, PaymentPreimage};
        use ln::features::{InitFeatures, InvoiceFeatures};
        use ln::functional_test_utils::*;
-       use ln::msgs::ChannelMessageHandler;
+       use ln::msgs::{ChannelMessageHandler, Init};
        use routing::network_graph::NetworkGraph;
        use routing::router::get_route;
        use util::test_utils;
@@ -5614,6 +5768,8 @@ pub mod bench {
                });
                let node_b_holder = NodeHolder { node: &node_b };
 
+               node_a.peer_connected(&node_b.get_our_node_id(), &Init { features: InitFeatures::known() });
+               node_b.peer_connected(&node_a.get_our_node_id(), &Init { features: InitFeatures::known() });
                node_a.create_channel(node_b.get_our_node_id(), 8_000_000, 100_000_000, 42, None).unwrap();
                node_b.handle_open_channel(&node_a.get_our_node_id(), InitFeatures::known(), &get_event_msg!(node_a_holder, MessageSendEvent::SendOpenChannel, node_b.get_our_node_id()));
                node_a.handle_accept_channel(&node_b.get_our_node_id(), InitFeatures::known(), &get_event_msg!(node_b_holder, MessageSendEvent::SendAcceptChannel, node_a.get_our_node_id()));