Ignore our own gossip if it is sent to us from our counterparty
diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs
index 13a993fd965596dc41e12a387bc178ea31372b35..47ea356442398d461e68f4656b81e0d35ffe1b79 100644
@@ -64,7 +64,6 @@ use util::errors::APIError;
 use prelude::*;
 use core::{cmp, mem};
 use core::cell::RefCell;
-use std::collections::{HashMap, hash_map, HashSet};
 use std::io::{Cursor, Read};
 use std::sync::{Arc, Condvar, Mutex, MutexGuard, RwLock, RwLockReadGuard};
 use core::sync::atomic::{AtomicUsize, Ordering};
@@ -627,13 +626,13 @@ pub const MIN_FINAL_CLTV_EXPIRY: u32 = HTLC_FAIL_BACK_BUFFER + 3;
 const CHECK_CLTV_EXPIRY_SANITY: u32 = MIN_CLTV_EXPIRY_DELTA as u32 - LATENCY_GRACE_PERIOD_BLOCKS - CLTV_CLAIM_BUFFER - ANTI_REORG_DELAY - LATENCY_GRACE_PERIOD_BLOCKS;
 
 // Check for ability of an attacker to make us fail on-chain by delaying an HTLC claim. See
-// ChannelMontior::would_broadcast_at_height for a description of why this is needed.
+// ChannelMonitor::should_broadcast_holder_commitment_txn for a description of why this is needed.
 #[deny(const_err)]
 #[allow(dead_code)]
 const CHECK_CLTV_EXPIRY_SANITY_2: u32 = MIN_CLTV_EXPIRY_DELTA as u32 - LATENCY_GRACE_PERIOD_BLOCKS - 2*CLTV_CLAIM_BUFFER;
 
 /// Details of a channel, as returned by ChannelManager::list_channels and ChannelManager::list_usable_channels
-#[derive(Clone)]
+#[derive(Clone, Debug, PartialEq)]
 pub struct ChannelDetails {
        /// The channel's ID (prior to funding transaction generation, this is a random 32 bytes,
        /// thereafter this is the txid of the funding transaction xor the funding transaction output).
@@ -678,8 +677,7 @@ pub struct ChannelDetails {
        /// point after the funding transaction received enough confirmations).
        pub is_funding_locked: bool,
        /// True if the channel is (a) confirmed and funding_locked messages have been exchanged, (b)
-       /// the peer is connected, (c) no monitor update failure is pending resolution, and (d) the
-       /// channel is not currently negotiating a shutdown.
+       /// the peer is connected, and (c) the channel is not currently negotiating a shutdown.
        ///
        /// This is a strict superset of `is_funding_locked`.
        pub is_usable: bool,
@@ -777,12 +775,12 @@ macro_rules! convert_chan_err {
                                (false, MsgHandleErrInternal::from_chan_no_close(ChannelError::Ignore(msg), $channel_id.clone()))
                        },
                        ChannelError::Close(msg) => {
-                               log_trace!($self.logger, "Closing channel {} due to close-required error: {}", log_bytes!($channel_id[..]), msg);
+                               log_error!($self.logger, "Closing channel {} due to close-required error: {}", log_bytes!($channel_id[..]), msg);
                                if let Some(short_id) = $channel.get_short_channel_id() {
                                        $short_to_id.remove(&short_id);
                                }
                                let shutdown_res = $channel.force_shutdown(true);
-                               (true, MsgHandleErrInternal::from_finish_shutdown(msg, *$channel_id, shutdown_res, $self.get_channel_update(&$channel).ok()))
+                               (true, MsgHandleErrInternal::from_finish_shutdown(msg, *$channel_id, shutdown_res, $self.get_channel_update_for_broadcast(&$channel).ok()))
                        },
                        ChannelError::CloseDelayBroadcast(msg) => {
                                log_error!($self.logger, "Channel {} need to be shutdown but closing transactions not broadcast due to {}", log_bytes!($channel_id[..]), msg);
@@ -790,7 +788,7 @@ macro_rules! convert_chan_err {
                                        $short_to_id.remove(&short_id);
                                }
                                let shutdown_res = $channel.force_shutdown(false);
-                               (true, MsgHandleErrInternal::from_finish_shutdown(msg, *$channel_id, shutdown_res, $self.get_channel_update(&$channel).ok()))
+                               (true, MsgHandleErrInternal::from_finish_shutdown(msg, *$channel_id, shutdown_res, $self.get_channel_update_for_broadcast(&$channel).ok()))
                        }
                }
        }
@@ -846,7 +844,8 @@ macro_rules! handle_monitor_err {
                                // splitting hairs we'd prefer to claim payments that were to us, but we haven't
                                // given up the preimage yet, so might as well just wait until the payment is
                                // retried, avoiding the on-chain fees.
-                               let res: Result<(), _> = Err(MsgHandleErrInternal::from_finish_shutdown("ChannelMonitor storage failure".to_owned(), *$chan_id, $chan.force_shutdown(true), $self.get_channel_update(&$chan).ok()));
+                               let res: Result<(), _> = Err(MsgHandleErrInternal::from_finish_shutdown("ChannelMonitor storage failure".to_owned(), *$chan_id,
+                                               $chan.force_shutdown(true), $self.get_channel_update_for_broadcast(&$chan).ok() ));
                                (res, true)
                        },
                        ChannelMonitorUpdateErr::TemporaryFailure => {
@@ -1226,9 +1225,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                        self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_source.0, &htlc_source.1, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() });
                }
                let chan_update = if let Some(chan) = chan_option {
-                       if let Ok(update) = self.get_channel_update(&chan) {
-                               Some(update)
-                       } else { None }
+                       self.get_channel_update_for_broadcast(&chan).ok()
                } else { None };
 
                if let Some(update) = chan_update {
@@ -1244,7 +1241,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
        #[inline]
        fn finish_force_close_channel(&self, shutdown_res: ShutdownResult) {
                let (monitor_update_option, mut failed_htlcs) = shutdown_res;
-               log_trace!(self.logger, "Finishing force-closure of channel {} HTLCs to fail", failed_htlcs.len());
+               log_debug!(self.logger, "Finishing force-closure of channel with {} HTLCs to fail", failed_htlcs.len());
                for htlc_source in failed_htlcs.drain(..) {
                        self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_source.0, &htlc_source.1, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() });
                }
@@ -1275,9 +1272,9 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                return Err(APIError::ChannelUnavailable{err: "No such channel".to_owned()});
                        }
                };
-               log_trace!(self.logger, "Force-closing channel {}", log_bytes!(channel_id[..]));
+               log_error!(self.logger, "Force-closing channel {}", log_bytes!(channel_id[..]));
                self.finish_force_close_channel(chan.force_shutdown(true));
-               if let Ok(update) = self.get_channel_update(&chan) {
+               if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
                        let mut channel_state = self.channel_state.lock().unwrap();
                        channel_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
                                msg: update
@@ -1537,31 +1534,31 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                        // hopefully an attacker trying to path-trace payments cannot make this occur
                                        // on a small/per-node/per-channel scale.
                                        if !chan.is_live() { // channel_disabled
-                                               break Some(("Forwarding channel is not in a ready state.", 0x1000 | 20, Some(self.get_channel_update(chan).unwrap())));
+                                               break Some(("Forwarding channel is not in a ready state.", 0x1000 | 20, Some(self.get_channel_update_for_unicast(chan).unwrap())));
                                        }
                                        if *amt_to_forward < chan.get_counterparty_htlc_minimum_msat() { // amount_below_minimum
-                                               break Some(("HTLC amount was below the htlc_minimum_msat", 0x1000 | 11, Some(self.get_channel_update(chan).unwrap())));
+                                               break Some(("HTLC amount was below the htlc_minimum_msat", 0x1000 | 11, Some(self.get_channel_update_for_unicast(chan).unwrap())));
                                        }
                                        let fee = amt_to_forward.checked_mul(chan.get_fee_proportional_millionths() as u64).and_then(|prop_fee| { (prop_fee / 1000000).checked_add(chan.get_holder_fee_base_msat(&self.fee_estimator) as u64) });
                                        if fee.is_none() || msg.amount_msat < fee.unwrap() || (msg.amount_msat - fee.unwrap()) < *amt_to_forward { // fee_insufficient
-                                               break Some(("Prior hop has deviated from specified fees parameters or origin node has obsolete ones", 0x1000 | 12, Some(self.get_channel_update(chan).unwrap())));
+                                               break Some(("Prior hop has deviated from specified fees parameters or origin node has obsolete ones", 0x1000 | 12, Some(self.get_channel_update_for_unicast(chan).unwrap())));
                                        }
                                        if (msg.cltv_expiry as u64) < (*outgoing_cltv_value) as u64 + chan.get_cltv_expiry_delta() as u64 { // incorrect_cltv_expiry
-                                               break Some(("Forwarding node has tampered with the intended HTLC values or origin node has an obsolete cltv_expiry_delta", 0x1000 | 13, Some(self.get_channel_update(chan).unwrap())));
+                                               break Some(("Forwarding node has tampered with the intended HTLC values or origin node has an obsolete cltv_expiry_delta", 0x1000 | 13, Some(self.get_channel_update_for_unicast(chan).unwrap())));
                                        }
                                        let cur_height = self.best_block.read().unwrap().height() + 1;
                                        // Theoretically, channel counterparty shouldn't send us a HTLC expiring now, but we want to be robust wrt to counterparty
                                        // packet sanitization (see HTLC_FAIL_BACK_BUFFER rational)
                                        if msg.cltv_expiry <= cur_height + HTLC_FAIL_BACK_BUFFER as u32 { // expiry_too_soon
-                                               break Some(("CLTV expiry is too close", 0x1000 | 14, Some(self.get_channel_update(chan).unwrap())));
+                                               break Some(("CLTV expiry is too close", 0x1000 | 14, Some(self.get_channel_update_for_unicast(chan).unwrap())));
                                        }
                                        if msg.cltv_expiry > cur_height + CLTV_FAR_FAR_AWAY as u32 { // expiry_too_far
                                                break Some(("CLTV expiry is too far in the future", 21, None));
                                        }
-                                       // In theory, we would be safe against unitentional channel-closure, if we only required a margin of LATENCY_GRACE_PERIOD_BLOCKS.
-                                       // But, to be safe against policy reception, we use a longuer delay.
+                                       // In theory, we would be safe against unintentional channel-closure, if we only required a margin of LATENCY_GRACE_PERIOD_BLOCKS.
+                                       // But, to be safe against policy reception, we use a longer delay.
                                        if (*outgoing_cltv_value) as u64 <= (cur_height + HTLC_FAIL_BACK_BUFFER) as u64 {
-                                               break Some(("Outgoing CLTV value is too soon", 0x1000 | 14, Some(self.get_channel_update(chan).unwrap())));
+                                               break Some(("Outgoing CLTV value is too soon", 0x1000 | 14, Some(self.get_channel_update_for_unicast(chan).unwrap())));
                                        }
 
                                        break None;
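The failure codes in this hunk follow BOLT 4: the 0x1000 bit marks an UPDATE-class failure, meaning an up-to-date channel_update for the failing channel is appended to the failure data, which is why each of these branches now attaches the result of get_channel_update_for_unicast. The constant names below are the BOLT 4 names, not identifiers from this file:

```rust
// BOLT 4 onion failure codes referenced above. Setting the 0x1000 (UPDATE) bit
// tells the origin node that a current channel_update for the failing channel
// follows in the failure data.
const UPDATE: u16 = 0x1000;
const TEMPORARY_CHANNEL_FAILURE: u16 = UPDATE | 7;  // channel_disabled-ish transient failures
const AMOUNT_BELOW_MINIMUM: u16 = UPDATE | 11;      // below htlc_minimum_msat
const FEE_INSUFFICIENT: u16 = UPDATE | 12;          // forwarding fee not met
const INCORRECT_CLTV_EXPIRY: u16 = UPDATE | 13;     // cltv_expiry_delta not respected
const EXPIRY_TOO_SOON: u16 = UPDATE | 14;           // too close to the current height
const CHANNEL_DISABLED: u16 = UPDATE | 20;          // channel not in a ready state
```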
@@ -1589,9 +1586,29 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                (pending_forward_info, channel_state.unwrap())
        }
 
-       /// only fails if the channel does not yet have an assigned short_id
+       /// Gets the current channel_update for the given channel. This first checks if the channel is
+       /// public, and thus should be called whenever the result is going to be passed out in a
+       /// [`MessageSendEvent::BroadcastChannelUpdate`] event.
+       ///
        /// May be called with channel_state already locked!
-       fn get_channel_update(&self, chan: &Channel<Signer>) -> Result<msgs::ChannelUpdate, LightningError> {
+       fn get_channel_update_for_broadcast(&self, chan: &Channel<Signer>) -> Result<msgs::ChannelUpdate, LightningError> {
+               if !chan.should_announce() {
+                       return Err(LightningError {
+                               err: "Cannot broadcast a channel_update for a private channel".to_owned(),
+                               action: msgs::ErrorAction::IgnoreError
+                       });
+               }
+               log_trace!(self.logger, "Attempting to generate broadcast channel update for channel {}", log_bytes!(chan.channel_id()));
+               self.get_channel_update_for_unicast(chan)
+       }
+
+       /// Gets the current channel_update for the given channel. This does not check if the channel
+       /// is public (only returning an Err if the channel does not yet have an assigned short_id),
+       /// and thus MUST NOT be called unless the recipient of the resulting message has already
+       /// provided evidence that they know about the existence of the channel.
+       /// May be called with channel_state already locked!
+       fn get_channel_update_for_unicast(&self, chan: &Channel<Signer>) -> Result<msgs::ChannelUpdate, LightningError> {
+               log_trace!(self.logger, "Attempting to generate channel update for channel {}", log_bytes!(chan.channel_id()));
                let short_channel_id = match chan.get_short_channel_id() {
                        None => return Err(LightningError{err: "Channel not yet established".to_owned(), action: msgs::ErrorAction::IgnoreError}),
                        Some(id) => id,
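This split is the core refactor: get_channel_update_for_broadcast refuses to build an update for an unannounced channel, while get_channel_update_for_unicast only requires an assigned short_channel_id. A minimal sketch of that gating, reusing the semantics of the real should_announce() and get_short_channel_id() accessors but otherwise simplified:

```rust
// Simplified sketch of the gating above; both paths need a confirmed funding
// transaction (an assigned short_channel_id), and only public channels may have
// their channel_update broadcast network-wide.
enum UpdateAudience { Broadcast, UnicastOnly }

fn channel_update_audience(is_public: bool, short_channel_id: Option<u64>)
    -> Result<UpdateAudience, &'static str>
{
    if short_channel_id.is_none() {
        // Neither variant can be generated before the channel is established on-chain.
        return Err("Channel not yet established");
    }
    if is_public {
        Ok(UpdateAudience::Broadcast)
    } else {
        // Private channels: only the counterparty, who already knows the channel
        // exists, may receive the update.
        Ok(UpdateAudience::UnicastOnly)
    }
}
```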
@@ -1672,6 +1689,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                                        return Err(APIError::MonitorUpdateFailed);
                                                }
 
+                                               log_debug!(self.logger, "Sending payment along path resulted in a commitment_signed for channel {}", log_bytes!(chan.get().channel_id()));
                                                channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
                                                        node_id: path.first().unwrap().pubkey,
                                                        updates: msgs::CommitmentUpdate {
@@ -1983,7 +2001,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                        if let Some(msg) = chan.get_signed_channel_announcement(&self.our_network_key, self.get_our_node_id(), self.genesis_hash.clone()) {
                                channel_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelAnnouncement {
                                        msg,
-                                       update_msg: match self.get_channel_update(chan) {
+                                       update_msg: match self.get_channel_update_for_broadcast(chan) {
                                                Ok(msg) => msg,
                                                Err(_) => continue,
                                        },
@@ -2061,7 +2079,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                                                                        onion_packet, ..
                                                                                }, incoming_shared_secret, payment_hash, amt_to_forward, outgoing_cltv_value },
                                                                                prev_funding_outpoint } => {
-                                                                       log_trace!(self.logger, "Adding HTLC from short id {} with payment_hash {} to channel with short id {} after delay", log_bytes!(payment_hash.0), prev_short_channel_id, short_chan_id);
+                                                                       log_trace!(self.logger, "Adding HTLC from short id {} with payment_hash {} to channel with short id {} after delay", prev_short_channel_id, log_bytes!(payment_hash.0), short_chan_id);
                                                                        let htlc_source = HTLCSource::PreviousHopData(HTLCPreviousHopData {
                                                                                short_channel_id: prev_short_channel_id,
                                                                                outpoint: prev_funding_outpoint,
@@ -2075,7 +2093,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                                                                        } else {
                                                                                                panic!("Stated return value requirements in send_htlc() were not met");
                                                                                        }
-                                                                                       let chan_update = self.get_channel_update(chan.get()).unwrap();
+                                                                                       let chan_update = self.get_channel_update_for_unicast(chan.get()).unwrap();
                                                                                        failed_forwards.push((htlc_source, payment_hash,
                                                                                                HTLCFailReason::Reason { failure_code: 0x1000 | 7, data: chan_update.encode_with_len() }
                                                                                        ));
@@ -2101,11 +2119,11 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                                                        panic!("short_channel_id != 0 should imply any pending_forward entries are of type Forward");
                                                                },
                                                                HTLCForwardInfo::FailHTLC { htlc_id, err_packet } => {
-                                                                       log_trace!(self.logger, "Failing HTLC back to channel with short id {} after delay", short_chan_id);
+                                                                       log_trace!(self.logger, "Failing HTLC back to channel with short id {} (backward HTLC ID {}) after delay", short_chan_id, htlc_id);
                                                                        match chan.get_mut().get_update_fail_htlc(htlc_id, err_packet, &self.logger) {
                                                                                Err(e) => {
                                                                                        if let ChannelError::Ignore(msg) = e {
-                                                                                               log_trace!(self.logger, "Failed to fail backwards to short_id {}: {}", short_chan_id, msg);
+                                                                                               log_trace!(self.logger, "Failed to fail HTLC with ID {} backwards to short_id {}: {}", htlc_id, short_chan_id, msg);
                                                                                        } else {
                                                                                                panic!("Stated return value requirements in get_update_fail_htlc() were not met");
                                                                                        }
@@ -2147,7 +2165,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                                                                        if let Some(short_id) = channel.get_short_channel_id() {
                                                                                                channel_state.short_to_id.remove(&short_id);
                                                                                        }
-                                                                                       Err(MsgHandleErrInternal::from_finish_shutdown(msg, channel_id, channel.force_shutdown(true), self.get_channel_update(&channel).ok()))
+                                                                                       Err(MsgHandleErrInternal::from_finish_shutdown(msg, channel_id, channel.force_shutdown(true), self.get_channel_update_for_broadcast(&channel).ok()))
                                                                                },
                                                                                ChannelError::CloseDelayBroadcast(_) => { panic!("Wait is only generated on receipt of channel_reestablish, which is handled by try_chan_entry, we don't bother to support it here"); }
                                                                        };
@@ -2159,6 +2177,8 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                                                handle_errors.push((chan.get().get_counterparty_node_id(), handle_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::CommitmentFirst, false, true)));
                                                                continue;
                                                        }
+                                                       log_debug!(self.logger, "Forwarding HTLCs resulted in a commitment update with {} HTLCs added and {} HTLCs failed for channel {}",
+                                                               add_htlc_msgs.len(), fail_htlc_msgs.len(), log_bytes!(chan.get().channel_id()));
                                                        channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
                                                                node_id: chan.get().get_counterparty_node_id(),
                                                                updates: msgs::CommitmentUpdate {
@@ -2348,7 +2368,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                        ChannelUpdateStatus::DisabledStaged if chan.is_live() => chan.set_channel_update_status(ChannelUpdateStatus::Enabled),
                                        ChannelUpdateStatus::EnabledStaged if !chan.is_live() => chan.set_channel_update_status(ChannelUpdateStatus::Disabled),
                                        ChannelUpdateStatus::DisabledStaged if !chan.is_live() => {
-                                               if let Ok(update) = self.get_channel_update(&chan) {
+                                               if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
                                                        channel_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
                                                                msg: update
                                                        });
@@ -2357,7 +2377,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                                chan.set_channel_update_status(ChannelUpdateStatus::Disabled);
                                        },
                                        ChannelUpdateStatus::EnabledStaged if chan.is_live() => {
-                                               if let Ok(update) = self.get_channel_update(&chan) {
+                                               if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
                                                        channel_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
                                                                msg: update
                                                        });
@@ -2407,7 +2427,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                        let (failure_code, onion_failure_data) =
                                                match self.channel_state.lock().unwrap().by_id.entry(channel_id) {
                                                        hash_map::Entry::Occupied(chan_entry) => {
-                                                               if let Ok(upd) = self.get_channel_update(&chan_entry.get()) {
+                                                               if let Ok(upd) = self.get_channel_update_for_unicast(&chan_entry.get()) {
                                                                        (0x1000|7, upd.encode_with_len())
                                                                } else {
                                                                        (0x4000|10, Vec::new())
@@ -2665,6 +2685,8 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                                }
                                        }
                                        if let Some((msg, commitment_signed)) = msgs {
+                                               log_debug!(self.logger, "Claiming funds for HTLC with preimage {} resulted in a commitment_signed for channel {}",
+                                                       log_bytes!(payment_preimage.0), log_bytes!(chan.get().channel_id()));
                                                channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
                                                        node_id: chan.get().get_counterparty_node_id(),
                                                        updates: msgs::CommitmentUpdate {
@@ -2769,7 +2791,8 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
        pub fn channel_monitor_updated(&self, funding_txo: &OutPoint, highest_applied_update_id: u64) {
                let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
 
-               let (mut pending_failures, chan_restoration_res) = {
+               let chan_restoration_res;
+               let mut pending_failures = {
                        let mut channel_lock = self.channel_state.lock().unwrap();
                        let channel_state = &mut *channel_lock;
                        let mut channel = match channel_state.by_id.entry(funding_txo.to_channel_id()) {
@@ -2781,7 +2804,21 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                        }
 
                        let (raa, commitment_update, order, pending_forwards, pending_failures, funding_broadcastable, funding_locked) = channel.get_mut().monitor_updating_restored(&self.logger);
-                       (pending_failures, handle_chan_restoration_locked!(self, channel_lock, channel_state, channel, raa, commitment_update, order, None, pending_forwards, funding_broadcastable, funding_locked))
+                       let channel_update = if funding_locked.is_some() && channel.get().is_usable() && !channel.get().should_announce() {
+                               // We only send a channel_update in the case where we are just now sending a
+                               // funding_locked and the channel is in a usable state. Further, we rely on the
+                               // normal announcement_signatures process to send a channel_update for public
+                               // channels, only generating a unicast channel_update if this is a private channel.
+                               Some(events::MessageSendEvent::SendChannelUpdate {
+                                       node_id: channel.get().get_counterparty_node_id(),
+                                       msg: self.get_channel_update_for_unicast(channel.get()).unwrap(),
+                               })
+                       } else { None };
+                       chan_restoration_res = handle_chan_restoration_locked!(self, channel_lock, channel_state, channel, raa, commitment_update, order, None, pending_forwards, funding_broadcastable, funding_locked);
+                       if let Some(upd) = channel_update {
+                               channel_state.pending_msg_events.push(upd);
+                       }
+                       pending_failures
                };
                post_handle_chan_restoration!(self, chan_restoration_res);
                for failure in pending_failures.drain(..) {
@@ -2928,7 +2965,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                if chan.get().get_counterparty_node_id() != *counterparty_node_id {
                                        return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id));
                                }
-                               try_chan_entry!(self, chan.get_mut().funding_locked(&msg), channel_state, chan);
+                               try_chan_entry!(self, chan.get_mut().funding_locked(&msg, &self.logger), channel_state, chan);
                                if let Some(announcement_sigs) = self.get_announcement_sigs(chan.get()) {
                                        log_trace!(self.logger, "Sending announcement_signatures for {} in response to funding_locked", log_bytes!(chan.get().channel_id()));
                                        // If we see locking block before receiving remote funding_locked, we broadcast our
@@ -2944,6 +2981,11 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                                node_id: counterparty_node_id.clone(),
                                                msg: announcement_sigs,
                                        });
+                               } else if chan.get().is_usable() {
+                                       channel_state.pending_msg_events.push(events::MessageSendEvent::SendChannelUpdate {
+                                               node_id: counterparty_node_id.clone(),
+                                               msg: self.get_channel_update_for_unicast(chan.get()).unwrap(),
+                                       });
                                }
                                Ok(())
                        },
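For a private channel there are no announcement_signatures, so this unicast channel_update is the counterparty's only way to learn our forwarding fee and CLTV parameters — values it needs, for instance, when embedding the channel in an invoice route hint. A rough sketch of that downstream use; the struct shapes are assumptions modelled on BOLT 7, not types from this file:

```rust
// Assumed, simplified shapes: `UnicastChannelUpdate` carries the channel_update
// fields a counterparty actually consumes when building a route hint over an
// unannounced channel.
struct UnicastChannelUpdate {
    short_channel_id: u64,
    fee_base_msat: u32,
    fee_proportional_millionths: u32,
    cltv_expiry_delta: u16,
}

struct RouteHintHop {
    src_node_id: [u8; 33],
    short_channel_id: u64,
    fee_base_msat: u32,
    fee_proportional_millionths: u32,
    cltv_expiry_delta: u16,
}

fn hint_from_unicast_update(counterparty: [u8; 33], upd: &UnicastChannelUpdate) -> RouteHintHop {
    RouteHintHop {
        // The hinted hop is routed *through* the node that signed the update.
        src_node_id: counterparty,
        short_channel_id: upd.short_channel_id,
        fee_base_msat: upd.fee_base_msat,
        fee_proportional_millionths: upd.fee_proportional_millionths,
        cltv_expiry_delta: upd.cltv_expiry_delta,
    }
}
```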
@@ -2988,7 +3030,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                        self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_source.0, &htlc_source.1, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() });
                }
                if let Some(chan) = chan_option {
-                       if let Ok(update) = self.get_channel_update(&chan) {
+                       if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
                                let mut channel_state = self.channel_state.lock().unwrap();
                                channel_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
                                        msg: update
@@ -3034,7 +3076,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                        self.tx_broadcaster.broadcast_transaction(&broadcast_tx);
                }
                if let Some(chan) = chan_option {
-                       if let Ok(update) = self.get_channel_update(&chan) {
+                       if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
                                let mut channel_state = self.channel_state.lock().unwrap();
                                channel_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
                                        msg: update
@@ -3072,7 +3114,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                        // want to reject the new HTLC and fail it backwards instead of forwarding.
                                        match pending_forward_info {
                                                PendingHTLCStatus::Forward(PendingHTLCInfo { ref incoming_shared_secret, .. }) => {
-                                                       let reason = if let Ok(upd) = self.get_channel_update(chan) {
+                                                       let reason = if let Ok(upd) = self.get_channel_update_for_unicast(chan) {
                                                                onion_utils::build_first_hop_failure_packet(incoming_shared_secret, error_code, &{
                                                                        let mut res = Vec::with_capacity(8 + 128);
                                                                        // TODO: underspecified, follow https://github.com/lightningnetwork/lightning-rfc/issues/791
@@ -3334,7 +3376,9 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
 
                                channel_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelAnnouncement {
                                        msg: try_chan_entry!(self, chan.get_mut().announcement_signatures(&self.our_network_key, self.get_our_node_id(), self.genesis_hash.clone(), msg), channel_state, chan),
-                                       update_msg: self.get_channel_update(chan.get()).unwrap(), // can only fail if we're not in a ready state
+                                       // Note that announcement_signatures fails if the channel cannot be announced,
+                                       // so get_channel_update_for_broadcast will never fail by the time we get here.
+                                       update_msg: self.get_channel_update_for_broadcast(chan.get()).unwrap(),
                                });
                        },
                        hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id))
@@ -3342,31 +3386,44 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                Ok(())
        }
 
-       fn internal_channel_update(&self, counterparty_node_id: &PublicKey, msg: &msgs::ChannelUpdate) -> Result<(), MsgHandleErrInternal> {
+       /// Returns ShouldPersist if anything changed, otherwise either SkipPersist or an Err.
+       fn internal_channel_update(&self, counterparty_node_id: &PublicKey, msg: &msgs::ChannelUpdate) -> Result<NotifyOption, MsgHandleErrInternal> {
                let mut channel_state_lock = self.channel_state.lock().unwrap();
                let channel_state = &mut *channel_state_lock;
                let chan_id = match channel_state.short_to_id.get(&msg.contents.short_channel_id) {
                        Some(chan_id) => chan_id.clone(),
                        None => {
                                // It's not a local channel
-                               return Ok(())
+                               return Ok(NotifyOption::SkipPersist)
                        }
                };
                match channel_state.by_id.entry(chan_id) {
                        hash_map::Entry::Occupied(mut chan) => {
                                if chan.get().get_counterparty_node_id() != *counterparty_node_id {
-                                       // TODO: see issue #153, need a consistent behavior on obnoxious behavior from random node
-                                       return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), chan_id));
+                                       if chan.get().should_announce() {
+                                               // If the announcement is about a channel of ours which is public, some
+                                               // other peer may simply be forwarding all its gossip to us. Don't provide
+                                               // a scary-looking error message and return Ok instead.
+                                               return Ok(NotifyOption::SkipPersist);
+                                       }
+                                       return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a channel_update for a channel from the wrong node - it shouldn't know about our private channels!".to_owned(), chan_id));
+                               }
+                               let were_node_one = self.get_our_node_id().serialize()[..] < chan.get().get_counterparty_node_id().serialize()[..];
+                               let msg_from_node_one = msg.contents.flags & 1 == 0;
+                               if were_node_one == msg_from_node_one {
+                                       return Ok(NotifyOption::SkipPersist);
+                               } else {
+                                       try_chan_entry!(self, chan.get_mut().channel_update(&msg), channel_state, chan);
                                }
-                               try_chan_entry!(self, chan.get_mut().channel_update(&msg), channel_state, chan);
                        },
                        hash_map::Entry::Vacant(_) => unreachable!()
                }
-               Ok(())
+               Ok(NotifyOption::DoPersist)
        }
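This is the check the commit title refers to: per BOLT 7, bit 0 of the channel_update flags identifies the signer (0 for node_1, the peer with the lexicographically smaller compressed public key), so if that bit matches our own position the counterparty is merely echoing gossip we produced ourselves and there is nothing to apply. The same test as a standalone function:

```rust
// Standalone version of the direction check above, over raw 33-byte compressed
// public keys; `flags` is the channel_update flags byte.
fn update_was_signed_by_us(our_pk: &[u8; 33], their_pk: &[u8; 33], flags: u8) -> bool {
    let we_are_node_one = our_pk[..] < their_pk[..]; // node_1 = smaller pubkey
    let update_from_node_one = flags & 1 == 0;       // bit 0 clear => signed by node_1
    we_are_node_one == update_from_node_one          // matching side => our own update
}
```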
 
        fn internal_channel_reestablish(&self, counterparty_node_id: &PublicKey, msg: &msgs::ChannelReestablish) -> Result<(), MsgHandleErrInternal> {
-               let (htlcs_failed_forward, chan_restoration_res) = {
+               let chan_restoration_res;
+               let (htlcs_failed_forward, need_lnd_workaround) = {
                        let mut channel_state_lock = self.channel_state.lock().unwrap();
                        let channel_state = &mut *channel_state_lock;
 
@@ -3381,19 +3438,37 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                        // add-HTLCs on disconnect, we may be handed HTLCs to fail backwards here.
                                        let (funding_locked, revoke_and_ack, commitment_update, monitor_update_opt, order, htlcs_failed_forward, shutdown) =
                                                try_chan_entry!(self, chan.get_mut().channel_reestablish(msg, &self.logger), channel_state, chan);
+                                       let mut channel_update = None;
                                        if let Some(msg) = shutdown {
                                                channel_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown {
                                                        node_id: counterparty_node_id.clone(),
                                                        msg,
                                                });
+                                       } else if chan.get().is_usable() {
+                                               // If the channel is in a usable state (ie the channel is not being shut
+                                               // down), send a unicast channel_update to our counterparty to make sure
+                                               // they have the latest channel parameters.
+                                               channel_update = Some(events::MessageSendEvent::SendChannelUpdate {
+                                                       node_id: chan.get().get_counterparty_node_id(),
+                                                       msg: self.get_channel_update_for_unicast(chan.get()).unwrap(),
+                                               });
+                                       }
+                                       let need_lnd_workaround = chan.get_mut().workaround_lnd_bug_4006.take();
+                                       chan_restoration_res = handle_chan_restoration_locked!(self, channel_state_lock, channel_state, chan, revoke_and_ack, commitment_update, order, monitor_update_opt, Vec::new(), None, funding_locked);
+                                       if let Some(upd) = channel_update {
+                                               channel_state.pending_msg_events.push(upd);
                                        }
-                                       (htlcs_failed_forward, handle_chan_restoration_locked!(self, channel_state_lock, channel_state, chan, revoke_and_ack, commitment_update, order, monitor_update_opt, Vec::new(), None, funding_locked))
+                                       (htlcs_failed_forward, need_lnd_workaround)
                                },
                                hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id))
                        }
                };
                post_handle_chan_restoration!(self, chan_restoration_res);
                self.fail_holding_cell_htlcs(htlcs_failed_forward, msg.channel_id);
+
+               if let Some(funding_locked_msg) = need_lnd_workaround {
+                       self.internal_funding_locked(counterparty_node_id, &funding_locked_msg)?;
+               }
                Ok(())
        }
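The lnd workaround replays a funding_locked that was queued on the channel (workaround_lnd_bug_4006), and it deliberately does so only after the channel_state lock has been released, since internal_funding_locked re-acquires that lock itself. A minimal, self-contained illustration of that take-under-lock, act-after-unlock pattern (not LDK code):

```rust
use std::sync::Mutex;

// Hypothetical state holding a deferred message.
struct PeerState { queued_funding_locked: Option<String> }

fn replay_deferred<F: Fn(&str)>(state: &Mutex<PeerState>, handle_funding_locked: F) {
    // Take the queued message while the lock is held...
    let deferred = {
        let mut guard = state.lock().unwrap();
        guard.queued_funding_locked.take()
    }; // ...and only act once the guard is dropped,
    if let Some(msg) = deferred {
        handle_funding_locked(&msg); // because the handler may lock `state` again.
    }
}
```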
 
@@ -3429,6 +3504,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                                if let Err(_e) = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), monitor_update) {
                                                        unimplemented!();
                                                }
+                                               log_debug!(self.logger, "Updating fee resulted in a commitment_signed for channel {}", log_bytes!(chan.get().channel_id()));
                                                channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
                                                        node_id: chan.get().get_counterparty_node_id(),
                                                        updates: msgs::CommitmentUpdate {
@@ -3479,7 +3555,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                                        short_to_id.remove(&short_id);
                                                }
                                                failed_channels.push(chan.force_shutdown(false));
-                                               if let Ok(update) = self.get_channel_update(&chan) {
+                                               if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
                                                        pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
                                                                msg: update
                                                        });
@@ -3805,7 +3881,7 @@ where
                        *best_block = BestBlock::new(header.prev_blockhash, new_height)
                }
 
-               self.do_chain_event(Some(new_height), |channel| channel.best_block_updated(new_height, header.time));
+               self.do_chain_event(Some(new_height), |channel| channel.best_block_updated(new_height, header.time, &self.logger));
        }
 }
 
@@ -3841,7 +3917,7 @@ where
 
                *self.best_block.write().unwrap() = BestBlock::new(block_hash, height);
 
-               self.do_chain_event(Some(height), |channel| channel.best_block_updated(height, header.time));
+               self.do_chain_event(Some(height), |channel| channel.best_block_updated(height, header.time, &self.logger));
 
                macro_rules! max_time {
                        ($timestamp: expr) => {
@@ -3883,7 +3959,7 @@ where
                self.do_chain_event(None, |channel| {
                        if let Some(funding_txo) = channel.get_funding_txo() {
                                if funding_txo.txid == *txid {
-                                       channel.funding_transaction_unconfirmed().map(|_| (None, Vec::new()))
+                                       channel.funding_transaction_unconfirmed(&self.logger).map(|_| (None, Vec::new()))
                                } else { Ok((None, Vec::new())) }
                        } else { Ok((None, Vec::new())) }
                });
@@ -3918,7 +3994,7 @@ where
                                let res = f(channel);
                                if let Ok((chan_res, mut timed_out_pending_htlcs)) = res {
                                        for (source, payment_hash) in timed_out_pending_htlcs.drain(..) {
-                                               let chan_update = self.get_channel_update(&channel).map(|u| u.encode_with_len()).unwrap(); // Cannot add/recv HTLCs before we have a short_id so unwrap is safe
+                                               let chan_update = self.get_channel_update_for_unicast(&channel).map(|u| u.encode_with_len()).unwrap(); // Cannot add/recv HTLCs before we have a short_id so unwrap is safe
                                                timed_out_htlcs.push((source, payment_hash,  HTLCFailReason::Reason {
                                                        failure_code: 0x1000 | 14, // expiry_too_soon, or at least it is now
                                                        data: chan_update,
@@ -3935,6 +4011,12 @@ where
                                                                node_id: channel.get_counterparty_node_id(),
                                                                msg: announcement_sigs,
                                                        });
+                                               } else if channel.is_usable() {
+                                                       log_trace!(self.logger, "Sending funding_locked WITHOUT announcement_signatures but with private channel_update for our counterparty on channel {}", log_bytes!(channel.channel_id()));
+                                                       pending_msg_events.push(events::MessageSendEvent::SendChannelUpdate {
+                                                               node_id: channel.get_counterparty_node_id(),
+                                                               msg: self.get_channel_update_for_unicast(channel).unwrap(),
+                                                       });
                                                } else {
                                                        log_trace!(self.logger, "Sending funding_locked WITHOUT announcement_signatures for {}", log_bytes!(channel.channel_id()));
                                                }
@@ -3947,7 +4029,7 @@ where
                                        // It looks like our counterparty went on-chain or funding transaction was
                                        // reorged out of the main chain. Close the channel.
                                        failed_channels.push(channel.force_shutdown(true));
-                                       if let Ok(update) = self.get_channel_update(&channel) {
+                                       if let Ok(update) = self.get_channel_update_for_broadcast(&channel) {
                                                pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
                                                        msg: update
                                                });
@@ -4100,8 +4182,13 @@ impl<Signer: Sign, M: Deref , T: Deref , K: Deref , F: Deref , L: Deref >
        }
 
        fn handle_channel_update(&self, counterparty_node_id: &PublicKey, msg: &msgs::ChannelUpdate) {
-               let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
-               let _ = handle_error!(self, self.internal_channel_update(counterparty_node_id, msg), *counterparty_node_id);
+               PersistenceNotifierGuard::optionally_notify(&self.total_consistency_lock, &self.persistence_notifier, || {
+                       if let Ok(persist) = handle_error!(self, self.internal_channel_update(counterparty_node_id, msg), *counterparty_node_id) {
+                               persist
+                       } else {
+                               NotifyOption::SkipPersist
+                       }
+               });
        }
 
        fn handle_channel_reestablish(&self, counterparty_node_id: &PublicKey, msg: &msgs::ChannelReestablish) {
@@ -4126,7 +4213,7 @@ impl<Signer: Sign, M: Deref , T: Deref , K: Deref , F: Deref , L: Deref >
                                                        short_to_id.remove(&short_id);
                                                }
                                                failed_channels.push(chan.force_shutdown(true));
-                                               if let Ok(update) = self.get_channel_update(&chan) {
+                                               if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
                                                        pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
                                                                msg: update
                                                        });
@@ -4169,6 +4256,7 @@ impl<Signer: Sign, M: Deref , T: Deref , K: Deref , F: Deref , L: Deref >
                                        &events::MessageSendEvent::BroadcastChannelAnnouncement { .. } => true,
                                        &events::MessageSendEvent::BroadcastNodeAnnouncement { .. } => true,
                                        &events::MessageSendEvent::BroadcastChannelUpdate { .. } => true,
+                                       &events::MessageSendEvent::SendChannelUpdate { ref node_id, .. } => node_id != counterparty_node_id,
                                        &events::MessageSendEvent::HandleError { ref node_id, .. } => node_id != counterparty_node_id,
                                        &events::MessageSendEvent::PaymentFailureNetworkUpdate { .. } => true,
                                        &events::MessageSendEvent::SendChannelRangeQuery { .. } => false,
@@ -4321,22 +4409,22 @@ const MIN_SERIALIZATION_VERSION: u8 = 1;
 
 impl_writeable_tlv_based_enum!(PendingHTLCRouting,
        (0, Forward) => {
-               (0, onion_packet),
-               (2, short_channel_id),
-       }, {}, {},
+               (0, onion_packet, required),
+               (2, short_channel_id, required),
+       },
        (1, Receive) => {
-               (0, payment_data),
-               (2, incoming_cltv_expiry),
-       }, {}, {}
+               (0, payment_data, required),
+               (2, incoming_cltv_expiry, required),
+       }
 ;);
 
 impl_writeable_tlv_based!(PendingHTLCInfo, {
-       (0, routing),
-       (2, incoming_shared_secret),
-       (4, payment_hash),
-       (6, amt_to_forward),
-       (8, outgoing_cltv_value)
-}, {}, {});
+       (0, routing, required),
+       (2, incoming_shared_secret, required),
+       (4, payment_hash, required),
+       (6, amt_to_forward, required),
+       (8, outgoing_cltv_value, required)
+});
 
 impl_writeable_tlv_based_enum!(HTLCFailureMsg, ;
        (0, Relay),
@@ -4348,60 +4436,58 @@ impl_writeable_tlv_based_enum!(PendingHTLCStatus, ;
 );
 
 impl_writeable_tlv_based!(HTLCPreviousHopData, {
-       (0, short_channel_id),
-       (2, outpoint),
-       (4, htlc_id),
-       (6, incoming_packet_shared_secret)
-}, {}, {});
+       (0, short_channel_id, required),
+       (2, outpoint, required),
+       (4, htlc_id, required),
+       (6, incoming_packet_shared_secret, required)
+});
 
 impl_writeable_tlv_based!(ClaimableHTLC, {
-       (0, prev_hop),
-       (2, value),
-       (4, payment_data),
-       (6, cltv_expiry),
-}, {}, {});
+       (0, prev_hop, required),
+       (2, value, required),
+       (4, payment_data, required),
+       (6, cltv_expiry, required),
+});
 
 impl_writeable_tlv_based_enum!(HTLCSource,
        (0, OutboundRoute) => {
-               (0, session_priv),
-               (2, first_hop_htlc_msat),
-       }, {}, {
-               (4, path),
-       };
+               (0, session_priv, required),
+               (2, first_hop_htlc_msat, required),
+               (4, path, vec_type),
+       }, ;
        (1, PreviousHopData)
 );
 
 impl_writeable_tlv_based_enum!(HTLCFailReason,
        (0, LightningError) => {
-               (0, err),
-       }, {}, {},
+               (0, err, required),
+       },
        (1, Reason) => {
-               (0, failure_code),
-       }, {}, {
-               (2, data),
+               (0, failure_code, required),
+               (2, data, vec_type),
        },
 ;);
 
 impl_writeable_tlv_based_enum!(HTLCForwardInfo,
        (0, AddHTLC) => {
-               (0, forward_info),
-               (2, prev_short_channel_id),
-               (4, prev_htlc_id),
-               (6, prev_funding_outpoint),
-       }, {}, {},
+               (0, forward_info, required),
+               (2, prev_short_channel_id, required),
+               (4, prev_htlc_id, required),
+               (6, prev_funding_outpoint, required),
+       },
        (1, FailHTLC) => {
-               (0, htlc_id),
-               (2, err_packet),
-       }, {}, {},
+               (0, htlc_id, required),
+               (2, err_packet, required),
+       },
 ;);
 
 impl_writeable_tlv_based!(PendingInboundPayment, {
-       (0, payment_secret),
-       (2, expiry_time),
-       (4, user_payment_id),
-       (6, payment_preimage),
-       (8, min_value_msat),
-}, {}, {});
+       (0, payment_secret, required),
+       (2, expiry_time, required),
+       (4, user_payment_id, required),
+       (6, payment_preimage, required),
+       (8, min_value_msat, required),
+});
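The serialization macros now take a per-field annotation (required, vec_type) instead of trailing blocks of optional and Vec fields. A hypothetical struct written in the same annotated style, purely to show the shape of the new syntax; the struct and its fields are made up for illustration:

```rust
// Hypothetical example mirroring the annotated form above: even TLV type numbers,
// `required` fields must be present when reading, `vec_type` fields are Vec-valued.
struct ExampleRecord {
    amount_msat: u64,
    cltv_expiry: u32,
    path_fees: Vec<u64>,
}

impl_writeable_tlv_based!(ExampleRecord, {
    (0, amount_msat, required),
    (2, cltv_expiry, required),
    (4, path_fees, vec_type),
});
```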
 
 impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> Writeable for ChannelManager<Signer, M, T, K, F, L>
        where M::Target: chain::Watch<Signer>,
@@ -4496,7 +4582,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> Writeable f
                        session_priv.write(writer)?;
                }
 
-               write_tlv_fields!(writer, {}, {});
+               write_tlv_fields!(writer, {});
 
                Ok(())
        }
@@ -4643,6 +4729,13 @@ impl<'a, Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
                                                channel.get_cur_counterparty_commitment_transaction_number() < monitor.get_cur_counterparty_commitment_number() ||
                                                channel.get_latest_monitor_update_id() > monitor.get_latest_update_id() {
                                        // If the channel is ahead of the monitor, return InvalidValue:
+                                       log_error!(args.logger, "A ChannelMonitor is stale compared to the current ChannelManager! This indicates a potentially-critical violation of the chain::Watch API!");
+                                       log_error!(args.logger, " The ChannelMonitor for channel {} is at update_id {} but the ChannelManager is at update_id {}.",
+                                               log_bytes!(channel.channel_id()), monitor.get_latest_update_id(), channel.get_latest_monitor_update_id());
+                                       log_error!(args.logger, " The chain::Watch API *requires* that monitors are persisted durably before returning,");
+                                       log_error!(args.logger, " client applications must ensure that ChannelMonitor data is always available and the latest to avoid funds loss!");
+                                       log_error!(args.logger, " Without the latest ChannelMonitor we cannot continue without risking funds.");
+                                       log_error!(args.logger, " Please ensure the chain::Watch API requirements are met and file a bug report at https://github.com/rust-bitcoin/rust-lightning");
                                        return Err(DecodeError::InvalidValue);
                                } else if channel.get_cur_holder_commitment_transaction_number() > monitor.get_cur_holder_commitment_number() ||
                                                channel.get_revoked_counterparty_commitment_transaction_number() > monitor.get_min_seen_secret() ||
@@ -4659,6 +4752,11 @@ impl<'a, Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
                                        by_id.insert(channel.channel_id(), channel);
                                }
                        } else {
+                               log_error!(args.logger, "Missing ChannelMonitor for channel {} needed by ChannelManager.", log_bytes!(channel.channel_id()));
+                               log_error!(args.logger, " The chain::Watch API *requires* that monitors are persisted durably before returning,");
+                               log_error!(args.logger, " client applications must ensure that ChannelMonitor data is always available and the latest to avoid funds loss!");
+                               log_error!(args.logger, " Without the ChannelMonitor we cannot continue without risking funds.");
+                               log_error!(args.logger, " Please ensure the chain::Watch API requirements are met and file a bug report at https://github.com/rust-bitcoin/rust-lightning");
                                return Err(DecodeError::InvalidValue);
                        }
                }
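
The new log messages above spell out the invariant being enforced here: every ChannelMonitor must be durably persisted before the corresponding chain::Watch call returns, so that after a restart the ChannelManager can never be ahead of, or missing, one of its monitors. A rough sketch of that ordering on the application side; DurableWatch, MonitorStore and write_full_monitor are hypothetical stand-ins for the application's storage layer, not APIs from this patch:

	// Sketch only. Assumed imports: chain::{self, transaction::OutPoint},
	// chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdateErr} and
	// chain::keysinterface::InMemorySigner, with paths as in this tree.
	struct DurableWatch<W> { inner: W, store: MonitorStore }

	impl<W: chain::Watch<InMemorySigner>> DurableWatch<W> {
		fn watch_channel(&self, funding_txo: OutPoint, monitor: ChannelMonitor<InMemorySigner>)
			-> Result<(), ChannelMonitorUpdateErr>
		{
			// Write (and fsync) the full monitor *before* acknowledging the watch;
			// returning earlier would let a crash leave the ChannelManager newer than
			// its monitors, which is exactly the state the errors above reject.
			self.store.write_full_monitor(&funding_txo, &monitor)
				.map_err(|_| ChannelMonitorUpdateErr::PermanentFailure)?;
			self.inner.watch_channel(funding_txo, monitor)
		}
	}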
@@ -4741,7 +4839,7 @@ impl<'a, Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
                        }
                }
 
-               read_tlv_fields!(reader, {}, {});
+               read_tlv_fields!(reader, {});
 
                let mut secp_ctx = Secp256k1::new();
                secp_ctx.seeded_randomize(&args.keys_manager.get_secure_random_bytes());
@@ -4801,6 +4899,9 @@ mod tests {
        use core::sync::atomic::{AtomicBool, Ordering};
        use std::thread;
        use core::time::Duration;
+       use ln::functional_test_utils::*;
+       use ln::features::InitFeatures;
+       use ln::msgs::ChannelMessageHandler;
 
        #[test]
        fn test_wait_timeout() {
@@ -4843,6 +4944,78 @@ mod tests {
                        }
                }
        }
+
+       #[test]
+       fn test_notify_limits() {
+               // Check that a few cases which don't require the persistence of a new ChannelManager
+               // indeed do not cause the persistence of a new ChannelManager.
+               let chanmon_cfgs = create_chanmon_cfgs(3);
+               let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
+               let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
+               let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
+
+               let mut chan = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
+
+               // We check that the channel info the nodes have doesn't change too early, even though we
+               // try to connect messages with new values
+               chan.0.contents.fee_base_msat *= 2;
+               chan.1.contents.fee_base_msat *= 2;
+               let node_a_chan_info = nodes[0].node.list_channels()[0].clone();
+               let node_b_chan_info = nodes[1].node.list_channels()[0].clone();
+
+               // The first two nodes (which opened a channel) should now require fresh persistence
+               assert!(nodes[0].node.await_persistable_update_timeout(Duration::from_millis(1)));
+               assert!(nodes[1].node.await_persistable_update_timeout(Duration::from_millis(1)));
+               // ... but the last node should not.
+               assert!(!nodes[2].node.await_persistable_update_timeout(Duration::from_millis(1)));
+               // After persisting the first two nodes they should no longer need fresh persistence.
+               assert!(!nodes[0].node.await_persistable_update_timeout(Duration::from_millis(1)));
+               assert!(!nodes[1].node.await_persistable_update_timeout(Duration::from_millis(1)));
+
+               // Node 3, unrelated to the only channel, shouldn't care if it receives a channel_update
+               // about the channel.
+               nodes[2].node.handle_channel_update(&nodes[1].node.get_our_node_id(), &chan.0);
+               nodes[2].node.handle_channel_update(&nodes[1].node.get_our_node_id(), &chan.1);
+               assert!(!nodes[2].node.await_persistable_update_timeout(Duration::from_millis(1)));
+
+               // The nodes which are a party to the channel should also ignore messages from unrelated
+               // parties.
+               nodes[0].node.handle_channel_update(&nodes[2].node.get_our_node_id(), &chan.0);
+               nodes[0].node.handle_channel_update(&nodes[2].node.get_our_node_id(), &chan.1);
+               nodes[1].node.handle_channel_update(&nodes[2].node.get_our_node_id(), &chan.0);
+               nodes[1].node.handle_channel_update(&nodes[2].node.get_our_node_id(), &chan.1);
+               assert!(!nodes[0].node.await_persistable_update_timeout(Duration::from_millis(1)));
+               assert!(!nodes[1].node.await_persistable_update_timeout(Duration::from_millis(1)));
+
+               // At this point the channel info given by peers should still be the same.
+               assert_eq!(nodes[0].node.list_channels()[0], node_a_chan_info);
+               assert_eq!(nodes[1].node.list_channels()[0], node_b_chan_info);
+
+               // An earlier version of handle_channel_update didn't check the directionality of the
+               // update message and would always update the local fee info, even if our peer was
+               // (spuriously) forwarding us our own channel_update.
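+               // (Bit 0 of channel_update.contents.flags is the direction bit: 0 means the update was
+               // produced by node_1, the node with the lexicographically lesser node_id, which is what
+               // the comparison below reconstructs.)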
+               let as_node_one = nodes[0].node.get_our_node_id().serialize()[..] < nodes[1].node.get_our_node_id().serialize()[..];
+               let as_update = if as_node_one == (chan.0.contents.flags & 1 == 0 /* chan.0 is from node one */) { &chan.0 } else { &chan.1 };
+               let bs_update = if as_node_one == (chan.0.contents.flags & 1 == 0 /* chan.0 is from node one */) { &chan.1 } else { &chan.0 };
+
+               // First deliver each peer's own message, checking that the node doesn't need to be
+               // persisted and that its channel info remains the same.
+               nodes[0].node.handle_channel_update(&nodes[1].node.get_our_node_id(), &as_update);
+               nodes[1].node.handle_channel_update(&nodes[0].node.get_our_node_id(), &bs_update);
+               assert!(!nodes[0].node.await_persistable_update_timeout(Duration::from_millis(1)));
+               assert!(!nodes[1].node.await_persistable_update_timeout(Duration::from_millis(1)));
+               assert_eq!(nodes[0].node.list_channels()[0], node_a_chan_info);
+               assert_eq!(nodes[1].node.list_channels()[0], node_b_chan_info);
+
+               // Finally, deliver the other peers' messages, ensuring each node needs to be persisted and
+               // the channel info has updated.
+               nodes[0].node.handle_channel_update(&nodes[1].node.get_our_node_id(), &bs_update);
+               nodes[1].node.handle_channel_update(&nodes[0].node.get_our_node_id(), &as_update);
+               assert!(nodes[0].node.await_persistable_update_timeout(Duration::from_millis(1)));
+               assert!(nodes[1].node.await_persistable_update_timeout(Duration::from_millis(1)));
+               assert_ne!(nodes[0].node.list_channels()[0], node_a_chan_info);
+               assert_ne!(nodes[1].node.list_channels()[0], node_b_chan_info);
+       }
 }
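
The test_notify_limits test above leans on ChannelManager::await_persistable_update_timeout, which returns true only if, before the timeout expires, something changed that requires the ChannelManager to be re-persisted. A rough sketch of how an application might drive persistence off of it; persist_manager and should_exit are hypothetical hooks, and this is roughly the loop the lightning-background-processor crate wraps up rather than code from this patch:

	// Sketch only; assumes the same ChannelManager type-parameter bounds as the
	// Writeable impl above, and that persist_manager writes the manager durably.
	fn background_persist<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>(
		manager: &ChannelManager<Signer, M, T, K, F, L>,
		persist_manager: &dyn Fn(&ChannelManager<Signer, M, T, K, F, L>) -> Result<(), std::io::Error>,
		should_exit: &dyn Fn() -> bool,
	) -> Result<(), std::io::Error>
		where M::Target: chain::Watch<Signer>,
		      T::Target: BroadcasterInterface,
		      K::Target: KeysInterface<Signer = Signer>,
		      F::Target: FeeEstimator,
		      L::Target: Logger,
	{
		while !should_exit() {
			// Only touch the disk when the manager actually has new state to write.
			if manager.await_persistable_update_timeout(Duration::from_millis(100)) {
				persist_manager(manager)?;
			}
		}
		// Persist once more on shutdown so the latest state is on disk.
		persist_manager(manager)
	}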
 
 #[cfg(all(any(test, feature = "_test_utils"), feature = "unstable"))]
@@ -4942,7 +5115,19 @@ pub mod bench {
                Listen::block_connected(&node_b, &block, 1);
 
                node_a.handle_funding_locked(&node_b.get_our_node_id(), &get_event_msg!(node_b_holder, MessageSendEvent::SendFundingLocked, node_a.get_our_node_id()));
-               node_b.handle_funding_locked(&node_a.get_our_node_id(), &get_event_msg!(node_a_holder, MessageSendEvent::SendFundingLocked, node_b.get_our_node_id()));
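+               // node_a now queues a SendChannelUpdate for its counterparty alongside its own
+               // funding_locked, so there are two pending message events to handle here rather than one.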
+               let msg_events = node_a.get_and_clear_pending_msg_events();
+               assert_eq!(msg_events.len(), 2);
+               match msg_events[0] {
+                       MessageSendEvent::SendFundingLocked { ref msg, .. } => {
+                               node_b.handle_funding_locked(&node_a.get_our_node_id(), msg);
+                               get_event_msg!(node_b_holder, MessageSendEvent::SendChannelUpdate, node_a.get_our_node_id());
+                       },
+                       _ => panic!(),
+               }
+               match msg_events[1] {
+                       MessageSendEvent::SendChannelUpdate { .. } => {},
+                       _ => panic!(),
+               }
 
                let dummy_graph = NetworkGraph::new(genesis_hash);