Merge pull request #954 from TheBlueMatt/2021-06-no-spurious-forward-fails
author Matt Corallo <649246+TheBlueMatt@users.noreply.github.com>
Thu, 1 Jul 2021 03:28:30 +0000 (03:28 +0000)
committer GitHub <noreply@github.com>
Thu, 1 Jul 2021 03:28:30 +0000 (03:28 +0000)
Consider channels "live" even if they are awaiting a monitor update
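Previously, a channel awaiting a ChannelMonitor update was not considered is_live(), so HTLCs being forwarded over it were failed back to the previous hop with a temporary channel failure. With this change such channels remain live: new outbound HTLCs are instead placed in the channel's holding cell and sent once the monitor update completes, and the tests in chanmon_update_fail_tests.rs are updated to expect the HTLCs to be held and later forwarded rather than failed.

The shape of the change, as a minimal self-contained sketch (toy types only, not LDK's actual API):

    // Illustrative only: an HTLC arriving while a monitor update is pending is
    // queued in a holding cell rather than failed back to the previous hop.
    #[derive(Default)]
    struct ToyChannel {
        peer_disconnected: bool,
        monitor_update_pending: bool,
        holding_cell: Vec<u64>, // queued HTLC amounts, in msat
    }

    impl ToyChannel {
        // Previously this also required !self.monitor_update_pending.
        fn is_live(&self) -> bool {
            !self.peer_disconnected
        }

        fn send_htlc(&mut self, amt_msat: u64) -> Result<Option<u64>, &'static str> {
            if self.peer_disconnected {
                return Err("cannot send an HTLC while disconnected");
            }
            if self.monitor_update_pending {
                // Queue the HTLC; it leaves the holding cell when the pending
                // monitor update completes.
                self.holding_cell.push(amt_msat);
                return Ok(None);
            }
            Ok(Some(amt_msat)) // would generate an update_add_htlc immediately
        }
    }

    fn main() {
        let mut chan = ToyChannel { monitor_update_pending: true, ..Default::default() };
        assert!(chan.is_live());
        assert_eq!(chan.send_htlc(1_000_000).unwrap(), None);
        assert_eq!(chan.holding_cell.len(), 1);
    }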

lightning/src/ln/chanmon_update_fail_tests.rs
lightning/src/ln/channel.rs
lightning/src/ln/channelmanager.rs

index fdcf7c2de70ee71edb5bd8e3d2ff4dfdafa67606,73fc531562b196f747dac9c755f6a575dfd80fb8..dd7b1906ca079aced33d3a62384fc1772d02b49c
@@@ -41,6 -41,7 +41,6 @@@ use ln::functional_test_utils::*
  use util::test_utils;
  
  use prelude::*;
 -use std::collections::HashMap;
  use std::sync::{Arc, Mutex};
  
  // If persister_fail is true, we have the persister return a PermanentFailure
@@@ -905,8 -906,8 +905,8 @@@ fn do_test_monitor_update_fail_raa(test
        assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
        check_added_monitors!(nodes[1], 1);
  
-       // Attempt to forward a third payment but fail due to the second channel being unavailable
-       // for forwarding.
 +       // Forward a third payment which will also be added to the holding cell, despite the channel
 +       // being paused waiting on a monitor update.
        let (_, payment_hash_3, payment_secret_3) = get_payment_preimage_hash!(nodes[2]);
        {
                let net_graph_msg_handler = &nodes[0].net_graph_msg_handler;
        commitment_signed_dance!(nodes[1], nodes[0], send_event.commitment_msg, false, true);
        check_added_monitors!(nodes[1], 0);
  
-       let mut events_2 = nodes[1].node.get_and_clear_pending_msg_events();
-       assert_eq!(events_2.len(), 1);
-       match events_2.remove(0) {
-               MessageSendEvent::UpdateHTLCs { node_id, updates } => {
-                       assert_eq!(node_id, nodes[0].node.get_our_node_id());
-                       assert!(updates.update_fulfill_htlcs.is_empty());
-                       assert_eq!(updates.update_fail_htlcs.len(), 1);
-                       assert!(updates.update_fail_malformed_htlcs.is_empty());
-                       assert!(updates.update_add_htlcs.is_empty());
-                       assert!(updates.update_fee.is_none());
-                       nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fail_htlcs[0]);
-                       commitment_signed_dance!(nodes[0], nodes[1], updates.commitment_signed, false, true);
-                       let msg_events = nodes[0].node.get_and_clear_pending_msg_events();
-                       assert_eq!(msg_events.len(), 1);
-                       match msg_events[0] {
-                               MessageSendEvent::PaymentFailureNetworkUpdate { update: msgs::HTLCFailChannelUpdate::ChannelUpdateMessage { ref msg }} => {
-                                       assert_eq!(msg.contents.short_channel_id, chan_2.0.contents.short_channel_id);
-                                       assert_eq!(msg.contents.flags & 2, 2); // temp disabled
-                               },
-                               _ => panic!("Unexpected event"),
-                       }
-                       let events = nodes[0].node.get_and_clear_pending_events();
-                       assert_eq!(events.len(), 1);
-                       if let Event::PaymentFailed { payment_hash, rejected_by_dest, .. } = events[0] {
-                               assert_eq!(payment_hash, payment_hash_3);
-                               assert!(!rejected_by_dest);
-                       } else { panic!("Unexpected event!"); }
-               },
-               _ => panic!("Unexpected event type!"),
-       };
+       // Call forward_pending_htlcs and check that the new HTLC was simply added to the holding cell
+       // and not forwarded.
+       expect_pending_htlcs_forwardable!(nodes[1]);
+       check_added_monitors!(nodes[1], 0);
+       assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
  
        let (payment_preimage_4, payment_hash_4) = if test_ignore_second_cs {
                // Try to route another payment backwards from 2 to make sure 1 holds off on responding
                assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
                nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Previous monitor update failure prevented generation of RAA".to_string(), 1);
                assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
-               assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
                (Some(payment_preimage_4), Some(payment_hash_4))
        } else { (None, None) };
  
  
        nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &messages_a.0);
        commitment_signed_dance!(nodes[0], nodes[1], messages_a.1, false);
-       let events_4 = nodes[0].node.get_and_clear_pending_events();
-       assert_eq!(events_4.len(), 1);
-       if let Event::PaymentFailed { payment_hash, rejected_by_dest, .. } = events_4[0] {
-               assert_eq!(payment_hash, payment_hash_1);
-               assert!(rejected_by_dest);
-       } else { panic!("Unexpected event!"); }
+       expect_payment_failed!(nodes[0], payment_hash_1, true);
  
        nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &send_event_b.msgs[0]);
+       let as_cs;
        if test_ignore_second_cs {
                nodes[2].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &send_event_b.commitment_msg);
                check_added_monitors!(nodes[2], 1);
  
                nodes[1].node.handle_revoke_and_ack(&nodes[2].node.get_our_node_id(), &bs_revoke_and_ack);
                check_added_monitors!(nodes[1], 1);
-               let as_cs = get_htlc_update_msgs!(nodes[1], nodes[2].node.get_our_node_id());
-               assert!(as_cs.update_add_htlcs.is_empty());
-               assert!(as_cs.update_fail_htlcs.is_empty());
-               assert!(as_cs.update_fail_malformed_htlcs.is_empty());
-               assert!(as_cs.update_fulfill_htlcs.is_empty());
-               assert!(as_cs.update_fee.is_none());
+               as_cs = get_htlc_update_msgs!(nodes[1], nodes[2].node.get_our_node_id());
  
                nodes[1].node.handle_commitment_signed(&nodes[2].node.get_our_node_id(), &bs_cs.commitment_signed);
                check_added_monitors!(nodes[1], 1);
-               let as_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[2].node.get_our_node_id());
-               nodes[2].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &as_cs.commitment_signed);
+       } else {
+               nodes[2].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &send_event_b.commitment_msg);
                check_added_monitors!(nodes[2], 1);
-               let bs_second_raa = get_event_msg!(nodes[2], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
  
-               nodes[2].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &as_raa);
-               check_added_monitors!(nodes[2], 1);
-               assert!(nodes[2].node.get_and_clear_pending_msg_events().is_empty());
+               let bs_revoke_and_commit = nodes[2].node.get_and_clear_pending_msg_events();
+               assert_eq!(bs_revoke_and_commit.len(), 2);
+               match bs_revoke_and_commit[0] {
+                       MessageSendEvent::SendRevokeAndACK { ref node_id, ref msg } => {
+                               assert_eq!(*node_id, nodes[1].node.get_our_node_id());
+                               nodes[1].node.handle_revoke_and_ack(&nodes[2].node.get_our_node_id(), &msg);
+                               check_added_monitors!(nodes[1], 1);
+                       },
+                       _ => panic!("Unexpected event"),
+               }
  
-               nodes[1].node.handle_revoke_and_ack(&nodes[2].node.get_our_node_id(), &bs_second_raa);
-               check_added_monitors!(nodes[1], 1);
-               assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
-       } else {
-               commitment_signed_dance!(nodes[2], nodes[1], send_event_b.commitment_msg, false);
+               as_cs = get_htlc_update_msgs!(nodes[1], nodes[2].node.get_our_node_id());
+               match bs_revoke_and_commit[1] {
+                       MessageSendEvent::UpdateHTLCs { ref node_id, ref updates } => {
+                               assert_eq!(*node_id, nodes[1].node.get_our_node_id());
+                               assert!(updates.update_add_htlcs.is_empty());
+                               assert!(updates.update_fail_htlcs.is_empty());
+                               assert!(updates.update_fail_malformed_htlcs.is_empty());
+                               assert!(updates.update_fulfill_htlcs.is_empty());
+                               assert!(updates.update_fee.is_none());
+                               nodes[1].node.handle_commitment_signed(&nodes[2].node.get_our_node_id(), &updates.commitment_signed);
+                               check_added_monitors!(nodes[1], 1);
+                       },
+                       _ => panic!("Unexpected event"),
+               }
        }
  
+       assert_eq!(as_cs.update_add_htlcs.len(), 1);
+       assert!(as_cs.update_fail_htlcs.is_empty());
+       assert!(as_cs.update_fail_malformed_htlcs.is_empty());
+       assert!(as_cs.update_fulfill_htlcs.is_empty());
+       assert!(as_cs.update_fee.is_none());
+       let as_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[2].node.get_our_node_id());
+       nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &as_cs.update_add_htlcs[0]);
+       nodes[2].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &as_cs.commitment_signed);
+       check_added_monitors!(nodes[2], 1);
+       let bs_second_raa = get_event_msg!(nodes[2], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
+       nodes[2].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &as_raa);
+       check_added_monitors!(nodes[2], 1);
+       let bs_second_cs = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
+       nodes[1].node.handle_revoke_and_ack(&nodes[2].node.get_our_node_id(), &bs_second_raa);
+       check_added_monitors!(nodes[1], 1);
+       assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+       nodes[1].node.handle_commitment_signed(&nodes[2].node.get_our_node_id(), &bs_second_cs.commitment_signed);
+       check_added_monitors!(nodes[1], 1);
+       let as_second_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[2].node.get_our_node_id());
+       nodes[2].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &as_second_raa);
+       check_added_monitors!(nodes[2], 1);
+       assert!(nodes[2].node.get_and_clear_pending_msg_events().is_empty());
        expect_pending_htlcs_forwardable!(nodes[2]);
  
        let events_6 = nodes[2].node.get_and_clear_pending_events();
-       assert_eq!(events_6.len(), 1);
+       assert_eq!(events_6.len(), 2);
        match events_6[0] {
                Event::PaymentReceived { payment_hash, .. } => { assert_eq!(payment_hash, payment_hash_2); },
                _ => panic!("Unexpected event"),
        };
+       match events_6[1] {
+               Event::PaymentReceived { payment_hash, .. } => { assert_eq!(payment_hash, payment_hash_3); },
+               _ => panic!("Unexpected event"),
+       };
  
        if test_ignore_second_cs {
                expect_pending_htlcs_forwardable!(nodes[1]);
@@@ -1611,9 -1622,9 +1621,9 @@@ fn first_message_on_recv_ordering() 
  fn test_monitor_update_fail_claim() {
        // Basic test for monitor update failures when processing claim_funds calls.
        // We set up a simple 3-node network, sending a payment from A to B and failing B's monitor
-       // update to claim the payment. We then send a payment C->B->A, making the forward of this
-       // payment from B to A fail due to the paused channel. Finally, we restore the channel monitor
-       // updating and claim the payment on B.
+       // update to claim the payment. We then send two payments C->B->A, which are held at B.
+       // Finally, we restore the channel monitor updating and claim the payment on B, forwarding
+       // the payments from C onwards to A.
        let chanmon_cfgs = create_chanmon_cfgs(3);
        let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
        let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
  
        *nodes[1].chain_monitor.update_ret.lock().unwrap() = Some(Err(ChannelMonitorUpdateErr::TemporaryFailure));
        assert!(nodes[1].node.claim_funds(payment_preimage_1));
+       nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Temporary failure claiming HTLC, treating as success: Failed to update ChannelMonitor".to_string(), 1);
        check_added_monitors!(nodes[1], 1);
  
+       // Note that at this point there is a pending commitment transaction update for A being held by
+       // B. Even when we go to send the payment from C through B to A, B will not update this
+       // already-signed commitment transaction and will instead wait for it to resolve before
+       // forwarding the payment onwards.
        let (_, payment_hash_2, payment_secret_2) = get_payment_preimage_hash!(nodes[0]);
+       let route;
        {
                let net_graph_msg_handler = &nodes[2].net_graph_msg_handler;
-               let route = get_route(&nodes[2].node.get_our_node_id(), &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[0].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), 1000000, TEST_FINAL_CLTV, &logger).unwrap();
+               route = get_route(&nodes[2].node.get_our_node_id(), &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[0].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), 1_000_000, TEST_FINAL_CLTV, &logger).unwrap();
                nodes[2].node.send_payment(&route, payment_hash_2, &Some(payment_secret_2)).unwrap();
                check_added_monitors!(nodes[2], 1);
        }
        nodes[1].node.handle_update_add_htlc(&nodes[2].node.get_our_node_id(), &payment_event.msgs[0]);
        let events = nodes[1].node.get_and_clear_pending_msg_events();
        assert_eq!(events.len(), 0);
-       nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Temporary failure claiming HTLC, treating as success: Failed to update ChannelMonitor".to_string(), 1);
        commitment_signed_dance!(nodes[1], nodes[2], payment_event.commitment_msg, false, true);
  
-       let bs_fail_update = get_htlc_update_msgs!(nodes[1], nodes[2].node.get_our_node_id());
-       nodes[2].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &bs_fail_update.update_fail_htlcs[0]);
-       commitment_signed_dance!(nodes[2], nodes[1], bs_fail_update.commitment_signed, false, true);
-       let msg_events = nodes[2].node.get_and_clear_pending_msg_events();
-       assert_eq!(msg_events.len(), 1);
-       match msg_events[0] {
-               MessageSendEvent::PaymentFailureNetworkUpdate { update: msgs::HTLCFailChannelUpdate::ChannelUpdateMessage { ref msg }} => {
-                       assert_eq!(msg.contents.short_channel_id, chan_1.0.contents.short_channel_id);
-                       assert_eq!(msg.contents.flags & 2, 2); // temp disabled
-               },
-               _ => panic!("Unexpected event"),
-       }
+       let (_, payment_hash_3, payment_secret_3) = get_payment_preimage_hash!(nodes[0]);
+       nodes[2].node.send_payment(&route, payment_hash_3, &Some(payment_secret_3)).unwrap();
+       check_added_monitors!(nodes[2], 1);
  
-       let events = nodes[2].node.get_and_clear_pending_events();
+       let mut events = nodes[2].node.get_and_clear_pending_msg_events();
        assert_eq!(events.len(), 1);
-       if let Event::PaymentFailed { payment_hash, rejected_by_dest, .. } = events[0] {
-               assert_eq!(payment_hash, payment_hash_2);
-               assert!(!rejected_by_dest);
-       } else { panic!("Unexpected event!"); }
+       let payment_event = SendEvent::from_event(events.pop().unwrap());
+       nodes[1].node.handle_update_add_htlc(&nodes[2].node.get_our_node_id(), &payment_event.msgs[0]);
+       let events = nodes[1].node.get_and_clear_pending_msg_events();
+       assert_eq!(events.len(), 0);
+       commitment_signed_dance!(nodes[1], nodes[2], payment_event.commitment_msg, false, true);
  
        // Now restore monitor updating on the 0<->1 channel and claim the funds on B.
        let (outpoint, latest_update) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_1.2).unwrap().clone();
        let bs_fulfill_update = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
        nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_fulfill_update.update_fulfill_htlcs[0]);
        commitment_signed_dance!(nodes[0], nodes[1], bs_fulfill_update.commitment_signed, false);
+       expect_payment_sent!(nodes[0], payment_preimage_1);
+       // Get the payment forwards; note that they were batched into one commitment update.
+       expect_pending_htlcs_forwardable!(nodes[1]);
+       check_added_monitors!(nodes[1], 1);
+       let bs_forward_update = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
+       nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &bs_forward_update.update_add_htlcs[0]);
+       nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &bs_forward_update.update_add_htlcs[1]);
+       commitment_signed_dance!(nodes[0], nodes[1], bs_forward_update.commitment_signed, false);
+       expect_pending_htlcs_forwardable!(nodes[0]);
  
        let events = nodes[0].node.get_and_clear_pending_events();
-       assert_eq!(events.len(), 1);
-       if let Event::PaymentSent { payment_preimage, .. } = events[0] {
-               assert_eq!(payment_preimage, payment_preimage_1);
-       } else { panic!("Unexpected event!"); }
+       assert_eq!(events.len(), 2);
+       match events[0] {
+               Event::PaymentReceived { ref payment_hash, ref payment_preimage, ref payment_secret, amt, user_payment_id: _ } => {
+                       assert_eq!(payment_hash_2, *payment_hash);
+                       assert!(payment_preimage.is_none());
+                       assert_eq!(payment_secret_2, *payment_secret);
+                       assert_eq!(1_000_000, amt);
+               },
+               _ => panic!("Unexpected event"),
+       }
+       match events[1] {
+               Event::PaymentReceived { ref payment_hash, ref payment_preimage, ref payment_secret, amt, user_payment_id: _ } => {
+                       assert_eq!(payment_hash_3, *payment_hash);
+                       assert!(payment_preimage.is_none());
+                       assert_eq!(payment_secret_3, *payment_secret);
+                       assert_eq!(1_000_000, amt);
+               },
+               _ => panic!("Unexpected event"),
+       }
  }
  
  #[test]
index 2f447c86155ffc78853f1516b2e44e120adb8b0f,2672511115ef6fd9ae159e1572fa5e953d9a1308..1d3e264a3fcbf263eec75e3b85e8d0b65c81e024
@@@ -288,7 -288,7 +288,7 @@@ impl HTLCCandidate 
  }
  
  /// Information needed for constructing an invoice route hint for this channel.
 -#[derive(Clone)]
 +#[derive(Clone, Debug, PartialEq)]
  pub struct CounterpartyForwardingInfo {
        /// Base routing fee in millisatoshis.
        pub fee_base_msat: u32,
@@@ -434,15 -434,6 +434,15 @@@ pub(super) struct Channel<Signer: Sign
        next_local_commitment_tx_fee_info_cached: Mutex<Option<CommitmentTxInfoCached>>,
        #[cfg(any(test, feature = "fuzztarget"))]
        next_remote_commitment_tx_fee_info_cached: Mutex<Option<CommitmentTxInfoCached>>,
 +
 +      /// lnd has a long-standing bug where, upon reconnection, if the channel is not yet confirmed
 +      /// they will not send a channel_reestablish until the channel locks in. Then, they will send a
 +      /// funding_locked *before* sending the channel_reestablish (which is clearly a violation of
 +      /// the BOLT specs). We copy c-lightning's workaround here and simply store the funding_locked
 +      /// message until we receive a channel_reestablish.
 +      ///
 +      /// See-also <https://github.com/lightningnetwork/lnd/issues/4006>
 +      pub workaround_lnd_bug_4006: Option<msgs::FundingLocked>,
  }
  
  #[cfg(any(test, feature = "fuzztarget"))]
@@@ -642,8 -633,6 +642,8 @@@ impl<Signer: Sign> Channel<Signer> 
                        next_local_commitment_tx_fee_info_cached: Mutex::new(None),
                        #[cfg(any(test, feature = "fuzztarget"))]
                        next_remote_commitment_tx_fee_info_cached: Mutex::new(None),
 +
 +                      workaround_lnd_bug_4006: None,
                })
        }
  
                        next_local_commitment_tx_fee_info_cached: Mutex::new(None),
                        #[cfg(any(test, feature = "fuzztarget"))]
                        next_remote_commitment_tx_fee_info_cached: Mutex::new(None),
 +
 +                      workaround_lnd_bug_4006: None,
                };
  
                Ok(chan)
                let mut local_htlc_total_msat = 0;
                let mut value_to_self_msat_offset = 0;
  
 -              log_trace!(logger, "Building commitment transaction number {} (really {} xor {}) for {}, generated by {} with fee {}...", commitment_number, (INITIAL_COMMITMENT_NUMBER - commitment_number), get_commitment_transaction_number_obscure_factor(&self.get_holder_pubkeys().payment_point, &self.get_counterparty_pubkeys().payment_point, self.is_outbound()), if local { "us" } else { "remote" }, if generated_by_local { "us" } else { "remote" }, feerate_per_kw);
 +              log_trace!(logger, "Building commitment transaction number {} (really {} xor {}) for channel {} for {}, generated by {} with fee {}...",
 +                      commitment_number, (INITIAL_COMMITMENT_NUMBER - commitment_number),
 +                      get_commitment_transaction_number_obscure_factor(&self.get_holder_pubkeys().payment_point, &self.get_counterparty_pubkeys().payment_point, self.is_outbound()),
 +                      log_bytes!(self.channel_id), if local { "us" } else { "remote" }, if generated_by_local { "us" } else { "remote" }, feerate_per_kw);
  
                macro_rules! get_htlc_in_commitment {
                        ($htlc: expr, $offered: expr) => {
                                        _ => {}
                                }
                        }
 -                      log_trace!(logger, "Adding HTLC claim to holding_cell! Current state: {}", self.channel_state);
 +                      log_trace!(logger, "Adding HTLC claim to holding_cell in channel {}! Current state: {}", log_bytes!(self.channel_id()), self.channel_state);
                        self.holding_cell_htlc_updates.push(HTLCUpdateAwaitingACK::ClaimHTLC {
                                payment_preimage: payment_preimage_arg, htlc_id: htlc_id_arg,
                        });
                                debug_assert!(false, "Have an inbound HTLC we tried to claim before it was fully committed to");
                                return Ok((None, Some(monitor_update)));
                        }
 -                      log_trace!(logger, "Upgrading HTLC {} to LocalRemoved with a Fulfill!", log_bytes!(htlc.payment_hash.0));
 +                      log_trace!(logger, "Upgrading HTLC {} to LocalRemoved with a Fulfill in channel {}!", log_bytes!(htlc.payment_hash.0), log_bytes!(self.channel_id));
                        htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::Fulfill(payment_preimage_arg.clone()));
                }
  
                                        _ => {}
                                }
                        }
 -                      log_trace!(logger, "Placing failure for HTLC ID {} in holding cell", htlc_id_arg);
 +                      log_trace!(logger, "Placing failure for HTLC ID {} in holding cell in channel {}.", htlc_id_arg, log_bytes!(self.channel_id()));
                        self.holding_cell_htlc_updates.push(HTLCUpdateAwaitingACK::FailHTLC {
                                htlc_id: htlc_id_arg,
                                err_packet,
                        return Ok(None);
                }
  
 -              log_trace!(logger, "Failing HTLC ID {} back with a update_fail_htlc message", htlc_id_arg);
 +              log_trace!(logger, "Failing HTLC ID {} back with an update_fail_htlc message in channel {}.", htlc_id_arg, log_bytes!(self.channel_id()));
                {
                        let htlc = &mut self.pending_inbound_htlcs[pending_idx];
                        htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailRelay(err_packet.clone()));
                        let initial_commitment_bitcoin_tx = trusted_tx.built_transaction();
                        let sighash = initial_commitment_bitcoin_tx.get_sighash_all(&funding_script, self.channel_value_satoshis);
                        // They sign the holder commitment transaction...
 -                      log_trace!(logger, "Checking funding_created tx signature {} by key {} against tx {} (sighash {}) with redeemscript {}", log_bytes!(sig.serialize_compact()[..]), log_bytes!(self.counterparty_funding_pubkey().serialize()), encode::serialize_hex(&initial_commitment_bitcoin_tx.transaction), log_bytes!(sighash[..]), encode::serialize_hex(&funding_script));
 +                      log_trace!(logger, "Checking funding_created tx signature {} by key {} against tx {} (sighash {}) with redeemscript {} for channel {}.",
 +                              log_bytes!(sig.serialize_compact()[..]), log_bytes!(self.counterparty_funding_pubkey().serialize()),
 +                              encode::serialize_hex(&initial_commitment_bitcoin_tx.transaction), log_bytes!(sighash[..]),
 +                              encode::serialize_hex(&funding_script), log_bytes!(self.channel_id()));
                        secp_check!(self.secp_ctx.verify(&sighash, &sig, self.counterparty_funding_pubkey()), "Invalid funding_created signature from peer".to_owned());
                }
  
  
                let counterparty_trusted_tx = counterparty_initial_commitment_tx.trust();
                let counterparty_initial_bitcoin_tx = counterparty_trusted_tx.built_transaction();
 -              log_trace!(logger, "Initial counterparty ID {} tx {}", counterparty_initial_bitcoin_tx.txid, encode::serialize_hex(&counterparty_initial_bitcoin_tx.transaction));
 +              log_trace!(logger, "Initial counterparty tx for channel {} is: txid {} tx {}",
 +                      log_bytes!(self.channel_id()), counterparty_initial_bitcoin_tx.txid, encode::serialize_hex(&counterparty_initial_bitcoin_tx.transaction));
  
                let counterparty_signature = self.holder_signer.sign_counterparty_commitment(&counterparty_initial_commitment_tx, &self.secp_ctx)
                                .map_err(|_| ChannelError::Close("Failed to get signatures for new commitment_signed".to_owned()))?.0;
                self.cur_counterparty_commitment_transaction_number -= 1;
                self.cur_holder_commitment_transaction_number -= 1;
  
 +              log_info!(logger, "Generated funding_signed for peer for channel {}", log_bytes!(self.channel_id()));
 +
                Ok((msgs::FundingSigned {
                        channel_id: self.channel_id,
                        signature
                let counterparty_trusted_tx = counterparty_initial_commitment_tx.trust();
                let counterparty_initial_bitcoin_tx = counterparty_trusted_tx.built_transaction();
  
 -              log_trace!(logger, "Initial counterparty ID {} tx {}", counterparty_initial_bitcoin_tx.txid, encode::serialize_hex(&counterparty_initial_bitcoin_tx.transaction));
 +              log_trace!(logger, "Initial counterparty tx for channel {} is: txid {} tx {}",
 +                      log_bytes!(self.channel_id()), counterparty_initial_bitcoin_tx.txid, encode::serialize_hex(&counterparty_initial_bitcoin_tx.transaction));
  
                let holder_signer = self.build_holder_transaction_keys(self.cur_holder_commitment_transaction_number)?;
                let initial_commitment_tx = self.build_commitment_transaction(self.cur_holder_commitment_transaction_number, &holder_signer, true, false, self.feerate_per_kw, logger).0;
                self.cur_holder_commitment_transaction_number -= 1;
                self.cur_counterparty_commitment_transaction_number -= 1;
  
 +              log_info!(logger, "Received funding_signed from peer for channel {}", log_bytes!(self.channel_id()));
 +
                Ok((channel_monitor, self.funding_transaction.as_ref().cloned().unwrap()))
        }
  
 -      pub fn funding_locked(&mut self, msg: &msgs::FundingLocked) -> Result<(), ChannelError> {
 +      pub fn funding_locked<L: Deref>(&mut self, msg: &msgs::FundingLocked, logger: &L) -> Result<(), ChannelError> where L::Target: Logger {
                if self.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
 -                      return Err(ChannelError::Close("Peer sent funding_locked when we needed a channel_reestablish".to_owned()));
 +                      self.workaround_lnd_bug_4006 = Some(msg.clone());
 +                      return Err(ChannelError::Ignore("Peer sent funding_locked when we needed a channel_reestablish. The peer is likely lnd, see https://github.com/lightningnetwork/lnd/issues/4006".to_owned()));
                }
  
                let non_shutdown_state = self.channel_state & (!MULTI_STATE_FLAGS);
  
                self.counterparty_prev_commitment_point = self.counterparty_cur_commitment_point;
                self.counterparty_cur_commitment_point = Some(msg.next_per_commitment_point);
 +
 +              log_info!(logger, "Received funding_locked from peer for channel {}", log_bytes!(self.channel_id()));
 +
                Ok(())
        }
  
                        if pending_remote_value_msat - msg.amount_msat - chan_reserve_msat < remote_fee_cost_incl_stuck_buffer_msat {
                                // Note that if the pending_forward_status is not updated here, then it's because we're already failing
                                // the HTLC, i.e. its status is already set to failing.
 -                              log_info!(logger, "Attempting to fail HTLC due to fee spike buffer violation");
 +                              log_info!(logger, "Attempting to fail HTLC due to fee spike buffer violation in channel {}. Rebalancing is required.", log_bytes!(self.channel_id()));
                                pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x1000|7);
                        }
                } else {
                                let bitcoin_tx = trusted_tx.built_transaction();
                                let sighash = bitcoin_tx.get_sighash_all(&funding_script, self.channel_value_satoshis);
  
 -                              log_trace!(logger, "Checking commitment tx signature {} by key {} against tx {} (sighash {}) with redeemscript {}", log_bytes!(msg.signature.serialize_compact()[..]), log_bytes!(self.counterparty_funding_pubkey().serialize()), encode::serialize_hex(&bitcoin_tx.transaction), log_bytes!(sighash[..]), encode::serialize_hex(&funding_script));
 +                              log_trace!(logger, "Checking commitment tx signature {} by key {} against tx {} (sighash {}) with redeemscript {} in channel {}",
 +                                      log_bytes!(msg.signature.serialize_compact()[..]),
 +                                      log_bytes!(self.counterparty_funding_pubkey().serialize()), encode::serialize_hex(&bitcoin_tx.transaction),
 +                                      log_bytes!(sighash[..]), encode::serialize_hex(&funding_script), log_bytes!(self.channel_id()));
                                if let Err(_) = self.secp_ctx.verify(&sighash, &msg.signature, &self.counterparty_funding_pubkey()) {
                                        return Err((None, ChannelError::Close("Invalid commitment tx signature from peer".to_owned())));
                                }
                                let htlc_tx = self.build_htlc_transaction(&commitment_txid, &htlc, true, &keys, feerate_per_kw);
                                let htlc_redeemscript = chan_utils::get_htlc_redeemscript(&htlc, &keys);
                                let htlc_sighash = hash_to_message!(&bip143::SigHashCache::new(&htlc_tx).signature_hash(0, &htlc_redeemscript, htlc.amount_msat / 1000, SigHashType::All)[..]);
 -                              log_trace!(logger, "Checking HTLC tx signature {} by key {} against tx {} (sighash {}) with redeemscript {}", log_bytes!(msg.htlc_signatures[idx].serialize_compact()[..]), log_bytes!(keys.countersignatory_htlc_key.serialize()), encode::serialize_hex(&htlc_tx), log_bytes!(htlc_sighash[..]), encode::serialize_hex(&htlc_redeemscript));
 +                              log_trace!(logger, "Checking HTLC tx signature {} by key {} against tx {} (sighash {}) with redeemscript {} in channel {}.",
 +                                      log_bytes!(msg.htlc_signatures[idx].serialize_compact()[..]), log_bytes!(keys.countersignatory_htlc_key.serialize()),
 +                                      encode::serialize_hex(&htlc_tx), log_bytes!(htlc_sighash[..]), encode::serialize_hex(&htlc_redeemscript), log_bytes!(self.channel_id()));
                                if let Err(_) = self.secp_ctx.verify(&htlc_sighash, &msg.htlc_signatures[idx], &keys.countersignatory_htlc_key) {
                                        return Err((None, ChannelError::Close("Invalid HTLC tx signature from peer".to_owned())));
                                }
                                Some(forward_info.clone())
                        } else { None };
                        if let Some(forward_info) = new_forward {
 +                              log_trace!(logger, "Updating HTLC {} to AwaitingRemoteRevokeToAnnounce due to commitment_signed in channel {}.",
 +                                      log_bytes!(htlc.payment_hash.0), log_bytes!(self.channel_id));
                                htlc.state = InboundHTLCState::AwaitingRemoteRevokeToAnnounce(forward_info);
                                need_commitment = true;
                        }
                        if let Some(fail_reason) = if let &mut OutboundHTLCState::RemoteRemoved(ref mut fail_reason) = &mut htlc.state {
                                Some(fail_reason.take())
                        } else { None } {
 +                              log_trace!(logger, "Updating HTLC {} to AwaitingRemoteRevokeToRemove due to commitment_signed in channel {}.",
 +                                      log_bytes!(htlc.payment_hash.0), log_bytes!(self.channel_id));
                                htlc.state = OutboundHTLCState::AwaitingRemoteRevokeToRemove(fail_reason);
                                need_commitment = true;
                        }
                                self.latest_monitor_update_id = monitor_update.update_id;
                                monitor_update.updates.append(&mut additional_update.updates);
                        }
 +                      log_debug!(logger, "Received valid commitment_signed from peer in channel {}, updated HTLC state but awaiting a monitor update resolution to reply.",
 +                              log_bytes!(self.channel_id));
                        // TODO: Call maybe_propose_first_closing_signed on restoration (or call it here and
                        // re-send the message on restoration)
                        return Err((Some(monitor_update), ChannelError::Ignore("Previous monitor update failure prevented generation of RAA".to_owned())));
                        (None, self.maybe_propose_first_closing_signed(fee_estimator))
                } else { (None, None) };
  
 +              log_debug!(logger, "Received valid commitment_signed from peer in channel {}, updating HTLC state and responding with{} a revoke_and_ack.",
 +                      log_bytes!(self.channel_id()), if commitment_signed.is_some() { " our own commitment_signed and" } else { "" });
 +
                Ok((msgs::RevokeAndACK {
                        channel_id: self.channel_id,
                        per_commitment_secret,
        fn free_holding_cell_htlcs<L: Deref>(&mut self, logger: &L) -> Result<(Option<(msgs::CommitmentUpdate, ChannelMonitorUpdate)>, Vec<(HTLCSource, PaymentHash)>), ChannelError> where L::Target: Logger {
                assert_eq!(self.channel_state & ChannelState::MonitorUpdateFailed as u32, 0);
                if self.holding_cell_htlc_updates.len() != 0 || self.holding_cell_update_fee.is_some() {
 -                      log_trace!(logger, "Freeing holding cell with {} HTLC updates{}", self.holding_cell_htlc_updates.len(), if self.holding_cell_update_fee.is_some() { " and a fee update" } else { "" });
 +                      log_trace!(logger, "Freeing holding cell with {} HTLC updates{} in channel {}", self.holding_cell_htlc_updates.len(),
 +                              if self.holding_cell_update_fee.is_some() { " and a fee update" } else { "" }, log_bytes!(self.channel_id()));
  
                        let mut monitor_update = ChannelMonitorUpdate {
                                update_id: self.latest_monitor_update_id + 1, // We don't increment this yet!
                                                        Err(e) => {
                                                                match e {
                                                                        ChannelError::Ignore(ref msg) => {
 -                                                                              log_info!(logger, "Failed to send HTLC with payment_hash {} due to {}", log_bytes!(payment_hash.0), msg);
 +                                                                              log_info!(logger, "Failed to send HTLC with payment_hash {} due to {} in channel {}",
 +                                                                                      log_bytes!(payment_hash.0), msg, log_bytes!(self.channel_id()));
                                                                                // If we fail to send here, then this HTLC should
                                                                                // be failed backwards. Failing to send here
                                                                                // indicates that this HTLC may keep being put back
                        self.latest_monitor_update_id = monitor_update.update_id;
                        monitor_update.updates.append(&mut additional_update.updates);
  
 +                      log_debug!(logger, "Freeing holding cell in channel {} resulted in {}{} HTLCs added, {} HTLCs fulfilled, and {} HTLCs failed.",
 +                              log_bytes!(self.channel_id()), if update_fee.is_some() { "a fee update, " } else { "" },
 +                              update_add_htlcs.len(), update_fulfill_htlcs.len(), update_fail_htlcs.len());
 +
                        Ok((Some((msgs::CommitmentUpdate {
                                update_add_htlcs,
                                update_fulfill_htlcs,
                self.counterparty_cur_commitment_point = Some(msg.next_per_commitment_point);
                self.cur_counterparty_commitment_transaction_number -= 1;
  
 -              log_trace!(logger, "Updating HTLCs on receipt of RAA...");
 +              log_trace!(logger, "Updating HTLCs on receipt of RAA in channel {}...", log_bytes!(self.channel_id()));
                let mut to_forward_infos = Vec::new();
                let mut revoked_htlcs = Vec::new();
                let mut update_fail_htlcs = Vec::new();
                        });
                        for htlc in pending_inbound_htlcs.iter_mut() {
                                let swap = if let &InboundHTLCState::AwaitingRemoteRevokeToAnnounce(_) = &htlc.state {
 -                                      log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce {} to Committed", log_bytes!(htlc.payment_hash.0));
                                        true
                                } else if let &InboundHTLCState::AwaitingAnnouncedRemoteRevoke(_) = &htlc.state {
 -                                      log_trace!(logger, " ...promoting inbound AwaitingAnnouncedRemoteRevoke {} to Committed", log_bytes!(htlc.payment_hash.0));
                                        true
                                } else { false };
                                if swap {
                                        mem::swap(&mut state, &mut htlc.state);
  
                                        if let InboundHTLCState::AwaitingRemoteRevokeToAnnounce(forward_info) = state {
 +                                              log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce {} to AwaitingAnnouncedRemoteRevoke", log_bytes!(htlc.payment_hash.0));
                                                htlc.state = InboundHTLCState::AwaitingAnnouncedRemoteRevoke(forward_info);
                                                require_commitment = true;
                                        } else if let InboundHTLCState::AwaitingAnnouncedRemoteRevoke(forward_info) = state {
                                                match forward_info {
                                                        PendingHTLCStatus::Fail(fail_msg) => {
 +                                                              log_trace!(logger, " ...promoting inbound AwaitingAnnouncedRemoteRevoke {} to LocalRemoved due to PendingHTLCStatus indicating failure", log_bytes!(htlc.payment_hash.0));
                                                                require_commitment = true;
                                                                match fail_msg {
                                                                        HTLCFailureMsg::Relay(msg) => {
                                                                }
                                                        },
                                                        PendingHTLCStatus::Forward(forward_info) => {
 +                                                              log_trace!(logger, " ...promoting inbound AwaitingAnnouncedRemoteRevoke {} to Committed", log_bytes!(htlc.payment_hash.0));
                                                                to_forward_infos.push((forward_info, htlc.htlc_id));
                                                                htlc.state = InboundHTLCState::Committed;
                                                        }
                        }
                        self.monitor_pending_forwards.append(&mut to_forward_infos);
                        self.monitor_pending_failures.append(&mut revoked_htlcs);
 +                      log_debug!(logger, "Received a valid revoke_and_ack for channel {} but awaiting a monitor update resolution to reply.", log_bytes!(self.channel_id()));
                        return Ok((None, Vec::new(), Vec::new(), None, monitor_update, Vec::new()))
                }
  
                                        self.latest_monitor_update_id = monitor_update.update_id;
                                        monitor_update.updates.append(&mut additional_update.updates);
  
 +                                      log_debug!(logger, "Received a valid revoke_and_ack for channel {}. Responding with a commitment update with {} HTLCs failed.",
 +                                              log_bytes!(self.channel_id()), update_fail_htlcs.len() + update_fail_malformed_htlcs.len());
                                        Ok((Some(msgs::CommitmentUpdate {
                                                update_add_htlcs: Vec::new(),
                                                update_fulfill_htlcs: Vec::new(),
                                                commitment_signed
                                        }), to_forward_infos, revoked_htlcs, None, monitor_update, htlcs_to_fail))
                                } else {
 +                                      log_debug!(logger, "Received a valid revoke_and_ack for channel {} with no reply necessary.", log_bytes!(self.channel_id()));
                                        Ok((None, to_forward_infos, revoked_htlcs, self.maybe_propose_first_closing_signed(fee_estimator), monitor_update, htlcs_to_fail))
                                }
                        }
                }
  
                self.channel_state |= ChannelState::PeerDisconnected as u32;
 -              log_debug!(logger, "Peer disconnection resulted in {} remote-announced HTLC drops on channel {}", inbound_drop_count, log_bytes!(self.channel_id()));
 +              log_trace!(logger, "Peer disconnection resulted in {} remote-announced HTLC drops on channel {}", inbound_drop_count, log_bytes!(self.channel_id()));
        }
  
        /// Indicates that a ChannelMonitor update failed to be stored by the client and further
                self.monitor_pending_revoke_and_ack = false;
                self.monitor_pending_commitment_signed = false;
                let order = self.resend_order.clone();
 -              log_trace!(logger, "Restored monitor updating resulting in {}{} commitment update and {} RAA, with {} first",
 -                      if funding_broadcastable.is_some() { "a funding broadcastable, " } else { "" },
 -                      if commitment_update.is_some() { "a" } else { "no" },
 -                      if raa.is_some() { "an" } else { "no" },
 +              log_debug!(logger, "Restored monitor updating in channel {} resulting in {}{} commitment update and {} RAA, with {} first",
 +                      log_bytes!(self.channel_id()), if funding_broadcastable.is_some() { "a funding broadcastable, " } else { "" },
 +                      if commitment_update.is_some() { "a" } else { "no" }, if raa.is_some() { "an" } else { "no" },
                        match order { RAACommitmentOrder::CommitmentFirst => "commitment", RAACommitmentOrder::RevokeAndACKFirst => "RAA"});
                (raa, commitment_update, order, forwards, failures, funding_broadcastable, funding_locked)
        }
                        }
                }
  
 -              log_trace!(logger, "Regenerated latest commitment update with {} update_adds, {} update_fulfills, {} update_fails, and {} update_fail_malformeds",
 -                              update_add_htlcs.len(), update_fulfill_htlcs.len(), update_fail_htlcs.len(), update_fail_malformed_htlcs.len());
 +              log_trace!(logger, "Regenerated latest commitment update in channel {} with {} update_adds, {} update_fulfills, {} update_fails, and {} update_fail_malformeds",
 +                              log_bytes!(self.channel_id()), update_add_htlcs.len(), update_fulfill_htlcs.len(), update_fail_htlcs.len(), update_fail_malformed_htlcs.len());
                msgs::CommitmentUpdate {
                        update_add_htlcs, update_fulfill_htlcs, update_fail_htlcs, update_fail_malformed_htlcs,
                        update_fee: None,
        /// is_usable() and considers things like the channel being temporarily disabled.
        /// Allowed in any state (including after shutdown)
        pub fn is_live(&self) -> bool {
-               self.is_usable() && (self.channel_state & (ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateFailed as u32) == 0)
+               self.is_usable() && (self.channel_state & (ChannelState::PeerDisconnected as u32) == 0)
        }
  
        /// Returns true if this channel has been marked as awaiting a monitor update to move forward.
                                        // send it immediately instead of waiting for a best_block_updated call (which
                                        // may have already happened for this block).
                                        if let Some(funding_locked) = self.check_get_funding_locked(height) {
 +                                              log_info!(logger, "Sending a funding_locked to our peer for channel {}", log_bytes!(self.channel_id));
                                                return Ok(Some(funding_locked));
                                        }
                                }
                                for inp in tx.input.iter() {
                                        if inp.previous_output == funding_txo.into_bitcoin_outpoint() {
 -                                              log_trace!(logger, "Detected channel-closing tx {} spending {}:{}, closing channel {}", tx.txid(), inp.previous_output.txid, inp.previous_output.vout, log_bytes!(self.channel_id()));
 +                                              log_info!(logger, "Detected channel-closing tx {} spending {}:{}, closing channel {}", tx.txid(), inp.previous_output.txid, inp.previous_output.vout, log_bytes!(self.channel_id()));
                                                return Err(msgs::ErrorMessage {
                                                        channel_id: self.channel_id(),
                                                        data: "Commitment or closing transaction was confirmed on chain.".to_owned()
        ///
        /// May return some HTLCs (and their payment_hash) which have timed out and should be failed
        /// back.
 -      pub fn best_block_updated(&mut self, height: u32, highest_header_time: u32) -> Result<(Option<msgs::FundingLocked>, Vec<(HTLCSource, PaymentHash)>), msgs::ErrorMessage> {
 +      pub fn best_block_updated<L: Deref>(&mut self, height: u32, highest_header_time: u32, logger: &L)
 +                      -> Result<(Option<msgs::FundingLocked>, Vec<(HTLCSource, PaymentHash)>), msgs::ErrorMessage> where L::Target: Logger {
                let mut timed_out_htlcs = Vec::new();
                let unforwarded_htlc_cltv_limit = height + HTLC_FAIL_BACK_BUFFER;
                self.holding_cell_htlc_updates.retain(|htlc_update| {
                self.update_time_counter = cmp::max(self.update_time_counter, highest_header_time);
  
                if let Some(funding_locked) = self.check_get_funding_locked(height) {
 +                      log_info!(logger, "Sending a funding_locked to our peer for channel {}", log_bytes!(self.channel_id));
                        return Ok((Some(funding_locked), timed_out_htlcs));
                }
  
        /// Indicates the funding transaction is no longer confirmed in the main chain. This may
        /// force-close the channel, but may also indicate a harmless reorganization of a block or two
        /// before the channel has reached funding_locked and we can just wait for more blocks.
 -      pub fn funding_transaction_unconfirmed(&mut self) -> Result<(), msgs::ErrorMessage> {
 +      pub fn funding_transaction_unconfirmed<L: Deref>(&mut self, logger: &L) -> Result<(), msgs::ErrorMessage> where L::Target: Logger {
                if self.funding_tx_confirmation_height != 0 {
                        // We handle the funding disconnection by calling best_block_updated with a height one
                        // below where our funding was connected, implying a reorg back to conf_height - 1.
                        // larger. If we don't know that time has moved forward, we can just set it to the last
                        // time we saw and it will be ignored.
                        let best_time = self.update_time_counter;
 -                      match self.best_block_updated(reorg_height, best_time) {
 +                      match self.best_block_updated(reorg_height, best_time, logger) {
                                Ok((funding_locked, timed_out_htlcs)) => {
                                        assert!(funding_locked.is_none(), "We can't generate a funding with 0 confirmations?");
                                        assert!(timed_out_htlcs.is_empty(), "We can't have accepted HTLCs with a timeout before our funding confirmation?");
                let dummy_pubkey = PublicKey::from_slice(&pk).unwrap();
                let data_loss_protect = if self.cur_counterparty_commitment_transaction_number + 1 < INITIAL_COMMITMENT_NUMBER {
                        let remote_last_secret = self.commitment_secrets.get_secret(self.cur_counterparty_commitment_transaction_number + 2).unwrap();
 -                      log_trace!(logger, "Enough info to generate a Data Loss Protect with per_commitment_secret {}", log_bytes!(remote_last_secret));
 +                      log_trace!(logger, "Enough info to generate a Data Loss Protect with per_commitment_secret {} for channel {}", log_bytes!(remote_last_secret), log_bytes!(self.channel_id()));
                        OptionalField::Present(DataLossProtect {
                                your_last_per_commitment_secret: remote_last_secret,
                                my_current_per_commitment_point: dummy_pubkey
                        })
                } else {
 -                      log_info!(logger, "Sending a data_loss_protect with no previous remote per_commitment_secret");
 +                      log_info!(logger, "Sending a data_loss_protect with no previous remote per_commitment_secret for channel {}", log_bytes!(self.channel_id()));
                        OptionalField::Present(DataLossProtect {
                                your_last_per_commitment_secret: [0;32],
                                my_current_per_commitment_point: dummy_pubkey,
  
        /// Adds a pending outbound HTLC to this channel; note that you probably want
        /// send_htlc_and_commit instead, as you'll want both messages at once.
-       /// This returns an option instead of a pure UpdateAddHTLC as we may be in a state where we are
-       /// waiting on the remote peer to send us a revoke_and_ack during which time we cannot add new
-       /// HTLCs on the wire or we wouldn't be able to determine what they actually ACK'ed.
-       /// You MUST call send_commitment prior to any other calls on this Channel
+       ///
+       /// This returns an optional UpdateAddHTLC as we may be in a state where we cannot add HTLCs on
+       /// the wire:
+       /// * In cases where we're waiting on the remote peer to send us a revoke_and_ack, we
+       ///   wouldn't be able to determine what they actually ACK'ed if we have two sets of updates
+       ///   awaiting ACK.
+       /// * In cases where we're marked MonitorUpdateFailed, we cannot commit to a new state as we
+       ///   may not yet have sent the previous commitment update messages and will need to regenerate
+       ///   them.
+       ///
+       /// You MUST call send_commitment prior to calling any other methods on this Channel!
+       ///
        /// If an Err is returned, it's a ChannelError::Ignore!
        pub fn send_htlc(&mut self, amount_msat: u64, payment_hash: PaymentHash, cltv_expiry: u32, source: HTLCSource, onion_routing_packet: msgs::OnionPacket) -> Result<Option<msgs::UpdateAddHTLC>, ChannelError> {
                if (self.channel_state & (ChannelState::ChannelFunded as u32 | BOTH_SIDES_SHUTDOWN_MASK)) != (ChannelState::ChannelFunded as u32) {
                        return Err(ChannelError::Ignore(format!("Cannot send less than their minimum HTLC value ({})", self.counterparty_htlc_minimum_msat)));
                }
  
-               if (self.channel_state & (ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateFailed as u32)) != 0 {
+               if (self.channel_state & (ChannelState::PeerDisconnected as u32)) != 0 {
                        // Note that this should never really happen: being !is_live() on receipt of an
                        // incoming HTLC for relay will result in us rejecting the HTLC, and we won't allow
                        // the user to send directly into a !is_live() channel. However, if we
                        // disconnected during the time the previous hop was doing the commitment dance we may
                        // end up getting here after the forwarding delay. In any case, returning an
                        // IgnoreError will get ChannelManager to do the right thing and fail backwards now.
-                       return Err(ChannelError::Ignore("Cannot send an HTLC while disconnected/frozen for channel monitor update".to_owned()));
+                       return Err(ChannelError::Ignore("Cannot send an HTLC while disconnected from channel counterparty".to_owned()));
                }
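
The holding-cell behaviour described in the doc comment above can be illustrated with a minimal, self-contained sketch (the type and field names below are hypothetical stand-ins, not the real Channel internals): while we are waiting on a revoke_and_ack or on a channel monitor update, a newly sent HTLC is queued in the holding cell and send_htlc reports this by returning Ok(None) instead of an update_add_htlc message.

// Illustrative only: models why send_htlc can return no message at all.
struct HoldingCellDemo {
    awaiting_remote_revoke: bool,
    monitor_update_failed: bool,
    holding_cell: Vec<u64>, // amounts (msat) of deferred HTLCs, for illustration
}

impl HoldingCellDemo {
    // Returns Some(amount) when an update_add_htlc could go on the wire now,
    // or None when the HTLC was queued in the holding cell instead.
    fn send_htlc(&mut self, amount_msat: u64) -> Option<u64> {
        if self.awaiting_remote_revoke || self.monitor_update_failed {
            self.holding_cell.push(amount_msat);
            return None;
        }
        Some(amount_msat)
    }
}

fn main() {
    let mut chan = HoldingCellDemo {
        awaiting_remote_revoke: false,
        monitor_update_failed: true, // channel paused on a pending monitor update
        holding_cell: Vec::new(),
    };
    // With this change the HTLC is deferred rather than failed back:
    assert_eq!(chan.send_htlc(1_000), None);
    assert_eq!(chan.holding_cell.len(), 1);
}
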
  
                let (outbound_htlc_count, htlc_outbound_value_msat) = self.get_outbound_pending_htlc_stats();
                }
  
                // Now update local state:
-               if (self.channel_state & (ChannelState::AwaitingRemoteRevoke as u32)) == (ChannelState::AwaitingRemoteRevoke as u32) {
+               if (self.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::MonitorUpdateFailed as u32)) != 0 {
                        self.holding_cell_htlc_updates.push(HTLCUpdateAwaitingACK::AddHTLC {
                                amount_msat,
                                payment_hash,
                        signature = res.0;
                        htlc_signatures = res.1;
  
 -                      log_trace!(logger, "Signed remote commitment tx {} (txid {}) with redeemscript {} -> {}",
 +                      log_trace!(logger, "Signed remote commitment tx {} (txid {}) with redeemscript {} -> {} in channel {}",
                                encode::serialize_hex(&counterparty_commitment_tx.0.trust().built_transaction().transaction),
 -                              &counterparty_commitment_txid,
 -                              encode::serialize_hex(&self.get_funding_redeemscript()),
 -                              log_bytes!(signature.serialize_compact()[..]));
 +                              &counterparty_commitment_txid, encode::serialize_hex(&self.get_funding_redeemscript()),
 +                              log_bytes!(signature.serialize_compact()[..]), log_bytes!(self.channel_id()));
  
                        for (ref htlc_sig, ref htlc) in htlc_signatures.iter().zip(htlcs) {
 -                              log_trace!(logger, "Signed remote HTLC tx {} with redeemscript {} with pubkey {} -> {}",
 +                              log_trace!(logger, "Signed remote HTLC tx {} with redeemscript {} with pubkey {} -> {} in channel {}",
                                        encode::serialize_hex(&chan_utils::build_htlc_transaction(&counterparty_commitment_txid, feerate_per_kw, self.get_holder_selected_contest_delay(), htlc, &counterparty_keys.broadcaster_delayed_payment_key, &counterparty_keys.revocation_key)),
                                        encode::serialize_hex(&chan_utils::get_htlc_redeemscript(&htlc, &counterparty_keys)),
                                        log_bytes!(counterparty_keys.broadcaster_htlc_key.serialize()),
 -                                      log_bytes!(htlc_sig.serialize_compact()[..]));
 +                                      log_bytes!(htlc_sig.serialize_compact()[..]), log_bytes!(self.channel_id()));
                        }
                }
  
@@@ -4661,7 -4614,7 +4669,7 @@@ impl<Signer: Sign> Writeable for Channe
  
                self.channel_update_status.write(writer)?;
  
 -              write_tlv_fields!(writer, {}, {(0, self.announcement_sigs)});
 +              write_tlv_fields!(writer, {(0, self.announcement_sigs, option)});
  
                Ok(())
        }
@@@ -4834,7 -4787,7 +4842,7 @@@ impl<'a, Signer: Sign, K: Deref> Readab
                let channel_update_status = Readable::read(reader)?;
  
                let mut announcement_sigs = None;
 -              read_tlv_fields!(reader, {}, {(0, announcement_sigs)});
 +              read_tlv_fields!(reader, {(0, announcement_sigs, option)});
  
                let mut secp_ctx = Secp256k1::new();
                secp_ctx.seeded_randomize(&keys_source.get_secure_random_bytes());
                        next_local_commitment_tx_fee_info_cached: Mutex::new(None),
                        #[cfg(any(test, feature = "fuzztarget"))]
                        next_remote_commitment_tx_fee_info_cached: Mutex::new(None),
 +
 +                      workaround_lnd_bug_4006: None,
                })
        }
  }
index e536d462c6eb5db3ba6a9f3dbfce3de1c78e46d6,292fa26733bcea467d69defe85c5e4e47b7f2804..267f97cfa1521232be709a73bbfe361cbd7fa0a3
@@@ -64,6 -64,7 +64,6 @@@ use util::errors::APIError
  use prelude::*;
  use core::{cmp, mem};
  use core::cell::RefCell;
 -use std::collections::{HashMap, hash_map, HashSet};
  use std::io::{Cursor, Read};
  use std::sync::{Arc, Condvar, Mutex, MutexGuard, RwLock, RwLockReadGuard};
  use core::sync::atomic::{AtomicUsize, Ordering};
@@@ -632,7 -633,7 +632,7 @@@ const CHECK_CLTV_EXPIRY_SANITY: u32 = M
  const CHECK_CLTV_EXPIRY_SANITY_2: u32 = MIN_CLTV_EXPIRY_DELTA as u32 - LATENCY_GRACE_PERIOD_BLOCKS - 2*CLTV_CLAIM_BUFFER;
  
  /// Details of a channel, as returned by ChannelManager::list_channels and ChannelManager::list_usable_channels
 -#[derive(Clone)]
 +#[derive(Clone, Debug, PartialEq)]
  pub struct ChannelDetails {
        /// The channel's ID (prior to funding transaction generation, this is a random 32 bytes,
        /// thereafter this is the txid of the funding transaction xor the funding transaction output).
        /// point after the funding transaction received enough confirmations).
        pub is_funding_locked: bool,
        /// True if the channel is (a) confirmed and funding_locked messages have been exchanged, (b)
-       /// the peer is connected, (c) no monitor update failure is pending resolution, and (d) the
-       /// channel is not currently negotiating a shutdown.
+       /// the peer is connected, and (c) the channel is not currently negotiating a shutdown.
        ///
        /// This is a strict superset of `is_funding_locked`.
        pub is_usable: bool,
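
Since ChannelDetails now derives Debug and PartialEq and, with this change, a pending monitor update no longer clears is_usable, callers can select channels purely on that flag. A trimmed-down, self-contained sketch of such a selection (ChannelSummary is a hypothetical stand-in for ChannelDetails):

#[derive(Clone, Debug, PartialEq)]
struct ChannelSummary {
    is_funding_locked: bool,
    is_usable: bool, // funding locked + peer connected + not shutting down
    outbound_capacity_msat: u64,
}

// Keep only channels we would consider sending or forwarding over.
fn routable(channels: &[ChannelSummary]) -> Vec<&ChannelSummary> {
    channels.iter().filter(|c| c.is_usable).collect()
}

fn main() {
    let chans = vec![
        ChannelSummary { is_funding_locked: true, is_usable: true, outbound_capacity_msat: 50_000 },
        ChannelSummary { is_funding_locked: true, is_usable: false, outbound_capacity_msat: 10_000 },
    ];
    assert_eq!(routable(&chans).len(), 1);
}
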
@@@ -776,7 -776,7 +775,7 @@@ macro_rules! convert_chan_err 
                                (false, MsgHandleErrInternal::from_chan_no_close(ChannelError::Ignore(msg), $channel_id.clone()))
                        },
                        ChannelError::Close(msg) => {
 -                              log_trace!($self.logger, "Closing channel {} due to close-required error: {}", log_bytes!($channel_id[..]), msg);
 +                              log_error!($self.logger, "Closing channel {} due to close-required error: {}", log_bytes!($channel_id[..]), msg);
                                if let Some(short_id) = $channel.get_short_channel_id() {
                                        $short_to_id.remove(&short_id);
                                }
@@@ -1243,7 -1243,7 +1242,7 @@@ impl<Signer: Sign, M: Deref, T: Deref, 
        #[inline]
        fn finish_force_close_channel(&self, shutdown_res: ShutdownResult) {
                let (monitor_update_option, mut failed_htlcs) = shutdown_res;
 -              log_trace!(self.logger, "Finishing force-closure of channel {} HTLCs to fail", failed_htlcs.len());
 +              log_debug!(self.logger, "Finishing force-closure of channel with {} HTLCs to fail", failed_htlcs.len());
                for htlc_source in failed_htlcs.drain(..) {
                        self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_source.0, &htlc_source.1, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() });
                }
                                return Err(APIError::ChannelUnavailable{err: "No such channel".to_owned()});
                        }
                };
 -              log_trace!(self.logger, "Force-closing channel {}", log_bytes!(channel_id[..]));
 +              log_error!(self.logger, "Force-closing channel {}", log_bytes!(channel_id[..]));
                self.finish_force_close_channel(chan.force_shutdown(true));
                if let Ok(update) = self.get_channel_update(&chan) {
                        let mut channel_state = self.channel_state.lock().unwrap();
                                                        return Err(APIError::MonitorUpdateFailed);
                                                }
  
 +                                              log_debug!(self.logger, "Sending payment along path resulted in a commitment_signed for channel {}", log_bytes!(chan.get().channel_id()));
                                                channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
                                                        node_id: path.first().unwrap().pubkey,
                                                        updates: msgs::CommitmentUpdate {
                                                                                        onion_packet, ..
                                                                                }, incoming_shared_secret, payment_hash, amt_to_forward, outgoing_cltv_value },
                                                                                prev_funding_outpoint } => {
 -                                                                      log_trace!(self.logger, "Adding HTLC from short id {} with payment_hash {} to channel with short id {} after delay", log_bytes!(payment_hash.0), prev_short_channel_id, short_chan_id);
 +                                                                      log_trace!(self.logger, "Adding HTLC from short id {} with payment_hash {} to channel with short id {} after delay", prev_short_channel_id, log_bytes!(payment_hash.0), short_chan_id);
                                                                        let htlc_source = HTLCSource::PreviousHopData(HTLCPreviousHopData {
                                                                                short_channel_id: prev_short_channel_id,
                                                                                outpoint: prev_funding_outpoint,
                                                                        panic!("short_channel_id != 0 should imply any pending_forward entries are of type Forward");
                                                                },
                                                                HTLCForwardInfo::FailHTLC { htlc_id, err_packet } => {
 -                                                                      log_trace!(self.logger, "Failing HTLC back to channel with short id {} after delay", short_chan_id);
 +                                                                      log_trace!(self.logger, "Failing HTLC back to channel with short id {} (backward HTLC ID {}) after delay", short_chan_id, htlc_id);
                                                                        match chan.get_mut().get_update_fail_htlc(htlc_id, err_packet, &self.logger) {
                                                                                Err(e) => {
                                                                                        if let ChannelError::Ignore(msg) = e {
 -                                                                                              log_trace!(self.logger, "Failed to fail backwards to short_id {}: {}", short_chan_id, msg);
 +                                                                                              log_trace!(self.logger, "Failed to fail HTLC with ID {} backwards to short_id {}: {}", htlc_id, short_chan_id, msg);
                                                                                        } else {
                                                                                                panic!("Stated return value requirements in get_update_fail_htlc() were not met");
                                                                                        }
                                                                handle_errors.push((chan.get().get_counterparty_node_id(), handle_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::CommitmentFirst, false, true)));
                                                                continue;
                                                        }
 +                                                      log_debug!(self.logger, "Forwarding HTLCs resulted in a commitment update with {} HTLCs added and {} HTLCs failed for channel {}",
 +                                                              add_htlc_msgs.len(), fail_htlc_msgs.len(), log_bytes!(chan.get().channel_id()));
                                                        channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
                                                                node_id: chan.get().get_counterparty_node_id(),
                                                                updates: msgs::CommitmentUpdate {
                                                }
                                        }
                                        if let Some((msg, commitment_signed)) = msgs {
 +                                              log_debug!(self.logger, "Claiming funds for HTLC with preimage {} resulted in a commitment_signed for channel {}",
 +                                                      log_bytes!(payment_preimage.0), log_bytes!(chan.get().channel_id()));
                                                channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
                                                        node_id: chan.get().get_counterparty_node_id(),
                                                        updates: msgs::CommitmentUpdate {
                                if chan.get().get_counterparty_node_id() != *counterparty_node_id {
                                        return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id));
                                }
 -                              try_chan_entry!(self, chan.get_mut().funding_locked(&msg), channel_state, chan);
 +                              try_chan_entry!(self, chan.get_mut().funding_locked(&msg, &self.logger), channel_state, chan);
                                if let Some(announcement_sigs) = self.get_announcement_sigs(chan.get()) {
                                        log_trace!(self.logger, "Sending announcement_signatures for {} in response to funding_locked", log_bytes!(chan.get().channel_id()));
                                        // If we see locking block before receiving remote funding_locked, we broadcast our
                Ok(())
        }
  
 -      fn internal_channel_update(&self, counterparty_node_id: &PublicKey, msg: &msgs::ChannelUpdate) -> Result<(), MsgHandleErrInternal> {
 +      /// Returns DoPersist if anything changed, otherwise either SkipPersist or an Err.
 +      fn internal_channel_update(&self, counterparty_node_id: &PublicKey, msg: &msgs::ChannelUpdate) -> Result<NotifyOption, MsgHandleErrInternal> {
                let mut channel_state_lock = self.channel_state.lock().unwrap();
                let channel_state = &mut *channel_state_lock;
                let chan_id = match channel_state.short_to_id.get(&msg.contents.short_channel_id) {
                        Some(chan_id) => chan_id.clone(),
                        None => {
                                // It's not a local channel
 -                              return Ok(())
 +                              return Ok(NotifyOption::SkipPersist)
                        }
                };
                match channel_state.by_id.entry(chan_id) {
                        hash_map::Entry::Occupied(mut chan) => {
                                if chan.get().get_counterparty_node_id() != *counterparty_node_id {
 -                                      // TODO: see issue #153, need a consistent behavior on obnoxious behavior from random node
 -                                      return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), chan_id));
 +                                      if chan.get().should_announce() {
 +                                              // If the channel_update is for one of our own public channels, some
 +                                              // other peer may simply be forwarding all its gossip to us. Don't provide
 +                                              // a scary-looking error message and return Ok instead.
 +                                              return Ok(NotifyOption::SkipPersist);
 +                                      }
 +                                      return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a channel_update for a channel from the wrong node - it shouldn't know about our private channels!".to_owned(), chan_id));
                                }
                                try_chan_entry!(self, chan.get_mut().channel_update(&msg), channel_state, chan);
                        },
                        hash_map::Entry::Vacant(_) => unreachable!()
                }
 -              Ok(())
 +              Ok(NotifyOption::DoPersist)
        }
  
        fn internal_channel_reestablish(&self, counterparty_node_id: &PublicKey, msg: &msgs::ChannelReestablish) -> Result<(), MsgHandleErrInternal> {
 -              let (htlcs_failed_forward, chan_restoration_res) = {
 +              let (htlcs_failed_forward, need_lnd_workaround, chan_restoration_res) = {
                        let mut channel_state_lock = self.channel_state.lock().unwrap();
                        let channel_state = &mut *channel_state_lock;
  
                                                        msg,
                                                });
                                        }
 -                                      (htlcs_failed_forward, handle_chan_restoration_locked!(self, channel_state_lock, channel_state, chan, revoke_and_ack, commitment_update, order, monitor_update_opt, Vec::new(), None, funding_locked))
 +                                      let need_lnd_workaround = chan.get_mut().workaround_lnd_bug_4006.take();
 +                                      (htlcs_failed_forward, need_lnd_workaround,
 +                                              handle_chan_restoration_locked!(self, channel_state_lock, channel_state, chan, revoke_and_ack, commitment_update, order, monitor_update_opt, Vec::new(), None, funding_locked))
                                },
                                hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id))
                        }
                };
                post_handle_chan_restoration!(self, chan_restoration_res);
                self.fail_holding_cell_htlcs(htlcs_failed_forward, msg.channel_id);
 +
 +              if let Some(funding_locked_msg) = need_lnd_workaround {
 +                      self.internal_funding_locked(counterparty_node_id, &funding_locked_msg)?;
 +              }
                Ok(())
        }
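
The lnd bug 4006 workaround wired in above boils down to: any funding_locked the channel had stashed is taken back out and re-handled once channel_reestablish processing is finished. A toy model of that stash-and-replay (hypothetical names, not the real handler):

// Illustrative only: replay of a deferred funding_locked after reestablish.
struct Chan {
    workaround_lnd_bug_4006: Option<&'static str>, // stand-in for the stored msgs::FundingLocked
}

fn handle_reestablish(chan: &mut Chan, replayed: &mut Vec<&'static str>) {
    // ... normal channel_reestablish handling would happen first ...
    if let Some(funding_locked) = chan.workaround_lnd_bug_4006.take() {
        // Re-handle the message as if the peer had just sent it.
        replayed.push(funding_locked);
    }
}

fn main() {
    let mut chan = Chan { workaround_lnd_bug_4006: Some("funding_locked") };
    let mut replayed = Vec::new();
    handle_reestablish(&mut chan, &mut replayed);
    assert_eq!(replayed, vec!["funding_locked"]);
}
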
  
                                                if let Err(_e) = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), monitor_update) {
                                                        unimplemented!();
                                                }
 +                                              log_debug!(self.logger, "Updating fee resulted in a commitment_signed for channel {}", log_bytes!(chan.get().channel_id()));
                                                channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
                                                        node_id: chan.get().get_counterparty_node_id(),
                                                        updates: msgs::CommitmentUpdate {
@@@ -3822,7 -3804,7 +3821,7 @@@ wher
                        *best_block = BestBlock::new(header.prev_blockhash, new_height)
                }
  
 -              self.do_chain_event(Some(new_height), |channel| channel.best_block_updated(new_height, header.time));
 +              self.do_chain_event(Some(new_height), |channel| channel.best_block_updated(new_height, header.time, &self.logger));
        }
  }
  
@@@ -3858,7 -3840,7 +3857,7 @@@ wher
  
                *self.best_block.write().unwrap() = BestBlock::new(block_hash, height);
  
 -              self.do_chain_event(Some(height), |channel| channel.best_block_updated(height, header.time));
 +              self.do_chain_event(Some(height), |channel| channel.best_block_updated(height, header.time, &self.logger));
  
                macro_rules! max_time {
                        ($timestamp: expr) => {
                self.do_chain_event(None, |channel| {
                        if let Some(funding_txo) = channel.get_funding_txo() {
                                if funding_txo.txid == *txid {
 -                                      channel.funding_transaction_unconfirmed().map(|_| (None, Vec::new()))
 +                                      channel.funding_transaction_unconfirmed(&self.logger).map(|_| (None, Vec::new()))
                                } else { Ok((None, Vec::new())) }
                        } else { Ok((None, Vec::new())) }
                });
@@@ -4117,13 -4099,8 +4116,13 @@@ impl<Signer: Sign, M: Deref , T: Deref 
        }
  
        fn handle_channel_update(&self, counterparty_node_id: &PublicKey, msg: &msgs::ChannelUpdate) {
 -              let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
 -              let _ = handle_error!(self, self.internal_channel_update(counterparty_node_id, msg), *counterparty_node_id);
 +              PersistenceNotifierGuard::optionally_notify(&self.total_consistency_lock, &self.persistence_notifier, || {
 +                      if let Ok(persist) = handle_error!(self, self.internal_channel_update(counterparty_node_id, msg), *counterparty_node_id) {
 +                              persist
 +                      } else {
 +                              NotifyOption::SkipPersist
 +                      }
 +              });
        }
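
The optionally_notify pattern above lets a gossip-only channel_update avoid waking the persister at all. A simplified, self-contained stand-in for that mechanism (the Notifier below mirrors the idea, not the real PersistenceNotifierGuard):

// Illustrative only: persistence is requested only when the handler reports a change.
#[derive(Debug, PartialEq)]
enum NotifyOption { DoPersist, SkipPersist }

struct Notifier {
    needs_persist: bool,
}

impl Notifier {
    fn optionally_notify<F: FnOnce() -> NotifyOption>(&mut self, handler: F) {
        if handler() == NotifyOption::DoPersist {
            self.needs_persist = true;
        }
    }
}

fn main() {
    let mut notifier = Notifier { needs_persist: false };
    // A channel_update about someone else's channel changes nothing for us:
    notifier.optionally_notify(|| NotifyOption::SkipPersist);
    assert!(!notifier.needs_persist);
    // An update for one of our own channels does:
    notifier.optionally_notify(|| NotifyOption::DoPersist);
    assert!(notifier.needs_persist);
}
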
  
        fn handle_channel_reestablish(&self, counterparty_node_id: &PublicKey, msg: &msgs::ChannelReestablish) {
@@@ -4343,22 -4320,22 +4342,22 @@@ const MIN_SERIALIZATION_VERSION: u8 = 1
  
  impl_writeable_tlv_based_enum!(PendingHTLCRouting,
        (0, Forward) => {
 -              (0, onion_packet),
 -              (2, short_channel_id),
 -      }, {}, {},
 +              (0, onion_packet, required),
 +              (2, short_channel_id, required),
 +      },
        (1, Receive) => {
 -              (0, payment_data),
 -              (2, incoming_cltv_expiry),
 -      }, {}, {}
 +              (0, payment_data, required),
 +              (2, incoming_cltv_expiry, required),
 +      }
  ;);
  
  impl_writeable_tlv_based!(PendingHTLCInfo, {
 -      (0, routing),
 -      (2, incoming_shared_secret),
 -      (4, payment_hash),
 -      (6, amt_to_forward),
 -      (8, outgoing_cltv_value)
 -}, {}, {});
 +      (0, routing, required),
 +      (2, incoming_shared_secret, required),
 +      (4, payment_hash, required),
 +      (6, amt_to_forward, required),
 +      (8, outgoing_cltv_value, required)
 +});
  
  impl_writeable_tlv_based_enum!(HTLCFailureMsg, ;
        (0, Relay),
@@@ -4370,58 -4347,60 +4369,58 @@@ impl_writeable_tlv_based_enum!(PendingH
  );
  
  impl_writeable_tlv_based!(HTLCPreviousHopData, {
 -      (0, short_channel_id),
 -      (2, outpoint),
 -      (4, htlc_id),
 -      (6, incoming_packet_shared_secret)
 -}, {}, {});
 +      (0, short_channel_id, required),
 +      (2, outpoint, required),
 +      (4, htlc_id, required),
 +      (6, incoming_packet_shared_secret, required)
 +});
  
  impl_writeable_tlv_based!(ClaimableHTLC, {
 -      (0, prev_hop),
 -      (2, value),
 -      (4, payment_data),
 -      (6, cltv_expiry),
 -}, {}, {});
 +      (0, prev_hop, required),
 +      (2, value, required),
 +      (4, payment_data, required),
 +      (6, cltv_expiry, required),
 +});
  
  impl_writeable_tlv_based_enum!(HTLCSource,
        (0, OutboundRoute) => {
 -              (0, session_priv),
 -              (2, first_hop_htlc_msat),
 -      }, {}, {
 -              (4, path),
 -      };
 +              (0, session_priv, required),
 +              (2, first_hop_htlc_msat, required),
 +              (4, path, vec_type),
 +      }, ;
        (1, PreviousHopData)
  );
  
  impl_writeable_tlv_based_enum!(HTLCFailReason,
        (0, LightningError) => {
 -              (0, err),
 -      }, {}, {},
 +              (0, err, required),
 +      },
        (1, Reason) => {
 -              (0, failure_code),
 -      }, {}, {
 -              (2, data),
 +              (0, failure_code, required),
 +              (2, data, vec_type),
        },
  ;);
  
  impl_writeable_tlv_based_enum!(HTLCForwardInfo,
        (0, AddHTLC) => {
 -              (0, forward_info),
 -              (2, prev_short_channel_id),
 -              (4, prev_htlc_id),
 -              (6, prev_funding_outpoint),
 -      }, {}, {},
 +              (0, forward_info, required),
 +              (2, prev_short_channel_id, required),
 +              (4, prev_htlc_id, required),
 +              (6, prev_funding_outpoint, required),
 +      },
        (1, FailHTLC) => {
 -              (0, htlc_id),
 -              (2, err_packet),
 -      }, {}, {},
 +              (0, htlc_id, required),
 +              (2, err_packet, required),
 +      },
  ;);
  
  impl_writeable_tlv_based!(PendingInboundPayment, {
 -      (0, payment_secret),
 -      (2, expiry_time),
 -      (4, user_payment_id),
 -      (6, payment_preimage),
 -      (8, min_value_msat),
 -}, {}, {});
 +      (0, payment_secret, required),
 +      (2, expiry_time, required),
 +      (4, user_payment_id, required),
 +      (6, payment_preimage, required),
 +      (8, min_value_msat, required),
 +});
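
In the new macro syntax each field carries an explicit qualifier: required fields must always appear in the TLV stream, option fields are written only when Some, and vec_type marks variable-length collections. As a rough, hand-rolled illustration of what required vs option means on the wire (simplified, not LDK's actual TLV serializer):

// Illustrative only: a required record is always written, an option record only when present.
fn write_tlv(out: &mut Vec<u8>, typ: u8, value: &[u8]) {
    out.push(typ);
    out.push(value.len() as u8); // assumes short values for simplicity
    out.extend_from_slice(value);
}

struct Example {
    payment_secret: [u8; 4],            // (0, payment_secret, required)
    announcement_sig: Option<[u8; 4]>,  // (2, announcement_sig, option)
}

fn serialize(e: &Example) -> Vec<u8> {
    let mut out = Vec::new();
    write_tlv(&mut out, 0, &e.payment_secret);
    if let Some(sig) = &e.announcement_sig {
        write_tlv(&mut out, 2, sig);
    }
    out
}

fn main() {
    let with_sig = Example { payment_secret: [1; 4], announcement_sig: Some([2; 4]) };
    let without = Example { payment_secret: [1; 4], announcement_sig: None };
    assert_eq!(serialize(&with_sig).len(), 12);
    assert_eq!(serialize(&without).len(), 6);
}
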
  
  impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> Writeable for ChannelManager<Signer, M, T, K, F, L>
        where M::Target: chain::Watch<Signer>,
                        session_priv.write(writer)?;
                }
  
 -              write_tlv_fields!(writer, {}, {});
 +              write_tlv_fields!(writer, {});
  
                Ok(())
        }
@@@ -4663,11 -4642,6 +4662,11 @@@ impl<'a, Signer: Sign, M: Deref, T: Der
                                                channel.get_cur_counterparty_commitment_transaction_number() < monitor.get_cur_counterparty_commitment_number() ||
                                                channel.get_latest_monitor_update_id() > monitor.get_latest_update_id() {
                                        // If the channel is ahead of the monitor, return InvalidValue:
 +                                      log_error!(args.logger, "A ChannelMonitor is stale compared to the current ChannelManager! This indicates a potentially-critical violation of the chain::Watch API!");
 +                                      log_error!(args.logger, " The ChannelMonitor for channel {} is at update_id {} but the ChannelManager is at update_id {}.",
 +                                              log_bytes!(channel.channel_id()), monitor.get_latest_update_id(), channel.get_latest_monitor_update_id());
 +                                      log_error!(args.logger, " The chain::Watch API *requires* that monitors are persisted durably before returning,");
 +                                      log_error!(args.logger, " client applications must ensure that ChannelMonitor data is always available and the latest to avoid funds loss!");
                                        return Err(DecodeError::InvalidValue);
                                } else if channel.get_cur_holder_commitment_transaction_number() > monitor.get_cur_holder_commitment_number() ||
                                                channel.get_revoked_counterparty_commitment_transaction_number() > monitor.get_min_seen_secret() ||
                                        by_id.insert(channel.channel_id(), channel);
                                }
                        } else {
 +                              log_error!(args.logger, "Missing ChannelMonitor for channel {} needed by ChannelManager.", log_bytes!(channel.channel_id()));
 +                              log_error!(args.logger, " The chain::Watch API *requires* that monitors are persisted durably before returning,");
 +                              log_error!(args.logger, " client applications must ensure that ChannelMonitor data is always available and the latest to avoid funds loss!");
                                return Err(DecodeError::InvalidValue);
                        }
                }
                        }
                }
  
 -              read_tlv_fields!(reader, {}, {});
 +              read_tlv_fields!(reader, {});
  
                let mut secp_ctx = Secp256k1::new();
                secp_ctx.seeded_randomize(&args.keys_manager.get_secure_random_bytes());
@@@ -4829,9 -4800,6 +4828,9 @@@ mod tests 
        use core::sync::atomic::{AtomicBool, Ordering};
        use std::thread;
        use core::time::Duration;
 +      use ln::functional_test_utils::*;
 +      use ln::features::InitFeatures;
 +      use ln::msgs::ChannelMessageHandler;
  
        #[test]
        fn test_wait_timeout() {
                        }
                }
        }
 +
 +      #[test]
 +      fn test_notify_limits() {
 +              // Check that a few cases which don't require re-persisting the ChannelManager do not,
 +              // in fact, signal that a fresh persistence is needed.
 +              let chanmon_cfgs = create_chanmon_cfgs(3);
 +              let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
 +              let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
 +              let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
 +
 +              let mut chan = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
 +
 +              // We check that the channel info each node holds doesn't change too early, even though
 +              // we try to deliver channel_update messages with new values
 +              chan.0.contents.fee_base_msat *= 2;
 +              chan.1.contents.fee_base_msat *= 2;
 +              let node_a_chan_info = nodes[0].node.list_channels()[0].clone();
 +              let node_b_chan_info = nodes[1].node.list_channels()[0].clone();
 +
 +              // The first two nodes (which opened a channel) should now require fresh persistence
 +              assert!(nodes[0].node.await_persistable_update_timeout(Duration::from_millis(1)));
 +              assert!(nodes[1].node.await_persistable_update_timeout(Duration::from_millis(1)));
 +              // ... but the last node should not.
 +              assert!(!nodes[2].node.await_persistable_update_timeout(Duration::from_millis(1)));
 +              // After persisting the first two nodes they should no longer need fresh persistence.
 +              assert!(!nodes[0].node.await_persistable_update_timeout(Duration::from_millis(1)));
 +              assert!(!nodes[1].node.await_persistable_update_timeout(Duration::from_millis(1)));
 +
 +              // Node 3, unrelated to the only channel, shouldn't care if it receives a channel_update
 +              // about the channel.
 +              nodes[2].node.handle_channel_update(&nodes[1].node.get_our_node_id(), &chan.0);
 +              nodes[2].node.handle_channel_update(&nodes[1].node.get_our_node_id(), &chan.1);
 +              assert!(!nodes[2].node.await_persistable_update_timeout(Duration::from_millis(1)));
 +
 +              // The nodes which are a party to the channel should also ignore messages from unrelated
 +              // parties.
 +              nodes[0].node.handle_channel_update(&nodes[2].node.get_our_node_id(), &chan.0);
 +              nodes[0].node.handle_channel_update(&nodes[2].node.get_our_node_id(), &chan.1);
 +              nodes[1].node.handle_channel_update(&nodes[2].node.get_our_node_id(), &chan.0);
 +              nodes[1].node.handle_channel_update(&nodes[2].node.get_our_node_id(), &chan.1);
 +              assert!(!nodes[0].node.await_persistable_update_timeout(Duration::from_millis(1)));
 +              assert!(!nodes[1].node.await_persistable_update_timeout(Duration::from_millis(1)));
 +
 +              // At this point the channel info given by peers should still be the same.
 +              assert_eq!(nodes[0].node.list_channels()[0], node_a_chan_info);
 +              assert_eq!(nodes[1].node.list_channels()[0], node_b_chan_info);
 +      }
  }
  
  #[cfg(all(any(test, feature = "_test_utils"), feature = "unstable"))]