Merge pull request #1866 from TheBlueMatt/2022-11-noisy-no-graph
author valentinewallace <valentinewallace@users.noreply.github.com>
Tue, 22 Nov 2022 19:55:05 +0000 (14:55 -0500)
committer GitHub <noreply@github.com>
Tue, 22 Nov 2022 19:55:05 +0000 (14:55 -0500)
Drop verbose log entries in BP when no network graph is provided

lightning/src/ln/chanmon_update_fail_tests.rs
lightning/src/ln/channel.rs
lightning/src/ln/channelmanager.rs
lightning/src/ln/functional_test_utils.rs
lightning/src/ln/reload_tests.rs

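The test diffs below all follow the same pattern: assertions on specific ChannelManager log strings are dropped, and the tests instead rely on observable state checks that no messages are generated while a monitor update is in progress. A minimal illustrative fragment of that pattern, assuming the crate's functional_test_utils harness (the `nodes`, `chanmon_cfgs`, and `send_event` bindings come from that setup; this is an excerpt, not standalone code):

    // Hypothetical excerpt; in the real tests this sits inside a full
    // functional test after channel setup.
    chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
    nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &send_event.commitment_msg);
    // Previously the test also asserted a "Failed to update ChannelMonitor"
    // log entry here; after this change it only checks observable state: no
    // outbound messages while the monitor update is pending, and exactly one
    // monitor update was queued.
    assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
    check_added_monitors!(nodes[1], 1);

Where the order of checks changes (e.g. in test_monitor_update_raa_while_paused), the empty-msg_events assertion simply moves to after check_added_monitors!.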
index 9b0e303676254fc89daaabe3a90322901fa9a5f2..e78b8999f7ec88c6047b5e5297c75a4b9d332fac 100644 (file)
@@ -324,7 +324,6 @@ fn do_test_monitor_temporary_update_fail(disconnect_count: usize) {
                                nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), commitment_signed);
                                check_added_monitors!(nodes[0], 1);
                                assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
-                               nodes[0].logger.assert_log("lightning::ln::channelmanager".to_string(), "Previous monitor update failure prevented generation of RAA".to_string(), 1);
                        }
 
                        (update_fulfill_htlcs[0].clone(), commitment_signed.clone())
@@ -633,7 +632,6 @@ fn test_monitor_update_fail_cs() {
        chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
        nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &send_event.commitment_msg);
        assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
-       nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Failed to update ChannelMonitor".to_string(), 1);
        check_added_monitors!(nodes[1], 1);
        assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
 
@@ -664,7 +662,6 @@ fn test_monitor_update_fail_cs() {
                        chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
                        nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &updates.commitment_signed);
                        assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
-                       nodes[0].logger.assert_log("lightning::ln::channelmanager".to_string(), "Failed to update ChannelMonitor".to_string(), 1);
                        check_added_monitors!(nodes[0], 1);
                        assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
                },
@@ -726,7 +723,6 @@ fn test_monitor_update_fail_no_rebroadcast() {
        chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
        nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &bs_raa);
        assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
-       nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Failed to update ChannelMonitor".to_string(), 1);
        assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
        assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
        check_added_monitors!(nodes[1], 1);
@@ -784,12 +780,11 @@ fn test_monitor_update_raa_while_paused() {
        nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &send_event_2.msgs[0]);
        nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &send_event_2.commitment_msg);
        assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
-       nodes[0].logger.assert_log("lightning::ln::channelmanager".to_string(), "Failed to update ChannelMonitor".to_string(), 1);
        check_added_monitors!(nodes[0], 1);
+       assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
 
        nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_raa);
        assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
-       nodes[0].logger.assert_log("lightning::ln::channelmanager".to_string(), "Existing pending monitor update prevented responses to RAA".to_string(), 1);
        check_added_monitors!(nodes[0], 1);
 
        chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
@@ -875,7 +870,6 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) {
        chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
        nodes[1].node.handle_revoke_and_ack(&nodes[2].node.get_our_node_id(), &bs_revoke_and_ack);
        assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
-       nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Failed to update ChannelMonitor".to_string(), 1);
        assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
        assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
        check_added_monitors!(nodes[1], 1);
@@ -911,8 +905,6 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) {
                nodes[1].node.handle_commitment_signed(&nodes[2].node.get_our_node_id(), &send_event.commitment_msg);
                check_added_monitors!(nodes[1], 1);
                assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
-               nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Previous monitor update failure prevented generation of RAA".to_string(), 1);
-               assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
                (Some(payment_preimage_4), Some(payment_hash_4))
        } else { (None, None) };
 
@@ -1136,7 +1128,7 @@ fn test_monitor_update_fail_reestablish() {
                get_event_msg!(nodes[0], MessageSendEvent::SendChannelUpdate, nodes[1].node.get_our_node_id())
                        .contents.flags & 2, 0); // The "disabled" bit should be unset as we just reconnected
 
-       nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Failed to update ChannelMonitor".to_string(), 1);
+       nodes[1].node.get_and_clear_pending_msg_events(); // Free the holding cell
        check_added_monitors!(nodes[1], 1);
 
        nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
@@ -1228,12 +1220,11 @@ fn raa_no_response_awaiting_raa_state() {
        nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
        nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event.commitment_msg);
        assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
-       nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Failed to update ChannelMonitor".to_string(), 1);
        check_added_monitors!(nodes[1], 1);
+       assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
 
        nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa);
        assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
-       nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Existing pending monitor update prevented responses to RAA".to_string(), 1);
        check_added_monitors!(nodes[1], 1);
 
        chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
@@ -1331,7 +1322,6 @@ fn claim_while_disconnected_monitor_update_fail() {
 
        nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &as_reconnect);
        let _bs_channel_update = get_event_msg!(nodes[1], MessageSendEvent::SendChannelUpdate, nodes[0].node.get_our_node_id());
-       nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Failed to update ChannelMonitor".to_string(), 1);
        check_added_monitors!(nodes[1], 1);
        assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
 
@@ -1348,7 +1338,6 @@ fn claim_while_disconnected_monitor_update_fail() {
        nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_updates.commitment_signed);
        check_added_monitors!(nodes[1], 1);
        assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
-       nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Previous monitor update failure prevented generation of RAA".to_string(), 1);
        // Note that nodes[1] not updating monitor here is OK - it wont take action on the new HTLC
        // until we've channel_monitor_update'd and updated for the new commitment transaction.
 
@@ -1440,7 +1429,6 @@ fn monitor_failed_no_reestablish_response() {
        nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
        nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event.commitment_msg);
        assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
-       nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Failed to update ChannelMonitor".to_string(), 1);
        check_added_monitors!(nodes[1], 1);
 
        // Now disconnect and immediately reconnect, delivering the channel_reestablish while nodes[1]
@@ -1540,7 +1528,6 @@ fn first_message_on_recv_ordering() {
        // to the next message also tests resetting the delivery order.
        nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa);
        assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
-       nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Failed to update ChannelMonitor".to_string(), 1);
        check_added_monitors!(nodes[1], 1);
 
        // Now deliver the update_add_htlc/commitment_signed for the second payment, which does need an
@@ -1550,7 +1537,6 @@ fn first_message_on_recv_ordering() {
        nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event.commitment_msg);
        check_added_monitors!(nodes[1], 1);
        assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
-       nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Previous monitor update failure prevented generation of RAA".to_string(), 1);
 
        chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
        let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
@@ -1599,7 +1585,7 @@ fn test_monitor_update_fail_claim() {
        chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
        nodes[1].node.claim_funds(payment_preimage_1);
        expect_payment_claimed!(nodes[1], payment_hash_1, 1_000_000);
-       nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Temporary failure claiming HTLC, treating as success: Failed to update ChannelMonitor".to_string(), 1);
+       assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
        check_added_monitors!(nodes[1], 1);
 
        // Note that at this point there is a pending commitment transaction update for A being held by
@@ -1730,7 +1716,6 @@ fn test_monitor_update_on_pending_forwards() {
        expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }]);
        check_added_monitors!(nodes[1], 1);
        assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
-       nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Failed to update ChannelMonitor".to_string(), 1);
 
        chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
        let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_1.2).unwrap().clone();
@@ -1791,9 +1776,7 @@ fn monitor_update_claim_fail_no_response() {
        expect_payment_claimed!(nodes[1], payment_hash_1, 1_000_000);
        check_added_monitors!(nodes[1], 1);
 
-       let events = nodes[1].node.get_and_clear_pending_msg_events();
-       assert_eq!(events.len(), 0);
-       nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Temporary failure claiming HTLC, treating as success: Failed to update ChannelMonitor".to_string(), 1);
+       assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
 
        chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
        let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
@@ -1841,9 +1824,8 @@ fn do_during_funding_monitor_fail(confirm_a_first: bool, restore_b_before_conf:
 
        chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
        nodes[0].node.handle_funding_signed(&nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id()));
-       assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
-       nodes[0].logger.assert_log("lightning::ln::channelmanager".to_string(), "Failed to update ChannelMonitor".to_string(), 1);
        check_added_monitors!(nodes[0], 1);
+       assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
        assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
        chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
        let (outpoint, latest_update, _) = nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
index 95ae6987710d904a5508de4960df008340ae1779..c555d6cabd1d28f9ded7a247afebebec3976706a 100644 (file)
@@ -439,8 +439,6 @@ pub(super) struct ReestablishResponses {
        pub raa: Option<msgs::RevokeAndACK>,
        pub commitment_update: Option<msgs::CommitmentUpdate>,
        pub order: RAACommitmentOrder,
-       pub mon_update: Option<ChannelMonitorUpdate>,
-       pub holding_cell_failed_htlcs: Vec<(HTLCSource, PaymentHash)>,
        pub announcement_sigs: Option<msgs::AnnouncementSignatures>,
        pub shutdown_msg: Option<msgs::Shutdown>,
 }
@@ -3964,9 +3962,8 @@ impl<Signer: Sign> Channel<Signer> {
                                // Short circuit the whole handler as there is nothing we can resend them
                                return Ok(ReestablishResponses {
                                        channel_ready: None,
-                                       raa: None, commitment_update: None, mon_update: None,
+                                       raa: None, commitment_update: None,
                                        order: RAACommitmentOrder::CommitmentFirst,
-                                       holding_cell_failed_htlcs: Vec::new(),
                                        shutdown_msg, announcement_sigs,
                                });
                        }
@@ -3979,9 +3976,8 @@ impl<Signer: Sign> Channel<Signer> {
                                        next_per_commitment_point,
                                        short_channel_id_alias: Some(self.outbound_scid_alias),
                                }),
-                               raa: None, commitment_update: None, mon_update: None,
+                               raa: None, commitment_update: None,
                                order: RAACommitmentOrder::CommitmentFirst,
-                               holding_cell_failed_htlcs: Vec::new(),
                                shutdown_msg, announcement_sigs,
                        });
                }
@@ -4024,46 +4020,12 @@ impl<Signer: Sign> Channel<Signer> {
                                log_debug!(logger, "Reconnected channel {} with no loss", log_bytes!(self.channel_id()));
                        }
 
-                       if (self.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::MonitorUpdateInProgress as u32)) == 0 {
-                               // We're up-to-date and not waiting on a remote revoke (if we are our
-                               // channel_reestablish should result in them sending a revoke_and_ack), but we may
-                               // have received some updates while we were disconnected. Free the holding cell
-                               // now!
-                               match self.free_holding_cell_htlcs(logger) {
-                                       Err(ChannelError::Close(msg)) => Err(ChannelError::Close(msg)),
-                                       Err(ChannelError::Warn(_)) | Err(ChannelError::Ignore(_)) =>
-                                               panic!("Got non-channel-failing result from free_holding_cell_htlcs"),
-                                       Ok((Some((commitment_update, monitor_update)), holding_cell_failed_htlcs)) => {
-                                               Ok(ReestablishResponses {
-                                                       channel_ready, shutdown_msg, announcement_sigs,
-                                                       raa: required_revoke,
-                                                       commitment_update: Some(commitment_update),
-                                                       order: self.resend_order.clone(),
-                                                       mon_update: Some(monitor_update),
-                                                       holding_cell_failed_htlcs,
-                                               })
-                                       },
-                                       Ok((None, holding_cell_failed_htlcs)) => {
-                                               Ok(ReestablishResponses {
-                                                       channel_ready, shutdown_msg, announcement_sigs,
-                                                       raa: required_revoke,
-                                                       commitment_update: None,
-                                                       order: self.resend_order.clone(),
-                                                       mon_update: None,
-                                                       holding_cell_failed_htlcs,
-                                               })
-                                       },
-                               }
-                       } else {
-                               Ok(ReestablishResponses {
-                                       channel_ready, shutdown_msg, announcement_sigs,
-                                       raa: required_revoke,
-                                       commitment_update: None,
-                                       order: self.resend_order.clone(),
-                                       mon_update: None,
-                                       holding_cell_failed_htlcs: Vec::new(),
-                               })
-                       }
+                       Ok(ReestablishResponses {
+                               channel_ready, shutdown_msg, announcement_sigs,
+                               raa: required_revoke,
+                               commitment_update: None,
+                               order: self.resend_order.clone(),
+                       })
                } else if msg.next_local_commitment_number == next_counterparty_commitment_number - 1 {
                        if required_revoke.is_some() {
                                log_debug!(logger, "Reconnected channel {} with lost outbound RAA and lost remote commitment tx", log_bytes!(self.channel_id()));
@@ -4075,9 +4037,8 @@ impl<Signer: Sign> Channel<Signer> {
                                self.monitor_pending_commitment_signed = true;
                                Ok(ReestablishResponses {
                                        channel_ready, shutdown_msg, announcement_sigs,
-                                       commitment_update: None, raa: None, mon_update: None,
+                                       commitment_update: None, raa: None,
                                        order: self.resend_order.clone(),
-                                       holding_cell_failed_htlcs: Vec::new(),
                                })
                        } else {
                                Ok(ReestablishResponses {
@@ -4085,8 +4046,6 @@ impl<Signer: Sign> Channel<Signer> {
                                        raa: required_revoke,
                                        commitment_update: Some(self.get_last_commitment_update(logger)),
                                        order: self.resend_order.clone(),
-                                       mon_update: None,
-                                       holding_cell_failed_htlcs: Vec::new(),
                                })
                        }
                } else {
index 3526726478887093f077a7c8bd79f465a2a940e5..b22dacdd686d1ceca2511c8b44046c14fa7e787f 100644 (file)
@@ -398,13 +398,6 @@ pub(super) enum RAACommitmentOrder {
 // Note this is only exposed in cfg(test):
 pub(super) struct ChannelHolder<Signer: Sign> {
        pub(super) by_id: HashMap<[u8; 32], Channel<Signer>>,
-       /// Map from payment hash to the payment data and any HTLCs which are to us and can be
-       /// failed/claimed by the user.
-       ///
-       /// Note that while this is held in the same mutex as the channels themselves, no consistency
-       /// guarantees are made about the channels given here actually existing anymore by the time you
-       /// go to read them!
-       claimable_htlcs: HashMap<PaymentHash, (events::PaymentPurpose, Vec<ClaimableHTLC>)>,
        /// Messages to send to peers - pushed to in the same lock that they are generated in (except
        /// for broadcast messages, where ordering isn't as strict).
        pub(super) pending_msg_events: Vec<MessageSendEvent>,
@@ -673,19 +666,21 @@ pub type SimpleRefChannelManager<'a, 'b, 'c, 'd, 'e, M, T, F, L> = ChannelManage
 //  |
 //  |__`forward_htlcs`
 //  |
-//  |__`channel_state`
-//  |   |
-//  |   |__`id_to_peer`
+//  |__`pending_inbound_payments`
 //  |   |
-//  |   |__`short_to_chan_info`
+//  |   |__`claimable_htlcs`
 //  |   |
-//  |   |__`per_peer_state`
+//  |   |__`pending_outbound_payments`
 //  |       |
-//  |       |__`outbound_scid_aliases`
-//  |       |
-//  |       |__`pending_inbound_payments`
+//  |       |__`channel_state`
+//  |           |
+//  |           |__`id_to_peer`
 //  |           |
-//  |           |__`pending_outbound_payments`
+//  |           |__`short_to_chan_info`
+//  |           |
+//  |           |__`per_peer_state`
+//  |               |
+//  |               |__`outbound_scid_aliases`
 //  |               |
 //  |               |__`best_block`
 //  |               |
@@ -756,6 +751,15 @@ pub struct ChannelManager<M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
        #[cfg(not(test))]
        forward_htlcs: Mutex<HashMap<u64, Vec<HTLCForwardInfo>>>,
 
+       /// Map from payment hash to the payment data and any HTLCs which are to us and can be
+       /// failed/claimed by the user.
+       ///
+       /// Note that, no consistency guarantees are made about the channels given here actually
+       /// existing anymore by the time you go to read them!
+       ///
+       /// See `ChannelManager` struct-level documentation for lock order requirements.
+       claimable_htlcs: Mutex<HashMap<PaymentHash, (events::PaymentPurpose, Vec<ClaimableHTLC>)>>,
+
        /// The set of outbound SCID aliases across all our channels, including unconfirmed channels
        /// and some closed channels which reached a usable state prior to being closed. This is used
        /// only to avoid duplicates, and is not persisted explicitly to disk, but rebuilt from the
@@ -1517,134 +1521,6 @@ macro_rules! emit_channel_ready_event {
        }
 }
 
-macro_rules! handle_chan_restoration_locked {
-       ($self: ident, $channel_lock: expr, $channel_state: expr, $channel_entry: expr,
-        $raa: expr, $commitment_update: expr, $order: expr, $chanmon_update: expr,
-        $pending_forwards: expr, $funding_broadcastable: expr, $channel_ready: expr, $announcement_sigs: expr) => { {
-               let mut htlc_forwards = None;
-
-               let chanmon_update: Option<ChannelMonitorUpdate> = $chanmon_update; // Force type-checking to resolve
-               let chanmon_update_is_none = chanmon_update.is_none();
-               let counterparty_node_id = $channel_entry.get().get_counterparty_node_id();
-               let res = loop {
-                       let forwards: Vec<(PendingHTLCInfo, u64)> = $pending_forwards; // Force type-checking to resolve
-                       if !forwards.is_empty() {
-                               htlc_forwards = Some(($channel_entry.get().get_short_channel_id().unwrap_or($channel_entry.get().outbound_scid_alias()),
-                                       $channel_entry.get().get_funding_txo().unwrap(), forwards));
-                       }
-
-                       if chanmon_update.is_some() {
-                               // On reconnect, we, by definition, only resend a channel_ready if there have been
-                               // no commitment updates, so the only channel monitor update which could also be
-                               // associated with a channel_ready would be the funding_created/funding_signed
-                               // monitor update. That monitor update failing implies that we won't send
-                               // channel_ready until it's been updated, so we can't have a channel_ready and a
-                               // monitor update here (so we don't bother to handle it correctly below).
-                               assert!($channel_ready.is_none());
-                               // A channel monitor update makes no sense without either a channel_ready or a
-                               // commitment update to process after it. Since we can't have a channel_ready, we
-                               // only bother to handle the monitor-update + commitment_update case below.
-                               assert!($commitment_update.is_some());
-                       }
-
-                       if let Some(msg) = $channel_ready {
-                               // Similar to the above, this implies that we're letting the channel_ready fly
-                               // before it should be allowed to.
-                               assert!(chanmon_update.is_none());
-                               send_channel_ready!($self, $channel_state.pending_msg_events, $channel_entry.get(), msg);
-                       }
-                       if let Some(msg) = $announcement_sigs {
-                               $channel_state.pending_msg_events.push(events::MessageSendEvent::SendAnnouncementSignatures {
-                                       node_id: counterparty_node_id,
-                                       msg,
-                               });
-                       }
-
-                       emit_channel_ready_event!($self, $channel_entry.get_mut());
-
-                       let funding_broadcastable: Option<Transaction> = $funding_broadcastable; // Force type-checking to resolve
-                       if let Some(monitor_update) = chanmon_update {
-                               // We only ever broadcast a funding transaction in response to a funding_signed
-                               // message and the resulting monitor update. Thus, on channel_reestablish
-                               // message handling we can't have a funding transaction to broadcast. When
-                               // processing a monitor update finishing resulting in a funding broadcast, we
-                               // cannot have a second monitor update, thus this case would indicate a bug.
-                               assert!(funding_broadcastable.is_none());
-                               // Given we were just reconnected or finished updating a channel monitor, the
-                               // only case where we can get a new ChannelMonitorUpdate would be if we also
-                               // have some commitment updates to send as well.
-                               assert!($commitment_update.is_some());
-                               match $self.chain_monitor.update_channel($channel_entry.get().get_funding_txo().unwrap(), monitor_update) {
-                                       ChannelMonitorUpdateStatus::Completed => {},
-                                       e => {
-                                               // channel_reestablish doesn't guarantee the order it returns is sensical
-                                               // for the messages it returns, but if we're setting what messages to
-                                               // re-transmit on monitor update success, we need to make sure it is sane.
-                                               let mut order = $order;
-                                               if $raa.is_none() {
-                                                       order = RAACommitmentOrder::CommitmentFirst;
-                                               }
-                                               break handle_monitor_update_res!($self, e, $channel_entry, order, $raa.is_some(), true);
-                                       }
-                               }
-                       }
-
-                       macro_rules! handle_cs { () => {
-                               if let Some(update) = $commitment_update {
-                                       $channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
-                                               node_id: counterparty_node_id,
-                                               updates: update,
-                                       });
-                               }
-                       } }
-                       macro_rules! handle_raa { () => {
-                               if let Some(revoke_and_ack) = $raa {
-                                       $channel_state.pending_msg_events.push(events::MessageSendEvent::SendRevokeAndACK {
-                                               node_id: counterparty_node_id,
-                                               msg: revoke_and_ack,
-                                       });
-                               }
-                       } }
-                       match $order {
-                               RAACommitmentOrder::CommitmentFirst => {
-                                       handle_cs!();
-                                       handle_raa!();
-                               },
-                               RAACommitmentOrder::RevokeAndACKFirst => {
-                                       handle_raa!();
-                                       handle_cs!();
-                               },
-                       }
-                       if let Some(tx) = funding_broadcastable {
-                               log_info!($self.logger, "Broadcasting funding transaction with txid {}", tx.txid());
-                               $self.tx_broadcaster.broadcast_transaction(&tx);
-                       }
-                       break Ok(());
-               };
-
-               if chanmon_update_is_none {
-                       // If there was no ChannelMonitorUpdate, we should never generate an Err in the res loop
-                       // above. Doing so would imply calling handle_err!() from channel_monitor_updated() which
-                       // should *never* end up calling back to `chain_monitor.update_channel()`.
-                       assert!(res.is_ok());
-               }
-
-               (htlc_forwards, res, counterparty_node_id)
-       } }
-}
-
-macro_rules! post_handle_chan_restoration {
-       ($self: ident, $locked_res: expr) => { {
-               let (htlc_forwards, res, counterparty_node_id) = $locked_res;
-
-               let _ = handle_error!($self, res, counterparty_node_id);
-
-               if let Some(forwards) = htlc_forwards {
-                       $self.forward_htlcs(&mut [forwards][..]);
-               }
-       } }
-}
-
 impl<M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelManager<M, T, K, F, L>
        where M::Target: chain::Watch<<K::Target as KeysInterface>::Signer>,
         T::Target: BroadcasterInterface,
@@ -1678,13 +1554,13 @@ impl<M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelManager<M, T, K, F
 
                        channel_state: Mutex::new(ChannelHolder{
                                by_id: HashMap::new(),
-                               claimable_htlcs: HashMap::new(),
                                pending_msg_events: Vec::new(),
                        }),
                        outbound_scid_aliases: Mutex::new(HashSet::new()),
                        pending_inbound_payments: Mutex::new(HashMap::new()),
                        pending_outbound_payments: Mutex::new(HashMap::new()),
                        forward_htlcs: Mutex::new(HashMap::new()),
+                       claimable_htlcs: Mutex::new(HashMap::new()),
                        id_to_peer: Mutex::new(HashMap::new()),
                        short_to_chan_info: FairRwLock::new(HashMap::new()),
 
@@ -1922,14 +1798,16 @@ impl<M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelManager<M, T, K, F
                                        if *counterparty_node_id != chan_entry.get().get_counterparty_node_id(){
                                                return Err(APIError::APIMisuseError { err: "The passed counterparty_node_id doesn't match the channel's counterparty node_id".to_owned() });
                                        }
-                                       let per_peer_state = self.per_peer_state.read().unwrap();
-                                       let (shutdown_msg, monitor_update, htlcs) = match per_peer_state.get(&counterparty_node_id) {
-                                               Some(peer_state) => {
-                                                       let peer_state = peer_state.lock().unwrap();
-                                                       let their_features = &peer_state.latest_features;
-                                                       chan_entry.get_mut().get_shutdown(&self.keys_manager, their_features, target_feerate_sats_per_1000_weight)?
-                                               },
-                                               None => return Err(APIError::ChannelUnavailable { err: format!("Not connected to node: {}", counterparty_node_id) }),
+                                       let (shutdown_msg, monitor_update, htlcs) = {
+                                               let per_peer_state = self.per_peer_state.read().unwrap();
+                                               match per_peer_state.get(&counterparty_node_id) {
+                                                       Some(peer_state) => {
+                                                               let peer_state = peer_state.lock().unwrap();
+                                                               let their_features = &peer_state.latest_features;
+                                                               chan_entry.get_mut().get_shutdown(&self.keys_manager, their_features, target_feerate_sats_per_1000_weight)?
+                                                       },
+                                                       None => return Err(APIError::ChannelUnavailable { err: format!("Not connected to node: {}", counterparty_node_id) }),
+                                               }
                                        };
                                        failed_htlcs = htlcs;
 
@@ -3154,8 +3032,6 @@ impl<M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelManager<M, T, K, F
                        mem::swap(&mut forward_htlcs, &mut self.forward_htlcs.lock().unwrap());
 
                        for (short_chan_id, mut pending_forwards) in forward_htlcs {
-                               let mut channel_state_lock = self.channel_state.lock().unwrap();
-                               let channel_state = &mut *channel_state_lock;
                                if short_chan_id != 0 {
                                        macro_rules! forwarding_channel_not_found {
                                                () => {
@@ -3258,6 +3134,8 @@ impl<M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelManager<M, T, K, F
                                                        continue;
                                                }
                                        };
+                                       let mut channel_state_lock = self.channel_state.lock().unwrap();
+                                       let channel_state = &mut *channel_state_lock;
                                        match channel_state.by_id.entry(forward_chan_id) {
                                                hash_map::Entry::Vacant(_) => {
                                                        forwarding_channel_not_found!();
@@ -3455,7 +3333,8 @@ impl<M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelManager<M, T, K, F
                                                                                                payment_secret: $payment_data.payment_secret,
                                                                                        }
                                                                                };
-                                                                               let (_, htlcs) = channel_state.claimable_htlcs.entry(payment_hash)
+                                                                               let mut claimable_htlcs = self.claimable_htlcs.lock().unwrap();
+                                                                               let (_, htlcs) = claimable_htlcs.entry(payment_hash)
                                                                                        .or_insert_with(|| (purpose(), Vec::new()));
                                                                                if htlcs.len() == 1 {
                                                                                        if let OnionPayload::Spontaneous(_) = htlcs[0].onion_payload {
@@ -3523,7 +3402,7 @@ impl<M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelManager<M, T, K, F
                                                                                                check_total_value!(payment_data, payment_preimage);
                                                                                        },
                                                                                        OnionPayload::Spontaneous(preimage) => {
-                                                                                               match channel_state.claimable_htlcs.entry(payment_hash) {
+                                                                                               match self.claimable_htlcs.lock().unwrap().entry(payment_hash) {
                                                                                                        hash_map::Entry::Vacant(e) => {
                                                                                                                let purpose = events::PaymentPurpose::SpontaneousPayment(preimage);
                                                                                                                e.insert((purpose.clone(), vec![claimable_htlc]));
@@ -3812,29 +3691,29 @@ impl<M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelManager<M, T, K, F
 
                                        true
                                });
+                       }
 
-                               channel_state.claimable_htlcs.retain(|payment_hash, (_, htlcs)| {
-                                       if htlcs.is_empty() {
-                                               // This should be unreachable
-                                               debug_assert!(false);
+                       self.claimable_htlcs.lock().unwrap().retain(|payment_hash, (_, htlcs)| {
+                               if htlcs.is_empty() {
+                                       // This should be unreachable
+                                       debug_assert!(false);
+                                       return false;
+                               }
+                               if let OnionPayload::Invoice { .. } = htlcs[0].onion_payload {
+                                       // Check if we've received all the parts we need for an MPP (the value of the parts adds to total_msat).
+                                       // In this case we're not going to handle any timeouts of the parts here.
+                                       if htlcs[0].total_msat == htlcs.iter().fold(0, |total, htlc| total + htlc.value) {
+                                               return true;
+                                       } else if htlcs.into_iter().any(|htlc| {
+                                               htlc.timer_ticks += 1;
+                                               return htlc.timer_ticks >= MPP_TIMEOUT_TICKS
+                                       }) {
+                                               timed_out_mpp_htlcs.extend(htlcs.into_iter().map(|htlc| (htlc.prev_hop.clone(), payment_hash.clone())));
                                                return false;
                                        }
-                                       if let OnionPayload::Invoice { .. } = htlcs[0].onion_payload {
-                                               // Check if we've received all the parts we need for an MPP (the value of the parts adds to total_msat).
-                                               // In this case we're not going to handle any timeouts of the parts here.
-                                               if htlcs[0].total_msat == htlcs.iter().fold(0, |total, htlc| total + htlc.value) {
-                                                       return true;
-                                               } else if htlcs.into_iter().any(|htlc| {
-                                                       htlc.timer_ticks += 1;
-                                                       return htlc.timer_ticks >= MPP_TIMEOUT_TICKS
-                                               }) {
-                                                       timed_out_mpp_htlcs.extend(htlcs.into_iter().map(|htlc| (htlc.prev_hop.clone(), payment_hash.clone())));
-                                                       return false;
-                                               }
-                                       }
-                                       true
-                               });
-                       }
+                               }
+                               true
+                       });
 
                        for htlc_source in timed_out_mpp_htlcs.drain(..) {
                                let receiver = HTLCDestination::FailedPayment { payment_hash: htlc_source.1 };
@@ -3867,10 +3746,7 @@ impl<M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelManager<M, T, K, F
        pub fn fail_htlc_backwards(&self, payment_hash: &PaymentHash) {
                let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
 
-               let removed_source = {
-                       let mut channel_state = self.channel_state.lock().unwrap();
-                       channel_state.claimable_htlcs.remove(payment_hash)
-               };
+               let removed_source = self.claimable_htlcs.lock().unwrap().remove(payment_hash);
                if let Some((_, mut sources)) = removed_source {
                        for htlc in sources.drain(..) {
                                let mut htlc_msat_height_data = byte_utils::be64_to_array(htlc.value).to_vec();
@@ -4172,7 +4048,7 @@ impl<M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelManager<M, T, K, F
 
                let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
 
-               let removed_source = self.channel_state.lock().unwrap().claimable_htlcs.remove(&payment_hash);
+               let removed_source = self.claimable_htlcs.lock().unwrap().remove(&payment_hash);
                if let Some((payment_purpose, mut sources)) = removed_source {
                        assert!(!sources.is_empty());
 
@@ -4490,10 +4366,73 @@ impl<M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelManager<M, T, K, F
                self.our_network_pubkey.clone()
        }
 
+       /// Handles a channel reentering a functional state, either due to reconnect or a monitor
+       /// update completion.
+       fn handle_channel_resumption(&self, pending_msg_events: &mut Vec<MessageSendEvent>,
+               channel: &mut Channel<<K::Target as KeysInterface>::Signer>, raa: Option<msgs::RevokeAndACK>,
+               commitment_update: Option<msgs::CommitmentUpdate>, order: RAACommitmentOrder,
+               pending_forwards: Vec<(PendingHTLCInfo, u64)>, funding_broadcastable: Option<Transaction>,
+               channel_ready: Option<msgs::ChannelReady>, announcement_sigs: Option<msgs::AnnouncementSignatures>)
+       -> Option<(u64, OutPoint, Vec<(PendingHTLCInfo, u64)>)> {
+               let mut htlc_forwards = None;
+
+               let counterparty_node_id = channel.get_counterparty_node_id();
+               if !pending_forwards.is_empty() {
+                       htlc_forwards = Some((channel.get_short_channel_id().unwrap_or(channel.outbound_scid_alias()),
+                               channel.get_funding_txo().unwrap(), pending_forwards));
+               }
+
+               if let Some(msg) = channel_ready {
+                       send_channel_ready!(self, pending_msg_events, channel, msg);
+               }
+               if let Some(msg) = announcement_sigs {
+                       pending_msg_events.push(events::MessageSendEvent::SendAnnouncementSignatures {
+                               node_id: counterparty_node_id,
+                               msg,
+                       });
+               }
+
+               emit_channel_ready_event!(self, channel);
+
+               macro_rules! handle_cs { () => {
+                       if let Some(update) = commitment_update {
+                               pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
+                                       node_id: counterparty_node_id,
+                                       updates: update,
+                               });
+                       }
+               } }
+               macro_rules! handle_raa { () => {
+                       if let Some(revoke_and_ack) = raa {
+                               pending_msg_events.push(events::MessageSendEvent::SendRevokeAndACK {
+                                       node_id: counterparty_node_id,
+                                       msg: revoke_and_ack,
+                               });
+                       }
+               } }
+               match order {
+                       RAACommitmentOrder::CommitmentFirst => {
+                               handle_cs!();
+                               handle_raa!();
+                       },
+                       RAACommitmentOrder::RevokeAndACKFirst => {
+                               handle_raa!();
+                               handle_cs!();
+                       },
+               }
+
+               if let Some(tx) = funding_broadcastable {
+                       log_info!(self.logger, "Broadcasting funding transaction with txid {}", tx.txid());
+                       self.tx_broadcaster.broadcast_transaction(&tx);
+               }
+
+               htlc_forwards
+       }
+
        fn channel_monitor_updated(&self, funding_txo: &OutPoint, highest_applied_update_id: u64) {
                let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
 
-               let chan_restoration_res;
+               let htlc_forwards;
                let (mut pending_failures, finalized_claims, counterparty_node_id) = {
                        let mut channel_lock = self.channel_state.lock().unwrap();
                        let channel_state = &mut *channel_lock;
@@ -4520,14 +4459,16 @@ impl<M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelManager<M, T, K, F
                                        })
                                } else { None }
                        } else { None };
-                       chan_restoration_res = handle_chan_restoration_locked!(self, channel_lock, channel_state, channel, updates.raa, updates.commitment_update, updates.order, None, updates.accepted_htlcs, updates.funding_broadcastable, updates.channel_ready, updates.announcement_sigs);
+                       htlc_forwards = self.handle_channel_resumption(&mut channel_state.pending_msg_events, channel.get_mut(), updates.raa, updates.commitment_update, updates.order, updates.accepted_htlcs, updates.funding_broadcastable, updates.channel_ready, updates.announcement_sigs);
                        if let Some(upd) = channel_update {
                                channel_state.pending_msg_events.push(upd);
                        }
 
                        (updates.failed_htlcs, updates.finalized_claimed_htlcs, counterparty_node_id)
                };
-               post_handle_chan_restoration!(self, chan_restoration_res);
+               if let Some(forwards) = htlc_forwards {
+                       self.forward_htlcs(&mut [forwards][..]);
+               }
                self.finalize_claims(finalized_claims);
                for failure in pending_failures.drain(..) {
                        let receiver = HTLCDestination::NextHopChannel { node_id: Some(counterparty_node_id), channel_id: funding_txo.to_channel_id() };
@@ -5278,8 +5219,8 @@ impl<M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelManager<M, T, K, F
        }
 
        fn internal_channel_reestablish(&self, counterparty_node_id: &PublicKey, msg: &msgs::ChannelReestablish) -> Result<(), MsgHandleErrInternal> {
-               let chan_restoration_res;
-               let (htlcs_failed_forward, need_lnd_workaround) = {
+               let htlc_forwards;
+               let need_lnd_workaround = {
                        let mut channel_state_lock = self.channel_state.lock().unwrap();
                        let channel_state = &mut *channel_state_lock;
 
@@ -5313,19 +5254,21 @@ impl<M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelManager<M, T, K, F
                                                }
                                        }
                                        let need_lnd_workaround = chan.get_mut().workaround_lnd_bug_4006.take();
-                                       chan_restoration_res = handle_chan_restoration_locked!(
-                                               self, channel_state_lock, channel_state, chan, responses.raa, responses.commitment_update, responses.order,
-                                               responses.mon_update, Vec::new(), None, responses.channel_ready, responses.announcement_sigs);
+                                       htlc_forwards = self.handle_channel_resumption(
+                                               &mut channel_state.pending_msg_events, chan.get_mut(), responses.raa, responses.commitment_update, responses.order,
+                                               Vec::new(), None, responses.channel_ready, responses.announcement_sigs);
                                        if let Some(upd) = channel_update {
                                                channel_state.pending_msg_events.push(upd);
                                        }
-                                       (responses.holding_cell_failed_htlcs, need_lnd_workaround)
+                                       need_lnd_workaround
                                },
                                hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id))
                        }
                };
-               post_handle_chan_restoration!(self, chan_restoration_res);
-               self.fail_holding_cell_htlcs(htlcs_failed_forward, msg.channel_id, counterparty_node_id);
+
+               if let Some(forwards) = htlc_forwards {
+                       self.forward_htlcs(&mut [forwards][..]);
+               }
 
                if let Some(channel_ready_msg) = need_lnd_workaround {
                        self.internal_channel_ready(counterparty_node_id, &channel_ready_msg)?;
@@ -6093,28 +6036,28 @@ where
                                }
                                true
                        });
+               }
 
-                       if let Some(height) = height_opt {
-                               channel_state.claimable_htlcs.retain(|payment_hash, (_, htlcs)| {
-                                       htlcs.retain(|htlc| {
-                                               // If height is approaching the number of blocks we think it takes us to get
-                                               // our commitment transaction confirmed before the HTLC expires, plus the
-                                               // number of blocks we generally consider it to take to do a commitment update,
-                                               // just give up on it and fail the HTLC.
-                                               if height >= htlc.cltv_expiry - HTLC_FAIL_BACK_BUFFER {
-                                                       let mut htlc_msat_height_data = byte_utils::be64_to_array(htlc.value).to_vec();
-                                                       htlc_msat_height_data.extend_from_slice(&byte_utils::be32_to_array(height));
-
-                                                       timed_out_htlcs.push((HTLCSource::PreviousHopData(htlc.prev_hop.clone()), payment_hash.clone(), HTLCFailReason::Reason {
-                                                               failure_code: 0x4000 | 15,
-                                                               data: htlc_msat_height_data
-                                                       }, HTLCDestination::FailedPayment { payment_hash: payment_hash.clone() }));
-                                                       false
-                                               } else { true }
-                                       });
-                                       !htlcs.is_empty() // Only retain this entry if htlcs has at least one entry.
+               if let Some(height) = height_opt {
+                       self.claimable_htlcs.lock().unwrap().retain(|payment_hash, (_, htlcs)| {
+                               htlcs.retain(|htlc| {
+                                       // If height is approaching the number of blocks we think it takes us to get
+                                       // our commitment transaction confirmed before the HTLC expires, plus the
+                                       // number of blocks we generally consider it to take to do a commitment update,
+                                       // just give up on it and fail the HTLC.
+                                       if height >= htlc.cltv_expiry - HTLC_FAIL_BACK_BUFFER {
+                                               let mut htlc_msat_height_data = byte_utils::be64_to_array(htlc.value).to_vec();
+                                               htlc_msat_height_data.extend_from_slice(&byte_utils::be32_to_array(height));
+
+                                               timed_out_htlcs.push((HTLCSource::PreviousHopData(htlc.prev_hop.clone()), payment_hash.clone(), HTLCFailReason::Reason {
+                                                       failure_code: 0x4000 | 15,
+                                                       data: htlc_msat_height_data
+                                               }, HTLCDestination::FailedPayment { payment_hash: payment_hash.clone() }));
+                                               false
+                                       } else { true }
                                });
-                       }
+                               !htlcs.is_empty() // Only retain this entry if htlcs has at least one entry.
+                       });
                }
 
                self.handle_init_event_channel_failures(failed_channels);
@@ -6934,10 +6877,13 @@ impl<M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> Writeable for ChannelMana
                        }
                }
 
-               let channel_state = self.channel_state.lock().unwrap();
+               let pending_inbound_payments = self.pending_inbound_payments.lock().unwrap();
+               let claimable_htlcs = self.claimable_htlcs.lock().unwrap();
+               let pending_outbound_payments = self.pending_outbound_payments.lock().unwrap();
+
                let mut htlc_purposes: Vec<&events::PaymentPurpose> = Vec::new();
-               (channel_state.claimable_htlcs.len() as u64).write(writer)?;
-               for (payment_hash, (purpose, previous_hops)) in channel_state.claimable_htlcs.iter() {
+               (claimable_htlcs.len() as u64).write(writer)?;
+               for (payment_hash, (purpose, previous_hops)) in claimable_htlcs.iter() {
                        payment_hash.write(writer)?;
                        (previous_hops.len() as u64).write(writer)?;
                        for htlc in previous_hops.iter() {
@@ -6954,8 +6900,6 @@ impl<M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> Writeable for ChannelMana
                        peer_state.latest_features.write(writer)?;
                }
 
-               let pending_inbound_payments = self.pending_inbound_payments.lock().unwrap();
-               let pending_outbound_payments = self.pending_outbound_payments.lock().unwrap();
                let events = self.pending_events.lock().unwrap();
                (events.len() as u64).write(writer)?;
                for event in events.iter() {
@@ -7548,7 +7492,6 @@ impl<'a, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
 
                        channel_state: Mutex::new(ChannelHolder {
                                by_id,
-                               claimable_htlcs,
                                pending_msg_events: Vec::new(),
                        }),
                        inbound_payment_key: expanded_inbound_key,
@@ -7556,6 +7499,7 @@ impl<'a, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
                        pending_outbound_payments: Mutex::new(pending_outbound_payments.unwrap()),
 
                        forward_htlcs: Mutex::new(forward_htlcs),
+                       claimable_htlcs: Mutex::new(claimable_htlcs),
                        outbound_scid_aliases: Mutex::new(outbound_scid_aliases),
                        id_to_peer: Mutex::new(id_to_peer),
                        short_to_chan_info: FairRwLock::new(short_to_chan_info),
index 677fdccfd371819b7692cf90176716fd0b20d409..6b247b28f097d30b9576b13c6fa435343ecf4fd1 100644 (file)
@@ -2413,6 +2413,14 @@ macro_rules! handle_chan_reestablish_msgs {
                                assert_eq!(*node_id, $dst_node.node.get_our_node_id());
                        }
 
+                       let mut had_channel_update = false; // ChannelUpdate may be now or later, but not both
+                       if let Some(&MessageSendEvent::SendChannelUpdate { ref node_id, ref msg }) = msg_events.get(idx) {
+                               assert_eq!(*node_id, $dst_node.node.get_our_node_id());
+                               idx += 1;
+                               assert_eq!(msg.contents.flags & 2, 0); // "disabled" flag must not be set as we just reconnected.
+                               had_channel_update = true;
+                       }
+
                        let mut revoke_and_ack = None;
                        let mut commitment_update = None;
                        let order = if let Some(ev) = msg_events.get(idx) {
@@ -2457,6 +2465,7 @@ macro_rules! handle_chan_reestablish_msgs {
                                assert_eq!(*node_id, $dst_node.node.get_our_node_id());
                                idx += 1;
                                assert_eq!(msg.contents.flags & 2, 0); // "disabled" flag must not be set as we just reconnected.
+                               assert!(!had_channel_update);
                        }
 
                        assert_eq!(msg_events.len(), idx);
index dc3d53922137a720ba78366c5341bcd250164a92..1056ceebdd9dc963e9255c6be690c1310de8bd6d 100644 (file)
@@ -786,9 +786,9 @@ fn do_test_partial_claim_before_restart(persist_both_monitors: bool) {
                let ds_msgs = nodes[3].node.get_and_clear_pending_msg_events();
                check_added_monitors!(nodes[3], 1);
                assert_eq!(ds_msgs.len(), 2);
-               if let MessageSendEvent::SendChannelUpdate { .. } = ds_msgs[1] {} else { panic!(); }
+               if let MessageSendEvent::SendChannelUpdate { .. } = ds_msgs[0] {} else { panic!(); }
 
-               let cs_updates = match ds_msgs[0] {
+               let cs_updates = match ds_msgs[1] {
                        MessageSendEvent::UpdateHTLCs { ref updates, .. } => {
                                nodes[2].node.handle_update_fulfill_htlc(&nodes[3].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]);
                                check_added_monitors!(nodes[2], 1);