From 9dfe42cf8681afc9c3f84e0c85a4e2a30c1156a8 Mon Sep 17 00:00:00 2001
From: Matt Corallo
Date: Fri, 17 Mar 2023 04:55:30 +0000
Subject: [PATCH] Store + process pending `ChannelMonitorUpdate`s in `Channel`

The previous commits set up the ability for us to hold
`ChannelMonitorUpdate`s which are pending until we're ready to pass
them to users and have them be applied. However, if the
`ChannelManager` is persisted while we're waiting to give the user a
`ChannelMonitorUpdate` we'll be confused on restart - seeing our
latest `ChannelMonitor` state as stale compared to our
`ChannelManager` - a critical error.

Luckily the solution is trivial: we simply need to store the pending
`ChannelMonitorUpdate` state and load it with the `ChannelManager`
data, allowing stale monitors on load as long as we have the missing
pending updates between where we are and the latest `ChannelMonitor`
state.
---
 lightning/src/ln/channel.rs        | 16 +++++++++++++++-
 lightning/src/ln/channelmanager.rs |  7 ++-----
 2 files changed, 17 insertions(+), 6 deletions(-)

diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs
index de26fa46..73683eec 100644
--- a/lightning/src/ln/channel.rs
+++ b/lightning/src/ln/channel.rs
@@ -489,6 +489,11 @@ struct PendingChannelMonitorUpdate {
 	blocked: bool,
 }
 
+impl_writeable_tlv_based!(PendingChannelMonitorUpdate, {
+	(0, update, required),
+	(2, blocked, required),
+});
+
 // TODO: We should refactor this to be an Inbound/OutboundChannel until initial setup handshaking
 // has been completed, and then turn into a Channel to get compiler-time enforcement of things like
 // calling channel_id() before we're set up or things like get_outbound_funding_signed on an
@@ -4984,6 +4989,11 @@ impl Channel {
 		(self.channel_state & ChannelState::MonitorUpdateInProgress as u32) != 0
 	}
 
+	pub fn get_latest_complete_monitor_update_id(&self) -> u64 {
+		if self.pending_monitor_updates.is_empty() { return self.get_latest_monitor_update_id(); }
+		self.pending_monitor_updates[0].update.update_id - 1
+	}
+
 	/// Returns the next blocked monitor update, if one exists, and a bool which indicates a
 	/// further blocked monitor update exists after the next.
 	pub fn unblock_next_blocked_monitor_update(&mut self) -> Option<(&ChannelMonitorUpdate, bool)> {
@@ -6597,6 +6607,7 @@ impl Writeable for Channel {
 			(28, holder_max_accepted_htlcs, option),
 			(29, self.temporary_channel_id, option),
 			(31, channel_pending_event_emitted, option),
+			(33, self.pending_monitor_updates, vec_type),
 		});
 
 		Ok(())
@@ -6873,6 +6884,8 @@ impl<'a, 'b, 'c, ES: Deref, SP: Deref> ReadableArgs<(&'a ES, &'b SP, u32, &'c Ch
 		let mut temporary_channel_id: Option<[u8; 32]> = None;
 		let mut holder_max_accepted_htlcs: Option<u16> = None;
 
+		let mut pending_monitor_updates = Some(Vec::new());
+
 		read_tlv_fields!(reader, {
 			(0, announcement_sigs, option),
 			(1, minimum_depth, option),
@@ -6895,6 +6908,7 @@ impl<'a, 'b, 'c, ES: Deref, SP: Deref> ReadableArgs<(&'a ES, &'b SP, u32, &'c Ch
 			(28, holder_max_accepted_htlcs, option),
 			(29, temporary_channel_id, option),
 			(31, channel_pending_event_emitted, option),
+			(33, pending_monitor_updates, vec_type),
 		});
 
 		let (channel_keys_id, holder_signer) = if let Some(channel_keys_id) = channel_keys_id {
@@ -7064,7 +7078,7 @@ impl<'a, 'b, 'c, ES: Deref, SP: Deref> ReadableArgs<(&'a ES, &'b SP, u32, &'c Ch
 			channel_type: channel_type.unwrap(),
 			channel_keys_id,
 
-			pending_monitor_updates: Vec::new(),
+			pending_monitor_updates: pending_monitor_updates.unwrap(),
 		})
 	}
 }
diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs
index 2e17664e..326206ed 100644
--- a/lightning/src/ln/channelmanager.rs
+++ b/lightning/src/ln/channelmanager.rs
@@ -7646,14 +7646,11 @@ where
 			let funding_txo = channel.get_funding_txo().ok_or(DecodeError::InvalidValue)?;
 			funding_txo_set.insert(funding_txo.clone());
 			if let Some(ref mut monitor) = args.channel_monitors.get_mut(&funding_txo) {
-				if channel.get_cur_holder_commitment_transaction_number() < monitor.get_cur_holder_commitment_number() ||
-						channel.get_revoked_counterparty_commitment_transaction_number() < monitor.get_min_seen_secret() ||
-						channel.get_cur_counterparty_commitment_transaction_number() < monitor.get_cur_counterparty_commitment_number() ||
-						channel.get_latest_monitor_update_id() > monitor.get_latest_update_id() {
+				if channel.get_latest_complete_monitor_update_id() > monitor.get_latest_update_id() {
 					// If the channel is ahead of the monitor, return InvalidValue:
 					log_error!(args.logger, "A ChannelMonitor is stale compared to the current ChannelManager! This indicates a potentially-critical violation of the chain::Watch API!");
 					log_error!(args.logger, " The ChannelMonitor for channel {} is at update_id {} but the ChannelManager is at update_id {}.",
-						log_bytes!(channel.channel_id()), monitor.get_latest_update_id(), channel.get_latest_monitor_update_id());
+						log_bytes!(channel.channel_id()), monitor.get_latest_update_id(), channel.get_latest_complete_monitor_update_id());
 					log_error!(args.logger, " The chain::Watch API *requires* that monitors are persisted durably before returning,");
 					log_error!(args.logger, " client applications must ensure that ChannelMonitor data is always available and the latest to avoid funds loss!");
 					log_error!(args.logger, " Without the latest ChannelMonitor we cannot continue without risking funds.");
-- 
2.30.2
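
For readers following the relaxed staleness check above, a minimal, standalone sketch of the relationship the new `get_latest_complete_monitor_update_id` helper relies on: pending updates are stored in order, so the newest fully-applied update is the one just before the earliest still-pending update. The `PendingUpdate` struct and `latest_complete_update_id` function below are hypothetical stand-ins for illustration only, not LDK API.

// Hypothetical stand-in for a pending, not-yet-persisted monitor update.
struct PendingUpdate { update_id: u64 }

// `latest_issued` mirrors Channel::get_latest_monitor_update_id(); `pending`
// mirrors Channel::pending_monitor_updates, ordered by update_id.
fn latest_complete_update_id(latest_issued: u64, pending: &[PendingUpdate]) -> u64 {
    match pending.first() {
        // Nothing waiting on the user: everything issued is also complete.
        None => latest_issued,
        // Otherwise the newest complete update precedes the earliest pending one.
        Some(first) => first.update_id - 1,
    }
}

fn main() {
    // Updates 1..=5 issued, updates 4 and 5 still pending with the user:
    // the monitor may legitimately be at update_id 3 on reload.
    let pending = vec![PendingUpdate { update_id: 4 }, PendingUpdate { update_id: 5 }];
    assert_eq!(latest_complete_update_id(5, &pending), 3);
    // With nothing pending, the latest issued update must have been persisted.
    assert_eq!(latest_complete_update_id(5, &[]), 5);
}

This is why the `ChannelManager` read path can now accept a `ChannelMonitor` that is behind the channel's latest issued update_id, as long as it is not behind the latest complete one: the gap is covered by the `pending_monitor_updates` that are serialized with the channel and replayed after load.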