X-Git-Url: http://git.bitcoin.ninja/index.cgi?a=blobdiff_plain;f=lightning%2Fsrc%2Fchain%2Fchainmonitor.rs;h=0f128af4afffaf48cb4de4b384030bca4c547575;hb=9c9e5f896c967d459d18d0c592c0a7a9efb3df98;hp=086a848af77aa05c7ea4e8e97e93c4e6fb5e9628;hpb=5e6a0d5f3820d9c3edc3abf5639f8fb7c0592417;p=rust-lightning

diff --git a/lightning/src/chain/chainmonitor.rs b/lightning/src/chain/chainmonitor.rs
index 086a848a..0f128af4 100644
--- a/lightning/src/chain/chainmonitor.rs
+++ b/lightning/src/chain/chainmonitor.rs
@@ -110,7 +110,7 @@ impl MonitorUpdateId {
 /// If at some point no further progress can be made towards persisting the pending updates, the
 /// node should simply shut down.
 ///
-/// * If the persistence has failed and cannot be retried further (e.g. because of some timeout),
+/// * If the persistence has failed and cannot be retried further (e.g. because of an outage),
 ///   [`ChannelMonitorUpdateStatus::UnrecoverableError`] can be used, though this will result in
 ///   an immediate panic and future operations in LDK generally failing.
 ///
@@ -125,7 +125,10 @@ impl MonitorUpdateId {
 /// [`ChainMonitor::channel_monitor_updated`] must be called once for *each* update which occurs.
 ///
 /// If at some point no further progress can be made towards persisting a pending update, the node
-/// should simply shut down.
+/// should simply shut down. Until then, the background task should either loop indefinitely, or
+/// persistence should be regularly retried with [`ChainMonitor::list_pending_monitor_updates`]
+/// and [`ChainMonitor::get_monitor`] (note that if a full monitor is persisted all pending
+/// monitor updates may be marked completed).
 ///
 /// # Using remote watchtowers
 ///
@@ -164,8 +167,8 @@ pub trait Persist {
 	/// updated monitor itself to disk/backups. See the [`Persist`] trait documentation for more
 	/// details.
 	///
-	/// During blockchain synchronization operations, this may be called with no
-	/// [`ChannelMonitorUpdate`], in which case the full [`ChannelMonitor`] needs to be persisted.
+	/// During blockchain synchronization operations, and in some rare cases, this may be called with
+	/// no [`ChannelMonitorUpdate`], in which case the full [`ChannelMonitor`] needs to be persisted.
 	/// Note that after the full [`ChannelMonitor`] is persisted any previous
 	/// [`ChannelMonitorUpdate`]s which were persisted should be discarded - they can no longer be
 	/// applied to the persisted [`ChannelMonitor`] as they were already applied.
@@ -321,7 +324,6 @@ where C::Target: chain::Filter,
 				if self.update_monitor_with_chain_data(header, best_height, txdata, &process, funding_outpoint, &monitor_state).is_err() {
 					// Take the monitors lock for writing so that we poison it and any future
 					// operations going forward fail immediately.
-					core::mem::drop(monitor_state);
 					core::mem::drop(monitor_lock);
 					let _poison = self.monitors.write().unwrap();
 					log_error!(self.logger, "{}", err_str);
@@ -432,7 +434,8 @@ where C::Target: chain::Filter,
 	/// claims which are awaiting confirmation.
 	///
 	/// Includes the balances from each [`ChannelMonitor`] *except* those included in
-	/// `ignored_channels`.
+	/// `ignored_channels`, allowing you to filter out balances from channels which are still open
+	/// (and whose balance should likely be pulled from the [`ChannelDetails`]).
 	///
 	/// See [`ChannelMonitor::get_claimable_balances`] for more details on the exact criteria for
 	/// inclusion in the return value.
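
The retry flow described in the new documentation above can be driven by a small background task. A minimal sketch, assuming `MyChainMonitor` is a type alias for your concrete `ChainMonitor<..>` instantiation and `persist_full_monitor` is a hypothetical helper standing in for whatever writes bytes to your durable store:

use lightning::util::ser::Writeable;

// Assumed helper, not LDK API:
// fn persist_full_monitor(key: OutPoint, monitor_bytes: &[u8]) -> std::io::Result<()>

/// One pass of the retry loop: for every channel with updates still pending,
/// re-persist the full ChannelMonitor, then mark all pending updates completed.
fn retry_pending_persists(chain_monitor: &MyChainMonitor) {
	// Maps each channel's funding outpoint to the ids of updates whose
	// persistence has not yet been marked complete.
	for (funding_txo, update_ids) in chain_monitor.list_pending_monitor_updates() {
		if update_ids.is_empty() { continue; }
		if let Ok(monitor) = chain_monitor.get_monitor(funding_txo) {
			// Re-persist the full monitor; `encode()` serializes it.
			if persist_full_monitor(funding_txo, &monitor.encode()).is_ok() {
				// The durable full monitor subsumes every pending update, so
				// each one can now be marked completed.
				for update_id in update_ids {
					let _ = chain_monitor.channel_monitor_updated(funding_txo, update_id);
				}
			}
		}
	}
}

Completing every listed update after a successful full-monitor write is exactly what the parenthetical in the doc text allows: once the full [`ChannelMonitor`] is durable, the individual [`ChannelMonitorUpdate`]s it subsumes no longer need to be persisted separately.
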
@@ -763,7 +766,7 @@ where C::Target: chain::Filter,
 			Some(monitor_state) => {
 				let monitor = &monitor_state.monitor;
 				log_trace!(self.logger, "Updating ChannelMonitor for channel {}", log_funding_info!(monitor));
-				let update_res = monitor.update_monitor(update, &self.broadcaster, &*self.fee_estimator, &self.logger);
+				let update_res = monitor.update_monitor(update, &self.broadcaster, &self.fee_estimator, &self.logger);
 
 				let update_id = MonitorUpdateId::from_monitor_update(update);
 				let mut pending_monitor_updates = monitor_state.pending_monitor_updates.lock().unwrap();
@@ -811,12 +814,7 @@ where C::Target: chain::Filter,
 		let mut pending_monitor_events = self.pending_monitor_events.lock().unwrap().split_off(0);
 		for monitor_state in self.monitors.read().unwrap().values() {
 			let is_pending_monitor_update = monitor_state.has_pending_chainsync_updates(&monitor_state.pending_monitor_updates.lock().unwrap());
-			if is_pending_monitor_update &&
-					monitor_state.last_chain_persist_height.load(Ordering::Acquire) + LATENCY_GRACE_PERIOD_BLOCKS as usize
-						> self.highest_chain_height.load(Ordering::Acquire)
-			{
-				log_debug!(self.logger, "A Channel Monitor sync is still in progress, refusing to provide monitor events!");
-			} else {
+			if !is_pending_monitor_update || monitor_state.last_chain_persist_height.load(Ordering::Acquire) + LATENCY_GRACE_PERIOD_BLOCKS as usize <= self.highest_chain_height.load(Ordering::Acquire) {
 				if is_pending_monitor_update {
 					log_error!(self.logger, "A ChannelMonitor sync took longer than {} blocks to complete.", LATENCY_GRACE_PERIOD_BLOCKS);
 					log_error!(self.logger, " To avoid funds-loss, we are allowing monitor updates to be released.");
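
The rewritten gate in the second hunk is a straight De Morgan inversion of the old one: monitor events are now released when no chainsync persist is pending, or when the persist has outlived the grace period. A self-contained check of that equivalence, with plain values standing in for the atomics and an illustrative grace-period constant (LDK defines its own):

// Illustrative stand-in; LDK defines the real LATENCY_GRACE_PERIOD_BLOCKS.
const LATENCY_GRACE_PERIOD_BLOCKS: usize = 3;

/// Old gate: hold events while an update is pending AND we are still inside
/// the grace period.
fn held_old(pending: bool, persist_height: usize, chain_height: usize) -> bool {
	pending && persist_height + LATENCY_GRACE_PERIOD_BLOCKS > chain_height
}

/// New gate: release events when not pending OR the grace period has expired.
fn released_new(pending: bool, persist_height: usize, chain_height: usize) -> bool {
	!pending || persist_height + LATENCY_GRACE_PERIOD_BLOCKS <= chain_height
}

fn main() {
	// Exhaustively confirm the new condition is exactly the negation of the old,
	// i.e. the refactor is behavior-preserving.
	for pending in [false, true] {
		for persist_height in 0..10 {
			for chain_height in 0..10 {
				assert_eq!(
					released_new(pending, persist_height, chain_height),
					!held_old(pending, persist_height, chain_height),
				);
			}
		}
	}
	println!("gate refactor is behavior-preserving");
}

When both sides of the `||` fail, events are held back just as the deleted `log_debug!` branch did; when the grace period has expired with a persist still pending, the retained `if is_pending_monitor_update` branch logs the error before releasing.
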