/// If at some point no further progress can be made towards persisting the pending updates, the
/// node should simply shut down.
///
-/// * If the persistence has failed and cannot be retried further (e.g. because of some timeout),
+/// * If the persistence has failed and cannot be retried further (e.g. because of an outage),
///   [`ChannelMonitorUpdateStatus::UnrecoverableError`] can be used, though this will result in
///   an immediate panic and future operations in LDK generally failing.
///
/// [`ChainMonitor::channel_monitor_updated`] must be called once for *each* update which occurs.
///
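+/// For example, once an asynchronous persist completes, a background task might notify the
+/// [`ChainMonitor`] roughly as in the following sketch, where `spawn_background_persist` is a
+/// stand-in for your own plumbing:
+///
+/// ```ignore
+/// // `Persist::update_persisted_channel` returned `InProgress`; finish the write in the
+/// // background, then hand the completed update's ID back to the `ChainMonitor`.
+/// spawn_background_persist(monitor_bytes, move || {
+///     chain_monitor.channel_monitor_updated(funding_txo, update_id)
+///         .expect("the ChainMonitor should still know about this channel");
+/// });
+/// ```
+///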
/// If at some point no further progress can be made towards persisting a pending update, the node
-/// should simply shut down.
+/// should simply shut down. Until then, the background task should either loop indefinitely, or
+/// persistence should be regularly retried with [`ChainMonitor::list_pending_monitor_updates`]
+/// and [`ChainMonitor::get_monitor`] (note that if a full monitor is persisted all pending
+/// monitor updates may be marked completed).
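+///
+/// For example, such a retry task might look roughly like the following sketch, where
+/// `persist_full_monitor` stands in for your own storage call:
+///
+/// ```ignore
+/// loop {
+///     for (funding_txo, update_ids) in chain_monitor.list_pending_monitor_updates() {
+///         if update_ids.is_empty() { continue; }
+///         if let Ok(monitor) = chain_monitor.get_monitor(funding_txo) {
+///             // Persisting the full monitor subsumes all of its pending updates...
+///             if persist_full_monitor(&funding_txo, &*monitor).is_ok() {
+///                 // ...so mark each of them completed.
+///                 for update_id in update_ids {
+///                     let _ = chain_monitor.channel_monitor_updated(funding_txo, update_id);
+///                 }
+///             }
+///         }
+///     }
+///     std::thread::sleep(std::time::Duration::from_secs(60));
+/// }
+/// ```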
///
/// # Using remote watchtowers
///
/// updated monitor itself to disk/backups. See the [`Persist`] trait documentation for more
/// details.
///
- /// During blockchain synchronization operations, this may be called with no
- /// [`ChannelMonitorUpdate`], in which case the full [`ChannelMonitor`] needs to be persisted.
+ /// During blockchain synchronization operations, and in some rare cases, this may be called with
+ /// no [`ChannelMonitorUpdate`], in which case the full [`ChannelMonitor`] needs to be persisted.
/// Note that after the full [`ChannelMonitor`] is persisted any previous
/// [`ChannelMonitorUpdate`]s which were persisted should be discarded - they can no longer be
/// applied to the persisted [`ChannelMonitor`] as they were already applied.
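+ ///
+ /// For example, an implementation might handle the no-update case roughly as in this sketch,
+ /// where `write_monitor` and `delete_updates_up_to` are hypothetical storage helpers:
+ ///
+ /// ```ignore
+ /// fn update_persisted_channel(
+ ///     &self, funding_txo: OutPoint, update: Option<&ChannelMonitorUpdate>,
+ ///     monitor: &ChannelMonitor<S>, _update_id: MonitorUpdateId,
+ /// ) -> ChannelMonitorUpdateStatus {
+ ///     match update {
+ ///         // No update given - the full monitor must be persisted.
+ ///         None => match self.write_monitor(&funding_txo, monitor) {
+ ///             Ok(()) => {
+ ///                 // The full monitor subsumes all previously-persisted updates,
+ ///                 // so they can now be deleted.
+ ///                 self.delete_updates_up_to(&funding_txo, monitor.get_latest_update_id());
+ ///                 ChannelMonitorUpdateStatus::Completed
+ ///             },
+ ///             // Retry from a background task, completing via `channel_monitor_updated`.
+ ///             Err(_) => ChannelMonitorUpdateStatus::InProgress,
+ ///         },
+ ///         // For simplicity this sketch always rewrites the full monitor; persisting
+ ///         // only the delta is equally valid.
+ ///         Some(_update) => match self.write_monitor(&funding_txo, monitor) {
+ ///             Ok(()) => ChannelMonitorUpdateStatus::Completed,
+ ///             Err(_) => ChannelMonitorUpdateStatus::InProgress,
+ ///         },
+ ///     }
+ /// }
+ /// ```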
if self.update_monitor_with_chain_data(header, best_height, txdata, &process, funding_outpoint, &monitor_state).is_err() {
// Take the monitors lock for writing so that we poison it and any future
// operations going forward fail immediately.
- core::mem::drop(monitor_state);
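+ // (The read guard must be released first: `RwLock::write` would block forever
+ // while this thread still holds `monitors` for reading.)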
core::mem::drop(monitor_lock);
let _poison = self.monitors.write().unwrap();
log_error!(self.logger, "{}", err_str);
/// claims which are awaiting confirmation.
///
/// Includes the balances from each [`ChannelMonitor`] *except* those included in
- /// `ignored_channels`.
+ /// `ignored_channels`, allowing you to filter out balances from channels which are still open
+ /// (and whose balance should likely be pulled from the [`ChannelDetails`]).
///
/// See [`ChannelMonitor::get_claimable_balances`] for more details on the exact criteria for
/// inclusion in the return value.
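+ ///
+ /// For example (a sketch, assuming `chain_monitor` and a `channel_manager` are in scope):
+ ///
+ /// ```ignore
+ /// // Open channels' balances are best read from their `ChannelDetails`, so skip them here.
+ /// let open_channels = channel_manager.list_channels();
+ /// let ignored = open_channels.iter().collect::<Vec<_>>();
+ /// // Only balances from closed (or closing) channels remain.
+ /// let claimable = chain_monitor.get_claimable_balances(&ignored);
+ /// ```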
Some(monitor_state) => {
let monitor = &monitor_state.monitor;
log_trace!(self.logger, "Updating ChannelMonitor for channel {}", log_funding_info!(monitor));
- let update_res = monitor.update_monitor(update, &self.broadcaster, &*self.fee_estimator, &self.logger);
+ let update_res = monitor.update_monitor(update, &self.broadcaster, &self.fee_estimator, &self.logger);
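+ // Derive an ID for this update so that, if persistence completes asynchronously,
+ // the update can later be marked completed via `channel_monitor_updated`.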
let update_id = MonitorUpdateId::from_monitor_update(update);
let mut pending_monitor_updates = monitor_state.pending_monitor_updates.lock().unwrap();
let mut pending_monitor_events = self.pending_monitor_events.lock().unwrap().split_off(0);
for monitor_state in self.monitors.read().unwrap().values() {
let is_pending_monitor_update = monitor_state.has_pending_chainsync_updates(&monitor_state.pending_monitor_updates.lock().unwrap());
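+ // Withhold events while a chain-sync-driven persist is still in flight, but only
+ // for up to LATENCY_GRACE_PERIOD_BLOCKS - past that, release them anyway rather
+ // than risk funds loss.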
- if is_pending_monitor_update &&
- monitor_state.last_chain_persist_height.load(Ordering::Acquire) + LATENCY_GRACE_PERIOD_BLOCKS as usize
- > self.highest_chain_height.load(Ordering::Acquire)
- {
- log_debug!(self.logger, "A Channel Monitor sync is still in progress, refusing to provide monitor events!");
- } else {
+ if !is_pending_monitor_update ||
+     monitor_state.last_chain_persist_height.load(Ordering::Acquire) + LATENCY_GRACE_PERIOD_BLOCKS as usize
+         <= self.highest_chain_height.load(Ordering::Acquire)
+ {
if is_pending_monitor_update {
log_error!(self.logger, "A ChannelMonitor sync took longer than {} blocks to complete.", LATENCY_GRACE_PERIOD_BLOCKS);
log_error!(self.logger, " To avoid funds-loss, we are allowing monitor updates to be released.");