+ /// Indicates that the persistence of a [`ChannelMonitor`] has completed after
+ /// [`ChannelMonitorUpdateErr::TemporaryFailure`] was returned from an update operation.
+ ///
+ /// Thus, the anticipated use is, at a high level:
+ /// 1) This [`ChainMonitor`] calls [`Persist::update_persisted_channel`] which stores the
+ /// update to disk and begins updating any remote (e.g. watchtower/backup) copies,
+ /// returning [`ChannelMonitorUpdateErr::TemporaryFailure`],
+ /// 2) once all remote copies are updated, you call this function with the
+ /// `completed_update_id` of the update that finished (see the sketch below), and once all
+ /// pending updates have completed the channel will be re-enabled.
+ // Note that we only re-enable the channel once all `UpdateOrigin::OffChain` updates have
+ // completed; we don't care about `UpdateOrigin::ChainSync` updates for the channel state
+ // being updated. We only care about `UpdateOrigin::ChainSync` for returning `MonitorEvent`s.
+ ///
+ /// Returns an [`APIError::APIMisuseError`] if `funding_txo` does not match any currently
+ /// registered [`ChannelMonitor`]s.
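+ ///
+ /// For example, at a very high level (`remote_backup` and its `finish_writing` method are
+ /// hypothetical stand-ins for whatever tracks your in-flight remote writes):
+ ///
+ /// ```ignore
+ /// // `Persist::update_persisted_channel` previously returned `TemporaryFailure` after
+ /// // kicking off a remote write for the `MonitorUpdateId` it was given.
+ /// let (funding_txo, completed_update_id) = remote_backup.finish_writing();
+ /// // All copies are now durable, so let the `ChainMonitor` know; once no other updates
+ /// // are pending for this channel it will be re-enabled.
+ /// chain_monitor.channel_monitor_updated(funding_txo, completed_update_id)
+ ///     .expect("unknown funding_txo");
+ /// ```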
+ pub fn channel_monitor_updated(&self, funding_txo: OutPoint, completed_update_id: MonitorUpdateId) -> Result<(), APIError> {
+ let monitors = self.monitors.read().unwrap();
+ let monitor_data = if let Some(mon) = monitors.get(&funding_txo) { mon } else {
+ return Err(APIError::APIMisuseError { err: format!("No ChannelMonitor matching funding outpoint {:?} found", funding_txo) });
+ };
+ let mut pending_monitor_updates = monitor_data.pending_monitor_updates.lock().unwrap();
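+ // Remove the now-completed update from the pending set; whatever remains decides whether
+ // the channel (or its pending `MonitorEvent`s) can be released below.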
+ pending_monitor_updates.retain(|update_id| *update_id != completed_update_id);
+
+ match completed_update_id {
+ MonitorUpdateId { contents: UpdateOrigin::OffChain(_) } => {
+ // Note that we only check for `UpdateOrigin::OffChain` failures here - when
+ // we're told that an `UpdateOrigin::OffChain` monitor update completed, all we
+ // need to ensure is that we don't tell the `ChannelManager` to restore the
+ // channel to normal operation until every `UpdateOrigin::OffChain` update has
+ // completed.
+ // If some `UpdateOrigin::ChainSync` update is still pending, that's okay - we
+ // can still update our channel state, as long as we don't return `MonitorEvent`s
+ // from the monitor back to the `ChannelManager` until those updates complete.
+ let monitor_is_pending_updates = monitor_data.has_pending_offchain_updates(&pending_monitor_updates);
+ if monitor_is_pending_updates || monitor_data.channel_perm_failed.load(Ordering::Acquire) {
+ // If there are still monitor updates pending (or an old monitor update
+ // finished after a later one perm-failed), we cannot yet construct an
+ // `UpdateCompleted` event.
+ return Ok(());
+ }
+ self.pending_monitor_events.lock().unwrap().push(MonitorEvent::UpdateCompleted {
+ funding_txo,
+ monitor_update_id: monitor_data.monitor.get_latest_update_id(),
+ });
+ },
+ MonitorUpdateId { contents: UpdateOrigin::ChainSync(_) } => {
+ if !monitor_data.has_pending_chainsync_updates(&pending_monitor_updates) {
+ monitor_data.last_chain_persist_height.store(self.highest_chain_height.load(Ordering::Acquire), Ordering::Release);
+ // The next time `release_pending_monitor_events` is called, any events for this
+ // `ChannelMonitor` will be returned.
+ }
+ },
+ }
+ Ok(())
+ }
+
+ /// This wrapper avoids having to update some of our tests for now, as they assume the
+ /// direct `chain::Watch` API wherein we mark a monitor fully-updated by simply calling
+ /// `channel_monitor_updated` once with the highest update ID.
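+ ///
+ /// For example, in a test (assuming `monitor` is the relevant [`ChannelMonitor`]):
+ ///
+ /// ```ignore
+ /// chain_monitor.force_channel_monitor_updated(funding_txo, monitor.get_latest_update_id());
+ /// ```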
+ #[cfg(any(test, feature = "fuzztarget"))]
+ pub fn force_channel_monitor_updated(&self, funding_txo: OutPoint, monitor_update_id: u64) {
+ self.pending_monitor_events.lock().unwrap().push(MonitorEvent::UpdateCompleted {
+ funding_txo,
+ monitor_update_id,
+ });
+ }
+