+ /// Update one channel's data. The provided [`ChannelMonitor`] has already applied the given
+ /// update.
+ ///
+ /// Note that on every update, you **must** persist either the [`ChannelMonitorUpdate`] or the
+ /// updated monitor itself to disk/backups. See the [`Persist`] trait documentation for more
+ /// details.
+ ///
+ /// During blockchain synchronization operations, this may be called with no
+ /// [`ChannelMonitorUpdate`], in which case the full [`ChannelMonitor`] needs to be persisted.
+ /// Note that after the full [`ChannelMonitor`] is persisted, any previous
+ /// [`ChannelMonitorUpdate`]s which were persisted should be discarded - they can no longer be
+ /// applied to the persisted [`ChannelMonitor`], as they have already been applied to it.
+ ///
+ /// If an implementer chooses to persist only the updates, they need to make
+ /// sure that all the updates are applied to the `ChannelMonitors` *before*
+ /// the set of channel monitors is given to the `ChannelManager`
+ /// deserialization routine. See [`ChannelMonitor::update_monitor`] for
+ /// applying a monitor update to a monitor. If full `ChannelMonitors` are
+ /// persisted, then there is no need to persist individual updates.
+ ///
+ /// Note that there could be a performance tradeoff between persisting complete
+ /// channel monitors on every update vs. persisting only updates and applying
+ /// them in batches. The size of each monitor grows as `O(number of state updates)`,
+ /// whereas each update is small and `O(1)` in size.
+ ///
+ /// If you return [`ChannelMonitorUpdateErr::TemporaryFailure`], the `update_id` should later
+ /// be passed to [`ChainMonitor::channel_monitor_updated`] to identify this call once the
+ /// persistence operation completes.
+ ///
+ /// See [`Writeable::write`] on [`ChannelMonitor`] for writing out a `ChannelMonitor`,
+ /// [`Writeable::write`] on [`ChannelMonitorUpdate`] for writing out an update, and
+ /// [`ChannelMonitorUpdateErr`] for requirements when returning errors.
+ ///
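+ /// The following sketch shows the branching an implementation might perform; it is not a
+ /// required or complete implementation, and `write_monitor`, `write_update`, and
+ /// `prune_updates` are hypothetical storage helpers, not LDK APIs:
+ ///
+ /// ```ignore
+ /// fn update_persisted_channel(
+ /// 	&self, channel_id: OutPoint, update: &Option<ChannelMonitorUpdate>,
+ /// 	data: &ChannelMonitor<ChannelSigner>, _update_id: MonitorUpdateId,
+ /// ) -> Result<(), ChannelMonitorUpdateErr> {
+ /// 	match update {
+ /// 		// No update object: a chain sync changed the monitor directly, so the full
+ /// 		// monitor must be persisted and previously-persisted updates may be pruned.
+ /// 		None => {
+ /// 			self.write_monitor(&channel_id, data)?;
+ /// 			self.prune_updates(&channel_id)
+ /// 		},
+ /// 		// Off-chain update: persisting either the small update or the full monitor
+ /// 		// satisfies the requirements above.
+ /// 		Some(update) => self.write_update(&channel_id, update),
+ /// 	}
+ /// }
+ /// ```
+ ///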
+ /// [`Writeable::write`]: crate::util::ser::Writeable::write
+ fn update_persisted_channel(&self, channel_id: OutPoint, update: &Option<ChannelMonitorUpdate>, data: &ChannelMonitor<ChannelSigner>, update_id: MonitorUpdateId) -> Result<(), ChannelMonitorUpdateErr>;
+}
+
+struct MonitorHolder<ChannelSigner: Sign> {
+ monitor: ChannelMonitor<ChannelSigner>,
+ /// The full set of pending monitor updates for this Channel.
+ ///
+ /// Note that this lock must be held during updates to prevent a race where we call
+ /// update_persisted_channel, the user returns a TemporaryFailure and immediately calls
+ /// channel_monitor_updated, racing our insertion of the pending update into the contained
+ /// Vec (see the sketch after this struct).
+ ///
+ /// Beyond the synchronization of updates themselves, we cannot handle user events until after
+ /// any chain updates have been stored on disk. Thus, we scan this list when returning updates
+ /// to the ChannelManager, refusing to return any updates for a ChannelMonitor which is still
+ /// being persisted fully to disk after a chain update.
+ ///
+ /// This avoids a class of duplicate-event bugs: e.g., handling an on-chain claim generates a
+ /// claim monitor event, the relevant ChannelManager then generates a PaymentSent event and
+ /// drops the pending payment entry, and a reload before the monitor is persisted causes the
+ /// ChannelManager to re-add the same payment entry, so replaying the same block produces a
+ /// duplicate PaymentSent event.
+ pending_monitor_updates: Mutex<Vec<MonitorUpdateId>>,
+ /// When the user returns a PermanentFailure error from an update_persisted_channel call during
+ /// block processing, we inform the ChannelManager that the channel should be closed
+ /// asynchronously. In order to ensure no further changes happen before the ChannelManager has
+ /// processed the closure event, we set this to true and return PermanentFailure for any
+ /// further chain::Watch calls for this channel.
+ channel_perm_failed: AtomicBool,
+ /// The last block height at which no [`UpdateOrigin::ChainSync`] monitor updates were present
+ /// in `pending_monitor_updates`.
+ /// If it has been more than [`LATENCY_GRACE_PERIOD_BLOCKS`] blocks since we started waiting
+ /// on a chain sync update to be persisted, we let monitor events return to the
+ /// `ChannelManager`: we cannot hold them up forever, or we'll end up with HTLC preimages
+ /// waiting to feed back into an upstream channel forever, risking loss of funds.
+ last_chain_persist_height: AtomicUsize,
+}
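+
+ // A sketch (in comment form) of the locking discipline described on the
+ // `pending_monitor_updates` field above. `persister`, `holder`, `funding_txo`, `update`, and
+ // `update_id` are hypothetical local bindings; the real call sites live in `ChainMonitor`:
+ //
+ //   let mut pending = holder.pending_monitor_updates.lock().unwrap();
+ //   match persister.update_persisted_channel(funding_txo, &Some(update), &holder.monitor, update_id) {
+ //   	Err(ChannelMonitorUpdateErr::TemporaryFailure) => {
+ //   		// Insert while still holding the lock, so a racing channel_monitor_updated
+ //   		// call cannot see the pending set before this update is recorded in it.
+ //   		pending.push(update_id);
+ //   	},
+ //   	_ => { /* success or PermanentFailure handling */ },
+ //   }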
+
+impl<ChannelSigner: Sign> MonitorHolder<ChannelSigner> {
+ fn has_pending_offchain_updates(&self, pending_monitor_updates_lock: &MutexGuard<Vec<MonitorUpdateId>>) -> bool {
+ pending_monitor_updates_lock.iter().any(|update_id|
+ matches!(update_id.contents, UpdateOrigin::OffChain(_)))
+ }
+ fn has_pending_chainsync_updates(&self, pending_monitor_updates_lock: &MutexGuard<Vec<MonitorUpdateId>>) -> bool {
+ pending_monitor_updates_lock.iter().any(|update_id|
+ matches!(update_id.contents, UpdateOrigin::ChainSync(_)))
+ }
+}
+
+/// A read-only reference to a current ChannelMonitor.
+///
+ /// Note that this holds a read lock on the monitor map in [`ChainMonitor`] and may block
+ /// other operations until it is released.
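+ ///
+ /// A usage sketch, assuming a [`ChainMonitor`] accessor (here called `get_monitor`) which
+ /// returns one of these for a given funding outpoint:
+ ///
+ /// ```ignore
+ /// let monitor = chain_monitor.get_monitor(funding_txo)?;
+ /// // Deref exposes the underlying ChannelMonitor while the read lock is held.
+ /// let funding_info = monitor.get_funding_txo();
+ /// drop(monitor); // release the read lock promptly so other operations are not blocked
+ /// ```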
+pub struct LockedChannelMonitor<'a, ChannelSigner: Sign> {
+ lock: RwLockReadGuard<'a, HashMap<OutPoint, MonitorHolder<ChannelSigner>>>,
+ funding_txo: OutPoint,
+}
+
+impl<ChannelSigner: Sign> Deref for LockedChannelMonitor<'_, ChannelSigner> {
+ type Target = ChannelMonitor<ChannelSigner>;
+ fn deref(&self) -> &ChannelMonitor<ChannelSigner> {
+ &self.lock.get(&self.funding_txo).expect("Checked at construction").monitor
+ }
+}