+/// An enum representing the status of a channel monitor update persistence.
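+///
+/// As a minimal sketch of how a persister might map the outcome of a local, synchronous write
+/// into this status (the helper name and the assumption that `Ok` implies a durable, `fsync`ed
+/// write are illustrative only):
+///
+/// ```
+/// use lightning::chain::ChannelMonitorUpdateStatus;
+///
+/// fn status_from_write_result(res: std::io::Result<()>) -> ChannelMonitorUpdateStatus {
+///     match res {
+///         // The write (including any required `fsync`) completed durably.
+///         Ok(()) => ChannelMonitorUpdateStatus::Completed,
+///         // A failure we do not expect to recover from. For a transient failure (e.g. a
+///         // briefly-unreachable remote backup), `InProgress` would be returned instead, with a
+///         // `MonitorEvent::Completed` provided once the write eventually succeeds.
+///         Err(_) => ChannelMonitorUpdateStatus::PermanentFailure,
+///     }
+/// }
+/// ```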
+#[derive(Clone, Copy, Debug, PartialEq)]
+pub enum ChannelMonitorUpdateStatus {
+ /// The update has been durably persisted and all copies of the relevant [`ChannelMonitor`]
+ /// have been updated.
+ ///
+ /// This includes performing any `fsync()` calls required to ensure the update is guaranteed to
+ /// be available on restart even if the application crashes.
+ Completed,
+ /// Used to indicate a temporary failure (e.g. connection to a watchtower or remote backup of
+ /// our state failed, but is expected to succeed at some point in the future).
+ ///
+ /// Such a failure will "freeze" a channel, preventing us from revoking old states or
+ /// submitting new commitment transactions to the counterparty. Once the update(s) which failed
+ /// have been successfully applied, a [`MonitorEvent::Completed`] can be used to restore the
+ /// channel to an operational state.
+ ///
+ /// Note that a given [`ChannelManager`] will *never* re-generate a given
+ /// [`ChannelMonitorUpdate`]. If you return this status you must ensure the update is written
+ /// to disk safely before writing the latest [`ChannelManager`] state, or you should return
+ /// [`PermanentFailure`] instead.
+ ///
+ /// Even when a channel has been "frozen", updates to the [`ChannelMonitor`] can continue to
+ /// occur (e.g. if an inbound HTLC which we forwarded was claimed upstream, resulting in us
+ /// attempting to claim it on this channel) and those updates must still be persisted.
+ ///
+ /// No updates to the channel will be made which could invalidate other [`ChannelMonitor`]s
+ /// until a [`MonitorEvent::Completed`] is provided, even if you return no error on a later
+ /// monitor update for the same channel.
+ ///
+ /// For deployments where a copy of [`ChannelMonitor`]s and other local state is backed up in
+ /// a remote location (with local copies persisted immediately), it is anticipated that all
+ /// updates will return [`InProgress`] until the remote copies have been updated.
+ ///
+ /// [`PermanentFailure`]: ChannelMonitorUpdateStatus::PermanentFailure
+ /// [`InProgress`]: ChannelMonitorUpdateStatus::InProgress
+ /// [`ChannelManager`]: crate::ln::channelmanager::ChannelManager
+ InProgress,
+ /// Used to indicate that no further channel monitor updates will be allowed (likely because a
+ /// disk failed, or a remote copy of this [`ChannelMonitor`] is no longer reachable and thus
+ /// not updatable).
+ ///
+ /// When this is returned, [`ChannelManager`] will force-close the channel but *not* broadcast
+ /// our current commitment transaction. This avoids a dangerous case where a local disk failure
+ /// (e.g. the Linux-default remounting of the disk as read-only) causes [`PermanentFailure`]s
+ /// for all monitor updates. If we were to broadcast our latest commitment transaction and then
+ /// restart, we could end up reading a previous [`ChannelMonitor`] and [`ChannelManager`],
+ /// revoking our now-broadcasted state before seeing it confirm and losing all our funds.
+ ///
+ /// Note that this is somewhat of a tradeoff - if the disk is really gone and we may have lost
+ /// the data permanently, we really should broadcast immediately. If the data can be recovered
+ /// with manual intervention, we'd rather close the channel, rejecting future updates to it,
+ /// and broadcast the latest state only if we have HTLCs to claim which are timing out (which
+ /// we do as long as blocks are connected).
+ ///
+ /// In order to broadcast the latest local commitment transaction, you'll need to call
+ /// [`ChannelMonitor::get_latest_holder_commitment_txn`] and broadcast the resulting
+ /// transactions once you've safely ensured no further channel updates can be generated by your
+ /// [`ChannelManager`].
+ ///
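+ /// A rough sketch of that flow, assuming `monitor`, `logger` and `broadcaster` (a
+ /// `BroadcasterInterface`) are in scope and no further channel updates can be generated:
+ ///
+ /// ```ignore
+ /// for tx in monitor.get_latest_holder_commitment_txn(&logger) {
+ ///     broadcaster.broadcast_transaction(&tx);
+ /// }
+ /// ```
+ ///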
+ /// Note that at least one final [`ChannelMonitorUpdate`] may still be provided, which must
+ /// still be processed by a running [`ChannelMonitor`]. This final update will mark the
+ /// [`ChannelMonitor`] as finalized, ensuring no further updates (e.g. revocation of the latest
+ /// commitment transaction) are allowed.
+ ///
+ /// Note that even if you return a [`PermanentFailure`] due to unavailability of secondary
+ /// [`ChannelMonitor`] copies, you should still make an attempt to store the update where
+ /// possible to ensure you can claim HTLC outputs on the latest commitment transaction
+ /// broadcasted later.
+ ///
+ /// In the case of a distributed-watchtower deployment, the new version must still be written
+ /// to disk, as state may have been stored by one watchtower but rejected by another due to a
+ /// block forcing a commitment broadcast. That storage is then used to claim outputs of the
+ /// rejected state once it is confirmed on-chain by a watchtower lagging behind on block
+ /// processing.
+ ///
+ /// [`PermanentFailure`]: ChannelMonitorUpdateStatus::PermanentFailure
+ /// [`ChannelManager`]: crate::ln::channelmanager::ChannelManager
+ PermanentFailure,
+}
+
+/// The `Watch` trait defines behavior for watching on-chain activity pertaining to channels as
+/// blocks are connected and disconnected.
+///
+/// Each channel is associated with a [`ChannelMonitor`]. Implementations of this trait are
+/// responsible for maintaining a set of monitors such that they can be updated accordingly as
+/// channel state changes and HTLCs are resolved. See method documentation for specific
+/// requirements.
+///
+/// Implementations **must** ensure that updates are successfully applied and persisted upon method
+/// completion. If an update fails with a [`PermanentFailure`], then the node must immediately shut
+/// down without taking any further action such as persisting the current state.
+///
+/// If an implementation maintains multiple instances of a channel's monitor (e.g., by storing
+/// backup copies), then it must ensure that updates are applied across all instances. Otherwise, it
+/// could result in a revoked transaction being broadcast, allowing the counterparty to claim all
+/// funds in the channel. See [`ChannelMonitorUpdateStatus`] for more details about how to handle
+/// multiple instances.
+///
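+/// As a rough sketch, the state such an implementation typically maintains might look as follows
+/// (the struct and field names are illustrative, not an LDK API):
+///
+/// ```ignore
+/// struct SimpleWatcher<ChannelSigner: Sign> {
+///     // All monitors being watched, keyed by their channel's funding outpoint. Each
+///     // `update_channel` call must apply the update to this copy (via
+///     // `ChannelMonitor::update_monitor`) *and* to any backup copies before returning
+///     // `Completed`.
+///     monitors: Mutex<HashMap<OutPoint, ChannelMonitor<ChannelSigner>>>,
+///     // Events collected from the monitors, drained by `release_pending_monitor_events`.
+///     pending_events: Mutex<Vec<(OutPoint, Vec<MonitorEvent>, Option<PublicKey>)>>,
+/// }
+/// ```
+///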
+/// [`PermanentFailure`]: ChannelMonitorUpdateStatus::PermanentFailure
+pub trait Watch<ChannelSigner: Sign> {
+ /// Watches a channel identified by `funding_txo` using `monitor`.
+ ///
+ /// Implementations are responsible for watching the chain for the funding transaction along
+ /// with any spends of outputs returned by [`get_outputs_to_watch`]. In practice, this means
+ /// calling [`block_connected`] and [`block_disconnected`] on the monitor.
+ ///
+ /// Note: this interface MUST return [`ChannelMonitorUpdateStatus::PermanentFailure`] if
+ /// the given `funding_txo` has previously been registered via `watch_channel`.
+ ///
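+ /// For example (a sketch, assuming the implementation keeps its monitors in a map keyed by
+ /// funding outpoint):
+ ///
+ /// ```ignore
+ /// if self.monitors.lock().unwrap().contains_key(&funding_txo) {
+ ///     // `funding_txo` was already registered via a previous `watch_channel` call.
+ ///     return ChannelMonitorUpdateStatus::PermanentFailure;
+ /// }
+ /// ```
+ ///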
+ /// [`get_outputs_to_watch`]: channelmonitor::ChannelMonitor::get_outputs_to_watch
+ /// [`block_connected`]: channelmonitor::ChannelMonitor::block_connected
+ /// [`block_disconnected`]: channelmonitor::ChannelMonitor::block_disconnected
+ fn watch_channel(&self, funding_txo: OutPoint, monitor: ChannelMonitor<ChannelSigner>) -> ChannelMonitorUpdateStatus;
+
+ /// Updates a channel identified by `funding_txo` by applying `update` to its monitor.
+ ///
+ /// Implementations must call [`update_monitor`] with the given update. See
+ /// [`ChannelMonitorUpdateStatus`] for invariants around returning an error.
+ ///
+ /// [`update_monitor`]: channelmonitor::ChannelMonitor::update_monitor
+ fn update_channel(&self, funding_txo: OutPoint, update: ChannelMonitorUpdate) -> ChannelMonitorUpdateStatus;
+
+ /// Returns any monitor events since the last call. Subsequent calls must only return new
+ /// events.
+ ///
+ /// Note that after any block- or transaction-connection calls to a [`ChannelMonitor`], no
+ /// further events may be returned here until the [`ChannelMonitor`] has been fully persisted
+ /// to disk.
+ ///
+ /// For details on asynchronous [`ChannelMonitor`] updating and returning
+ /// [`MonitorEvent::Completed`] here, see [`ChannelMonitorUpdateStatus::InProgress`].
+ fn release_pending_monitor_events(&self) -> Vec<(OutPoint, Vec<MonitorEvent>, Option<PublicKey>)>;
+}
+
+/// The `Filter` trait defines behavior for indicating chain activity of interest pertaining to
+/// channels.
+///
+/// This is useful in order to have a [`Watch`] implementation convey to a chain source the
+/// transactions it wishes to be notified of. Notification may take the form of pre-filtering
+/// blocks or, in the case of [BIP 157]/[BIP 158], only fetching a block if the compact filter
+/// matches. If receiving full blocks from a chain source, any further filtering is unnecessary.
+///
+/// After an output has been registered, subsequent block retrievals from the chain source must not
+/// exclude any transactions matching the new criteria nor any in-block descendants of such
+/// transactions.
+///
+/// Note that use as part of a [`Watch`] implementation involves reentrancy. Therefore, the `Filter`
+/// should not block on I/O. Implementations should instead queue the newly monitored data to be
+/// processed later. Then, in order to block until the data has been processed, any [`Watch`]
+/// invocation that has called the `Filter` must return [`InProgress`].
+///
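+/// A minimal sketch of such a queueing `Filter` (the type and field names are illustrative, not
+/// part of LDK):
+///
+/// ```
+/// use bitcoin::{Script, Txid};
+/// use lightning::chain::{Filter, WatchedOutput};
+/// use std::sync::Mutex;
+///
+/// struct QueueingFilter {
+///     // Registrations are only queued here; the actual chain-source work happens later,
+///     // outside of the `Watch` call which triggered the registration.
+///     pending_txs: Mutex<Vec<(Txid, Script)>>,
+///     pending_outputs: Mutex<Vec<WatchedOutput>>,
+/// }
+///
+/// impl Filter for QueueingFilter {
+///     fn register_tx(&self, txid: &Txid, script_pubkey: &Script) {
+///         self.pending_txs.lock().unwrap().push((*txid, script_pubkey.clone()));
+///     }
+///     fn register_output(&self, output: WatchedOutput) {
+///         self.pending_outputs.lock().unwrap().push(output);
+///     }
+/// }
+/// ```
+///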
+/// [`InProgress`]: ChannelMonitorUpdateStatus::InProgress
+/// [BIP 157]: https://github.com/bitcoin/bips/blob/master/bip-0157.mediawiki
+/// [BIP 158]: https://github.com/bitcoin/bips/blob/master/bip-0158.mediawiki
+pub trait Filter {
+ /// Registers interest in a transaction with `txid` and having an output with `script_pubkey` as
+ /// a spending condition.
+ fn register_tx(&self, txid: &Txid, script_pubkey: &Script);
+
+ /// Registers interest in spends of a transaction output.
+ ///
+ /// Note that this method might be called during processing of a new block. You therefore need
+ /// to ensure that spends of dependent outputs within an already-connected block are also
+ /// correctly handled, e.g., by re-scanning the block in question whenever new outputs have
+ /// been registered mid-processing.
+ fn register_output(&self, output: WatchedOutput);
+}
+
+/// A transaction output watched by a [`ChannelMonitor`] for spends on-chain.
+///
+/// Used to convey to a [`Filter`] such an output with a given spending condition. Any transaction
+/// spending the output must be given to [`ChannelMonitor::block_connected`] either directly or via
+/// [`Confirm::transactions_confirmed`].
+///
+/// If `block_hash` is `Some`, this indicates the output was created in the corresponding block and
+/// may have been spent there. See [`Filter::register_output`] for details.
+///
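+/// A minimal sketch of handing such an output to a [`Filter`] (the helper function is
+/// illustrative only):
+///
+/// ```
+/// use bitcoin::{BlockHash, Script};
+/// use lightning::chain::{Filter, WatchedOutput};
+/// use lightning::chain::transaction::OutPoint;
+///
+/// fn watch_funding_output<F: Filter>(
+///     chain_source: &F, outpoint: OutPoint, script_pubkey: Script, block_hash: Option<BlockHash>,
+/// ) {
+///     // `block_hash` should be `Some` if the output was created in an already-connected block.
+///     chain_source.register_output(WatchedOutput { block_hash, outpoint, script_pubkey });
+/// }
+/// ```
+///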
+/// [`ChannelMonitor`]: channelmonitor::ChannelMonitor
+/// [`ChannelMonitor::block_connected`]: channelmonitor::ChannelMonitor::block_connected
+#[derive(Clone, PartialEq, Hash)]
+pub struct WatchedOutput {
+ /// First block where the transaction output may have been spent.
+ pub block_hash: Option<BlockHash>,
+
+ /// Outpoint identifying the transaction output.
+ pub outpoint: OutPoint,
+
+ /// Spending condition of the transaction output.
+ pub script_pubkey: Script,
+}
+
+impl<T: Listen> Listen for dyn core::ops::Deref<Target = T> {
+ fn filtered_block_connected(&self, header: &BlockHeader, txdata: &TransactionData, height: u32) {
+ (**self).filtered_block_connected(header, txdata, height);
+ }
+
+ fn block_disconnected(&self, header: &BlockHeader, height: u32) {
+ (**self).block_disconnected(header, height);
+ }
+}
+
+impl<T: core::ops::Deref, U: core::ops::Deref> Listen for (T, U)
+where
+ T::Target: Listen,
+ U::Target: Listen,
+{
+ fn filtered_block_connected(&self, header: &BlockHeader, txdata: &TransactionData, height: u32) {
+ self.0.filtered_block_connected(header, txdata, height);
+ self.1.filtered_block_connected(header, txdata, height);
+ }
+
+ fn block_disconnected(&self, header: &BlockHeader, height: u32) {
+ self.0.block_disconnected(header, height);
+ self.1.block_disconnected(header, height);