1 // This file is Copyright its original authors, visible in version control
4 // This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
5 // or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
6 // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
7 // You may not use this file except in accordance with one or both of these
10 //! Logic to connect off-chain channel management with on-chain transaction monitoring.
12 //! [`ChainMonitor`] is an implementation of [`chain::Watch`] used both to process blocks and to
13 //! update [`ChannelMonitor`]s accordingly. If any on-chain events need further processing, it will
14 //! make those available as [`MonitorEvent`]s to be consumed.
16 //! [`ChainMonitor`] is parameterized by an optional chain source, which must implement the
17 //! [`chain::Filter`] trait. This provides a mechanism to signal new relevant outputs back to light
18 //! clients, such that transactions spending those outputs are included in block data.
20 //! [`ChainMonitor`] may be used directly to monitor channels locally or as a part of a distributed
21 //! setup to monitor channels remotely. In the latter case, a custom [`chain::Watch`] implementation
22 //! would be responsible for routing each update to a remote server and for retrieving monitor
23 //! events. The remote server would make use of [`ChainMonitor`] for block processing and for
24 //! servicing [`ChannelMonitor`] updates from the client.
26 use bitcoin::blockdata::block::Header;
27 use bitcoin::hash_types::{Txid, BlockHash};
30 use crate::chain::{ChannelMonitorUpdateStatus, Filter, WatchedOutput};
31 use crate::chain::chaininterface::{BroadcasterInterface, FeeEstimator};
32 use crate::chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, Balance, MonitorEvent, TransactionOutputs, WithChannelMonitor};
33 use crate::chain::transaction::{OutPoint, TransactionData};
34 use crate::ln::types::ChannelId;
35 use crate::sign::ecdsa::EcdsaChannelSigner;
37 use crate::events::{Event, EventHandler};
38 use crate::util::logger::{Logger, WithContext};
39 use crate::util::errors::APIError;
40 use crate::util::wakers::{Future, Notifier};
41 use crate::ln::channel_state::ChannelDetails;
43 use crate::prelude::*;
44 use crate::sync::{RwLock, RwLockReadGuard, Mutex, MutexGuard};
46 use core::sync::atomic::{AtomicUsize, Ordering};
47 use bitcoin::hashes::Hash;
48 use bitcoin::secp256k1::PublicKey;
50 /// `Persist` defines behavior for persisting channel monitors: this could mean
51 /// writing once to disk, and/or uploading to one or more backup services.
53 /// Persistence can happen in one of two ways - synchronously completing before the trait method
54 /// calls return or asynchronously in the background.
56 /// # For those implementing synchronous persistence
58 /// * If persistence completes fully (including any relevant `fsync()` calls), the implementation
59 /// should return [`ChannelMonitorUpdateStatus::Completed`], indicating normal channel operation
62 /// * If persistence fails for some reason, implementations should consider returning
/// [`ChannelMonitorUpdateStatus::InProgress`] and retrying all pending persistence operations in
64 /// the background with [`ChainMonitor::list_pending_monitor_updates`] and
65 /// [`ChainMonitor::get_monitor`].
67 /// Once a full [`ChannelMonitor`] has been persisted, all pending updates for that channel can
68 /// be marked as complete via [`ChainMonitor::channel_monitor_updated`].
70 /// If at some point no further progress can be made towards persisting the pending updates, the
71 /// node should simply shut down.
73 /// * If the persistence has failed and cannot be retried further (e.g. because of an outage),
74 /// [`ChannelMonitorUpdateStatus::UnrecoverableError`] can be used, though this will result in
75 /// an immediate panic and future operations in LDK generally failing.
77 /// # For those implementing asynchronous persistence
79 /// All calls should generally spawn a background task and immediately return
80 /// [`ChannelMonitorUpdateStatus::InProgress`]. Once the update completes,
81 /// [`ChainMonitor::channel_monitor_updated`] should be called with the corresponding
82 /// [`ChannelMonitor::get_latest_update_id`] or [`ChannelMonitorUpdate::update_id`].
84 /// Note that unlike the direct [`chain::Watch`] interface,
85 /// [`ChainMonitor::channel_monitor_updated`] must be called once for *each* update which occurs.
87 /// If at some point no further progress can be made towards persisting a pending update, the node
88 /// should simply shut down. Until then, the background task should either loop indefinitely, or
89 /// persistence should be regularly retried with [`ChainMonitor::list_pending_monitor_updates`]
90 /// and [`ChainMonitor::get_monitor`] (note that if a full monitor is persisted all pending
91 /// monitor updates may be marked completed).
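///
/// For example, a background retry loop might look like the following sketch; the
/// `persist_full_monitor` helper is hypothetical and stands in for whatever durable storage you use:
///
/// ```ignore
/// for (funding_txo, update_ids) in chain_monitor.list_pending_monitor_updates() {
///     if update_ids.is_empty() { continue; }
///     // Re-persist the full, up-to-date monitor rather than the individual updates.
///     let monitor = chain_monitor.get_monitor(funding_txo).expect("listed above");
///     if persist_full_monitor(funding_txo, &*monitor).is_ok() {
///         for update_id in update_ids {
///             let _ = chain_monitor.channel_monitor_updated(funding_txo, update_id);
///         }
///     }
/// }
/// ```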
93 /// # Using remote watchtowers
/// Watchtowers may be updated as part of an implementation of this trait, utilizing the async
/// update process described above while the watchtower is being updated. The following methods are
/// provided for building transactions for a watchtower:
98 /// [`ChannelMonitor::initial_counterparty_commitment_tx`],
99 /// [`ChannelMonitor::counterparty_commitment_txs_from_update`],
100 /// [`ChannelMonitor::sign_to_local_justice_tx`], [`TrustedCommitmentTransaction::revokeable_output_index`],
101 /// [`TrustedCommitmentTransaction::build_to_local_justice_tx`].
103 /// [`TrustedCommitmentTransaction::revokeable_output_index`]: crate::ln::chan_utils::TrustedCommitmentTransaction::revokeable_output_index
104 /// [`TrustedCommitmentTransaction::build_to_local_justice_tx`]: crate::ln::chan_utils::TrustedCommitmentTransaction::build_to_local_justice_tx
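///
/// # Example
///
/// A minimal sketch of a synchronous implementation; `write_monitor_to_disk` and
/// `move_monitor_to_archive` are hypothetical helpers for whatever durable storage you use:
///
/// ```ignore
/// struct MyPersister { /* handle to durable storage */ }
///
/// impl<ChannelSigner: EcdsaChannelSigner> Persist<ChannelSigner> for MyPersister {
///     fn persist_new_channel(&self, funding_outpoint: OutPoint, monitor: &ChannelMonitor<ChannelSigner>) -> ChannelMonitorUpdateStatus {
///         // Serialize via `Writeable::write` and `fsync()` before returning `Completed`.
///         match self.write_monitor_to_disk(funding_outpoint, monitor) {
///             Ok(()) => ChannelMonitorUpdateStatus::Completed,
///             // On failure, retry in the background (see above) rather than returning an error.
///             Err(_) => ChannelMonitorUpdateStatus::InProgress,
///         }
///     }
///
///     fn update_persisted_channel(&self, funding_outpoint: OutPoint, _update: Option<&ChannelMonitorUpdate>, monitor: &ChannelMonitor<ChannelSigner>) -> ChannelMonitorUpdateStatus {
///         // Persisting the full monitor on every call is the simplest correct strategy.
///         match self.write_monitor_to_disk(funding_outpoint, monitor) {
///             Ok(()) => ChannelMonitorUpdateStatus::Completed,
///             Err(_) => ChannelMonitorUpdateStatus::InProgress,
///         }
///     }
///
///     fn archive_persisted_channel(&self, funding_outpoint: OutPoint) {
///         // Move the data to a backup location rather than deleting it outright.
///         let _ = self.move_monitor_to_archive(funding_outpoint);
///     }
/// }
/// ```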
105 pub trait Persist<ChannelSigner: EcdsaChannelSigner> {
106 /// Persist a new channel's data in response to a [`chain::Watch::watch_channel`] call. This is
107 /// called by [`ChannelManager`] for new channels, or may be called directly, e.g. on startup.
109 /// The data can be stored any way you want, but the identifier provided by LDK is the
110 /// channel's outpoint (and it is up to you to maintain a correct mapping between the outpoint
111 /// and the stored channel data). Note that you **must** persist every new monitor to disk.
113 /// The [`ChannelMonitor::get_latest_update_id`] uniquely links this call to [`ChainMonitor::channel_monitor_updated`].
114 /// For [`Persist::persist_new_channel`], it is only necessary to call [`ChainMonitor::channel_monitor_updated`]
115 /// when you return [`ChannelMonitorUpdateStatus::InProgress`].
117 /// See [`Writeable::write`] on [`ChannelMonitor`] for writing out a `ChannelMonitor`
118 /// and [`ChannelMonitorUpdateStatus`] for requirements when returning errors.
120 /// [`ChannelManager`]: crate::ln::channelmanager::ChannelManager
121 /// [`Writeable::write`]: crate::util::ser::Writeable::write
122 fn persist_new_channel(&self, channel_funding_outpoint: OutPoint, monitor: &ChannelMonitor<ChannelSigner>) -> ChannelMonitorUpdateStatus;
/// Update one channel's data. The provided [`ChannelMonitor`] has already applied the given
/// update.
127 /// Note that on every update, you **must** persist either the [`ChannelMonitorUpdate`] or the
/// updated monitor itself to disk/backups. See the [`Persist`] trait documentation for more
/// details.
131 /// During blockchain synchronization operations, and in some rare cases, this may be called with
132 /// no [`ChannelMonitorUpdate`], in which case the full [`ChannelMonitor`] needs to be persisted.
133 /// Note that after the full [`ChannelMonitor`] is persisted any previous
134 /// [`ChannelMonitorUpdate`]s which were persisted should be discarded - they can no longer be
135 /// applied to the persisted [`ChannelMonitor`] as they were already applied.
137 /// If an implementer chooses to persist the updates only, they need to make
138 /// sure that all the updates are applied to the `ChannelMonitors` *before*
139 /// the set of channel monitors is given to the `ChannelManager`
140 /// deserialization routine. See [`ChannelMonitor::update_monitor`] for
141 /// applying a monitor update to a monitor. If full `ChannelMonitors` are
142 /// persisted, then there is no need to persist individual updates.
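///
/// A rough sketch of that startup replay, assuming hypothetical `read_monitor` and
/// `read_updates_for` helpers for your storage layer:
///
/// ```ignore
/// let monitor = read_monitor(&funding_outpoint)?;
/// for update in read_updates_for(&funding_outpoint)? {
///     // Apply each stored update before handing the monitor to `ChannelManager` deserialization.
///     monitor.update_monitor(&update, &broadcaster, &fee_estimator, &logger)
///         .expect("stored updates should apply cleanly to the stored monitor");
/// }
/// ```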
144 /// Note that there could be a performance tradeoff between persisting complete
145 /// channel monitors on every update vs. persisting only updates and applying
146 /// them in batches. The size of each monitor grows `O(number of state updates)`
147 /// whereas updates are small and `O(1)`.
149 /// The [`ChannelMonitorUpdate::update_id`] or [`ChannelMonitor::get_latest_update_id`] uniquely
150 /// links this call to [`ChainMonitor::channel_monitor_updated`].
151 /// For [`Persist::update_persisted_channel`], it is only necessary to call [`ChainMonitor::channel_monitor_updated`]
152 /// when a [`ChannelMonitorUpdate`] is provided and when you return [`ChannelMonitorUpdateStatus::InProgress`].
154 /// See [`Writeable::write`] on [`ChannelMonitor`] for writing out a `ChannelMonitor`,
155 /// [`Writeable::write`] on [`ChannelMonitorUpdate`] for writing out an update, and
156 /// [`ChannelMonitorUpdateStatus`] for requirements when returning errors.
158 /// [`Writeable::write`]: crate::util::ser::Writeable::write
159 fn update_persisted_channel(&self, channel_funding_outpoint: OutPoint, monitor_update: Option<&ChannelMonitorUpdate>, monitor: &ChannelMonitor<ChannelSigner>) -> ChannelMonitorUpdateStatus;
160 /// Prevents the channel monitor from being loaded on startup.
162 /// Archiving the data in a backup location (rather than deleting it fully) is useful for
163 /// hedging against data loss in case of unexpected failure.
164 fn archive_persisted_channel(&self, channel_funding_outpoint: OutPoint);
167 struct MonitorHolder<ChannelSigner: EcdsaChannelSigner> {
168 monitor: ChannelMonitor<ChannelSigner>,
169 /// The full set of pending monitor updates for this Channel.
171 /// Note that this lock must be held during updates to prevent a race where we call
172 /// update_persisted_channel, the user returns a
173 /// [`ChannelMonitorUpdateStatus::InProgress`], and then calls channel_monitor_updated
174 /// immediately, racing our insertion of the pending update into the contained Vec.
175 pending_monitor_updates: Mutex<Vec<u64>>,
178 impl<ChannelSigner: EcdsaChannelSigner> MonitorHolder<ChannelSigner> {
179 fn has_pending_updates(&self, pending_monitor_updates_lock: &MutexGuard<Vec<u64>>) -> bool {
180 !pending_monitor_updates_lock.is_empty()
184 /// A read-only reference to a current ChannelMonitor.
/// Note that this holds a mutex in [`ChainMonitor`] and may block other events until it is
/// released.
188 pub struct LockedChannelMonitor<'a, ChannelSigner: EcdsaChannelSigner> {
189 lock: RwLockReadGuard<'a, HashMap<OutPoint, MonitorHolder<ChannelSigner>>>,
190 funding_txo: OutPoint,
193 impl<ChannelSigner: EcdsaChannelSigner> Deref for LockedChannelMonitor<'_, ChannelSigner> {
194 type Target = ChannelMonitor<ChannelSigner>;
195 fn deref(&self) -> &ChannelMonitor<ChannelSigner> {
196 &self.lock.get(&self.funding_txo).expect("Checked at construction").monitor
200 /// An implementation of [`chain::Watch`] for monitoring channels.
202 /// Connected and disconnected blocks must be provided to `ChainMonitor` as documented by
203 /// [`chain::Watch`]. May be used in conjunction with [`ChannelManager`] to monitor channels locally
/// or used independently to monitor channels remotely. See the [module-level documentation] for
/// more details.
207 /// Note that `ChainMonitor` should regularly trigger rebroadcasts/fee bumps of pending claims from
208 /// a force-closed channel. This is crucial in preventing certain classes of pinning attacks,
209 /// detecting substantial mempool feerate changes between blocks, and ensuring reliability if
/// broadcasting fails. We recommend invoking [`rebroadcast_pending_claims`] every 30 seconds, or
/// more often if running in an environment with spotty connections, like on mobile.
213 /// [`ChannelManager`]: crate::ln::channelmanager::ChannelManager
214 /// [module-level documentation]: crate::chain::chainmonitor
215 /// [`rebroadcast_pending_claims`]: Self::rebroadcast_pending_claims
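///
/// A minimal construction sketch, assuming `filter`, `broadcaster`, `fee_estimator`, `logger` and
/// `persister` are your implementations of the corresponding traits:
///
/// ```ignore
/// let chain_monitor = ChainMonitor::new(Some(&filter), &broadcaster, &logger, &fee_estimator, &persister);
/// // Hand `&chain_monitor` to `ChannelManager` as its `chain::Watch` implementation, and feed it
/// // blocks via `chain::Listen` or `chain::Confirm`.
/// ```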
216 pub struct ChainMonitor<ChannelSigner: EcdsaChannelSigner, C: Deref, T: Deref, F: Deref, L: Deref, P: Deref>
217 where C::Target: chain::Filter,
218 T::Target: BroadcasterInterface,
219 F::Target: FeeEstimator,
221 P::Target: Persist<ChannelSigner>,
223 monitors: RwLock<HashMap<OutPoint, MonitorHolder<ChannelSigner>>>,
224 chain_source: Option<C>,
229 /// "User-provided" (ie persistence-completion/-failed) [`MonitorEvent`]s. These came directly
230 /// from the user and not from a [`ChannelMonitor`].
231 pending_monitor_events: Mutex<Vec<(OutPoint, ChannelId, Vec<MonitorEvent>, Option<PublicKey>)>>,
232 /// The best block height seen, used as a proxy for the passage of time.
233 highest_chain_height: AtomicUsize,
235 /// A [`Notifier`] used to wake up the background processor in case we have any [`Event`]s for
236 /// it to give to users (or [`MonitorEvent`]s for `ChannelManager` to process).
237 event_notifier: Notifier,
240 impl<ChannelSigner: EcdsaChannelSigner, C: Deref, T: Deref, F: Deref, L: Deref, P: Deref> ChainMonitor<ChannelSigner, C, T, F, L, P>
241 where C::Target: chain::Filter,
242 T::Target: BroadcasterInterface,
243 F::Target: FeeEstimator,
245 P::Target: Persist<ChannelSigner>,
247 /// Dispatches to per-channel monitors, which are responsible for updating their on-chain view
248 /// of a channel and reacting accordingly based on transactions in the given chain data. See
249 /// [`ChannelMonitor::block_connected`] for details. Any HTLCs that were resolved on chain will
250 /// be returned by [`chain::Watch::release_pending_monitor_events`].
252 /// Calls back to [`chain::Filter`] if any monitor indicated new outputs to watch. Subsequent
253 /// calls must not exclude any transactions matching the new outputs nor any in-block
254 /// descendants of such transactions. It is not necessary to re-fetch the block to obtain
255 /// updated `txdata`.
257 /// Calls which represent a new blockchain tip height should set `best_height`.
258 fn process_chain_data<FN>(&self, header: &Header, best_height: Option<u32>, txdata: &TransactionData, process: FN)
260 FN: Fn(&ChannelMonitor<ChannelSigner>, &TransactionData) -> Vec<TransactionOutputs>
262 let err_str = "ChannelMonitor[Update] persistence failed unrecoverably. This indicates we cannot continue normal operation and must shut down.";
263 let funding_outpoints = hash_set_from_iter(self.monitors.read().unwrap().keys().cloned());
264 let channel_count = funding_outpoints.len();
265 for funding_outpoint in funding_outpoints.iter() {
266 let monitor_lock = self.monitors.read().unwrap();
267 if let Some(monitor_state) = monitor_lock.get(funding_outpoint) {
268 if self.update_monitor_with_chain_data(header, best_height, txdata, &process, funding_outpoint, &monitor_state, channel_count).is_err() {
269 // Take the monitors lock for writing so that we poison it and any future
270 // operations going forward fail immediately.
271 core::mem::drop(monitor_lock);
272 let _poison = self.monitors.write().unwrap();
273 log_error!(self.logger, "{}", err_str);
274 panic!("{}", err_str);
// Do some follow-up cleanup if any funding outpoints were added in between iterations.
280 let monitor_states = self.monitors.write().unwrap();
281 for (funding_outpoint, monitor_state) in monitor_states.iter() {
282 if !funding_outpoints.contains(funding_outpoint) {
283 if self.update_monitor_with_chain_data(header, best_height, txdata, &process, funding_outpoint, &monitor_state, channel_count).is_err() {
284 log_error!(self.logger, "{}", err_str);
285 panic!("{}", err_str);
290 if let Some(height) = best_height {
291 // If the best block height is being updated, update highest_chain_height under the
292 // monitors write lock.
293 let old_height = self.highest_chain_height.load(Ordering::Acquire);
294 let new_height = height as usize;
295 if new_height > old_height {
296 self.highest_chain_height.store(new_height, Ordering::Release);
301 fn update_monitor_with_chain_data<FN>(
302 &self, header: &Header, best_height: Option<u32>, txdata: &TransactionData, process: FN, funding_outpoint: &OutPoint,
303 monitor_state: &MonitorHolder<ChannelSigner>, channel_count: usize,
304 ) -> Result<(), ()> where FN: Fn(&ChannelMonitor<ChannelSigner>, &TransactionData) -> Vec<TransactionOutputs> {
305 let monitor = &monitor_state.monitor;
306 let logger = WithChannelMonitor::from(&self.logger, &monitor, None);
308 let mut txn_outputs = process(monitor, txdata);
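// Derive a pseudo-random partition key from the first bytes of the funding txid plus the
// current height, so that full chain-sync persistence is spread across blocks rather than
// happening for every monitor on every block. Monitors with pending claims are persisted on
// every block regardless (see below).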
310 let get_partition_key = |funding_outpoint: &OutPoint| {
311 let funding_txid_hash = funding_outpoint.txid.to_raw_hash();
312 let funding_txid_hash_bytes = funding_txid_hash.as_byte_array();
313 let funding_txid_u32 = u32::from_be_bytes([funding_txid_hash_bytes[0], funding_txid_hash_bytes[1], funding_txid_hash_bytes[2], funding_txid_hash_bytes[3]]);
314 funding_txid_u32.wrapping_add(best_height.unwrap_or_default())
317 let partition_factor = if channel_count < 15 {
323 let has_pending_claims = monitor_state.monitor.has_pending_claims();
324 if has_pending_claims || get_partition_key(funding_outpoint) % partition_factor == 0 {
325 log_trace!(logger, "Syncing Channel Monitor for channel {}", log_funding_info!(monitor));
326 match self.persister.update_persisted_channel(*funding_outpoint, None, monitor) {
327 ChannelMonitorUpdateStatus::Completed =>
328 log_trace!(logger, "Finished syncing Channel Monitor for channel {} for block-data",
329 log_funding_info!(monitor)
331 ChannelMonitorUpdateStatus::InProgress => {
332 log_trace!(logger, "Channel Monitor sync for channel {} in progress.", log_funding_info!(monitor));
334 ChannelMonitorUpdateStatus::UnrecoverableError => {
340 // Register any new outputs with the chain source for filtering, storing any dependent
341 // transactions from within the block that previously had not been included in txdata.
342 if let Some(ref chain_source) = self.chain_source {
343 let block_hash = header.block_hash();
344 for (txid, mut outputs) in txn_outputs.drain(..) {
345 for (idx, output) in outputs.drain(..) {
346 // Register any new outputs with the chain source for filtering
347 let output = WatchedOutput {
348 block_hash: Some(block_hash),
349 outpoint: OutPoint { txid, index: idx as u16 },
350 script_pubkey: output.script_pubkey,
352 log_trace!(logger, "Adding monitoring for spends of outpoint {} to the filter", output.outpoint);
353 chain_source.register_output(output);
360 /// Creates a new `ChainMonitor` used to watch on-chain activity pertaining to channels.
362 /// When an optional chain source implementing [`chain::Filter`] is provided, the chain monitor
363 /// will call back to it indicating transactions and outputs of interest. This allows clients to
364 /// pre-filter blocks or only fetch blocks matching a compact filter. Otherwise, clients may
365 /// always need to fetch full blocks absent another means for determining which blocks contain
366 /// transactions relevant to the watched channels.
367 pub fn new(chain_source: Option<C>, broadcaster: T, logger: L, feeest: F, persister: P) -> Self {
369 monitors: RwLock::new(new_hash_map()),
373 fee_estimator: feeest,
375 pending_monitor_events: Mutex::new(Vec::new()),
376 highest_chain_height: AtomicUsize::new(0),
377 event_notifier: Notifier::new(),
381 /// Gets the balances in the contained [`ChannelMonitor`]s which are claimable on-chain or
382 /// claims which are awaiting confirmation.
384 /// Includes the balances from each [`ChannelMonitor`] *except* those included in
385 /// `ignored_channels`, allowing you to filter out balances from channels which are still open
386 /// (and whose balance should likely be pulled from the [`ChannelDetails`]).
388 /// See [`ChannelMonitor::get_claimable_balances`] for more details on the exact criteria for
389 /// inclusion in the return value.
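///
/// For example, to view only the balances of channels which are closed or closing (sketch,
/// assuming a `channel_manager` and `chain_monitor` in scope):
///
/// ```ignore
/// let open_channels = channel_manager.list_channels();
/// let ignored = open_channels.iter().collect::<Vec<_>>();
/// let claimable = chain_monitor.get_claimable_balances(&ignored);
/// ```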
390 pub fn get_claimable_balances(&self, ignored_channels: &[&ChannelDetails]) -> Vec<Balance> {
391 let mut ret = Vec::new();
392 let monitor_states = self.monitors.read().unwrap();
393 for (_, monitor_state) in monitor_states.iter().filter(|(funding_outpoint, _)| {
394 for chan in ignored_channels {
395 if chan.funding_txo.as_ref() == Some(funding_outpoint) {
401 ret.append(&mut monitor_state.monitor.get_claimable_balances());
/// Gets the [`LockedChannelMonitor`] for a given funding outpoint, returning an `Err` if no
/// [`ChannelMonitor`] is currently being monitored for that outpoint.
///
/// Note that the result holds a mutex over our monitor set, and should not be held
/// indefinitely.
411 pub fn get_monitor(&self, funding_txo: OutPoint) -> Result<LockedChannelMonitor<'_, ChannelSigner>, ()> {
412 let lock = self.monitors.read().unwrap();
413 if lock.get(&funding_txo).is_some() {
414 Ok(LockedChannelMonitor { lock, funding_txo })
420 /// Lists the funding outpoint and channel ID of each [`ChannelMonitor`] being monitored.
422 /// Note that [`ChannelMonitor`]s are not removed when a channel is closed as they are always
423 /// monitoring for on-chain state resolutions.
424 pub fn list_monitors(&self) -> Vec<(OutPoint, ChannelId)> {
425 self.monitors.read().unwrap().iter().map(|(outpoint, monitor_holder)| {
426 let channel_id = monitor_holder.monitor.channel_id();
427 (*outpoint, channel_id)
431 #[cfg(not(c_bindings))]
432 /// Lists the pending updates for each [`ChannelMonitor`] (by `OutPoint` being monitored).
433 /// Each `Vec<u64>` contains `update_id`s from [`ChannelMonitor::get_latest_update_id`] for updates
434 /// that have not yet been fully persisted. Note that if a full monitor is persisted all the pending
435 /// monitor updates must be individually marked completed by calling [`ChainMonitor::channel_monitor_updated`].
436 pub fn list_pending_monitor_updates(&self) -> HashMap<OutPoint, Vec<u64>> {
437 hash_map_from_iter(self.monitors.read().unwrap().iter().map(|(outpoint, holder)| {
438 (*outpoint, holder.pending_monitor_updates.lock().unwrap().clone())
443 /// Lists the pending updates for each [`ChannelMonitor`] (by `OutPoint` being monitored).
444 /// Each `Vec<u64>` contains `update_id`s from [`ChannelMonitor::get_latest_update_id`] for updates
445 /// that have not yet been fully persisted. Note that if a full monitor is persisted all the pending
446 /// monitor updates must be individually marked completed by calling [`ChainMonitor::channel_monitor_updated`].
447 pub fn list_pending_monitor_updates(&self) -> Vec<(OutPoint, Vec<u64>)> {
448 self.monitors.read().unwrap().iter().map(|(outpoint, holder)| {
449 (*outpoint, holder.pending_monitor_updates.lock().unwrap().clone())
455 pub fn remove_monitor(&self, funding_txo: &OutPoint) -> ChannelMonitor<ChannelSigner> {
456 self.monitors.write().unwrap().remove(funding_txo).unwrap().monitor
459 /// Indicates the persistence of a [`ChannelMonitor`] has completed after
460 /// [`ChannelMonitorUpdateStatus::InProgress`] was returned from an update operation.
462 /// Thus, the anticipated use is, at a high level:
463 /// 1) This [`ChainMonitor`] calls [`Persist::update_persisted_channel`] which stores the
464 /// update to disk and begins updating any remote (e.g. watchtower/backup) copies,
465 /// returning [`ChannelMonitorUpdateStatus::InProgress`],
466 /// 2) once all remote copies are updated, you call this function with [`ChannelMonitor::get_latest_update_id`]
467 /// or [`ChannelMonitorUpdate::update_id`] as the `completed_update_id`, and once all pending
468 /// updates have completed the channel will be re-enabled.
470 /// It is only necessary to call [`ChainMonitor::channel_monitor_updated`] when you return [`ChannelMonitorUpdateStatus::InProgress`]
471 /// from [`Persist`] and either:
472 /// 1. A new [`ChannelMonitor`] was added in [`Persist::persist_new_channel`], or
473 /// 2. A [`ChannelMonitorUpdate`] was provided as part of [`Persist::update_persisted_channel`].
474 /// Note that we don't care about calls to [`Persist::update_persisted_channel`] where no
475 /// [`ChannelMonitorUpdate`] was provided.
477 /// Returns an [`APIError::APIMisuseError`] if `funding_txo` does not match any currently
478 /// registered [`ChannelMonitor`]s.
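///
/// For example, an asynchronous [`Persist`] implementation might spawn a task which, once the
/// remote (e.g. watchtower or backup) copy is durable, reports completion back to this
/// [`ChainMonitor`] (sketch, error handling elided):
///
/// ```ignore
/// // ... `Persist::update_persisted_channel` returned `InProgress` and queued the write ...
/// // Later, from the background task, once the write has been fsync()ed/acknowledged:
/// chain_monitor.channel_monitor_updated(funding_txo, update.update_id)?;
/// ```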
479 pub fn channel_monitor_updated(&self, funding_txo: OutPoint, completed_update_id: u64) -> Result<(), APIError> {
480 let monitors = self.monitors.read().unwrap();
481 let monitor_data = if let Some(mon) = monitors.get(&funding_txo) { mon } else {
482 return Err(APIError::APIMisuseError { err: format!("No ChannelMonitor matching funding outpoint {:?} found", funding_txo) });
484 let mut pending_monitor_updates = monitor_data.pending_monitor_updates.lock().unwrap();
485 pending_monitor_updates.retain(|update_id| *update_id != completed_update_id);
487 // Note that we only check for pending non-chainsync monitor updates and we don't track monitor
488 // updates resulting from chainsync in `pending_monitor_updates`.
489 let monitor_is_pending_updates = monitor_data.has_pending_updates(&pending_monitor_updates);
490 log_debug!(self.logger, "Completed off-chain monitor update {} for channel with funding outpoint {:?}, {}",
493 if monitor_is_pending_updates {
494 "still have pending off-chain updates"
496 "all off-chain updates complete, returning a MonitorEvent"
498 if monitor_is_pending_updates {
// If there are still monitor updates pending, we cannot yet construct a
// `Completed` event.
503 let channel_id = monitor_data.monitor.channel_id();
504 self.pending_monitor_events.lock().unwrap().push((funding_txo, channel_id, vec![MonitorEvent::Completed {
505 funding_txo, channel_id,
506 monitor_update_id: monitor_data.monitor.get_latest_update_id(),
507 }], monitor_data.monitor.get_counterparty_node_id()));
509 self.event_notifier.notify();
513 /// This wrapper avoids having to update some of our tests for now as they assume the direct
514 /// chain::Watch API wherein we mark a monitor fully-updated by just calling
515 /// channel_monitor_updated once with the highest ID.
516 #[cfg(any(test, fuzzing))]
517 pub fn force_channel_monitor_updated(&self, funding_txo: OutPoint, monitor_update_id: u64) {
518 let monitors = self.monitors.read().unwrap();
519 let (counterparty_node_id, channel_id) = if let Some(m) = monitors.get(&funding_txo) {
520 (m.monitor.get_counterparty_node_id(), m.monitor.channel_id())
522 (None, ChannelId::v1_from_funding_outpoint(funding_txo))
524 self.pending_monitor_events.lock().unwrap().push((funding_txo, channel_id, vec![MonitorEvent::Completed {
528 }], counterparty_node_id));
529 self.event_notifier.notify();
532 #[cfg(any(test, feature = "_test_utils"))]
533 pub fn get_and_clear_pending_events(&self) -> Vec<events::Event> {
534 use crate::events::EventsProvider;
535 let events = core::cell::RefCell::new(Vec::new());
536 let event_handler = |event: events::Event| events.borrow_mut().push(event);
537 self.process_pending_events(&event_handler);
541 /// Processes any events asynchronously in the order they were generated since the last call
542 /// using the given event handler.
544 /// See the trait-level documentation of [`EventsProvider`] for requirements.
546 /// [`EventsProvider`]: crate::events::EventsProvider
547 pub async fn process_pending_events_async<Future: core::future::Future, H: Fn(Event) -> Future>(
550 // Sadly we can't hold the monitors read lock through an async call. Thus we have to do a
551 // crazy dance to process a monitor's events then only remove them once we've done so.
552 let mons_to_process = self.monitors.read().unwrap().keys().cloned().collect::<Vec<_>>();
553 for funding_txo in mons_to_process {
555 super::channelmonitor::process_events_body!(
556 self.monitors.read().unwrap().get(&funding_txo).map(|m| &m.monitor), ev, handler(ev).await);
560 /// Gets a [`Future`] that completes when an event is available either via
561 /// [`chain::Watch::release_pending_monitor_events`] or
562 /// [`EventsProvider::process_pending_events`].
564 /// Note that callbacks registered on the [`Future`] MUST NOT call back into this
565 /// [`ChainMonitor`] and should instead register actions to be taken later.
567 /// [`EventsProvider::process_pending_events`]: crate::events::EventsProvider::process_pending_events
568 pub fn get_update_future(&self) -> Future {
569 self.event_notifier.get_future()
572 /// Triggers rebroadcasts/fee-bumps of pending claims from a force-closed channel. This is
573 /// crucial in preventing certain classes of pinning attacks, detecting substantial mempool
574 /// feerate changes between blocks, and ensuring reliability if broadcasting fails. We recommend
/// invoking this every 30 seconds, or more often if running in an environment with spotty
/// connections, like on mobile.
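///
/// A simple `std` timer driving this method might look like the following sketch (note that
/// `lightning-background-processor`, if you use it, already invokes this for you):
///
/// ```ignore
/// std::thread::spawn(move || loop {
///     std::thread::sleep(std::time::Duration::from_secs(30));
///     chain_monitor.rebroadcast_pending_claims();
/// });
/// ```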
577 pub fn rebroadcast_pending_claims(&self) {
578 let monitors = self.monitors.read().unwrap();
579 for (_, monitor_holder) in &*monitors {
580 monitor_holder.monitor.rebroadcast_pending_claims(
581 &*self.broadcaster, &*self.fee_estimator, &self.logger
586 /// Triggers rebroadcasts of pending claims from force-closed channels after a transaction
587 /// signature generation failure.
589 /// `monitor_opt` can be used as a filter to only trigger them for a specific channel monitor.
590 pub fn signer_unblocked(&self, monitor_opt: Option<OutPoint>) {
591 let monitors = self.monitors.read().unwrap();
592 if let Some(funding_txo) = monitor_opt {
593 if let Some(monitor_holder) = monitors.get(&funding_txo) {
594 monitor_holder.monitor.signer_unblocked(
595 &*self.broadcaster, &*self.fee_estimator, &self.logger
599 for (_, monitor_holder) in &*monitors {
600 monitor_holder.monitor.signer_unblocked(
601 &*self.broadcaster, &*self.fee_estimator, &self.logger
607 /// Archives fully resolved channel monitors by calling [`Persist::archive_persisted_channel`].
609 /// This is useful for pruning fully resolved monitors from the monitor set and primary
610 /// storage so they are not kept in memory and reloaded on restart.
612 /// Should be called occasionally (once every handful of blocks or on startup).
614 /// Depending on the implementation of [`Persist::archive_persisted_channel`] the monitor
615 /// data could be moved to an archive location or removed entirely.
616 pub fn archive_fully_resolved_channel_monitors(&self) {
617 let mut have_monitors_to_prune = false;
618 for (_, monitor_holder) in self.monitors.read().unwrap().iter() {
619 let logger = WithChannelMonitor::from(&self.logger, &monitor_holder.monitor, None);
620 if monitor_holder.monitor.is_fully_resolved(&logger) {
621 have_monitors_to_prune = true;
624 if have_monitors_to_prune {
625 let mut monitors = self.monitors.write().unwrap();
626 monitors.retain(|funding_txo, monitor_holder| {
627 let logger = WithChannelMonitor::from(&self.logger, &monitor_holder.monitor, None);
628 if monitor_holder.monitor.is_fully_resolved(&logger) {
630 "Archiving fully resolved ChannelMonitor for funding txo {}",
633 self.persister.archive_persisted_channel(*funding_txo);
643 impl<ChannelSigner: EcdsaChannelSigner, C: Deref, T: Deref, F: Deref, L: Deref, P: Deref>
644 chain::Listen for ChainMonitor<ChannelSigner, C, T, F, L, P>
646 C::Target: chain::Filter,
647 T::Target: BroadcasterInterface,
648 F::Target: FeeEstimator,
650 P::Target: Persist<ChannelSigner>,
652 fn filtered_block_connected(&self, header: &Header, txdata: &TransactionData, height: u32) {
653 log_debug!(self.logger, "New best block {} at height {} provided via block_connected", header.block_hash(), height);
654 self.process_chain_data(header, Some(height), &txdata, |monitor, txdata| {
655 monitor.block_connected(
656 header, txdata, height, &*self.broadcaster, &*self.fee_estimator, &self.logger)
658 // Assume we may have some new events and wake the event processor
659 self.event_notifier.notify();
662 fn block_disconnected(&self, header: &Header, height: u32) {
663 let monitor_states = self.monitors.read().unwrap();
664 log_debug!(self.logger, "Latest block {} at height {} removed via block_disconnected", header.block_hash(), height);
665 for monitor_state in monitor_states.values() {
666 monitor_state.monitor.block_disconnected(
667 header, height, &*self.broadcaster, &*self.fee_estimator, &self.logger);
672 impl<ChannelSigner: EcdsaChannelSigner, C: Deref, T: Deref, F: Deref, L: Deref, P: Deref>
673 chain::Confirm for ChainMonitor<ChannelSigner, C, T, F, L, P>
675 C::Target: chain::Filter,
676 T::Target: BroadcasterInterface,
677 F::Target: FeeEstimator,
679 P::Target: Persist<ChannelSigner>,
681 fn transactions_confirmed(&self, header: &Header, txdata: &TransactionData, height: u32) {
682 log_debug!(self.logger, "{} provided transactions confirmed at height {} in block {}", txdata.len(), height, header.block_hash());
683 self.process_chain_data(header, None, txdata, |monitor, txdata| {
684 monitor.transactions_confirmed(
685 header, txdata, height, &*self.broadcaster, &*self.fee_estimator, &self.logger)
687 // Assume we may have some new events and wake the event processor
688 self.event_notifier.notify();
691 fn transaction_unconfirmed(&self, txid: &Txid) {
692 log_debug!(self.logger, "Transaction {} reorganized out of chain", txid);
693 let monitor_states = self.monitors.read().unwrap();
694 for monitor_state in monitor_states.values() {
695 monitor_state.monitor.transaction_unconfirmed(txid, &*self.broadcaster, &*self.fee_estimator, &self.logger);
699 fn best_block_updated(&self, header: &Header, height: u32) {
700 log_debug!(self.logger, "New best block {} at height {} provided via best_block_updated", header.block_hash(), height);
701 self.process_chain_data(header, Some(height), &[], |monitor, txdata| {
702 // While in practice there shouldn't be any recursive calls when given empty txdata,
703 // it's still possible if a chain::Filter implementation returns a transaction.
704 debug_assert!(txdata.is_empty());
705 monitor.best_block_updated(
706 header, height, &*self.broadcaster, &*self.fee_estimator, &self.logger
709 // Assume we may have some new events and wake the event processor
710 self.event_notifier.notify();
713 fn get_relevant_txids(&self) -> Vec<(Txid, u32, Option<BlockHash>)> {
714 let mut txids = Vec::new();
715 let monitor_states = self.monitors.read().unwrap();
716 for monitor_state in monitor_states.values() {
717 txids.append(&mut monitor_state.monitor.get_relevant_txids());
720 txids.sort_unstable_by(|a, b| a.0.cmp(&b.0).then(b.1.cmp(&a.1)));
721 txids.dedup_by_key(|(txid, _, _)| *txid);
726 impl<ChannelSigner: EcdsaChannelSigner, C: Deref , T: Deref , F: Deref , L: Deref , P: Deref >
727 chain::Watch<ChannelSigner> for ChainMonitor<ChannelSigner, C, T, F, L, P>
728 where C::Target: chain::Filter,
729 T::Target: BroadcasterInterface,
730 F::Target: FeeEstimator,
732 P::Target: Persist<ChannelSigner>,
734 fn watch_channel(&self, funding_outpoint: OutPoint, monitor: ChannelMonitor<ChannelSigner>) -> Result<ChannelMonitorUpdateStatus, ()> {
735 let logger = WithChannelMonitor::from(&self.logger, &monitor, None);
736 let mut monitors = self.monitors.write().unwrap();
737 let entry = match monitors.entry(funding_outpoint) {
738 hash_map::Entry::Occupied(_) => {
739 log_error!(logger, "Failed to add new channel data: channel monitor for given outpoint is already present");
742 hash_map::Entry::Vacant(e) => e,
744 log_trace!(logger, "Got new ChannelMonitor for channel {}", log_funding_info!(monitor));
745 let update_id = monitor.get_latest_update_id();
746 let mut pending_monitor_updates = Vec::new();
747 let persist_res = self.persister.persist_new_channel(funding_outpoint, &monitor);
749 ChannelMonitorUpdateStatus::InProgress => {
750 log_info!(logger, "Persistence of new ChannelMonitor for channel {} in progress", log_funding_info!(monitor));
751 pending_monitor_updates.push(update_id);
753 ChannelMonitorUpdateStatus::Completed => {
754 log_info!(logger, "Persistence of new ChannelMonitor for channel {} completed", log_funding_info!(monitor));
756 ChannelMonitorUpdateStatus::UnrecoverableError => {
757 let err_str = "ChannelMonitor[Update] persistence failed unrecoverably. This indicates we cannot continue normal operation and must shut down.";
758 log_error!(logger, "{}", err_str);
759 panic!("{}", err_str);
762 if let Some(ref chain_source) = self.chain_source {
763 monitor.load_outputs_to_watch(chain_source , &self.logger);
765 entry.insert(MonitorHolder {
767 pending_monitor_updates: Mutex::new(pending_monitor_updates),
772 fn update_channel(&self, funding_txo: OutPoint, update: &ChannelMonitorUpdate) -> ChannelMonitorUpdateStatus {
773 // `ChannelMonitorUpdate`'s `channel_id` is `None` prior to 0.0.121 and all channels in those
// versions are V1-established. For 0.0.121+ the `channel_id` field is always `Some`.
775 let channel_id = update.channel_id.unwrap_or(ChannelId::v1_from_funding_outpoint(funding_txo));
776 // Update the monitor that watches the channel referred to by the given outpoint.
777 let monitors = self.monitors.read().unwrap();
778 match monitors.get(&funding_txo) {
780 let logger = WithContext::from(&self.logger, update.counterparty_node_id, Some(channel_id), None);
781 log_error!(logger, "Failed to update channel monitor: no such monitor registered");
783 // We should never ever trigger this from within ChannelManager. Technically a
784 // user could use this object with some proxying in between which makes this
785 // possible, but in tests and fuzzing, this should be a panic.
786 #[cfg(debug_assertions)]
787 panic!("ChannelManager generated a channel update for a channel that was not yet registered!");
788 #[cfg(not(debug_assertions))]
789 ChannelMonitorUpdateStatus::InProgress
791 Some(monitor_state) => {
792 let monitor = &monitor_state.monitor;
793 let logger = WithChannelMonitor::from(&self.logger, &monitor, None);
794 log_trace!(logger, "Updating ChannelMonitor to id {} for channel {}", update.update_id, log_funding_info!(monitor));
795 let update_res = monitor.update_monitor(update, &self.broadcaster, &self.fee_estimator, &self.logger);
797 let update_id = update.update_id;
798 let mut pending_monitor_updates = monitor_state.pending_monitor_updates.lock().unwrap();
799 let persist_res = if update_res.is_err() {
800 // Even if updating the monitor returns an error, the monitor's state will
801 // still be changed. Therefore, we should persist the updated monitor despite the error.
802 // We don't want to persist a `monitor_update` which results in a failure to apply later
803 // while reading `channel_monitor` with updates from storage. Instead, we should persist
804 // the entire `channel_monitor` here.
805 log_warn!(logger, "Failed to update ChannelMonitor for channel {}. Going ahead and persisting the entire ChannelMonitor", log_funding_info!(monitor));
806 self.persister.update_persisted_channel(funding_txo, None, monitor)
808 self.persister.update_persisted_channel(funding_txo, Some(update), monitor)
811 ChannelMonitorUpdateStatus::InProgress => {
812 pending_monitor_updates.push(update_id);
814 "Persistence of ChannelMonitorUpdate id {:?} for channel {} in progress",
816 log_funding_info!(monitor)
819 ChannelMonitorUpdateStatus::Completed => {
821 "Persistence of ChannelMonitorUpdate id {:?} for channel {} completed",
823 log_funding_info!(monitor)
826 ChannelMonitorUpdateStatus::UnrecoverableError => {
827 // Take the monitors lock for writing so that we poison it and any future
828 // operations going forward fail immediately.
829 core::mem::drop(pending_monitor_updates);
830 core::mem::drop(monitors);
831 let _poison = self.monitors.write().unwrap();
832 let err_str = "ChannelMonitor[Update] persistence failed unrecoverably. This indicates we cannot continue normal operation and must shut down.";
833 log_error!(logger, "{}", err_str);
834 panic!("{}", err_str);
837 if update_res.is_err() {
838 ChannelMonitorUpdateStatus::InProgress
846 fn release_pending_monitor_events(&self) -> Vec<(OutPoint, ChannelId, Vec<MonitorEvent>, Option<PublicKey>)> {
847 let mut pending_monitor_events = self.pending_monitor_events.lock().unwrap().split_off(0);
848 for monitor_state in self.monitors.read().unwrap().values() {
849 let monitor_events = monitor_state.monitor.get_and_clear_pending_monitor_events();
if !monitor_events.is_empty() {
851 let monitor_outpoint = monitor_state.monitor.get_funding_txo().0;
852 let monitor_channel_id = monitor_state.monitor.channel_id();
853 let counterparty_node_id = monitor_state.monitor.get_counterparty_node_id();
854 pending_monitor_events.push((monitor_outpoint, monitor_channel_id, monitor_events, counterparty_node_id));
857 pending_monitor_events
861 impl<ChannelSigner: EcdsaChannelSigner, C: Deref, T: Deref, F: Deref, L: Deref, P: Deref> events::EventsProvider for ChainMonitor<ChannelSigner, C, T, F, L, P>
862 where C::Target: chain::Filter,
863 T::Target: BroadcasterInterface,
864 F::Target: FeeEstimator,
866 P::Target: Persist<ChannelSigner>,
868 /// Processes [`SpendableOutputs`] events produced from each [`ChannelMonitor`] upon maturity.
870 /// For channels featuring anchor outputs, this method will also process [`BumpTransaction`]
871 /// events produced from each [`ChannelMonitor`] while there is a balance to claim onchain
872 /// within each channel. As the confirmation of a commitment transaction may be critical to the
/// safety of funds, we recommend invoking this every 30 seconds, or more often if running in an
874 /// environment with spotty connections, like on mobile.
876 /// An [`EventHandler`] may safely call back to the provider, though this shouldn't be needed in
877 /// order to handle these events.
879 /// [`SpendableOutputs`]: events::Event::SpendableOutputs
880 /// [`BumpTransaction`]: events::Event::BumpTransaction
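///
/// For example, a closure-based handler (as also used in this module's tests) might look like:
///
/// ```ignore
/// let event_handler = |event: Event| match event {
///     Event::SpendableOutputs { .. } => { /* sweep the outputs to a wallet address */ },
///     Event::BumpTransaction(_) => { /* hand off to a fee-bumping handler */ },
///     _ => {},
/// };
/// chain_monitor.process_pending_events(&event_handler);
/// ```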
881 fn process_pending_events<H: Deref>(&self, handler: H) where H::Target: EventHandler {
882 for monitor_state in self.monitors.read().unwrap().values() {
883 monitor_state.monitor.process_pending_events(&handler);
890 use crate::{check_added_monitors, check_closed_event};
891 use crate::{expect_payment_path_successful, get_event_msg};
892 use crate::{get_htlc_update_msgs, get_revoke_commit_msgs};
893 use crate::chain::{ChannelMonitorUpdateStatus, Watch};
894 use crate::chain::channelmonitor::ANTI_REORG_DELAY;
895 use crate::events::{ClosureReason, Event, MessageSendEvent, MessageSendEventsProvider};
896 use crate::ln::functional_test_utils::*;
897 use crate::ln::msgs::ChannelMessageHandler;
899 const CHAINSYNC_MONITOR_PARTITION_FACTOR: u32 = 5;
902 fn test_async_ooo_offchain_updates() {
// Test that if we have multiple offchain updates being persisted and they complete
// out-of-order, the ChainMonitor waits until all have completed before informing the
// ChannelManager.
906 let chanmon_cfgs = create_chanmon_cfgs(2);
907 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
908 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
909 let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
910 create_announced_chan_between_nodes(&nodes, 0, 1);
912 // Route two payments to be claimed at the same time.
913 let (payment_preimage_1, payment_hash_1, ..) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000);
914 let (payment_preimage_2, payment_hash_2, ..) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000);
916 chanmon_cfgs[1].persister.offchain_monitor_updates.lock().unwrap().clear();
917 chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
918 chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
920 nodes[1].node.claim_funds(payment_preimage_1);
921 check_added_monitors!(nodes[1], 1);
922 nodes[1].node.claim_funds(payment_preimage_2);
923 check_added_monitors!(nodes[1], 1);
925 let persistences = chanmon_cfgs[1].persister.offchain_monitor_updates.lock().unwrap().clone();
926 assert_eq!(persistences.len(), 1);
927 let (funding_txo, updates) = persistences.iter().next().unwrap();
928 assert_eq!(updates.len(), 2);
// Note that `updates` is an unordered collection, so the ordering here is effectively random.
// This shouldn't fail either way, but if it fails intermittently it likely depends on the
// ordering of the updates.
932 let mut update_iter = updates.iter();
933 let next_update = update_iter.next().unwrap().clone();
934 // Should contain next_update when pending updates listed.
935 #[cfg(not(c_bindings))]
936 assert!(nodes[1].chain_monitor.chain_monitor.list_pending_monitor_updates().get(funding_txo)
937 .unwrap().contains(&next_update));
939 assert!(nodes[1].chain_monitor.chain_monitor.list_pending_monitor_updates().iter()
940 .find(|(txo, _)| txo == funding_txo).unwrap().1.contains(&next_update));
941 nodes[1].chain_monitor.chain_monitor.channel_monitor_updated(*funding_txo, next_update.clone()).unwrap();
942 // Should not contain the previously pending next_update when pending updates listed.
943 #[cfg(not(c_bindings))]
944 assert!(!nodes[1].chain_monitor.chain_monitor.list_pending_monitor_updates().get(funding_txo)
945 .unwrap().contains(&next_update));
947 assert!(!nodes[1].chain_monitor.chain_monitor.list_pending_monitor_updates().iter()
948 .find(|(txo, _)| txo == funding_txo).unwrap().1.contains(&next_update));
949 assert!(nodes[1].chain_monitor.release_pending_monitor_events().is_empty());
950 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
951 assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
952 nodes[1].chain_monitor.chain_monitor.channel_monitor_updated(*funding_txo, update_iter.next().unwrap().clone()).unwrap();
954 let claim_events = nodes[1].node.get_and_clear_pending_events();
955 assert_eq!(claim_events.len(), 2);
956 match claim_events[0] {
957 Event::PaymentClaimed { ref payment_hash, amount_msat: 1_000_000, .. } => {
958 assert_eq!(payment_hash_1, *payment_hash);
960 _ => panic!("Unexpected event"),
962 match claim_events[1] {
963 Event::PaymentClaimed { ref payment_hash, amount_msat: 1_000_000, .. } => {
964 assert_eq!(payment_hash_2, *payment_hash);
966 _ => panic!("Unexpected event"),
969 // Now manually walk the commitment signed dance - because we claimed two payments
970 // back-to-back it doesn't fit into the neat walk commitment_signed_dance does.
972 let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
973 nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]);
974 expect_payment_sent(&nodes[0], payment_preimage_1, None, false, false);
975 nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &updates.commitment_signed);
976 check_added_monitors!(nodes[0], 1);
977 let (as_first_raa, as_first_update) = get_revoke_commit_msgs!(nodes[0], nodes[1].node.get_our_node_id());
979 nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_first_raa);
980 check_added_monitors!(nodes[1], 1);
981 let bs_second_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
982 nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_first_update);
983 check_added_monitors!(nodes[1], 1);
984 let bs_first_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
986 nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_second_updates.update_fulfill_htlcs[0]);
987 expect_payment_sent(&nodes[0], payment_preimage_2, None, false, false);
988 nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_second_updates.commitment_signed);
989 check_added_monitors!(nodes[0], 1);
990 nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_first_raa);
991 expect_payment_path_successful!(nodes[0]);
992 check_added_monitors!(nodes[0], 1);
993 let (as_second_raa, as_second_update) = get_revoke_commit_msgs!(nodes[0], nodes[1].node.get_our_node_id());
995 nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_second_raa);
996 check_added_monitors!(nodes[1], 1);
997 nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_second_update);
998 check_added_monitors!(nodes[1], 1);
999 let bs_second_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
1001 nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_second_raa);
1002 expect_payment_path_successful!(nodes[0]);
1003 check_added_monitors!(nodes[0], 1);
1007 fn test_chainsync_triggers_distributed_monitor_persistence() {
1008 let chanmon_cfgs = create_chanmon_cfgs(3);
1009 let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
1010 let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
1011 let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
1013 // Use FullBlockViaListen to avoid duplicate calls to process_chain_data and skips_blocks() in
1014 // case of other connect_styles.
1015 *nodes[0].connect_style.borrow_mut() = ConnectStyle::FullBlockViaListen;
1016 *nodes[1].connect_style.borrow_mut() = ConnectStyle::FullBlockViaListen;
1017 *nodes[2].connect_style.borrow_mut() = ConnectStyle::FullBlockViaListen;
1019 let _channel_1 = create_announced_chan_between_nodes(&nodes, 0, 1).2;
1020 let channel_2 = create_announced_chan_between_nodes_with_value(&nodes, 0, 2, 1_000_000, 0).2;
1022 chanmon_cfgs[0].persister.chain_sync_monitor_persistences.lock().unwrap().clear();
1023 chanmon_cfgs[1].persister.chain_sync_monitor_persistences.lock().unwrap().clear();
1024 chanmon_cfgs[2].persister.chain_sync_monitor_persistences.lock().unwrap().clear();
1026 connect_blocks(&nodes[0], CHAINSYNC_MONITOR_PARTITION_FACTOR * 2);
1027 connect_blocks(&nodes[1], CHAINSYNC_MONITOR_PARTITION_FACTOR * 2);
1028 connect_blocks(&nodes[2], CHAINSYNC_MONITOR_PARTITION_FACTOR * 2);
// Connecting [`CHAINSYNC_MONITOR_PARTITION_FACTOR`] * 2 blocks should trigger only 2 writes
// per monitor/channel.
1032 assert_eq!(2 * 2, chanmon_cfgs[0].persister.chain_sync_monitor_persistences.lock().unwrap().len());
1033 assert_eq!(2, chanmon_cfgs[1].persister.chain_sync_monitor_persistences.lock().unwrap().len());
1034 assert_eq!(2, chanmon_cfgs[2].persister.chain_sync_monitor_persistences.lock().unwrap().len());
1036 // Test that monitors with pending_claims are persisted on every block.
// Now, close channel_2, i.e. the channel between node[0] and node[2], to create a pending_claim in node[0].
1038 nodes[0].node.force_close_broadcasting_latest_txn(&channel_2, &nodes[2].node.get_our_node_id(), "Channel force-closed".to_string()).unwrap();
1039 check_closed_event!(&nodes[0], 1, ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }, false,
1040 [nodes[2].node.get_our_node_id()], 1000000);
1041 check_closed_broadcast(&nodes[0], 1, true);
1042 let close_tx = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
1043 assert_eq!(close_tx.len(), 1);
1045 mine_transaction(&nodes[2], &close_tx[0]);
1046 check_added_monitors(&nodes[2], 1);
1047 check_closed_broadcast(&nodes[2], 1, true);
1048 check_closed_event!(&nodes[2], 1, ClosureReason::CommitmentTxConfirmed, false,
1049 [nodes[0].node.get_our_node_id()], 1000000);
1051 chanmon_cfgs[0].persister.chain_sync_monitor_persistences.lock().unwrap().clear();
1052 chanmon_cfgs[2].persister.chain_sync_monitor_persistences.lock().unwrap().clear();
1054 // For channel_2, there should be a monitor write for every block connection.
// We connect [`CHAINSYNC_MONITOR_PARTITION_FACTOR`] blocks since we don't know when the
// channel_1 monitor persistence will occur; with [`CHAINSYNC_MONITOR_PARTITION_FACTOR`] blocks
// it will be persisted exactly once.
1058 connect_blocks(&nodes[0], CHAINSYNC_MONITOR_PARTITION_FACTOR);
1059 connect_blocks(&nodes[2], CHAINSYNC_MONITOR_PARTITION_FACTOR);
// CHAINSYNC_MONITOR_PARTITION_FACTOR writes for channel_2 due to pending_claim, 1 for
// channel_1.
1063 assert_eq!((CHAINSYNC_MONITOR_PARTITION_FACTOR + 1) as usize, chanmon_cfgs[0].persister.chain_sync_monitor_persistences.lock().unwrap().len());
1064 // For node[2], there is no pending_claim
1065 assert_eq!(1, chanmon_cfgs[2].persister.chain_sync_monitor_persistences.lock().unwrap().len());
1067 // Confirm claim for node[0] with ANTI_REORG_DELAY and reset monitor write counter.
1068 mine_transaction(&nodes[0], &close_tx[0]);
1069 connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1);
1070 check_added_monitors(&nodes[0], 1);
1071 chanmon_cfgs[0].persister.chain_sync_monitor_persistences.lock().unwrap().clear();
// Again connect one full cycle of CHAINSYNC_MONITOR_PARTITION_FACTOR blocks; it should only
// result in 1 write per monitor/channel.
1075 connect_blocks(&nodes[0], CHAINSYNC_MONITOR_PARTITION_FACTOR);
1076 assert_eq!(2, chanmon_cfgs[0].persister.chain_sync_monitor_persistences.lock().unwrap().len());
1080 #[cfg(feature = "std")]
1081 fn update_during_chainsync_poisons_channel() {
1082 let chanmon_cfgs = create_chanmon_cfgs(2);
1083 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
1084 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
1085 let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
1086 create_announced_chan_between_nodes(&nodes, 0, 1);
1087 *nodes[0].connect_style.borrow_mut() = ConnectStyle::FullBlockViaListen;
1089 chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::UnrecoverableError);
1091 assert!(std::panic::catch_unwind(|| {
1092 // Returning an UnrecoverableError should always panic immediately
// Connecting [`CHAINSYNC_MONITOR_PARTITION_FACTOR`] blocks so that we trigger some persistence
// after accounting for block-height based partitioning/distribution.
1095 connect_blocks(&nodes[0], CHAINSYNC_MONITOR_PARTITION_FACTOR);
1097 assert!(std::panic::catch_unwind(|| {
1098 // ...and also poison our locks causing later use to panic as well
1099 core::mem::drop(nodes);