1 // This file is Copyright its original authors, visible in version control
4 // This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
5 // or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
6 // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
7 // You may not use this file except in accordance with one or both of these
10 //! Logic to connect off-chain channel management with on-chain transaction monitoring.
12 //! [`ChainMonitor`] is an implementation of [`chain::Watch`] used both to process blocks and to
13 //! update [`ChannelMonitor`]s accordingly. If any on-chain events need further processing, it will
14 //! make those available as [`MonitorEvent`]s to be consumed.
16 //! [`ChainMonitor`] is parameterized by an optional chain source, which must implement the
17 //! [`chain::Filter`] trait. This provides a mechanism to signal new relevant outputs back to light
18 //! clients, such that transactions spending those outputs are included in block data.
20 //! [`ChainMonitor`] may be used directly to monitor channels locally or as a part of a distributed
21 //! setup to monitor channels remotely. In the latter case, a custom [`chain::Watch`] implementation
22 //! would be responsible for routing each update to a remote server and for retrieving monitor
23 //! events. The remote server would make use of [`ChainMonitor`] for block processing and for
24 //! servicing [`ChannelMonitor`] updates from the client.
26 use bitcoin::blockdata::block::Header;
27 use bitcoin::hash_types::{Txid, BlockHash};
30 use crate::chain::{ChannelMonitorUpdateStatus, Filter, WatchedOutput};
31 use crate::chain::chaininterface::{BroadcasterInterface, FeeEstimator};
32 use crate::chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, Balance, MonitorEvent, TransactionOutputs, WithChannelMonitor};
33 use crate::chain::transaction::{OutPoint, TransactionData};
34 use crate::ln::ChannelId;
35 use crate::sign::ecdsa::WriteableEcdsaChannelSigner;
37 use crate::events::{Event, EventHandler};
38 use crate::util::logger::{Logger, WithContext};
39 use crate::util::errors::APIError;
40 use crate::util::wakers::{Future, Notifier};
41 use crate::ln::channelmanager::ChannelDetails;
43 use crate::prelude::*;
44 use crate::sync::{RwLock, RwLockReadGuard, Mutex, MutexGuard};
46 use core::sync::atomic::{AtomicUsize, Ordering};
47 use bitcoin::secp256k1::PublicKey;
49 /// `Persist` defines behavior for persisting channel monitors: this could mean
50 /// writing once to disk, and/or uploading to one or more backup services.
52 /// Persistence can happen in one of two ways - synchronously completing before the trait method
53 /// calls return or asynchronously in the background.
55 /// # For those implementing synchronous persistence
57 /// * If persistence completes fully (including any relevant `fsync()` calls), the implementation
/// should return [`ChannelMonitorUpdateStatus::Completed`], indicating normal channel operation
/// should continue.
61 /// * If persistence fails for some reason, implementations should consider returning
62 /// [`ChannelMonitorUpdateStatus::InProgress`] and retry all pending persistence operations in
63 /// the background with [`ChainMonitor::list_pending_monitor_updates`] and
64 /// [`ChainMonitor::get_monitor`].
66 /// Once a full [`ChannelMonitor`] has been persisted, all pending updates for that channel can
67 /// be marked as complete via [`ChainMonitor::channel_monitor_updated`].
69 /// If at some point no further progress can be made towards persisting the pending updates, the
70 /// node should simply shut down.
72 /// * If the persistence has failed and cannot be retried further (e.g. because of an outage),
73 /// [`ChannelMonitorUpdateStatus::UnrecoverableError`] can be used, though this will result in
74 /// an immediate panic and future operations in LDK generally failing.
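///
/// For illustration, a minimal sketch of the synchronous flow described above, where `S` is the
/// channel signer type and the `store`/`key_for` helpers are hypothetical, not part of LDK:
///
/// ```ignore
/// fn persist_new_channel(&self, funding_txo: OutPoint, monitor: &ChannelMonitor<S>)
/// -> ChannelMonitorUpdateStatus {
///     // Serialize the full monitor and write it durably (including fsync) before returning.
///     match self.store.write_and_fsync(&key_for(funding_txo), &monitor.encode()) {
///         Ok(()) => ChannelMonitorUpdateStatus::Completed,
///         // On failure, report the update as in-progress and retry in the background via
///         // `list_pending_monitor_updates` and `get_monitor` rather than losing the monitor.
///         Err(_) => ChannelMonitorUpdateStatus::InProgress,
///     }
/// }
/// ```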
76 /// # For those implementing asynchronous persistence
78 /// All calls should generally spawn a background task and immediately return
79 /// [`ChannelMonitorUpdateStatus::InProgress`]. Once the update completes,
80 /// [`ChainMonitor::channel_monitor_updated`] should be called with the corresponding
81 /// [`ChannelMonitor::get_latest_update_id`] or [`ChannelMonitorUpdate::update_id`].
83 /// Note that unlike the direct [`chain::Watch`] interface,
84 /// [`ChainMonitor::channel_monitor_updated`] must be called once for *each* update which occurs.
86 /// If at some point no further progress can be made towards persisting a pending update, the node
87 /// should simply shut down. Until then, the background task should either loop indefinitely, or
88 /// persistence should be regularly retried with [`ChainMonitor::list_pending_monitor_updates`]
89 /// and [`ChainMonitor::get_monitor`] (note that if a full monitor is persisted all pending
90 /// monitor updates may be marked completed).
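///
/// A rough sketch of the asynchronous flow, assuming a `tokio`-style runtime and a hypothetical
/// `remote_store` client (`S` being the channel signer type):
///
/// ```ignore
/// fn update_persisted_channel(&self, funding_txo: OutPoint,
///     _update: Option<&ChannelMonitorUpdate>, monitor: &ChannelMonitor<S>,
/// ) -> ChannelMonitorUpdateStatus {
///     let update_id = monitor.get_latest_update_id();
///     let encoded = monitor.encode();
///     let (chain_monitor, remote_store) =
///         (Arc::clone(&self.chain_monitor), Arc::clone(&self.remote_store));
///     tokio::spawn(async move {
///         // Retry until the write is durable; if no progress can ever be made, shut down.
///         while remote_store.put(funding_txo, &encoded).await.is_err() {}
///         // Each update must be marked complete individually once persisted.
///         let _ = chain_monitor.channel_monitor_updated(funding_txo, update_id);
///     });
///     ChannelMonitorUpdateStatus::InProgress
/// }
/// ```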
92 /// # Using remote watchtowers
94 /// Watchtowers may be updated as a part of an implementation of this trait, utilizing the async
95 /// update process described above while the watchtower is being updated. The following methods are
/// provided for building transactions for a watchtower:
97 /// [`ChannelMonitor::initial_counterparty_commitment_tx`],
98 /// [`ChannelMonitor::counterparty_commitment_txs_from_update`],
99 /// [`ChannelMonitor::sign_to_local_justice_tx`], [`TrustedCommitmentTransaction::revokeable_output_index`],
100 /// [`TrustedCommitmentTransaction::build_to_local_justice_tx`].
102 /// [`TrustedCommitmentTransaction::revokeable_output_index`]: crate::ln::chan_utils::TrustedCommitmentTransaction::revokeable_output_index
103 /// [`TrustedCommitmentTransaction::build_to_local_justice_tx`]: crate::ln::chan_utils::TrustedCommitmentTransaction::build_to_local_justice_tx
104 pub trait Persist<ChannelSigner: WriteableEcdsaChannelSigner> {
105 /// Persist a new channel's data in response to a [`chain::Watch::watch_channel`] call. This is
106 /// called by [`ChannelManager`] for new channels, or may be called directly, e.g. on startup.
108 /// The data can be stored any way you want, but the identifier provided by LDK is the
109 /// channel's outpoint (and it is up to you to maintain a correct mapping between the outpoint
110 /// and the stored channel data). Note that you **must** persist every new monitor to disk.
112 /// The [`ChannelMonitor::get_latest_update_id`] uniquely links this call to [`ChainMonitor::channel_monitor_updated`].
113 /// For [`Persist::persist_new_channel`], it is only necessary to call [`ChainMonitor::channel_monitor_updated`]
114 /// when you return [`ChannelMonitorUpdateStatus::InProgress`].
116 /// See [`Writeable::write`] on [`ChannelMonitor`] for writing out a `ChannelMonitor`
117 /// and [`ChannelMonitorUpdateStatus`] for requirements when returning errors.
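///
/// For example, a file-backed implementation might key the stored monitor by its funding
/// outpoint (the `write_file_atomically` helper here is hypothetical):
///
/// ```ignore
/// // "<funding_txid>_<output_index>" uniquely identifies this channel's monitor on disk.
/// let key = format!("{}_{}", channel_funding_outpoint.txid, channel_funding_outpoint.index);
/// write_file_atomically(&key, &monitor.encode())?;
/// ```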
119 /// [`ChannelManager`]: crate::ln::channelmanager::ChannelManager
120 /// [`Writeable::write`]: crate::util::ser::Writeable::write
121 fn persist_new_channel(&self, channel_funding_outpoint: OutPoint, monitor: &ChannelMonitor<ChannelSigner>) -> ChannelMonitorUpdateStatus;
123 /// Update one channel's data. The provided [`ChannelMonitor`] has already applied the given
126 /// Note that on every update, you **must** persist either the [`ChannelMonitorUpdate`] or the
127 /// updated monitor itself to disk/backups. See the [`Persist`] trait documentation for more
130 /// During blockchain synchronization operations, and in some rare cases, this may be called with
131 /// no [`ChannelMonitorUpdate`], in which case the full [`ChannelMonitor`] needs to be persisted.
132 /// Note that after the full [`ChannelMonitor`] is persisted any previous
133 /// [`ChannelMonitorUpdate`]s which were persisted should be discarded - they can no longer be
134 /// applied to the persisted [`ChannelMonitor`] as they were already applied.
136 /// If an implementer chooses to persist the updates only, they need to make
137 /// sure that all the updates are applied to the `ChannelMonitors` *before*
138 /// the set of channel monitors is given to the `ChannelManager`
139 /// deserialization routine. See [`ChannelMonitor::update_monitor`] for
140 /// applying a monitor update to a monitor. If full `ChannelMonitors` are
141 /// persisted, then there is no need to persist individual updates.
143 /// Note that there could be a performance tradeoff between persisting complete
144 /// channel monitors on every update vs. persisting only updates and applying
145 /// them in batches. The size of each monitor grows `O(number of state updates)`
146 /// whereas updates are small and `O(1)`.
148 /// The [`ChannelMonitorUpdate::update_id`] or [`ChannelMonitor::get_latest_update_id`] uniquely
149 /// links this call to [`ChainMonitor::channel_monitor_updated`].
150 /// For [`Persist::update_persisted_channel`], it is only necessary to call [`ChainMonitor::channel_monitor_updated`]
151 /// when a [`ChannelMonitorUpdate`] is provided and when you return [`ChannelMonitorUpdateStatus::InProgress`].
153 /// See [`Writeable::write`] on [`ChannelMonitor`] for writing out a `ChannelMonitor`,
154 /// [`Writeable::write`] on [`ChannelMonitorUpdate`] for writing out an update, and
155 /// [`ChannelMonitorUpdateStatus`] for requirements when returning errors.
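///
/// A sketch of an implementation which persists individual updates when one is provided and
/// otherwise rewrites the full monitor (the storage helpers are hypothetical):
///
/// ```ignore
/// fn update_persisted_channel(&self, funding_txo: OutPoint,
///     monitor_update: Option<&ChannelMonitorUpdate>, monitor: &ChannelMonitor<S>,
/// ) -> ChannelMonitorUpdateStatus {
///     match monitor_update {
///         // An individual update is small and O(1) to store.
///         Some(update) => self.store_update(funding_txo, update.update_id, &update.encode()),
///         // No update was provided (e.g. during chain sync): rewrite the full monitor and
///         // drop any previously stored updates, which are already applied to it.
///         None => {
///             self.store_monitor(funding_txo, &monitor.encode());
///             self.delete_stored_updates(funding_txo);
///         },
///     }
///     ChannelMonitorUpdateStatus::Completed
/// }
/// ```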
157 /// [`Writeable::write`]: crate::util::ser::Writeable::write
158 fn update_persisted_channel(&self, channel_funding_outpoint: OutPoint, monitor_update: Option<&ChannelMonitorUpdate>, monitor: &ChannelMonitor<ChannelSigner>) -> ChannelMonitorUpdateStatus;
159 /// Prevents the channel monitor from being loaded on startup.
161 /// Archiving the data in a backup location (rather than deleting it fully) is useful for
162 /// hedging against data loss in case of unexpected failure.
163 fn archive_persisted_channel(&self, channel_funding_outpoint: OutPoint);
166 struct MonitorHolder<ChannelSigner: WriteableEcdsaChannelSigner> {
167 monitor: ChannelMonitor<ChannelSigner>,
168 /// The full set of pending monitor updates for this Channel.
170 /// Note that this lock must be held during updates to prevent a race where we call
171 /// update_persisted_channel, the user returns a
172 /// [`ChannelMonitorUpdateStatus::InProgress`], and then calls channel_monitor_updated
173 /// immediately, racing our insertion of the pending update into the contained Vec.
174 pending_monitor_updates: Mutex<Vec<u64>>,
177 impl<ChannelSigner: WriteableEcdsaChannelSigner> MonitorHolder<ChannelSigner> {
178 fn has_pending_updates(&self, pending_monitor_updates_lock: &MutexGuard<Vec<u64>>) -> bool {
179 !pending_monitor_updates_lock.is_empty()
183 /// A read-only reference to a current ChannelMonitor.
/// Note that this holds a mutex in [`ChainMonitor`] and may block other events until it is
/// released.
187 pub struct LockedChannelMonitor<'a, ChannelSigner: WriteableEcdsaChannelSigner> {
188 lock: RwLockReadGuard<'a, HashMap<OutPoint, MonitorHolder<ChannelSigner>>>,
189 funding_txo: OutPoint,
192 impl<ChannelSigner: WriteableEcdsaChannelSigner> Deref for LockedChannelMonitor<'_, ChannelSigner> {
193 type Target = ChannelMonitor<ChannelSigner>;
194 fn deref(&self) -> &ChannelMonitor<ChannelSigner> {
195 &self.lock.get(&self.funding_txo).expect("Checked at construction").monitor
199 /// An implementation of [`chain::Watch`] for monitoring channels.
201 /// Connected and disconnected blocks must be provided to `ChainMonitor` as documented by
202 /// [`chain::Watch`]. May be used in conjunction with [`ChannelManager`] to monitor channels locally
/// or used independently to monitor channels remotely. See the [module-level documentation] for
/// details.
206 /// Note that `ChainMonitor` should regularly trigger rebroadcasts/fee bumps of pending claims from
207 /// a force-closed channel. This is crucial in preventing certain classes of pinning attacks,
208 /// detecting substantial mempool feerate changes between blocks, and ensuring reliability if
/// broadcasting fails. We recommend invoking [`rebroadcast_pending_claims`] every 30 seconds,
/// or lower if running in an
210 /// environment with spotty connections, like on mobile.
212 /// [`ChannelManager`]: crate::ln::channelmanager::ChannelManager
213 /// [module-level documentation]: crate::chain::chainmonitor
214 /// [`rebroadcast_pending_claims`]: Self::rebroadcast_pending_claims
215 pub struct ChainMonitor<ChannelSigner: WriteableEcdsaChannelSigner, C: Deref, T: Deref, F: Deref, L: Deref, P: Deref>
216 where C::Target: chain::Filter,
217 T::Target: BroadcasterInterface,
218 F::Target: FeeEstimator,
220 P::Target: Persist<ChannelSigner>,
222 monitors: RwLock<HashMap<OutPoint, MonitorHolder<ChannelSigner>>>,
223 chain_source: Option<C>,
228 /// "User-provided" (ie persistence-completion/-failed) [`MonitorEvent`]s. These came directly
229 /// from the user and not from a [`ChannelMonitor`].
230 pending_monitor_events: Mutex<Vec<(OutPoint, ChannelId, Vec<MonitorEvent>, Option<PublicKey>)>>,
231 /// The best block height seen, used as a proxy for the passage of time.
232 highest_chain_height: AtomicUsize,
234 event_notifier: Notifier,
237 impl<ChannelSigner: WriteableEcdsaChannelSigner, C: Deref, T: Deref, F: Deref, L: Deref, P: Deref> ChainMonitor<ChannelSigner, C, T, F, L, P>
238 where C::Target: chain::Filter,
239 T::Target: BroadcasterInterface,
240 F::Target: FeeEstimator,
242 P::Target: Persist<ChannelSigner>,
244 /// Dispatches to per-channel monitors, which are responsible for updating their on-chain view
245 /// of a channel and reacting accordingly based on transactions in the given chain data. See
246 /// [`ChannelMonitor::block_connected`] for details. Any HTLCs that were resolved on chain will
247 /// be returned by [`chain::Watch::release_pending_monitor_events`].
249 /// Calls back to [`chain::Filter`] if any monitor indicated new outputs to watch. Subsequent
250 /// calls must not exclude any transactions matching the new outputs nor any in-block
251 /// descendants of such transactions. It is not necessary to re-fetch the block to obtain
252 /// updated `txdata`.
254 /// Calls which represent a new blockchain tip height should set `best_height`.
255 fn process_chain_data<FN>(&self, header: &Header, best_height: Option<u32>, txdata: &TransactionData, process: FN)
257 FN: Fn(&ChannelMonitor<ChannelSigner>, &TransactionData) -> Vec<TransactionOutputs>
259 let err_str = "ChannelMonitor[Update] persistence failed unrecoverably. This indicates we cannot continue normal operation and must shut down.";
260 let funding_outpoints = hash_set_from_iter(self.monitors.read().unwrap().keys().cloned());
261 for funding_outpoint in funding_outpoints.iter() {
262 let monitor_lock = self.monitors.read().unwrap();
263 if let Some(monitor_state) = monitor_lock.get(funding_outpoint) {
264 if self.update_monitor_with_chain_data(header, txdata, &process, funding_outpoint, &monitor_state).is_err() {
265 // Take the monitors lock for writing so that we poison it and any future
266 // operations going forward fail immediately.
267 core::mem::drop(monitor_lock);
268 let _poison = self.monitors.write().unwrap();
269 log_error!(self.logger, "{}", err_str);
270 panic!("{}", err_str);
// Do some follow-up cleanup if any funding outpoints were added in between iterations
276 let monitor_states = self.monitors.write().unwrap();
277 for (funding_outpoint, monitor_state) in monitor_states.iter() {
278 if !funding_outpoints.contains(funding_outpoint) {
279 if self.update_monitor_with_chain_data(header, txdata, &process, funding_outpoint, &monitor_state).is_err() {
280 log_error!(self.logger, "{}", err_str);
281 panic!("{}", err_str);
286 if let Some(height) = best_height {
287 // If the best block height is being updated, update highest_chain_height under the
288 // monitors write lock.
289 let old_height = self.highest_chain_height.load(Ordering::Acquire);
290 let new_height = height as usize;
291 if new_height > old_height {
292 self.highest_chain_height.store(new_height, Ordering::Release);
297 fn update_monitor_with_chain_data<FN>(
298 &self, header: &Header, txdata: &TransactionData, process: FN, funding_outpoint: &OutPoint,
299 monitor_state: &MonitorHolder<ChannelSigner>
300 ) -> Result<(), ()> where FN: Fn(&ChannelMonitor<ChannelSigner>, &TransactionData) -> Vec<TransactionOutputs> {
301 let monitor = &monitor_state.monitor;
302 let logger = WithChannelMonitor::from(&self.logger, &monitor);
305 txn_outputs = process(monitor, txdata);
306 log_trace!(logger, "Syncing Channel Monitor for channel {}", log_funding_info!(monitor));
307 match self.persister.update_persisted_channel(*funding_outpoint, None, monitor) {
308 ChannelMonitorUpdateStatus::Completed =>
309 log_trace!(logger, "Finished syncing Channel Monitor for channel {} for block-data",
310 log_funding_info!(monitor)
312 ChannelMonitorUpdateStatus::InProgress => {
313 log_trace!(logger, "Channel Monitor sync for channel {} in progress.", log_funding_info!(monitor));
315 ChannelMonitorUpdateStatus::UnrecoverableError => {
321 // Register any new outputs with the chain source for filtering, storing any dependent
322 // transactions from within the block that previously had not been included in txdata.
323 if let Some(ref chain_source) = self.chain_source {
324 let block_hash = header.block_hash();
325 for (txid, mut outputs) in txn_outputs.drain(..) {
326 for (idx, output) in outputs.drain(..) {
327 // Register any new outputs with the chain source for filtering
328 let output = WatchedOutput {
329 block_hash: Some(block_hash),
330 outpoint: OutPoint { txid, index: idx as u16 },
331 script_pubkey: output.script_pubkey,
333 log_trace!(logger, "Adding monitoring for spends of outpoint {} to the filter", output.outpoint);
334 chain_source.register_output(output);
341 /// Creates a new `ChainMonitor` used to watch on-chain activity pertaining to channels.
343 /// When an optional chain source implementing [`chain::Filter`] is provided, the chain monitor
344 /// will call back to it indicating transactions and outputs of interest. This allows clients to
345 /// pre-filter blocks or only fetch blocks matching a compact filter. Otherwise, clients may
346 /// always need to fetch full blocks absent another means for determining which blocks contain
347 /// transactions relevant to the watched channels.
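///
/// For example (a sketch; `broadcaster`, `logger`, `fee_estimator` and `persister` are assumed
/// to be defined elsewhere, and passing `None` disables pre-filtering):
///
/// ```ignore
/// let chain_monitor = ChainMonitor::new(None, &broadcaster, &logger, &fee_estimator, &persister);
/// ```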
348 pub fn new(chain_source: Option<C>, broadcaster: T, logger: L, feeest: F, persister: P) -> Self {
350 monitors: RwLock::new(new_hash_map()),
354 fee_estimator: feeest,
356 pending_monitor_events: Mutex::new(Vec::new()),
357 highest_chain_height: AtomicUsize::new(0),
358 event_notifier: Notifier::new(),
362 /// Gets the balances in the contained [`ChannelMonitor`]s which are claimable on-chain or
363 /// claims which are awaiting confirmation.
365 /// Includes the balances from each [`ChannelMonitor`] *except* those included in
366 /// `ignored_channels`, allowing you to filter out balances from channels which are still open
367 /// (and whose balance should likely be pulled from the [`ChannelDetails`]).
369 /// See [`ChannelMonitor::get_claimable_balances`] for more details on the exact criteria for
370 /// inclusion in the return value.
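///
/// For example, to total the on-chain claimable balance from channels which are no longer open
/// (assuming a `channel_manager` in scope):
///
/// ```ignore
/// let open_channels = channel_manager.list_channels();
/// let ignored = open_channels.iter().collect::<Vec<_>>();
/// let claimable_sats: u64 = chain_monitor.get_claimable_balances(&ignored)
///     .iter().map(|balance| balance.claimable_amount_satoshis()).sum();
/// ```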
371 pub fn get_claimable_balances(&self, ignored_channels: &[&ChannelDetails]) -> Vec<Balance> {
372 let mut ret = Vec::new();
373 let monitor_states = self.monitors.read().unwrap();
374 for (_, monitor_state) in monitor_states.iter().filter(|(funding_outpoint, _)| {
375 for chan in ignored_channels {
376 if chan.funding_txo.as_ref() == Some(funding_outpoint) {
382 ret.append(&mut monitor_state.monitor.get_claimable_balances());
387 /// Gets the [`LockedChannelMonitor`] for a given funding outpoint, returning an `Err` if no
388 /// such [`ChannelMonitor`] is currently being monitored for.
/// Note that the result holds a mutex over our monitor set, and should not be held
/// indefinitely.
392 pub fn get_monitor(&self, funding_txo: OutPoint) -> Result<LockedChannelMonitor<'_, ChannelSigner>, ()> {
393 let lock = self.monitors.read().unwrap();
394 if lock.get(&funding_txo).is_some() {
395 Ok(LockedChannelMonitor { lock, funding_txo })
401 /// Lists the funding outpoint and channel ID of each [`ChannelMonitor`] being monitored.
403 /// Note that [`ChannelMonitor`]s are not removed when a channel is closed as they are always
404 /// monitoring for on-chain state resolutions.
405 pub fn list_monitors(&self) -> Vec<(OutPoint, ChannelId)> {
406 self.monitors.read().unwrap().iter().map(|(outpoint, monitor_holder)| {
407 let channel_id = monitor_holder.monitor.channel_id();
408 (*outpoint, channel_id)
412 #[cfg(not(c_bindings))]
413 /// Lists the pending updates for each [`ChannelMonitor`] (by `OutPoint` being monitored).
414 /// Each `Vec<u64>` contains `update_id`s from [`ChannelMonitor::get_latest_update_id`] for updates
415 /// that have not yet been fully persisted. Note that if a full monitor is persisted all the pending
416 /// monitor updates must be individually marked completed by calling [`ChainMonitor::channel_monitor_updated`].
417 pub fn list_pending_monitor_updates(&self) -> HashMap<OutPoint, Vec<u64>> {
418 hash_map_from_iter(self.monitors.read().unwrap().iter().map(|(outpoint, holder)| {
419 (*outpoint, holder.pending_monitor_updates.lock().unwrap().clone())
#[cfg(c_bindings)]
/// Lists the pending updates for each [`ChannelMonitor`] (by `OutPoint` being monitored).
425 /// Each `Vec<u64>` contains `update_id`s from [`ChannelMonitor::get_latest_update_id`] for updates
426 /// that have not yet been fully persisted. Note that if a full monitor is persisted all the pending
427 /// monitor updates must be individually marked completed by calling [`ChainMonitor::channel_monitor_updated`].
428 pub fn list_pending_monitor_updates(&self) -> Vec<(OutPoint, Vec<u64>)> {
429 self.monitors.read().unwrap().iter().map(|(outpoint, holder)| {
430 (*outpoint, holder.pending_monitor_updates.lock().unwrap().clone())
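/// Removes the monitor at the given outpoint and returns it.
///
/// Panics if no monitor is currently being monitored for the given outpoint.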
436 pub fn remove_monitor(&self, funding_txo: &OutPoint) -> ChannelMonitor<ChannelSigner> {
437 self.monitors.write().unwrap().remove(funding_txo).unwrap().monitor
440 /// Indicates the persistence of a [`ChannelMonitor`] has completed after
441 /// [`ChannelMonitorUpdateStatus::InProgress`] was returned from an update operation.
443 /// Thus, the anticipated use is, at a high level:
444 /// 1) This [`ChainMonitor`] calls [`Persist::update_persisted_channel`] which stores the
445 /// update to disk and begins updating any remote (e.g. watchtower/backup) copies,
446 /// returning [`ChannelMonitorUpdateStatus::InProgress`],
447 /// 2) once all remote copies are updated, you call this function with [`ChannelMonitor::get_latest_update_id`]
448 /// or [`ChannelMonitorUpdate::update_id`] as the `completed_update_id`, and once all pending
449 /// updates have completed the channel will be re-enabled.
451 /// It is only necessary to call [`ChainMonitor::channel_monitor_updated`] when you return [`ChannelMonitorUpdateStatus::InProgress`]
452 /// from [`Persist`] and either:
453 /// 1. A new [`ChannelMonitor`] was added in [`Persist::persist_new_channel`], or
454 /// 2. A [`ChannelMonitorUpdate`] was provided as part of [`Persist::update_persisted_channel`].
455 /// Note that we don't care about calls to [`Persist::update_persisted_channel`] where no
456 /// [`ChannelMonitorUpdate`] was provided.
458 /// Returns an [`APIError::APIMisuseError`] if `funding_txo` does not match any currently
459 /// registered [`ChannelMonitor`]s.
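///
/// As a sketch, pending persistence can be re-driven after an outage using
/// [`ChainMonitor::list_pending_monitor_updates`] and [`ChainMonitor::get_monitor`] (the
/// `persist_full_monitor` helper is hypothetical):
///
/// ```ignore
/// for (funding_txo, update_ids) in chain_monitor.list_pending_monitor_updates() {
///     let monitor = chain_monitor.get_monitor(funding_txo).unwrap();
///     // Persisting the full monitor covers all of its pending updates...
///     persist_full_monitor(funding_txo, &*monitor);
///     // ...but each pending update id must still be marked complete individually.
///     for update_id in update_ids {
///         chain_monitor.channel_monitor_updated(funding_txo, update_id).unwrap();
///     }
/// }
/// ```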
460 pub fn channel_monitor_updated(&self, funding_txo: OutPoint, completed_update_id: u64) -> Result<(), APIError> {
461 let monitors = self.monitors.read().unwrap();
462 let monitor_data = if let Some(mon) = monitors.get(&funding_txo) { mon } else {
463 return Err(APIError::APIMisuseError { err: format!("No ChannelMonitor matching funding outpoint {:?} found", funding_txo) });
465 let mut pending_monitor_updates = monitor_data.pending_monitor_updates.lock().unwrap();
466 pending_monitor_updates.retain(|update_id| *update_id != completed_update_id);
468 // Note that we only check for pending non-chainsync monitor updates and we don't track monitor
469 // updates resulting from chainsync in `pending_monitor_updates`.
470 let monitor_is_pending_updates = monitor_data.has_pending_updates(&pending_monitor_updates);
471 log_debug!(self.logger, "Completed off-chain monitor update {} for channel with funding outpoint {:?}, {}",
474 if monitor_is_pending_updates {
475 "still have pending off-chain updates"
477 "all off-chain updates complete, returning a MonitorEvent"
479 if monitor_is_pending_updates {
480 // If there are still monitor updates pending, we cannot yet construct a
484 let channel_id = monitor_data.monitor.channel_id();
485 self.pending_monitor_events.lock().unwrap().push((funding_txo, channel_id, vec![MonitorEvent::Completed {
486 funding_txo, channel_id,
487 monitor_update_id: monitor_data.monitor.get_latest_update_id(),
488 }], monitor_data.monitor.get_counterparty_node_id()));
490 self.event_notifier.notify();
494 /// This wrapper avoids having to update some of our tests for now as they assume the direct
495 /// chain::Watch API wherein we mark a monitor fully-updated by just calling
496 /// channel_monitor_updated once with the highest ID.
497 #[cfg(any(test, fuzzing))]
498 pub fn force_channel_monitor_updated(&self, funding_txo: OutPoint, monitor_update_id: u64) {
499 let monitors = self.monitors.read().unwrap();
500 let (counterparty_node_id, channel_id) = if let Some(m) = monitors.get(&funding_txo) {
501 (m.monitor.get_counterparty_node_id(), m.monitor.channel_id())
503 (None, ChannelId::v1_from_funding_outpoint(funding_txo))
505 self.pending_monitor_events.lock().unwrap().push((funding_txo, channel_id, vec![MonitorEvent::Completed {
509 }], counterparty_node_id));
510 self.event_notifier.notify();
513 #[cfg(any(test, feature = "_test_utils"))]
514 pub fn get_and_clear_pending_events(&self) -> Vec<events::Event> {
515 use crate::events::EventsProvider;
516 let events = core::cell::RefCell::new(Vec::new());
517 let event_handler = |event: events::Event| events.borrow_mut().push(event);
518 self.process_pending_events(&event_handler);
522 /// Processes any events asynchronously in the order they were generated since the last call
523 /// using the given event handler.
525 /// See the trait-level documentation of [`EventsProvider`] for requirements.
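///
/// For example (a sketch, assuming an async `handle_event` function is in scope):
///
/// ```ignore
/// chain_monitor.process_pending_events_async(|event| async {
///     handle_event(event).await;
/// }).await;
/// ```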
527 /// [`EventsProvider`]: crate::events::EventsProvider
528 pub async fn process_pending_events_async<Future: core::future::Future, H: Fn(Event) -> Future>(
531 // Sadly we can't hold the monitors read lock through an async call. Thus we have to do a
532 // crazy dance to process a monitor's events then only remove them once we've done so.
533 let mons_to_process = self.monitors.read().unwrap().keys().cloned().collect::<Vec<_>>();
534 for funding_txo in mons_to_process {
536 super::channelmonitor::process_events_body!(
537 self.monitors.read().unwrap().get(&funding_txo).map(|m| &m.monitor), ev, handler(ev).await);
541 /// Gets a [`Future`] that completes when an event is available either via
542 /// [`chain::Watch::release_pending_monitor_events`] or
543 /// [`EventsProvider::process_pending_events`].
545 /// Note that callbacks registered on the [`Future`] MUST NOT call back into this
546 /// [`ChainMonitor`] and should instead register actions to be taken later.
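///
/// For example, a background task might wait on it between event-processing rounds (a sketch;
/// `event_handler` is assumed to be defined elsewhere):
///
/// ```ignore
/// loop {
///     chain_monitor.get_update_future().await;
///     chain_monitor.process_pending_events(&event_handler);
/// }
/// ```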
548 /// [`EventsProvider::process_pending_events`]: crate::events::EventsProvider::process_pending_events
549 pub fn get_update_future(&self) -> Future {
550 self.event_notifier.get_future()
553 /// Triggers rebroadcasts/fee-bumps of pending claims from a force-closed channel. This is
554 /// crucial in preventing certain classes of pinning attacks, detecting substantial mempool
555 /// feerate changes between blocks, and ensuring reliability if broadcasting fails. We recommend
556 /// invoking this every 30 seconds, or lower if running in an environment with spotty
557 /// connections, like on mobile.
558 pub fn rebroadcast_pending_claims(&self) {
559 let monitors = self.monitors.read().unwrap();
560 for (_, monitor_holder) in &*monitors {
561 monitor_holder.monitor.rebroadcast_pending_claims(
562 &*self.broadcaster, &*self.fee_estimator, &self.logger
567 /// Triggers rebroadcasts of pending claims from force-closed channels after a transaction
568 /// signature generation failure.
570 /// `monitor_opt` can be used as a filter to only trigger them for a specific channel monitor.
571 pub fn signer_unblocked(&self, monitor_opt: Option<OutPoint>) {
572 let monitors = self.monitors.read().unwrap();
573 if let Some(funding_txo) = monitor_opt {
574 if let Some(monitor_holder) = monitors.get(&funding_txo) {
575 monitor_holder.monitor.signer_unblocked(
576 &*self.broadcaster, &*self.fee_estimator, &self.logger
580 for (_, monitor_holder) in &*monitors {
581 monitor_holder.monitor.signer_unblocked(
582 &*self.broadcaster, &*self.fee_estimator, &self.logger
588 /// Archives fully resolved channel monitors by calling [`Persist::archive_persisted_channel`].
590 /// This is useful for pruning fully resolved monitors from the monitor set and primary
591 /// storage so they are not kept in memory and reloaded on restart.
593 /// Should be called occasionally (once every handful of blocks or on startup).
595 /// Depending on the implementation of [`Persist::archive_persisted_channel`] the monitor
596 /// data could be moved to an archive location or removed entirely.
597 pub fn archive_fully_resolved_channel_monitors(&self) {
598 let mut have_monitors_to_prune = false;
599 for (_, monitor_holder) in self.monitors.read().unwrap().iter() {
600 let logger = WithChannelMonitor::from(&self.logger, &monitor_holder.monitor);
601 if monitor_holder.monitor.is_fully_resolved(&logger) {
602 have_monitors_to_prune = true;
605 if have_monitors_to_prune {
606 let mut monitors = self.monitors.write().unwrap();
607 monitors.retain(|funding_txo, monitor_holder| {
608 let logger = WithChannelMonitor::from(&self.logger, &monitor_holder.monitor);
609 if monitor_holder.monitor.is_fully_resolved(&logger) {
611 "Archiving fully resolved ChannelMonitor for funding txo {}",
614 self.persister.archive_persisted_channel(*funding_txo);
624 impl<ChannelSigner: WriteableEcdsaChannelSigner, C: Deref, T: Deref, F: Deref, L: Deref, P: Deref>
625 chain::Listen for ChainMonitor<ChannelSigner, C, T, F, L, P>
627 C::Target: chain::Filter,
628 T::Target: BroadcasterInterface,
629 F::Target: FeeEstimator,
631 P::Target: Persist<ChannelSigner>,
633 fn filtered_block_connected(&self, header: &Header, txdata: &TransactionData, height: u32) {
634 log_debug!(self.logger, "New best block {} at height {} provided via block_connected", header.block_hash(), height);
635 self.process_chain_data(header, Some(height), &txdata, |monitor, txdata| {
636 monitor.block_connected(
637 header, txdata, height, &*self.broadcaster, &*self.fee_estimator, &self.logger)
641 fn block_disconnected(&self, header: &Header, height: u32) {
642 let monitor_states = self.monitors.read().unwrap();
643 log_debug!(self.logger, "Latest block {} at height {} removed via block_disconnected", header.block_hash(), height);
644 for monitor_state in monitor_states.values() {
645 monitor_state.monitor.block_disconnected(
646 header, height, &*self.broadcaster, &*self.fee_estimator, &self.logger);
651 impl<ChannelSigner: WriteableEcdsaChannelSigner, C: Deref, T: Deref, F: Deref, L: Deref, P: Deref>
652 chain::Confirm for ChainMonitor<ChannelSigner, C, T, F, L, P>
654 C::Target: chain::Filter,
655 T::Target: BroadcasterInterface,
656 F::Target: FeeEstimator,
658 P::Target: Persist<ChannelSigner>,
660 fn transactions_confirmed(&self, header: &Header, txdata: &TransactionData, height: u32) {
661 log_debug!(self.logger, "{} provided transactions confirmed at height {} in block {}", txdata.len(), height, header.block_hash());
662 self.process_chain_data(header, None, txdata, |monitor, txdata| {
663 monitor.transactions_confirmed(
664 header, txdata, height, &*self.broadcaster, &*self.fee_estimator, &self.logger)
668 fn transaction_unconfirmed(&self, txid: &Txid) {
669 log_debug!(self.logger, "Transaction {} reorganized out of chain", txid);
670 let monitor_states = self.monitors.read().unwrap();
671 for monitor_state in monitor_states.values() {
672 monitor_state.monitor.transaction_unconfirmed(txid, &*self.broadcaster, &*self.fee_estimator, &self.logger);
676 fn best_block_updated(&self, header: &Header, height: u32) {
677 log_debug!(self.logger, "New best block {} at height {} provided via best_block_updated", header.block_hash(), height);
678 self.process_chain_data(header, Some(height), &[], |monitor, txdata| {
679 // While in practice there shouldn't be any recursive calls when given empty txdata,
680 // it's still possible if a chain::Filter implementation returns a transaction.
681 debug_assert!(txdata.is_empty());
682 monitor.best_block_updated(
683 header, height, &*self.broadcaster, &*self.fee_estimator, &self.logger
688 fn get_relevant_txids(&self) -> Vec<(Txid, u32, Option<BlockHash>)> {
689 let mut txids = Vec::new();
690 let monitor_states = self.monitors.read().unwrap();
691 for monitor_state in monitor_states.values() {
692 txids.append(&mut monitor_state.monitor.get_relevant_txids());
695 txids.sort_unstable_by(|a, b| a.0.cmp(&b.0).then(b.1.cmp(&a.1)));
696 txids.dedup_by_key(|(txid, _, _)| *txid);
701 impl<ChannelSigner: WriteableEcdsaChannelSigner, C: Deref , T: Deref , F: Deref , L: Deref , P: Deref >
702 chain::Watch<ChannelSigner> for ChainMonitor<ChannelSigner, C, T, F, L, P>
703 where C::Target: chain::Filter,
704 T::Target: BroadcasterInterface,
705 F::Target: FeeEstimator,
707 P::Target: Persist<ChannelSigner>,
709 fn watch_channel(&self, funding_outpoint: OutPoint, monitor: ChannelMonitor<ChannelSigner>) -> Result<ChannelMonitorUpdateStatus, ()> {
710 let logger = WithChannelMonitor::from(&self.logger, &monitor);
711 let mut monitors = self.monitors.write().unwrap();
712 let entry = match monitors.entry(funding_outpoint) {
713 hash_map::Entry::Occupied(_) => {
714 log_error!(logger, "Failed to add new channel data: channel monitor for given outpoint is already present");
717 hash_map::Entry::Vacant(e) => e,
719 log_trace!(logger, "Got new ChannelMonitor for channel {}", log_funding_info!(monitor));
720 let update_id = monitor.get_latest_update_id();
721 let mut pending_monitor_updates = Vec::new();
722 let persist_res = self.persister.persist_new_channel(funding_outpoint, &monitor);
724 ChannelMonitorUpdateStatus::InProgress => {
725 log_info!(logger, "Persistence of new ChannelMonitor for channel {} in progress", log_funding_info!(monitor));
726 pending_monitor_updates.push(update_id);
728 ChannelMonitorUpdateStatus::Completed => {
729 log_info!(logger, "Persistence of new ChannelMonitor for channel {} completed", log_funding_info!(monitor));
731 ChannelMonitorUpdateStatus::UnrecoverableError => {
732 let err_str = "ChannelMonitor[Update] persistence failed unrecoverably. This indicates we cannot continue normal operation and must shut down.";
733 log_error!(logger, "{}", err_str);
734 panic!("{}", err_str);
737 if let Some(ref chain_source) = self.chain_source {
738 monitor.load_outputs_to_watch(chain_source , &self.logger);
740 entry.insert(MonitorHolder {
742 pending_monitor_updates: Mutex::new(pending_monitor_updates),
747 fn update_channel(&self, funding_txo: OutPoint, update: &ChannelMonitorUpdate) -> ChannelMonitorUpdateStatus {
748 // `ChannelMonitorUpdate`'s `channel_id` is `None` prior to 0.0.121 and all channels in those
// versions are V1-established. For 0.0.121+ the `channel_id` field is always `Some`.
750 let channel_id = update.channel_id.unwrap_or(ChannelId::v1_from_funding_outpoint(funding_txo));
751 // Update the monitor that watches the channel referred to by the given outpoint.
752 let monitors = self.monitors.read().unwrap();
753 match monitors.get(&funding_txo) {
755 let logger = WithContext::from(&self.logger, update.counterparty_node_id, Some(channel_id));
756 log_error!(logger, "Failed to update channel monitor: no such monitor registered");
758 // We should never ever trigger this from within ChannelManager. Technically a
759 // user could use this object with some proxying in between which makes this
760 // possible, but in tests and fuzzing, this should be a panic.
761 #[cfg(debug_assertions)]
762 panic!("ChannelManager generated a channel update for a channel that was not yet registered!");
763 #[cfg(not(debug_assertions))]
764 ChannelMonitorUpdateStatus::InProgress
766 Some(monitor_state) => {
767 let monitor = &monitor_state.monitor;
768 let logger = WithChannelMonitor::from(&self.logger, &monitor);
769 log_trace!(logger, "Updating ChannelMonitor to id {} for channel {}", update.update_id, log_funding_info!(monitor));
770 let update_res = monitor.update_monitor(update, &self.broadcaster, &self.fee_estimator, &self.logger);
772 let update_id = update.update_id;
773 let mut pending_monitor_updates = monitor_state.pending_monitor_updates.lock().unwrap();
774 let persist_res = if update_res.is_err() {
775 // Even if updating the monitor returns an error, the monitor's state will
776 // still be changed. Therefore, we should persist the updated monitor despite the error.
777 // We don't want to persist a `monitor_update` which results in a failure to apply later
778 // while reading `channel_monitor` with updates from storage. Instead, we should persist
779 // the entire `channel_monitor` here.
780 log_warn!(logger, "Failed to update ChannelMonitor for channel {}. Going ahead and persisting the entire ChannelMonitor", log_funding_info!(monitor));
781 self.persister.update_persisted_channel(funding_txo, None, monitor)
783 self.persister.update_persisted_channel(funding_txo, Some(update), monitor)
786 ChannelMonitorUpdateStatus::InProgress => {
787 pending_monitor_updates.push(update_id);
789 "Persistence of ChannelMonitorUpdate id {:?} for channel {} in progress",
791 log_funding_info!(monitor)
794 ChannelMonitorUpdateStatus::Completed => {
796 "Persistence of ChannelMonitorUpdate id {:?} for channel {} completed",
798 log_funding_info!(monitor)
801 ChannelMonitorUpdateStatus::UnrecoverableError => {
802 // Take the monitors lock for writing so that we poison it and any future
803 // operations going forward fail immediately.
804 core::mem::drop(pending_monitor_updates);
805 core::mem::drop(monitors);
806 let _poison = self.monitors.write().unwrap();
807 let err_str = "ChannelMonitor[Update] persistence failed unrecoverably. This indicates we cannot continue normal operation and must shut down.";
808 log_error!(logger, "{}", err_str);
809 panic!("{}", err_str);
812 if update_res.is_err() {
813 ChannelMonitorUpdateStatus::InProgress
821 fn release_pending_monitor_events(&self) -> Vec<(OutPoint, ChannelId, Vec<MonitorEvent>, Option<PublicKey>)> {
822 let mut pending_monitor_events = self.pending_monitor_events.lock().unwrap().split_off(0);
823 for monitor_state in self.monitors.read().unwrap().values() {
824 let monitor_events = monitor_state.monitor.get_and_clear_pending_monitor_events();
825 if monitor_events.len() > 0 {
826 let monitor_outpoint = monitor_state.monitor.get_funding_txo().0;
827 let monitor_channel_id = monitor_state.monitor.channel_id();
828 let counterparty_node_id = monitor_state.monitor.get_counterparty_node_id();
829 pending_monitor_events.push((monitor_outpoint, monitor_channel_id, monitor_events, counterparty_node_id));
832 pending_monitor_events
836 impl<ChannelSigner: WriteableEcdsaChannelSigner, C: Deref, T: Deref, F: Deref, L: Deref, P: Deref> events::EventsProvider for ChainMonitor<ChannelSigner, C, T, F, L, P>
837 where C::Target: chain::Filter,
838 T::Target: BroadcasterInterface,
839 F::Target: FeeEstimator,
841 P::Target: Persist<ChannelSigner>,
843 /// Processes [`SpendableOutputs`] events produced from each [`ChannelMonitor`] upon maturity.
845 /// For channels featuring anchor outputs, this method will also process [`BumpTransaction`]
846 /// events produced from each [`ChannelMonitor`] while there is a balance to claim onchain
847 /// within each channel. As the confirmation of a commitment transaction may be critical to the
848 /// safety of funds, we recommend invoking this every 30 seconds, or lower if running in an
849 /// environment with spotty connections, like on mobile.
851 /// An [`EventHandler`] may safely call back to the provider, though this shouldn't be needed in
852 /// order to handle these events.
854 /// [`SpendableOutputs`]: events::Event::SpendableOutputs
855 /// [`BumpTransaction`]: events::Event::BumpTransaction
856 fn process_pending_events<H: Deref>(&self, handler: H) where H::Target: EventHandler {
857 for monitor_state in self.monitors.read().unwrap().values() {
858 monitor_state.monitor.process_pending_events(&handler);
865 use crate::check_added_monitors;
866 use crate::{expect_payment_path_successful, get_event_msg};
867 use crate::{get_htlc_update_msgs, get_revoke_commit_msgs};
868 use crate::chain::{ChannelMonitorUpdateStatus, Watch};
869 use crate::events::{Event, MessageSendEvent, MessageSendEventsProvider};
870 use crate::ln::functional_test_utils::*;
871 use crate::ln::msgs::ChannelMessageHandler;
874 fn test_async_ooo_offchain_updates() {
875 // Test that if we have multiple offchain updates being persisted and they complete
// out-of-order, the ChainMonitor waits until all have completed before informing the
// ChannelManager.
878 let chanmon_cfgs = create_chanmon_cfgs(2);
879 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
880 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
881 let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
882 create_announced_chan_between_nodes(&nodes, 0, 1);
884 // Route two payments to be claimed at the same time.
885 let (payment_preimage_1, payment_hash_1, ..) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000);
886 let (payment_preimage_2, payment_hash_2, ..) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000);
888 chanmon_cfgs[1].persister.offchain_monitor_updates.lock().unwrap().clear();
889 chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
890 chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
892 nodes[1].node.claim_funds(payment_preimage_1);
893 check_added_monitors!(nodes[1], 1);
894 nodes[1].node.claim_funds(payment_preimage_2);
895 check_added_monitors!(nodes[1], 1);
897 let persistences = chanmon_cfgs[1].persister.offchain_monitor_updates.lock().unwrap().clone();
898 assert_eq!(persistences.len(), 1);
899 let (funding_txo, updates) = persistences.iter().next().unwrap();
900 assert_eq!(updates.len(), 2);
902 // Note that updates is a HashMap so the ordering here is actually random. This shouldn't
// fail either way, but if it fails intermittently it likely depends on the ordering of updates.
904 let mut update_iter = updates.iter();
905 let next_update = update_iter.next().unwrap().clone();
906 // Should contain next_update when pending updates listed.
907 #[cfg(not(c_bindings))]
908 assert!(nodes[1].chain_monitor.chain_monitor.list_pending_monitor_updates().get(funding_txo)
909 .unwrap().contains(&next_update));
911 assert!(nodes[1].chain_monitor.chain_monitor.list_pending_monitor_updates().iter()
912 .find(|(txo, _)| txo == funding_txo).unwrap().1.contains(&next_update));
913 nodes[1].chain_monitor.chain_monitor.channel_monitor_updated(*funding_txo, next_update.clone()).unwrap();
914 // Should not contain the previously pending next_update when pending updates listed.
915 #[cfg(not(c_bindings))]
916 assert!(!nodes[1].chain_monitor.chain_monitor.list_pending_monitor_updates().get(funding_txo)
917 .unwrap().contains(&next_update));
919 assert!(!nodes[1].chain_monitor.chain_monitor.list_pending_monitor_updates().iter()
920 .find(|(txo, _)| txo == funding_txo).unwrap().1.contains(&next_update));
921 assert!(nodes[1].chain_monitor.release_pending_monitor_events().is_empty());
922 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
923 assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
924 nodes[1].chain_monitor.chain_monitor.channel_monitor_updated(*funding_txo, update_iter.next().unwrap().clone()).unwrap();
926 let claim_events = nodes[1].node.get_and_clear_pending_events();
927 assert_eq!(claim_events.len(), 2);
928 match claim_events[0] {
929 Event::PaymentClaimed { ref payment_hash, amount_msat: 1_000_000, .. } => {
930 assert_eq!(payment_hash_1, *payment_hash);
932 _ => panic!("Unexpected event"),
934 match claim_events[1] {
935 Event::PaymentClaimed { ref payment_hash, amount_msat: 1_000_000, .. } => {
936 assert_eq!(payment_hash_2, *payment_hash);
938 _ => panic!("Unexpected event"),
941 // Now manually walk the commitment signed dance - because we claimed two payments
942 // back-to-back it doesn't fit into the neat walk commitment_signed_dance does.
944 let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
945 nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]);
946 expect_payment_sent(&nodes[0], payment_preimage_1, None, false, false);
947 nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &updates.commitment_signed);
948 check_added_monitors!(nodes[0], 1);
949 let (as_first_raa, as_first_update) = get_revoke_commit_msgs!(nodes[0], nodes[1].node.get_our_node_id());
951 nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_first_raa);
952 check_added_monitors!(nodes[1], 1);
953 let bs_second_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
954 nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_first_update);
955 check_added_monitors!(nodes[1], 1);
956 let bs_first_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
958 nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_second_updates.update_fulfill_htlcs[0]);
959 expect_payment_sent(&nodes[0], payment_preimage_2, None, false, false);
960 nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_second_updates.commitment_signed);
961 check_added_monitors!(nodes[0], 1);
962 nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_first_raa);
963 expect_payment_path_successful!(nodes[0]);
964 check_added_monitors!(nodes[0], 1);
965 let (as_second_raa, as_second_update) = get_revoke_commit_msgs!(nodes[0], nodes[1].node.get_our_node_id());
967 nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_second_raa);
968 check_added_monitors!(nodes[1], 1);
969 nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_second_update);
970 check_added_monitors!(nodes[1], 1);
971 let bs_second_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
973 nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_second_raa);
974 expect_payment_path_successful!(nodes[0]);
975 check_added_monitors!(nodes[0], 1);
979 #[cfg(feature = "std")]
980 fn update_during_chainsync_poisons_channel() {
981 let chanmon_cfgs = create_chanmon_cfgs(2);
982 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
983 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
984 let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
985 create_announced_chan_between_nodes(&nodes, 0, 1);
987 chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::UnrecoverableError);
989 assert!(std::panic::catch_unwind(|| {
990 // Returning an UnrecoverableError should always panic immediately
991 connect_blocks(&nodes[0], 1);
993 assert!(std::panic::catch_unwind(|| {
994 // ...and also poison our locks causing later use to panic as well
995 core::mem::drop(nodes);