// This file is Copyright its original authors, visible in version control
// history.
//
// This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
// or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
// You may not use this file except in accordance with one or both of these
// licenses.

//! Logic to connect off-chain channel management with on-chain transaction monitoring.
//!
//! [`ChainMonitor`] is an implementation of [`chain::Watch`] used both to process blocks and to
//! update [`ChannelMonitor`]s accordingly. If any on-chain events need further processing, it will
//! make those available as [`MonitorEvent`]s to be consumed.
//!
//! [`ChainMonitor`] is parameterized by an optional chain source, which must implement the
//! [`chain::Filter`] trait. This provides a mechanism to signal new relevant outputs back to light
//! clients, such that transactions spending those outputs are included in block data.
//!
//! [`ChainMonitor`] may be used directly to monitor channels locally or as a part of a distributed
//! setup to monitor channels remotely. In the latter case, a custom [`chain::Watch`] implementation
//! would be responsible for routing each update to a remote server and for retrieving monitor
//! events. The remote server would make use of [`ChainMonitor`] for block processing and for
//! servicing [`ChannelMonitor`] updates from the client.
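//!
//! A minimal local-monitoring sketch (assuming user-provided `broadcaster`, `fee_estimator`,
//! `logger`, and `persister` values implementing the `BroadcasterInterface`, `FeeEstimator`,
//! `Logger`, and [`Persist`] traits; when no [`chain::Filter`] is used, `None` may need an
//! explicit type annotation):
//!
//! ```ignore
//! // Construct a ChainMonitor which requires full blocks (no pre-filtering chain source).
//! let chain_monitor = ChainMonitor::new(None, &broadcaster, &logger, &fee_estimator, &persister);
//!
//! // As each block arrives, feed it to the ChainMonitor (in addition to the ChannelManager,
//! // which is fed separately) so every ChannelMonitor can react to on-chain events. The
//! // block_connected method is provided by the chain::Listen implementation below.
//! chain_monitor.block_connected(&block, height);
//! ```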

use bitcoin::blockdata::block::BlockHeader;
use bitcoin::hash_types::{Txid, BlockHash};

use crate::chain;
use crate::chain::{ChannelMonitorUpdateStatus, Filter, WatchedOutput};
use crate::chain::chaininterface::{BroadcasterInterface, FeeEstimator};
use crate::chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, Balance, MonitorEvent, TransactionOutputs, LATENCY_GRACE_PERIOD_BLOCKS};
use crate::chain::transaction::{OutPoint, TransactionData};
use crate::sign::WriteableEcdsaChannelSigner;
use crate::events;
use crate::events::{Event, EventHandler};
use crate::util::atomic_counter::AtomicCounter;
use crate::util::logger::Logger;
use crate::util::errors::APIError;
use crate::util::wakers::{Future, Notifier};
use crate::ln::channelmanager::ChannelDetails;

use crate::prelude::*;
use crate::sync::{RwLock, RwLockReadGuard, Mutex, MutexGuard};
use core::iter::FromIterator;
use core::ops::Deref;
use core::sync::atomic::{AtomicUsize, Ordering};
use bitcoin::secp256k1::PublicKey;

#[derive(Clone, Copy, Hash, PartialEq, Eq)]
/// A specific update's ID stored in a `MonitorUpdateId`, separated out to make the contents
/// entirely opaque.
enum UpdateOrigin {
	/// An update that was generated by the `ChannelManager` (via our `chain::Watch`
	/// implementation). This corresponds to an actual [`ChannelMonitorUpdate::update_id`] field
	/// and [`ChannelMonitor::get_latest_update_id`].
	OffChain(u64),
	/// An update that was generated during blockchain processing. The ID here is specific to the
	/// generating [`ChainMonitor`] and does *not* correspond to any on-disk IDs.
	ChainSync(u64),
}

/// An opaque identifier describing a specific [`Persist`] method call.
#[derive(Clone, Copy, Hash, PartialEq, Eq)]
pub struct MonitorUpdateId {
	contents: UpdateOrigin,
}

impl MonitorUpdateId {
	pub(crate) fn from_monitor_update(update: &ChannelMonitorUpdate) -> Self {
		Self { contents: UpdateOrigin::OffChain(update.update_id) }
	}
	pub(crate) fn from_new_monitor<ChannelSigner: WriteableEcdsaChannelSigner>(monitor: &ChannelMonitor<ChannelSigner>) -> Self {
		Self { contents: UpdateOrigin::OffChain(monitor.get_latest_update_id()) }
	}
}

/// `Persist` defines behavior for persisting channel monitors: this could mean
/// writing once to disk, and/or uploading to one or more backup services.
///
/// Persistence can happen in one of two ways - synchronously completing before the trait method
/// calls return or asynchronously in the background.
///
/// # For those implementing synchronous persistence
///
///  * If persistence completes fully (including any relevant `fsync()` calls), the implementation
///    should return [`ChannelMonitorUpdateStatus::Completed`], indicating normal channel operation
///    should continue.
///
///  * If persistence fails for some reason, implementations should consider returning
///    [`ChannelMonitorUpdateStatus::InProgress`] and retry all pending persistence operations in
///    the background with [`ChainMonitor::list_pending_monitor_updates`] and
///    [`ChainMonitor::get_monitor`].
///
///    Once a full [`ChannelMonitor`] has been persisted, all pending updates for that channel can
///    be marked as complete via [`ChainMonitor::channel_monitor_updated`].
///
///    If at some point no further progress can be made towards persisting the pending updates, the
///    node should simply shut down.
///
///  * If the persistence has failed and cannot be retried further (e.g. because of some timeout),
///    [`ChannelMonitorUpdateStatus::UnrecoverableError`] can be used, though this will result in
///    an immediate panic and future operations in LDK generally failing.
///
/// # For those implementing asynchronous persistence
///
///  All calls should generally spawn a background task and immediately return
///  [`ChannelMonitorUpdateStatus::InProgress`]. Once the update completes,
///  [`ChainMonitor::channel_monitor_updated`] should be called with the corresponding
///  [`MonitorUpdateId`].
///
///  Note that unlike the direct [`chain::Watch`] interface,
///  [`ChainMonitor::channel_monitor_updated`] must be called once for *each* update which occurs.
///
///  If at some point no further progress can be made towards persisting a pending update, the node
///  should simply shut down.
///
/// # Using remote watchtowers
///
/// Watchtowers may be updated as a part of an implementation of this trait, utilizing the async
/// update process described above while the watchtower is being updated. The following methods are
/// provided for building transactions for a watchtower:
/// [`ChannelMonitor::initial_counterparty_commitment_tx`],
/// [`ChannelMonitor::counterparty_commitment_txs_from_update`],
/// [`ChannelMonitor::sign_to_local_justice_tx`], [`TrustedCommitmentTransaction::revokeable_output_index`],
/// [`TrustedCommitmentTransaction::build_to_local_justice_tx`].
///
/// [`TrustedCommitmentTransaction::revokeable_output_index`]: crate::ln::chan_utils::TrustedCommitmentTransaction::revokeable_output_index
/// [`TrustedCommitmentTransaction::build_to_local_justice_tx`]: crate::ln::chan_utils::TrustedCommitmentTransaction::build_to_local_justice_tx
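///
/// # Example (synchronous persistence)
///
/// A minimal sketch of a synchronous implementation, assuming a hypothetical
/// `write_to_disk_and_fsync` helper which serializes the monitor via [`Writeable::write`]
/// (error handling elided; a failed write should return `InProgress` or `UnrecoverableError`
/// as described above):
///
/// ```ignore
/// struct DiskPersister { path: std::path::PathBuf }
///
/// impl<S: WriteableEcdsaChannelSigner> Persist<S> for DiskPersister {
/// 	fn persist_new_channel(
/// 		&self, channel_id: OutPoint, data: &ChannelMonitor<S>, _update_id: MonitorUpdateId,
/// 	) -> ChannelMonitorUpdateStatus {
/// 		// Key the file by the funding outpoint and persist the full monitor.
/// 		// Hypothetical helper: serializes `data` and fsync()s the file.
/// 		write_to_disk_and_fsync(&self.path, &channel_id, data);
/// 		ChannelMonitorUpdateStatus::Completed
/// 	}
/// 	fn update_persisted_channel(
/// 		&self, channel_id: OutPoint, _update: Option<&ChannelMonitorUpdate>,
/// 		data: &ChannelMonitor<S>, _update_id: MonitorUpdateId,
/// 	) -> ChannelMonitorUpdateStatus {
/// 		// Persisting the full (already-updated) monitor means individual
/// 		// ChannelMonitorUpdates never need to be stored separately.
/// 		write_to_disk_and_fsync(&self.path, &channel_id, data);
/// 		ChannelMonitorUpdateStatus::Completed
/// 	}
/// }
/// ```
///
/// [`Writeable::write`]: crate::util::ser::Writeable::write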
pub trait Persist<ChannelSigner: WriteableEcdsaChannelSigner> {
	/// Persist a new channel's data in response to a [`chain::Watch::watch_channel`] call. This is
	/// called by [`ChannelManager`] for new channels, or may be called directly, e.g. on startup.
	///
	/// The data can be stored any way you want, but the identifier provided by LDK is the
	/// channel's outpoint (and it is up to you to maintain a correct mapping between the outpoint
	/// and the stored channel data). Note that you **must** persist every new monitor to disk.
	///
	/// The `update_id` is used to identify this call to [`ChainMonitor::channel_monitor_updated`],
	/// if you return [`ChannelMonitorUpdateStatus::InProgress`].
	///
	/// See [`Writeable::write`] on [`ChannelMonitor`] for writing out a `ChannelMonitor`
	/// and [`ChannelMonitorUpdateStatus`] for requirements when returning errors.
	///
	/// [`ChannelManager`]: crate::ln::channelmanager::ChannelManager
	/// [`Writeable::write`]: crate::util::ser::Writeable::write
	fn persist_new_channel(&self, channel_id: OutPoint, data: &ChannelMonitor<ChannelSigner>, update_id: MonitorUpdateId) -> ChannelMonitorUpdateStatus;

	/// Update one channel's data. The provided [`ChannelMonitor`] has already applied the given
	/// update.
	///
	/// Note that on every update, you **must** persist either the [`ChannelMonitorUpdate`] or the
	/// updated monitor itself to disk/backups. See the [`Persist`] trait documentation for more
	/// details.
	///
	/// During blockchain synchronization operations, this may be called with no
	/// [`ChannelMonitorUpdate`], in which case the full [`ChannelMonitor`] needs to be persisted.
	/// Note that after the full [`ChannelMonitor`] is persisted any previous
	/// [`ChannelMonitorUpdate`]s which were persisted should be discarded - they can no longer be
	/// applied to the persisted [`ChannelMonitor`] as they were already applied.
	///
	/// If an implementer chooses to persist the updates only, they need to make
	/// sure that all the updates are applied to the `ChannelMonitors` *before*
	/// the set of channel monitors is given to the `ChannelManager`
	/// deserialization routine. See [`ChannelMonitor::update_monitor`] for
	/// applying a monitor update to a monitor. If full `ChannelMonitors` are
	/// persisted, then there is no need to persist individual updates.
	///
	/// Note that there could be a performance tradeoff between persisting complete
	/// channel monitors on every update vs. persisting only updates and applying
	/// them in batches. The size of each monitor grows `O(number of state updates)`
	/// whereas updates are small and `O(1)`.
	///
	/// The `update_id` is used to identify this call to [`ChainMonitor::channel_monitor_updated`],
	/// if you return [`ChannelMonitorUpdateStatus::InProgress`].
	///
	/// See [`Writeable::write`] on [`ChannelMonitor`] for writing out a `ChannelMonitor`,
	/// [`Writeable::write`] on [`ChannelMonitorUpdate`] for writing out an update, and
	/// [`ChannelMonitorUpdateStatus`] for requirements when returning errors.
	///
	/// [`Writeable::write`]: crate::util::ser::Writeable::write
	fn update_persisted_channel(&self, channel_id: OutPoint, update: Option<&ChannelMonitorUpdate>, data: &ChannelMonitor<ChannelSigner>, update_id: MonitorUpdateId) -> ChannelMonitorUpdateStatus;
}

struct MonitorHolder<ChannelSigner: WriteableEcdsaChannelSigner> {
	monitor: ChannelMonitor<ChannelSigner>,
	/// The full set of pending monitor updates for this Channel.
	///
	/// Note that this lock must be held during updates to prevent a race where we call
	/// update_persisted_channel, the user returns a
	/// [`ChannelMonitorUpdateStatus::InProgress`], and then calls channel_monitor_updated
	/// immediately, racing our insertion of the pending update into the contained Vec.
	///
	/// Beyond the synchronization of updates themselves, we cannot handle user events until after
	/// any chain updates have been stored on disk. Thus, we scan this list when returning updates
	/// to the ChannelManager, refusing to return any updates for a ChannelMonitor which is still
	/// being persisted fully to disk after a chain update.
	///
	/// This avoids the possibility of handling, e.g. an on-chain claim, generating a claim monitor
	/// event, resulting in the relevant ChannelManager generating a PaymentSent event and dropping
	/// the pending payment entry, and then reloading before the monitor is persisted, resulting in
	/// the ChannelManager re-adding the same payment entry, before the same block is replayed,
	/// resulting in a duplicate PaymentSent event.
	pending_monitor_updates: Mutex<Vec<MonitorUpdateId>>,
	/// The last block height at which no [`UpdateOrigin::ChainSync`] monitor updates were present
	/// in `pending_monitor_updates`.
	/// If it's been more than [`LATENCY_GRACE_PERIOD_BLOCKS`] since we started waiting on a chain
	/// sync event, we let monitor events return to `ChannelManager` because we cannot hold them up
	/// forever or we'll end up with HTLC preimages waiting to feed back into an upstream channel
	/// forever, risking funds loss.
	last_chain_persist_height: AtomicUsize,
}

impl<ChannelSigner: WriteableEcdsaChannelSigner> MonitorHolder<ChannelSigner> {
	fn has_pending_offchain_updates(&self, pending_monitor_updates_lock: &MutexGuard<Vec<MonitorUpdateId>>) -> bool {
		pending_monitor_updates_lock.iter().any(|update_id|
			if let UpdateOrigin::OffChain(_) = update_id.contents { true } else { false })
	}
	fn has_pending_chainsync_updates(&self, pending_monitor_updates_lock: &MutexGuard<Vec<MonitorUpdateId>>) -> bool {
		pending_monitor_updates_lock.iter().any(|update_id|
			if let UpdateOrigin::ChainSync(_) = update_id.contents { true } else { false })
	}
}

/// A read-only reference to a current ChannelMonitor.
///
/// Note that this holds a mutex in [`ChainMonitor`] and may block other events until it is
/// released.
pub struct LockedChannelMonitor<'a, ChannelSigner: WriteableEcdsaChannelSigner> {
	lock: RwLockReadGuard<'a, HashMap<OutPoint, MonitorHolder<ChannelSigner>>>,
	funding_txo: OutPoint,
}

impl<ChannelSigner: WriteableEcdsaChannelSigner> Deref for LockedChannelMonitor<'_, ChannelSigner> {
	type Target = ChannelMonitor<ChannelSigner>;
	fn deref(&self) -> &ChannelMonitor<ChannelSigner> {
		&self.lock.get(&self.funding_txo).expect("Checked at construction").monitor
	}
}

/// An implementation of [`chain::Watch`] for monitoring channels.
///
/// Connected and disconnected blocks must be provided to `ChainMonitor` as documented by
/// [`chain::Watch`]. May be used in conjunction with [`ChannelManager`] to monitor channels locally
/// or used independently to monitor channels remotely. See the [module-level documentation] for
/// details.
///
/// Note that `ChainMonitor` should regularly trigger rebroadcasts/fee bumps of pending claims from
/// a force-closed channel via [`rebroadcast_pending_claims`]. This is crucial in preventing certain
/// classes of pinning attacks, detecting substantial mempool feerate changes between blocks, and
/// ensuring reliability if broadcasting fails. We recommend invoking it every 30 seconds, or more
/// frequently if running in an environment with spotty connections, like on mobile.
///
/// [`ChannelManager`]: crate::ln::channelmanager::ChannelManager
/// [module-level documentation]: crate::chain::chainmonitor
/// [`rebroadcast_pending_claims`]: Self::rebroadcast_pending_claims
pub struct ChainMonitor<ChannelSigner: WriteableEcdsaChannelSigner, C: Deref, T: Deref, F: Deref, L: Deref, P: Deref>
	where C::Target: chain::Filter,
	T::Target: BroadcasterInterface,
	F::Target: FeeEstimator,
	L::Target: Logger,
	P::Target: Persist<ChannelSigner>,
{
	monitors: RwLock<HashMap<OutPoint, MonitorHolder<ChannelSigner>>>,
	/// When we generate a [`MonitorUpdateId`] for a chain-event monitor persistence, we need a
	/// unique ID, which we calculate by simply getting the next value from this counter. Note that
	/// the ID is never persisted so it's ok that they reset on restart.
	sync_persistence_id: AtomicCounter,
	chain_source: Option<C>,
	broadcaster: T,
	logger: L,
	fee_estimator: F,
	persister: P,
	/// "User-provided" (i.e. persistence-completion/-failed) [`MonitorEvent`]s. These came directly
	/// from the user and not from a [`ChannelMonitor`].
	pending_monitor_events: Mutex<Vec<(OutPoint, Vec<MonitorEvent>, Option<PublicKey>)>>,
	/// The best block height seen, used as a proxy for the passage of time.
	highest_chain_height: AtomicUsize,

	event_notifier: Notifier,
}

impl<ChannelSigner: WriteableEcdsaChannelSigner, C: Deref, T: Deref, F: Deref, L: Deref, P: Deref> ChainMonitor<ChannelSigner, C, T, F, L, P>
where C::Target: chain::Filter,
	    T::Target: BroadcasterInterface,
	    F::Target: FeeEstimator,
	    L::Target: Logger,
	    P::Target: Persist<ChannelSigner>,
{
	/// Dispatches to per-channel monitors, which are responsible for updating their on-chain view
	/// of a channel and reacting accordingly based on transactions in the given chain data. See
	/// [`ChannelMonitor::block_connected`] for details. Any HTLCs that were resolved on chain will
	/// be returned by [`chain::Watch::release_pending_monitor_events`].
	///
	/// Calls back to [`chain::Filter`] if any monitor indicated new outputs to watch. Subsequent
	/// calls must not exclude any transactions matching the new outputs nor any in-block
	/// descendants of such transactions. It is not necessary to re-fetch the block to obtain
	/// updated `txdata`.
	///
	/// Calls which represent a new blockchain tip height should set `best_height`.
	fn process_chain_data<FN>(&self, header: &BlockHeader, best_height: Option<u32>, txdata: &TransactionData, process: FN)
	where
		FN: Fn(&ChannelMonitor<ChannelSigner>, &TransactionData) -> Vec<TransactionOutputs>
	{
		let err_str = "ChannelMonitor[Update] persistence failed unrecoverably. This indicates we cannot continue normal operation and must shut down.";
		let funding_outpoints: HashSet<OutPoint> = HashSet::from_iter(self.monitors.read().unwrap().keys().cloned());
		for funding_outpoint in funding_outpoints.iter() {
			let monitor_lock = self.monitors.read().unwrap();
			if let Some(monitor_state) = monitor_lock.get(funding_outpoint) {
				if self.update_monitor_with_chain_data(header, best_height, txdata, &process, funding_outpoint, &monitor_state).is_err() {
					// Take the monitors lock for writing so that we poison it and any future
					// operations going forward fail immediately.
					core::mem::drop(monitor_state);
					core::mem::drop(monitor_lock);
					let _poison = self.monitors.write().unwrap();
					log_error!(self.logger, "{}", err_str);
					panic!("{}", err_str);
				}
			}
		}

		// Do some follow-up cleanup if any funding outpoints were added in between iterations.
		let monitor_states = self.monitors.write().unwrap();
		for (funding_outpoint, monitor_state) in monitor_states.iter() {
			if !funding_outpoints.contains(funding_outpoint) {
				if self.update_monitor_with_chain_data(header, best_height, txdata, &process, funding_outpoint, &monitor_state).is_err() {
					log_error!(self.logger, "{}", err_str);
					panic!("{}", err_str);
				}
			}
		}

		if let Some(height) = best_height {
			// If the best block height is being updated, update highest_chain_height under the
			// monitors write lock.
			let old_height = self.highest_chain_height.load(Ordering::Acquire);
			let new_height = height as usize;
			if new_height > old_height {
				self.highest_chain_height.store(new_height, Ordering::Release);
			}
		}
	}

	fn update_monitor_with_chain_data<FN>(
		&self, header: &BlockHeader, best_height: Option<u32>, txdata: &TransactionData,
		process: FN, funding_outpoint: &OutPoint, monitor_state: &MonitorHolder<ChannelSigner>
	) -> Result<(), ()> where FN: Fn(&ChannelMonitor<ChannelSigner>, &TransactionData) -> Vec<TransactionOutputs> {
		let monitor = &monitor_state.monitor;
		let mut txn_outputs;
		{
			txn_outputs = process(monitor, txdata);
			let update_id = MonitorUpdateId {
				contents: UpdateOrigin::ChainSync(self.sync_persistence_id.get_increment()),
			};
			let mut pending_monitor_updates = monitor_state.pending_monitor_updates.lock().unwrap();
			if let Some(height) = best_height {
				if !monitor_state.has_pending_chainsync_updates(&pending_monitor_updates) {
					// If there are no ChainSync persists awaiting completion, go ahead and
					// set last_chain_persist_height here - we wouldn't want the first
					// InProgress to always immediately be considered "overly delayed".
					monitor_state.last_chain_persist_height.store(height as usize, Ordering::Release);
				}
			}

			log_trace!(self.logger, "Syncing Channel Monitor for channel {}", log_funding_info!(monitor));
			match self.persister.update_persisted_channel(*funding_outpoint, None, monitor, update_id) {
				ChannelMonitorUpdateStatus::Completed =>
					log_trace!(self.logger, "Finished syncing Channel Monitor for channel {}", log_funding_info!(monitor)),
				ChannelMonitorUpdateStatus::InProgress => {
					log_debug!(self.logger, "Channel Monitor sync for channel {} in progress, holding events until completion!", log_funding_info!(monitor));
					pending_monitor_updates.push(update_id);
				},
				ChannelMonitorUpdateStatus::UnrecoverableError => {
					return Err(());
				},
			}
		}

		// Register any new outputs with the chain source for filtering, storing any dependent
		// transactions from within the block that previously had not been included in txdata.
		if let Some(ref chain_source) = self.chain_source {
			let block_hash = header.block_hash();
			for (txid, mut outputs) in txn_outputs.drain(..) {
				for (idx, output) in outputs.drain(..) {
					// Register any new outputs with the chain source for filtering
					let output = WatchedOutput {
						block_hash: Some(block_hash),
						outpoint: OutPoint { txid, index: idx as u16 },
						script_pubkey: output.script_pubkey,
					};
					chain_source.register_output(output)
				}
			}
		}
		Ok(())
	}

	/// Creates a new `ChainMonitor` used to watch on-chain activity pertaining to channels.
	///
	/// When an optional chain source implementing [`chain::Filter`] is provided, the chain monitor
	/// will call back to it indicating transactions and outputs of interest. This allows clients to
	/// pre-filter blocks or only fetch blocks matching a compact filter. Otherwise, clients may
	/// always need to fetch full blocks absent another means for determining which blocks contain
	/// transactions relevant to the watched channels.
	pub fn new(chain_source: Option<C>, broadcaster: T, logger: L, feeest: F, persister: P) -> Self {
		Self {
			monitors: RwLock::new(HashMap::new()),
			sync_persistence_id: AtomicCounter::new(),
			chain_source,
			broadcaster,
			logger,
			fee_estimator: feeest,
			persister,
			pending_monitor_events: Mutex::new(Vec::new()),
			highest_chain_height: AtomicUsize::new(0),
			event_notifier: Notifier::new(),
		}
	}

	/// Gets the balances in the contained [`ChannelMonitor`]s which are claimable on-chain or
	/// claims which are awaiting confirmation.
	///
	/// Includes the balances from each [`ChannelMonitor`] *except* those included in
	/// `ignored_channels`.
	///
	/// See [`ChannelMonitor::get_claimable_balances`] for more details on the exact criteria for
	/// inclusion in the return value.
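	///
	/// A usage sketch (assuming a hypothetical `open_channels` list of [`ChannelDetails`] whose
	/// funds are still managed off-chain and so are better queried via the `ChannelManager`):
	///
	/// ```ignore
	/// // Fetch claimable on-chain balances for everything except still-open channels.
	/// let ignored: Vec<&ChannelDetails> = open_channels.iter().collect();
	/// for balance in chain_monitor.get_claimable_balances(&ignored) {
	/// 	println!("Claimable balance: {:?}", balance);
	/// }
	/// ```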
	pub fn get_claimable_balances(&self, ignored_channels: &[&ChannelDetails]) -> Vec<Balance> {
		let mut ret = Vec::new();
		let monitor_states = self.monitors.read().unwrap();
		for (_, monitor_state) in monitor_states.iter().filter(|(funding_outpoint, _)| {
			for chan in ignored_channels {
				if chan.funding_txo.as_ref() == Some(funding_outpoint) {
					return false;
				}
			}
			true
		}) {
			ret.append(&mut monitor_state.monitor.get_claimable_balances());
		}
		ret
	}

	/// Gets the [`LockedChannelMonitor`] for a given funding outpoint, returning an `Err` if no
	/// [`ChannelMonitor`] is currently monitored for that outpoint.
	///
	/// Note that the result holds a mutex over our monitor set, and should not be held
	/// indefinitely.
	pub fn get_monitor(&self, funding_txo: OutPoint) -> Result<LockedChannelMonitor<'_, ChannelSigner>, ()> {
		let lock = self.monitors.read().unwrap();
		if lock.get(&funding_txo).is_some() {
			Ok(LockedChannelMonitor { lock, funding_txo })
		} else {
			Err(())
		}
	}

	/// Lists the funding outpoint of each [`ChannelMonitor`] being monitored.
	///
	/// Note that [`ChannelMonitor`]s are not removed when a channel is closed as they are always
	/// monitoring for on-chain state resolutions.
	pub fn list_monitors(&self) -> Vec<OutPoint> {
		self.monitors.read().unwrap().keys().map(|outpoint| *outpoint).collect()
	}

	#[cfg(not(c_bindings))]
	/// Lists the pending updates for each [`ChannelMonitor`] (by `OutPoint` being monitored).
	pub fn list_pending_monitor_updates(&self) -> HashMap<OutPoint, Vec<MonitorUpdateId>> {
		self.monitors.read().unwrap().iter().map(|(outpoint, holder)| {
			(*outpoint, holder.pending_monitor_updates.lock().unwrap().clone())
		}).collect()
	}

	#[cfg(c_bindings)]
	/// Lists the pending updates for each [`ChannelMonitor`] (by `OutPoint` being monitored).
	pub fn list_pending_monitor_updates(&self) -> Vec<(OutPoint, Vec<MonitorUpdateId>)> {
		self.monitors.read().unwrap().iter().map(|(outpoint, holder)| {
			(*outpoint, holder.pending_monitor_updates.lock().unwrap().clone())
		}).collect()
	}

	#[cfg(test)]
	pub fn remove_monitor(&self, funding_txo: &OutPoint) -> ChannelMonitor<ChannelSigner> {
		self.monitors.write().unwrap().remove(funding_txo).unwrap().monitor
	}

	/// Indicates the persistence of a [`ChannelMonitor`] has completed after
	/// [`ChannelMonitorUpdateStatus::InProgress`] was returned from an update operation.
	///
	/// Thus, the anticipated use is, at a high level:
	///  1) This [`ChainMonitor`] calls [`Persist::update_persisted_channel`] which stores the
	///     update to disk and begins updating any remote (e.g. watchtower/backup) copies,
	///     returning [`ChannelMonitorUpdateStatus::InProgress`],
	///  2) once all remote copies are updated, you call this function with the
	///     `completed_update_id` that completed, and once all pending updates have completed the
	///     channel will be re-enabled.
	//      Note that we re-enable only after `UpdateOrigin::OffChain` updates complete, we don't
	//      care about `UpdateOrigin::ChainSync` updates for the channel state being updated. We
	//      only care about `UpdateOrigin::ChainSync` for returning `MonitorEvent`s.
	///
	/// Returns an [`APIError::APIMisuseError`] if `funding_txo` does not match any currently
	/// registered [`ChannelMonitor`]s.
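	///
	/// A sketch of the asynchronous completion flow described above (assuming a hypothetical
	/// `remote_backup` client, a tokio-style `spawn`, and a `chain_monitor` handle reachable
	/// from the spawned task, e.g. via `Arc`):
	///
	/// ```ignore
	/// // Inside Persist::update_persisted_channel: start the real work in the
	/// // background; `update_id` is the MonitorUpdateId argument of that call.
	/// spawn(async move {
	/// 	remote_backup.upload(funding_txo, serialized_monitor).await;
	/// 	// Once durable everywhere, report completion so the channel can resume
	/// 	// normal operation.
	/// 	chain_monitor.channel_monitor_updated(funding_txo, update_id).unwrap();
	/// });
	/// return ChannelMonitorUpdateStatus::InProgress;
	/// ```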
	pub fn channel_monitor_updated(&self, funding_txo: OutPoint, completed_update_id: MonitorUpdateId) -> Result<(), APIError> {
		let monitors = self.monitors.read().unwrap();
		let monitor_data = if let Some(mon) = monitors.get(&funding_txo) { mon } else {
			return Err(APIError::APIMisuseError { err: format!("No ChannelMonitor matching funding outpoint {:?} found", funding_txo) });
		};
		let mut pending_monitor_updates = monitor_data.pending_monitor_updates.lock().unwrap();
		pending_monitor_updates.retain(|update_id| *update_id != completed_update_id);

		match completed_update_id {
			MonitorUpdateId { contents: UpdateOrigin::OffChain(_) } => {
				// Note that we only check for `UpdateOrigin::OffChain` failures here - if
				// we're being told that a `UpdateOrigin::OffChain` monitor update completed,
				// we only care about ensuring we don't tell the `ChannelManager` to restore
				// the channel to normal operation until all `UpdateOrigin::OffChain` updates
				// complete.
				// If there's some `UpdateOrigin::ChainSync` update still pending that's okay
				// - we can still update our channel state, just as long as we don't return
				// `MonitorEvent`s from the monitor back to the `ChannelManager` until they
				// complete.
				let monitor_is_pending_updates = monitor_data.has_pending_offchain_updates(&pending_monitor_updates);
				if monitor_is_pending_updates {
					// If there are still monitor updates pending, we cannot yet construct a
					// Completed event.
					return Ok(());
				}
				self.pending_monitor_events.lock().unwrap().push((funding_txo, vec![MonitorEvent::Completed {
					funding_txo,
					monitor_update_id: monitor_data.monitor.get_latest_update_id(),
				}], monitor_data.monitor.get_counterparty_node_id()));
			},
			MonitorUpdateId { contents: UpdateOrigin::ChainSync(_) } => {
				if !monitor_data.has_pending_chainsync_updates(&pending_monitor_updates) {
					monitor_data.last_chain_persist_height.store(self.highest_chain_height.load(Ordering::Acquire), Ordering::Release);
					// The next time release_pending_monitor_events is called, any events for this
					// ChannelMonitor will be returned.
				}
			},
		}
		self.event_notifier.notify();
		Ok(())
	}

	/// This wrapper avoids having to update some of our tests for now as they assume the direct
	/// chain::Watch API wherein we mark a monitor fully-updated by just calling
	/// channel_monitor_updated once with the highest ID.
	#[cfg(any(test, fuzzing))]
	pub fn force_channel_monitor_updated(&self, funding_txo: OutPoint, monitor_update_id: u64) {
		let monitors = self.monitors.read().unwrap();
		let counterparty_node_id = monitors.get(&funding_txo).and_then(|m| m.monitor.get_counterparty_node_id());
		self.pending_monitor_events.lock().unwrap().push((funding_txo, vec![MonitorEvent::Completed {
			funding_txo,
			monitor_update_id,
		}], counterparty_node_id));
		self.event_notifier.notify();
	}

	#[cfg(any(test, feature = "_test_utils"))]
	pub fn get_and_clear_pending_events(&self) -> Vec<events::Event> {
		use crate::events::EventsProvider;
		let events = core::cell::RefCell::new(Vec::new());
		let event_handler = |event: events::Event| events.borrow_mut().push(event);
		self.process_pending_events(&event_handler);
		events.into_inner()
	}

	/// Processes any events asynchronously in the order they were generated since the last call
	/// using the given event handler.
	///
	/// See the trait-level documentation of [`EventsProvider`] for requirements.
	///
	/// [`EventsProvider`]: crate::events::EventsProvider
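	///
	/// A usage sketch (assuming a hypothetical async `handle_event` function and an async
	/// executor such as tokio driving the returned future):
	///
	/// ```ignore
	/// chain_monitor.process_pending_events_async(|event| async move {
	/// 	// Handle SpendableOutputs, BumpTransaction, etc. without blocking other tasks.
	/// 	handle_event(event).await;
	/// }).await;
	/// ```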
	pub async fn process_pending_events_async<Future: core::future::Future, H: Fn(Event) -> Future>(
		&self, handler: H
	) {
		// Sadly we can't hold the monitors read lock through an async call. Thus we have to do a
		// crazy dance to process a monitor's events then only remove them once we've done so.
		let mons_to_process = self.monitors.read().unwrap().keys().cloned().collect::<Vec<_>>();
		for funding_txo in mons_to_process {
			let mut ev;
			super::channelmonitor::process_events_body!(
				self.monitors.read().unwrap().get(&funding_txo).map(|m| &m.monitor), ev, handler(ev).await);
		}
	}

	/// Gets a [`Future`] that completes when an event is available either via
	/// [`chain::Watch::release_pending_monitor_events`] or
	/// [`EventsProvider::process_pending_events`].
	///
	/// Note that callbacks registered on the [`Future`] MUST NOT call back into this
	/// [`ChainMonitor`] and should instead register actions to be taken later.
	///
	/// [`EventsProvider::process_pending_events`]: crate::events::EventsProvider::process_pending_events
	pub fn get_update_future(&self) -> Future {
		self.event_notifier.get_future()
	}

	/// Triggers rebroadcasts/fee-bumps of pending claims from a force-closed channel. This is
	/// crucial in preventing certain classes of pinning attacks, detecting substantial mempool
	/// feerate changes between blocks, and ensuring reliability if broadcasting fails. We recommend
	/// invoking this every 30 seconds, or more frequently if running in an environment with spotty
	/// connections, like on mobile.
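	///
	/// A scheduling sketch (assuming a tokio-style `interval` timer; note that
	/// `lightning-background-processor` already invokes this method on a timer, so manual
	/// scheduling is only needed when not using it):
	///
	/// ```ignore
	/// let mut timer = tokio::time::interval(Duration::from_secs(30));
	/// loop {
	/// 	timer.tick().await;
	/// 	// Re-broadcast and fee-bump any claims still awaiting confirmation.
	/// 	chain_monitor.rebroadcast_pending_claims();
	/// }
	/// ```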
	pub fn rebroadcast_pending_claims(&self) {
		let monitors = self.monitors.read().unwrap();
		for (_, monitor_holder) in &*monitors {
			monitor_holder.monitor.rebroadcast_pending_claims(
				&*self.broadcaster, &*self.fee_estimator, &*self.logger
			)
		}
	}
}

impl<ChannelSigner: WriteableEcdsaChannelSigner, C: Deref, T: Deref, F: Deref, L: Deref, P: Deref>
chain::Listen for ChainMonitor<ChannelSigner, C, T, F, L, P>
where
	C::Target: chain::Filter,
	T::Target: BroadcasterInterface,
	F::Target: FeeEstimator,
	L::Target: Logger,
	P::Target: Persist<ChannelSigner>,
{
	fn filtered_block_connected(&self, header: &BlockHeader, txdata: &TransactionData, height: u32) {
		log_debug!(self.logger, "New best block {} at height {} provided via block_connected", header.block_hash(), height);
		self.process_chain_data(header, Some(height), &txdata, |monitor, txdata| {
			monitor.block_connected(
				header, txdata, height, &*self.broadcaster, &*self.fee_estimator, &*self.logger)
		});
	}

	fn block_disconnected(&self, header: &BlockHeader, height: u32) {
		let monitor_states = self.monitors.read().unwrap();
		log_debug!(self.logger, "Latest block {} at height {} removed via block_disconnected", header.block_hash(), height);
		for monitor_state in monitor_states.values() {
			monitor_state.monitor.block_disconnected(
				header, height, &*self.broadcaster, &*self.fee_estimator, &*self.logger);
		}
	}
}

impl<ChannelSigner: WriteableEcdsaChannelSigner, C: Deref, T: Deref, F: Deref, L: Deref, P: Deref>
chain::Confirm for ChainMonitor<ChannelSigner, C, T, F, L, P>
where
	C::Target: chain::Filter,
	T::Target: BroadcasterInterface,
	F::Target: FeeEstimator,
	L::Target: Logger,
	P::Target: Persist<ChannelSigner>,
{
	fn transactions_confirmed(&self, header: &BlockHeader, txdata: &TransactionData, height: u32) {
		log_debug!(self.logger, "{} provided transactions confirmed at height {} in block {}", txdata.len(), height, header.block_hash());
		self.process_chain_data(header, None, txdata, |monitor, txdata| {
			monitor.transactions_confirmed(
				header, txdata, height, &*self.broadcaster, &*self.fee_estimator, &*self.logger)
		});
	}

	fn transaction_unconfirmed(&self, txid: &Txid) {
		log_debug!(self.logger, "Transaction {} reorganized out of chain", txid);
		let monitor_states = self.monitors.read().unwrap();
		for monitor_state in monitor_states.values() {
			monitor_state.monitor.transaction_unconfirmed(txid, &*self.broadcaster, &*self.fee_estimator, &*self.logger);
		}
	}

	fn best_block_updated(&self, header: &BlockHeader, height: u32) {
		log_debug!(self.logger, "New best block {} at height {} provided via best_block_updated", header.block_hash(), height);
		self.process_chain_data(header, Some(height), &[], |monitor, txdata| {
			// While in practice there shouldn't be any recursive calls when given empty txdata,
			// it's still possible if a chain::Filter implementation returns a transaction.
			debug_assert!(txdata.is_empty());
			monitor.best_block_updated(
				header, height, &*self.broadcaster, &*self.fee_estimator, &*self.logger)
		});
	}

	fn get_relevant_txids(&self) -> Vec<(Txid, Option<BlockHash>)> {
		let mut txids = Vec::new();
		let monitor_states = self.monitors.read().unwrap();
		for monitor_state in monitor_states.values() {
			txids.append(&mut monitor_state.monitor.get_relevant_txids());
		}

		txids.sort_unstable();
		txids.dedup();
		txids
	}
}

impl<ChannelSigner: WriteableEcdsaChannelSigner, C: Deref, T: Deref, F: Deref, L: Deref, P: Deref>
chain::Watch<ChannelSigner> for ChainMonitor<ChannelSigner, C, T, F, L, P>
where C::Target: chain::Filter,
	    T::Target: BroadcasterInterface,
	    F::Target: FeeEstimator,
	    L::Target: Logger,
	    P::Target: Persist<ChannelSigner>,
{
	fn watch_channel(&self, funding_outpoint: OutPoint, monitor: ChannelMonitor<ChannelSigner>) -> Result<ChannelMonitorUpdateStatus, ()> {
		let mut monitors = self.monitors.write().unwrap();
		let entry = match monitors.entry(funding_outpoint) {
			hash_map::Entry::Occupied(_) => {
				log_error!(self.logger, "Failed to add new channel data: channel monitor for given outpoint is already present");
				return Err(());
			},
			hash_map::Entry::Vacant(e) => e,
		};
		log_trace!(self.logger, "Got new ChannelMonitor for channel {}", log_funding_info!(monitor));
		let update_id = MonitorUpdateId::from_new_monitor(&monitor);
		let mut pending_monitor_updates = Vec::new();
		let persist_res = self.persister.persist_new_channel(funding_outpoint, &monitor, update_id);
		match persist_res {
			ChannelMonitorUpdateStatus::InProgress => {
				log_info!(self.logger, "Persistence of new ChannelMonitor for channel {} in progress", log_funding_info!(monitor));
				pending_monitor_updates.push(update_id);
			},
			ChannelMonitorUpdateStatus::Completed => {
				log_info!(self.logger, "Persistence of new ChannelMonitor for channel {} completed", log_funding_info!(monitor));
			},
			ChannelMonitorUpdateStatus::UnrecoverableError => {
				let err_str = "ChannelMonitor[Update] persistence failed unrecoverably. This indicates we cannot continue normal operation and must shut down.";
				log_error!(self.logger, "{}", err_str);
				panic!("{}", err_str);
			},
		}
		if let Some(ref chain_source) = self.chain_source {
			monitor.load_outputs_to_watch(chain_source);
		}
		entry.insert(MonitorHolder {
			monitor,
			pending_monitor_updates: Mutex::new(pending_monitor_updates),
			last_chain_persist_height: AtomicUsize::new(self.highest_chain_height.load(Ordering::Acquire)),
		});
		Ok(persist_res)
	}

	fn update_channel(&self, funding_txo: OutPoint, update: &ChannelMonitorUpdate) -> ChannelMonitorUpdateStatus {
		// Update the monitor that watches the channel referred to by the given outpoint.
		let monitors = self.monitors.read().unwrap();
		let ret = match monitors.get(&funding_txo) {
			None => {
				log_error!(self.logger, "Failed to update channel monitor: no such monitor registered");

				// We should never ever trigger this from within ChannelManager. Technically a
				// user could use this object with some proxying in between which makes this
				// possible, but in tests and fuzzing, this should be a panic.
				#[cfg(debug_assertions)]
				panic!("ChannelManager generated a channel update for a channel that was not yet registered!");
				#[cfg(not(debug_assertions))]
				ChannelMonitorUpdateStatus::InProgress
			},
			Some(monitor_state) => {
				let monitor = &monitor_state.monitor;
				log_trace!(self.logger, "Updating ChannelMonitor for channel {}", log_funding_info!(monitor));
				let update_res = monitor.update_monitor(update, &self.broadcaster, &*self.fee_estimator, &self.logger);
				if update_res.is_err() {
					log_error!(self.logger, "Failed to update ChannelMonitor for channel {}.", log_funding_info!(monitor));
				}
				// Even if updating the monitor returns an error, the monitor's state will
				// still be changed. So, persist the updated monitor despite the error.
				let update_id = MonitorUpdateId::from_monitor_update(update);
				let mut pending_monitor_updates = monitor_state.pending_monitor_updates.lock().unwrap();
				let persist_res = self.persister.update_persisted_channel(funding_txo, Some(update), monitor, update_id);
				match persist_res {
					ChannelMonitorUpdateStatus::InProgress => {
						pending_monitor_updates.push(update_id);
						log_debug!(self.logger, "Persistence of ChannelMonitorUpdate for channel {} in progress", log_funding_info!(monitor));
					},
					ChannelMonitorUpdateStatus::Completed => {
						log_debug!(self.logger, "Persistence of ChannelMonitorUpdate for channel {} completed", log_funding_info!(monitor));
					},
					ChannelMonitorUpdateStatus::UnrecoverableError => { /* we'll panic in a moment */ },
				}
				if update_res.is_err() {
					ChannelMonitorUpdateStatus::InProgress
				} else {
					persist_res
				}
			}
		};
		if let ChannelMonitorUpdateStatus::UnrecoverableError = ret {
			// Take the monitors lock for writing so that we poison it and any future
			// operations going forward fail immediately.
			core::mem::drop(monitors);
			let _poison = self.monitors.write().unwrap();
			let err_str = "ChannelMonitor[Update] persistence failed unrecoverably. This indicates we cannot continue normal operation and must shut down.";
			log_error!(self.logger, "{}", err_str);
			panic!("{}", err_str);
		}
		ret
	}

	fn release_pending_monitor_events(&self) -> Vec<(OutPoint, Vec<MonitorEvent>, Option<PublicKey>)> {
		let mut pending_monitor_events = self.pending_monitor_events.lock().unwrap().split_off(0);
		for monitor_state in self.monitors.read().unwrap().values() {
			let is_pending_monitor_update = monitor_state.has_pending_chainsync_updates(&monitor_state.pending_monitor_updates.lock().unwrap());
			if is_pending_monitor_update &&
					monitor_state.last_chain_persist_height.load(Ordering::Acquire) + LATENCY_GRACE_PERIOD_BLOCKS as usize
						> self.highest_chain_height.load(Ordering::Acquire)
			{
				log_debug!(self.logger, "A Channel Monitor sync is still in progress, refusing to provide monitor events!");
			} else {
				if is_pending_monitor_update {
					log_error!(self.logger, "A ChannelMonitor sync took longer than {} blocks to complete.", LATENCY_GRACE_PERIOD_BLOCKS);
					log_error!(self.logger, "   To avoid funds-loss, we are allowing monitor updates to be released.");
					log_error!(self.logger, "   This may cause duplicate payment events to be generated.");
				}
				let monitor_events = monitor_state.monitor.get_and_clear_pending_monitor_events();
				if monitor_events.len() > 0 {
					let monitor_outpoint = monitor_state.monitor.get_funding_txo().0;
					let counterparty_node_id = monitor_state.monitor.get_counterparty_node_id();
					pending_monitor_events.push((monitor_outpoint, monitor_events, counterparty_node_id));
				}
			}
		}
		pending_monitor_events
	}
}

impl<ChannelSigner: WriteableEcdsaChannelSigner, C: Deref, T: Deref, F: Deref, L: Deref, P: Deref> events::EventsProvider for ChainMonitor<ChannelSigner, C, T, F, L, P>
	where C::Target: chain::Filter,
	      T::Target: BroadcasterInterface,
	      F::Target: FeeEstimator,
	      L::Target: Logger,
	      P::Target: Persist<ChannelSigner>,
{
	/// Processes [`SpendableOutputs`] events produced from each [`ChannelMonitor`] upon maturity.
	///
	/// For channels featuring anchor outputs, this method will also process [`BumpTransaction`]
	/// events produced from each [`ChannelMonitor`] while there is a balance to claim onchain
	/// within each channel. As the confirmation of a commitment transaction may be critical to the
	/// safety of funds, we recommend invoking this every 30 seconds, or more frequently if running
	/// in an environment with spotty connections, like on mobile.
833         ///
834         /// An [`EventHandler`] may safely call back to the provider, though this shouldn't be needed in
835         /// order to handle these events.
836         ///
837         /// [`SpendableOutputs`]: events::Event::SpendableOutputs
838         /// [`BumpTransaction`]: events::Event::BumpTransaction
839         fn process_pending_events<H: Deref>(&self, handler: H) where H::Target: EventHandler {
840                 for monitor_state in self.monitors.read().unwrap().values() {
841                         monitor_state.monitor.process_pending_events(&handler);
842                 }
843         }
844 }
845
846 #[cfg(test)]
847 mod tests {
848         use crate::check_added_monitors;
849         use crate::{expect_payment_claimed, expect_payment_path_successful, get_event_msg};
850         use crate::{get_htlc_update_msgs, get_local_commitment_txn, get_revoke_commit_msgs, get_route_and_payment_hash, unwrap_send_err};
851         use crate::chain::{ChannelMonitorUpdateStatus, Confirm, Watch};
852         use crate::chain::channelmonitor::LATENCY_GRACE_PERIOD_BLOCKS;
853         use crate::events::{Event, MessageSendEvent, MessageSendEventsProvider};
854         use crate::ln::channelmanager::{PaymentSendFailure, PaymentId, RecipientOnionFields};
855         use crate::ln::functional_test_utils::*;
856         use crate::ln::msgs::ChannelMessageHandler;
857         use crate::util::errors::APIError;
858
859         #[test]
860         fn test_async_ooo_offchain_updates() {
861                 // Test that if we have multiple offchain updates being persisted and they complete
862                 // out-of-order, the ChainMonitor waits until all have completed before informing the
863                 // ChannelManager.
864                 let chanmon_cfgs = create_chanmon_cfgs(2);
865                 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
866                 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
867                 let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
868                 create_announced_chan_between_nodes(&nodes, 0, 1);
869
870                 // Route two payments to be claimed at the same time.
871                 let (payment_preimage_1, payment_hash_1, ..) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000);
872                 let (payment_preimage_2, payment_hash_2, ..) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000);
873
874                 chanmon_cfgs[1].persister.offchain_monitor_updates.lock().unwrap().clear();
875                 chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
876                 chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
877
878                 nodes[1].node.claim_funds(payment_preimage_1);
879                 check_added_monitors!(nodes[1], 1);
880                 nodes[1].node.claim_funds(payment_preimage_2);
881                 check_added_monitors!(nodes[1], 1);
882
883                 let persistences = chanmon_cfgs[1].persister.offchain_monitor_updates.lock().unwrap().clone();
884                 assert_eq!(persistences.len(), 1);
885                 let (funding_txo, updates) = persistences.iter().next().unwrap();
886                 assert_eq!(updates.len(), 2);
887
888                 // Note that `updates` is a HashMap, so the iteration order here is effectively random. The
889                 // test shouldn't fail either way, but an intermittent failure likely depends on that order.
890                 let mut update_iter = updates.iter();
891                 let next_update = update_iter.next().unwrap().clone();
892                 // The pending-update list should contain next_update while it is still in-progress.
893                 #[cfg(not(c_bindings))]
894                 assert!(nodes[1].chain_monitor.chain_monitor.list_pending_monitor_updates().get(funding_txo)
895                         .unwrap().contains(&next_update));
896                 #[cfg(c_bindings)]
897                 assert!(nodes[1].chain_monitor.chain_monitor.list_pending_monitor_updates().iter()
898                         .find(|(txo, _)| txo == funding_txo).unwrap().1.contains(&next_update));
899                 nodes[1].chain_monitor.chain_monitor.channel_monitor_updated(*funding_txo, next_update.clone()).unwrap();
900                 // The pending-update list should no longer contain the now-completed next_update.
901                 #[cfg(not(c_bindings))]
902                 assert!(!nodes[1].chain_monitor.chain_monitor.list_pending_monitor_updates().get(funding_txo)
903                         .unwrap().contains(&next_update));
904                 #[cfg(c_bindings)]
905                 assert!(!nodes[1].chain_monitor.chain_monitor.list_pending_monitor_updates().iter()
906                         .find(|(txo, _)| txo == funding_txo).unwrap().1.contains(&next_update));
907                 assert!(nodes[1].chain_monitor.release_pending_monitor_events().is_empty());
908                 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
909                 assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
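		// Completing the second update should release both PaymentClaimed events.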
910                 nodes[1].chain_monitor.chain_monitor.channel_monitor_updated(*funding_txo, update_iter.next().unwrap().clone()).unwrap();
911
912                 let claim_events = nodes[1].node.get_and_clear_pending_events();
913                 assert_eq!(claim_events.len(), 2);
914                 match claim_events[0] {
915                         Event::PaymentClaimed { ref payment_hash, amount_msat: 1_000_000, .. } => {
916                                 assert_eq!(payment_hash_1, *payment_hash);
917                         },
918                         _ => panic!("Unexpected event"),
919                 }
920                 match claim_events[1] {
921                         Event::PaymentClaimed { ref payment_hash, amount_msat: 1_000_000, .. } => {
922                                 assert_eq!(payment_hash_2, *payment_hash);
923                         },
924                         _ => panic!("Unexpected event"),
925                 }
926
927                 // Now manually walk the commitment_signed dance - because we claimed two payments
928                 // back-to-back it doesn't fit into the neat walk that commitment_signed_dance! does.
929
930                 let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
931                 nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]);
932                 expect_payment_sent(&nodes[0], payment_preimage_1, None, false, false);
933                 nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &updates.commitment_signed);
934                 check_added_monitors!(nodes[0], 1);
935                 let (as_first_raa, as_first_update) = get_revoke_commit_msgs!(nodes[0], nodes[1].node.get_our_node_id());
936
937                 nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_first_raa);
938                 check_added_monitors!(nodes[1], 1);
939                 let bs_second_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
940                 nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_first_update);
941                 check_added_monitors!(nodes[1], 1);
942                 let bs_first_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
943
944                 nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_second_updates.update_fulfill_htlcs[0]);
945                 expect_payment_sent(&nodes[0], payment_preimage_2, None, false, false);
946                 nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_second_updates.commitment_signed);
947                 check_added_monitors!(nodes[0], 1);
948                 nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_first_raa);
949                 expect_payment_path_successful!(nodes[0]);
950                 check_added_monitors!(nodes[0], 1);
951                 let (as_second_raa, as_second_update) = get_revoke_commit_msgs!(nodes[0], nodes[1].node.get_our_node_id());
952
953                 nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_second_raa);
954                 check_added_monitors!(nodes[1], 1);
955                 nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_second_update);
956                 check_added_monitors!(nodes[1], 1);
957                 let bs_second_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
958
959                 nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_second_raa);
960                 expect_payment_path_successful!(nodes[0]);
961                 check_added_monitors!(nodes[0], 1);
962         }
963
964         fn do_chainsync_pauses_events(block_timeout: bool) {
965                 // While a chainsync monitor update is still being persisted, any resulting MonitorEvents
966                 // should be held back rather than passed upstream to the `ChannelManager` via
967                 // `Watch::release_pending_monitor_events`. This tests that behavior and some failure modes.
968                 let chanmon_cfgs = create_chanmon_cfgs(2);
969                 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
970                 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
971                 let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
972                 let channel = create_announced_chan_between_nodes(&nodes, 0, 1);
973
974                 // Get a route for later and rebalance the channel somewhat
975                 send_payment(&nodes[0], &[&nodes[1]], 10_000_000);
976                 let (route, second_payment_hash, _, second_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 100_000);
977
978                 // First route a payment that we will claim on chain and give the recipient the preimage.
979                 let (payment_preimage, payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000);
980                 nodes[1].node.claim_funds(payment_preimage);
981                 expect_payment_claimed!(nodes[1], payment_hash, 1_000_000);
982                 nodes[1].node.get_and_clear_pending_msg_events();
983                 check_added_monitors!(nodes[1], 1);
984                 let remote_txn = get_local_commitment_txn!(nodes[1], channel.2);
985                 assert_eq!(remote_txn.len(), 2);
986
987                 // Temp-fail the block-connection persistence, which will hold back the channel-closed event
988                 chanmon_cfgs[0].persister.chain_sync_monitor_persistences.lock().unwrap().clear();
989                 chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
990
991                 // Connect B's commitment transaction, but only to the ChainMonitor/ChannelMonitor. The
992                 // channel is now closed, but the ChannelManager doesn't know that yet.
993                 let new_header = create_dummy_header(nodes[0].best_block_info().0, 0);
994                 nodes[0].chain_monitor.chain_monitor.transactions_confirmed(&new_header,
995                         &[(0, &remote_txn[0]), (1, &remote_txn[1])], nodes[0].best_block_info().1 + 1);
996                 assert!(nodes[0].chain_monitor.release_pending_monitor_events().is_empty());
997                 nodes[0].chain_monitor.chain_monitor.best_block_updated(&new_header, nodes[0].best_block_info().1 + 1);
998                 assert!(nodes[0].chain_monitor.release_pending_monitor_events().is_empty());
999
1000                 // If the ChannelManager tries to update the channel, however, the ChainMonitor will pass
1001                 // the update through to the ChannelMonitor, which will refuse it (as the channel is closed).
1002                 chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
1003                 unwrap_send_err!(nodes[0].node.send_payment_with_route(&route, second_payment_hash,
1004                                 RecipientOnionFields::secret_only(second_payment_secret), PaymentId(second_payment_hash.0)
1005                         ), false, APIError::MonitorUpdateInProgress, {});
1006                 check_added_monitors!(nodes[0], 1);
1007
1008                 // However, as the ChainMonitor is still waiting for the original persistence to complete,
1009                 // it won't yet release the MonitorEvents.
1010                 assert!(nodes[0].chain_monitor.release_pending_monitor_events().is_empty());
1011
1012                 if block_timeout {
1013                         // After LATENCY_GRACE_PERIOD_BLOCKS (three) blocks, pending MonitorEvents should be released either way.
1014                         let latest_header = create_dummy_header(nodes[0].best_block_info().0, 0);
1015                         nodes[0].chain_monitor.chain_monitor.best_block_updated(&latest_header, nodes[0].best_block_info().1 + LATENCY_GRACE_PERIOD_BLOCKS);
1016                 } else {
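			// Instead of waiting out the grace period, complete the chain-sync persistences
			// manually; the held MonitorEvents should then be released immediately.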
1017                         let persistences = chanmon_cfgs[0].persister.chain_sync_monitor_persistences.lock().unwrap().clone();
1018                         for (funding_outpoint, update_ids) in persistences {
1019                                 for update_id in update_ids {
1020                                         nodes[0].chain_monitor.chain_monitor.channel_monitor_updated(funding_outpoint, update_id).unwrap();
1021                                 }
1022                         }
1023                 }
1024
1025                 expect_payment_sent(&nodes[0], payment_preimage, None, true, false);
1026         }
1027
1028         #[test]
1029         fn chainsync_pauses_events() {
1030                 do_chainsync_pauses_events(false);
1031                 do_chainsync_pauses_events(true);
1032         }
1033
1034         #[test]
1035         #[cfg(feature = "std")]
1036         fn update_during_chainsync_poisons_channel() {
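		// Test that an UnrecoverableError returned from persistence during chain sync panics
		// immediately and poisons the ChainMonitor's locks, so any later use panics as well.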
1037                 let chanmon_cfgs = create_chanmon_cfgs(2);
1038                 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
1039                 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
1040                 let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
1041                 create_announced_chan_between_nodes(&nodes, 0, 1);
1042
1043                 chanmon_cfgs[0].persister.chain_sync_monitor_persistences.lock().unwrap().clear();
1044                 chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::UnrecoverableError);
1045
1046                 assert!(std::panic::catch_unwind(|| {
1047                         // Returning an UnrecoverableError should always panic immediately
1048                         connect_blocks(&nodes[0], 1);
1049                 }).is_err());
1050                 assert!(std::panic::catch_unwind(|| {
1051                         // ...and also poison our locks, causing later use to panic as well
1052                         core::mem::drop(nodes);
1053                 }).is_err());
1054         }
1055 }