// This file is Copyright its original authors, visible in version control
// history.
//
// This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
// or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
// You may not use this file except in accordance with one or both of these
// licenses.

//! Logic to connect off-chain channel management with on-chain transaction monitoring.
//!
//! [`ChainMonitor`] is an implementation of [`chain::Watch`] used both to process blocks and to
//! update [`ChannelMonitor`]s accordingly. If any on-chain events need further processing, it will
//! make those available as [`MonitorEvent`]s to be consumed.
//!
//! [`ChainMonitor`] is parameterized by an optional chain source, which must implement the
//! [`chain::Filter`] trait. This provides a mechanism to signal new relevant outputs back to light
//! clients, such that transactions spending those outputs are included in block data.
//!
//! [`ChainMonitor`] may be used directly to monitor channels locally or as a part of a distributed
//! setup to monitor channels remotely. In the latter case, a custom [`chain::Watch`] implementation
//! would be responsible for routing each update to a remote server and for retrieving monitor
//! events. The remote server would make use of [`ChainMonitor`] for block processing and for
//! servicing [`ChannelMonitor`] updates from the client.
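//!
//! A minimal local-monitoring sketch is below. It assumes `my_filter`, `my_broadcaster`,
//! `my_logger`, `my_fee_estimator` and `my_persister` are your own implementations of the
//! corresponding traits, and that `block`/`height` come from your block source; it is an
//! illustration rather than a drop-in snippet.
//!
//! ```ignore
//! use lightning::chain::chainmonitor::ChainMonitor;
//! use lightning::chain::Listen;
//!
//! // Construct the monitor, optionally passing a `chain::Filter` for pre-filtered block sources.
//! let chain_monitor = ChainMonitor::new(
//! 	Some(&my_filter), &my_broadcaster, &my_logger, &my_fee_estimator, &my_persister);
//!
//! // Feed every connected block to it, e.g. via the `chain::Listen` interface.
//! chain_monitor.block_connected(&block, height);
//! ```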

use bitcoin::blockdata::block::BlockHeader;
use bitcoin::hash_types::{Txid, BlockHash};

use crate::chain;
use crate::chain::{ChannelMonitorUpdateStatus, Filter, WatchedOutput};
use crate::chain::chaininterface::{BroadcasterInterface, FeeEstimator};
use crate::chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, Balance, MonitorEvent, TransactionOutputs, LATENCY_GRACE_PERIOD_BLOCKS};
use crate::chain::transaction::{OutPoint, TransactionData};
use crate::sign::WriteableEcdsaChannelSigner;
use crate::events;
use crate::events::{Event, EventHandler};
use crate::util::atomic_counter::AtomicCounter;
use crate::util::logger::Logger;
use crate::util::errors::APIError;
use crate::util::wakers::{Future, Notifier};
use crate::ln::channelmanager::ChannelDetails;

use crate::prelude::*;
use crate::sync::{RwLock, RwLockReadGuard, Mutex, MutexGuard};
use core::iter::FromIterator;
use core::ops::Deref;
use core::sync::atomic::{AtomicUsize, Ordering};
use bitcoin::secp256k1::PublicKey;

#[derive(Clone, Copy, Hash, PartialEq, Eq)]
/// A specific update's ID stored in a `MonitorUpdateId`, separated out to make the contents
/// opaque.
enum UpdateOrigin {
	/// An update that was generated by the `ChannelManager` (via our `chain::Watch`
	/// implementation). This corresponds to an actual [`ChannelMonitorUpdate::update_id`] field
	/// and [`ChannelMonitor::get_latest_update_id`].
	OffChain(u64),
	/// An update that was generated during blockchain processing. The ID here is specific to the
	/// generating [`ChainMonitor`] and does *not* correspond to any on-disk IDs.
	ChainSync(u64),
}

/// An opaque identifier describing a specific [`Persist`] method call.
#[derive(Clone, Copy, Hash, PartialEq, Eq)]
pub struct MonitorUpdateId {
	contents: UpdateOrigin,
}

impl MonitorUpdateId {
	pub(crate) fn from_monitor_update(update: &ChannelMonitorUpdate) -> Self {
		Self { contents: UpdateOrigin::OffChain(update.update_id) }
	}
	pub(crate) fn from_new_monitor<ChannelSigner: WriteableEcdsaChannelSigner>(monitor: &ChannelMonitor<ChannelSigner>) -> Self {
		Self { contents: UpdateOrigin::OffChain(monitor.get_latest_update_id()) }
	}
}

/// `Persist` defines behavior for persisting channel monitors: this could mean
/// writing once to disk, and/or uploading to one or more backup services.
///
/// Each method can return two possible values:
///  * If persistence (including any relevant `fsync()` calls) happens immediately, the
///    implementation should return [`ChannelMonitorUpdateStatus::Completed`], indicating normal
///    channel operation should continue.
///
///  * If persistence happens asynchronously, implementations can return
///    [`ChannelMonitorUpdateStatus::InProgress`] while the update continues in the background.
///    Once the update completes, [`ChainMonitor::channel_monitor_updated`] should be called with
///    the corresponding [`MonitorUpdateId`].
///
///    Note that unlike the direct [`chain::Watch`] interface,
///    [`ChainMonitor::channel_monitor_updated`] must be called once for *each* update which occurs.
///
/// If persistence fails for some reason, implementations should still return
/// [`ChannelMonitorUpdateStatus::InProgress`] and attempt to shut down or otherwise resolve the
/// situation.
///
/// Third-party watchtowers may be built as a part of an implementation of this trait, with the
/// advantage that you can control whether to resume channel operation depending on if an update
/// has been persisted to a watchtower. For this, you may find the following methods useful:
/// [`ChannelMonitor::initial_counterparty_commitment_tx`],
/// [`ChannelMonitor::counterparty_commitment_txs_from_update`],
/// [`ChannelMonitor::sign_to_local_justice_tx`], [`TrustedCommitmentTransaction::revokeable_output_index`],
/// [`TrustedCommitmentTransaction::build_to_local_justice_tx`].
///
/// [`TrustedCommitmentTransaction::revokeable_output_index`]: crate::ln::chan_utils::TrustedCommitmentTransaction::revokeable_output_index
/// [`TrustedCommitmentTransaction::build_to_local_justice_tx`]: crate::ln::chan_utils::TrustedCommitmentTransaction::build_to_local_justice_tx
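///
/// The following is a minimal synchronous implementation sketch; `MyStore` and its `write`
/// helper are hypothetical stand-ins for your storage backend, and error handling is elided:
///
/// ```ignore
/// struct MyStore { /* e.g. a handle to your key-value store */ }
///
/// impl<ChannelSigner: WriteableEcdsaChannelSigner> Persist<ChannelSigner> for MyStore {
/// 	fn persist_new_channel(&self, channel_id: OutPoint, data: &ChannelMonitor<ChannelSigner>,
/// 		_update_id: MonitorUpdateId,
/// 	) -> ChannelMonitorUpdateStatus {
/// 		// Serialize the full monitor and durably store it under the funding outpoint.
/// 		self.write(&channel_id, &data.encode());
/// 		ChannelMonitorUpdateStatus::Completed
/// 	}
/// 	fn update_persisted_channel(&self, channel_id: OutPoint, _update: Option<&ChannelMonitorUpdate>,
/// 		data: &ChannelMonitor<ChannelSigner>, _update_id: MonitorUpdateId,
/// 	) -> ChannelMonitorUpdateStatus {
/// 		// Re-persisting the full, already-updated monitor on every call is always sufficient.
/// 		self.write(&channel_id, &data.encode());
/// 		ChannelMonitorUpdateStatus::Completed
/// 	}
/// }
/// ```
///
/// Returning [`ChannelMonitorUpdateStatus::Completed`] here assumes the write (including any
/// `fsync`) finished before returning; a backend which persists in the background would instead
/// return [`ChannelMonitorUpdateStatus::InProgress`] and later call
/// [`ChainMonitor::channel_monitor_updated`] with the provided [`MonitorUpdateId`].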
pub trait Persist<ChannelSigner: WriteableEcdsaChannelSigner> {
	/// Persist a new channel's data in response to a [`chain::Watch::watch_channel`] call. This is
	/// called by [`ChannelManager`] for new channels, or may be called directly, e.g. on startup.
	///
	/// The data can be stored any way you want, but the identifier provided by LDK is the
	/// channel's outpoint (and it is up to you to maintain a correct mapping between the outpoint
	/// and the stored channel data). Note that you **must** persist every new monitor to disk.
	///
	/// The `update_id` is used to identify this call to [`ChainMonitor::channel_monitor_updated`],
	/// if you return [`ChannelMonitorUpdateStatus::InProgress`].
	///
	/// See [`Writeable::write`] on [`ChannelMonitor`] for writing out a `ChannelMonitor`
	/// and [`ChannelMonitorUpdateStatus`] for requirements when returning errors.
	///
	/// [`ChannelManager`]: crate::ln::channelmanager::ChannelManager
	/// [`Writeable::write`]: crate::util::ser::Writeable::write
	fn persist_new_channel(&self, channel_id: OutPoint, data: &ChannelMonitor<ChannelSigner>, update_id: MonitorUpdateId) -> ChannelMonitorUpdateStatus;

	/// Update one channel's data. The provided [`ChannelMonitor`] has already applied the given
	/// update.
	///
	/// Note that on every update, you **must** persist either the [`ChannelMonitorUpdate`] or the
	/// updated monitor itself to disk/backups. See the [`Persist`] trait documentation for more
	/// details.
	///
	/// During blockchain synchronization operations, this may be called with no
	/// [`ChannelMonitorUpdate`], in which case the full [`ChannelMonitor`] needs to be persisted.
	/// Note that after the full [`ChannelMonitor`] is persisted any previous
	/// [`ChannelMonitorUpdate`]s which were persisted should be discarded - they can no longer be
	/// applied to the persisted [`ChannelMonitor`] as they were already applied.
	///
	/// If an implementer chooses to persist the updates only, they need to make
	/// sure that all the updates are applied to the `ChannelMonitors` *before*
	/// the set of channel monitors is given to the `ChannelManager`
	/// deserialization routine. See [`ChannelMonitor::update_monitor`] for
	/// applying a monitor update to a monitor. If full `ChannelMonitors` are
	/// persisted, then there is no need to persist individual updates.
	///
	/// Note that there could be a performance tradeoff between persisting complete
	/// channel monitors on every update vs. persisting only updates and applying
	/// them in batches. The size of each monitor grows `O(number of state updates)`
	/// whereas updates are small and `O(1)`.
	///
	/// The `update_id` is used to identify this call to [`ChainMonitor::channel_monitor_updated`],
	/// if you return [`ChannelMonitorUpdateStatus::InProgress`].
	///
	/// See [`Writeable::write`] on [`ChannelMonitor`] for writing out a `ChannelMonitor`,
	/// [`Writeable::write`] on [`ChannelMonitorUpdate`] for writing out an update, and
	/// [`ChannelMonitorUpdateStatus`] for requirements when returning errors.
	///
	/// [`Writeable::write`]: crate::util::ser::Writeable::write
	fn update_persisted_channel(&self, channel_id: OutPoint, update: Option<&ChannelMonitorUpdate>, data: &ChannelMonitor<ChannelSigner>, update_id: MonitorUpdateId) -> ChannelMonitorUpdateStatus;
}

struct MonitorHolder<ChannelSigner: WriteableEcdsaChannelSigner> {
	monitor: ChannelMonitor<ChannelSigner>,
	/// The full set of pending monitor updates for this Channel.
	///
	/// Note that this lock must be held during updates to prevent a race where we call
	/// update_persisted_channel, the user returns a
	/// [`ChannelMonitorUpdateStatus::InProgress`], and then calls channel_monitor_updated
	/// immediately, racing our insertion of the pending update into the contained Vec.
	///
	/// Beyond the synchronization of updates themselves, we cannot handle user events until after
	/// any chain updates have been stored on disk. Thus, we scan this list when returning updates
	/// to the ChannelManager, refusing to return any updates for a ChannelMonitor which is still
	/// being persisted fully to disk after a chain update.
	///
	/// This avoids the possibility of handling, e.g. an on-chain claim, generating a claim monitor
	/// event, resulting in the relevant ChannelManager generating a PaymentSent event and dropping
	/// the pending payment entry, and then reloading before the monitor is persisted, resulting in
	/// the ChannelManager re-adding the same payment entry, before the same block is replayed,
	/// resulting in a duplicate PaymentSent event.
	pending_monitor_updates: Mutex<Vec<MonitorUpdateId>>,
	/// The last block height at which no [`UpdateOrigin::ChainSync`] monitor updates were present
	/// in `pending_monitor_updates`.
	/// If it's been more than [`LATENCY_GRACE_PERIOD_BLOCKS`] since we started waiting on a chain
	/// sync event, we let monitor events return to `ChannelManager` because we cannot hold them up
	/// forever or we'll end up with HTLC preimages waiting to feed back into an upstream channel
	/// forever, risking funds loss.
	last_chain_persist_height: AtomicUsize,
}

impl<ChannelSigner: WriteableEcdsaChannelSigner> MonitorHolder<ChannelSigner> {
	fn has_pending_offchain_updates(&self, pending_monitor_updates_lock: &MutexGuard<Vec<MonitorUpdateId>>) -> bool {
		pending_monitor_updates_lock.iter().any(|update_id|
			if let UpdateOrigin::OffChain(_) = update_id.contents { true } else { false })
	}
	fn has_pending_chainsync_updates(&self, pending_monitor_updates_lock: &MutexGuard<Vec<MonitorUpdateId>>) -> bool {
		pending_monitor_updates_lock.iter().any(|update_id|
			if let UpdateOrigin::ChainSync(_) = update_id.contents { true } else { false })
	}
}

/// A read-only reference to a current ChannelMonitor.
///
/// Note that this holds a mutex in [`ChainMonitor`] and may block other events until it is
/// released.
pub struct LockedChannelMonitor<'a, ChannelSigner: WriteableEcdsaChannelSigner> {
	lock: RwLockReadGuard<'a, HashMap<OutPoint, MonitorHolder<ChannelSigner>>>,
	funding_txo: OutPoint,
}

impl<ChannelSigner: WriteableEcdsaChannelSigner> Deref for LockedChannelMonitor<'_, ChannelSigner> {
	type Target = ChannelMonitor<ChannelSigner>;
	fn deref(&self) -> &ChannelMonitor<ChannelSigner> {
		&self.lock.get(&self.funding_txo).expect("Checked at construction").monitor
	}
}

/// An implementation of [`chain::Watch`] for monitoring channels.
///
/// Connected and disconnected blocks must be provided to `ChainMonitor` as documented by
/// [`chain::Watch`]. May be used in conjunction with [`ChannelManager`] to monitor channels locally
/// or used independently to monitor channels remotely. See the [module-level documentation] for
/// details.
///
/// Note that `ChainMonitor` should regularly trigger rebroadcasts/fee bumps of pending claims from
/// a force-closed channel. This is crucial in preventing certain classes of pinning attacks,
/// detecting substantial mempool feerate changes between blocks, and ensuring reliability if
/// broadcasting fails. We recommend invoking [`rebroadcast_pending_claims`] every 30 seconds, or
/// more often if running in an environment with spotty connections, like on mobile.
///
/// [`ChannelManager`]: crate::ln::channelmanager::ChannelManager
/// [module-level documentation]: crate::chain::chainmonitor
/// [`rebroadcast_pending_claims`]: Self::rebroadcast_pending_claims
pub struct ChainMonitor<ChannelSigner: WriteableEcdsaChannelSigner, C: Deref, T: Deref, F: Deref, L: Deref, P: Deref>
	where C::Target: chain::Filter,
		T::Target: BroadcasterInterface,
		F::Target: FeeEstimator,
		P::Target: Persist<ChannelSigner>,
{
	monitors: RwLock<HashMap<OutPoint, MonitorHolder<ChannelSigner>>>,
	/// When we generate a [`MonitorUpdateId`] for a chain-event monitor persistence, we need a
	/// unique ID, which we calculate by simply getting the next value from this counter. Note that
	/// the ID is never persisted so it's ok that they reset on restart.
	sync_persistence_id: AtomicCounter,
	chain_source: Option<C>,
	/// "User-provided" (ie persistence-completion/-failed) [`MonitorEvent`]s. These came directly
	/// from the user and not from a [`ChannelMonitor`].
	pending_monitor_events: Mutex<Vec<(OutPoint, Vec<MonitorEvent>, Option<PublicKey>)>>,
	/// The best block height seen, used as a proxy for the passage of time.
	highest_chain_height: AtomicUsize,

	event_notifier: Notifier,
}

impl<ChannelSigner: WriteableEcdsaChannelSigner, C: Deref, T: Deref, F: Deref, L: Deref, P: Deref> ChainMonitor<ChannelSigner, C, T, F, L, P>
	where C::Target: chain::Filter,
		T::Target: BroadcasterInterface,
		F::Target: FeeEstimator,
		P::Target: Persist<ChannelSigner>,
{
	/// Dispatches to per-channel monitors, which are responsible for updating their on-chain view
	/// of a channel and reacting accordingly based on transactions in the given chain data. See
	/// [`ChannelMonitor::block_connected`] for details. Any HTLCs that were resolved on chain will
	/// be returned by [`chain::Watch::release_pending_monitor_events`].
	///
	/// Calls back to [`chain::Filter`] if any monitor indicated new outputs to watch. Subsequent
	/// calls must not exclude any transactions matching the new outputs nor any in-block
	/// descendants of such transactions. It is not necessary to re-fetch the block to obtain
	/// updated `txdata`.
	///
	/// Calls which represent a new blockchain tip height should set `best_height`.
	fn process_chain_data<FN>(&self, header: &BlockHeader, best_height: Option<u32>, txdata: &TransactionData, process: FN)
	where
		FN: Fn(&ChannelMonitor<ChannelSigner>, &TransactionData) -> Vec<TransactionOutputs>
	{
		let funding_outpoints: HashSet<OutPoint> = HashSet::from_iter(self.monitors.read().unwrap().keys().cloned());
		for funding_outpoint in funding_outpoints.iter() {
			let monitor_lock = self.monitors.read().unwrap();
			if let Some(monitor_state) = monitor_lock.get(funding_outpoint) {
				self.update_monitor_with_chain_data(header, best_height, txdata, &process, funding_outpoint, &monitor_state);
			}
		}

		// Do some follow-up cleanup if any funding outpoints were added in between iterations.
		let monitor_states = self.monitors.write().unwrap();
		for (funding_outpoint, monitor_state) in monitor_states.iter() {
			if !funding_outpoints.contains(funding_outpoint) {
				self.update_monitor_with_chain_data(header, best_height, txdata, &process, funding_outpoint, &monitor_state);
			}
		}

		if let Some(height) = best_height {
			// If the best block height is being updated, update highest_chain_height under the
			// monitors write lock.
			let old_height = self.highest_chain_height.load(Ordering::Acquire);
			let new_height = height as usize;
			if new_height > old_height {
				self.highest_chain_height.store(new_height, Ordering::Release);
			}
		}
	}

	fn update_monitor_with_chain_data<FN>(&self, header: &BlockHeader, best_height: Option<u32>, txdata: &TransactionData, process: FN, funding_outpoint: &OutPoint, monitor_state: &MonitorHolder<ChannelSigner>) where FN: Fn(&ChannelMonitor<ChannelSigner>, &TransactionData) -> Vec<TransactionOutputs> {
		let monitor = &monitor_state.monitor;
		let mut txn_outputs;
		{
			txn_outputs = process(monitor, txdata);
			let update_id = MonitorUpdateId {
				contents: UpdateOrigin::ChainSync(self.sync_persistence_id.get_increment()),
			};
			let mut pending_monitor_updates = monitor_state.pending_monitor_updates.lock().unwrap();
			if let Some(height) = best_height {
				if !monitor_state.has_pending_chainsync_updates(&pending_monitor_updates) {
					// If there are no ChainSync persists awaiting completion, go ahead and
					// set last_chain_persist_height here - we wouldn't want the first
					// InProgress to always immediately be considered "overly delayed".
					monitor_state.last_chain_persist_height.store(height as usize, Ordering::Release);
				}
			}

			log_trace!(self.logger, "Syncing Channel Monitor for channel {}", log_funding_info!(monitor));
			match self.persister.update_persisted_channel(*funding_outpoint, None, monitor, update_id) {
				ChannelMonitorUpdateStatus::Completed =>
					log_trace!(self.logger, "Finished syncing Channel Monitor for channel {}", log_funding_info!(monitor)),
				ChannelMonitorUpdateStatus::InProgress => {
					log_debug!(self.logger, "Channel Monitor sync for channel {} in progress, holding events until completion!", log_funding_info!(monitor));
					pending_monitor_updates.push(update_id);
				},
			}
		}

		// Register any new outputs with the chain source for filtering, storing any dependent
		// transactions from within the block that previously had not been included in txdata.
		if let Some(ref chain_source) = self.chain_source {
			let block_hash = header.block_hash();
			for (txid, mut outputs) in txn_outputs.drain(..) {
				for (idx, output) in outputs.drain(..) {
					// Register any new outputs with the chain source for filtering
					let output = WatchedOutput {
						block_hash: Some(block_hash),
						outpoint: OutPoint { txid, index: idx as u16 },
						script_pubkey: output.script_pubkey,
					};
					chain_source.register_output(output)
				}
			}
		}
	}

	/// Creates a new `ChainMonitor` used to watch on-chain activity pertaining to channels.
	///
	/// When an optional chain source implementing [`chain::Filter`] is provided, the chain monitor
	/// will call back to it indicating transactions and outputs of interest. This allows clients to
	/// pre-filter blocks or only fetch blocks matching a compact filter. Otherwise, clients may
	/// always need to fetch full blocks absent another means for determining which blocks contain
	/// transactions relevant to the watched channels.
	pub fn new(chain_source: Option<C>, broadcaster: T, logger: L, feeest: F, persister: P) -> Self {
		Self {
			monitors: RwLock::new(HashMap::new()),
			sync_persistence_id: AtomicCounter::new(),
			fee_estimator: feeest,
			pending_monitor_events: Mutex::new(Vec::new()),
			highest_chain_height: AtomicUsize::new(0),
			event_notifier: Notifier::new(),
		}
	}

	/// Gets the balances in the contained [`ChannelMonitor`]s which are claimable on-chain or
	/// claims which are awaiting confirmation.
	///
	/// Includes the balances from each [`ChannelMonitor`] *except* those included in
	/// `ignored_channels`.
	///
	/// See [`ChannelMonitor::get_claimable_balances`] for more details on the exact criteria for
	/// inclusion in the return value.
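	///
	/// For example, to total up all claimable funds while ignoring channels that are still open
	/// (a sketch; `chain_monitor` and `channel_manager` are assumed to exist in the caller):
	///
	/// ```ignore
	/// let open_channels = channel_manager.list_channels();
	/// let ignored: Vec<&ChannelDetails> = open_channels.iter().collect();
	/// let total_claimable_sat: u64 = chain_monitor.get_claimable_balances(&ignored)
	/// 	.iter().map(|balance| balance.claimable_amount_satoshis()).sum();
	/// ```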
	pub fn get_claimable_balances(&self, ignored_channels: &[&ChannelDetails]) -> Vec<Balance> {
		let mut ret = Vec::new();
		let monitor_states = self.monitors.read().unwrap();
		for (_, monitor_state) in monitor_states.iter().filter(|(funding_outpoint, _)| {
			for chan in ignored_channels {
				if chan.funding_txo.as_ref() == Some(funding_outpoint) {
					return false;
				}
			}
			true
		}) {
			ret.append(&mut monitor_state.monitor.get_claimable_balances());
		}
		ret
	}

	/// Gets the [`LockedChannelMonitor`] for a given funding outpoint, returning an `Err` if no
	/// such [`ChannelMonitor`] is currently being monitored for.
	///
	/// Note that the result holds a mutex over our monitor set, and should not be held
	/// indefinitely.
	pub fn get_monitor(&self, funding_txo: OutPoint) -> Result<LockedChannelMonitor<'_, ChannelSigner>, ()> {
		let lock = self.monitors.read().unwrap();
		if lock.get(&funding_txo).is_some() {
			Ok(LockedChannelMonitor { lock, funding_txo })
		} else {
			Err(())
		}
	}

	/// Lists the funding outpoint of each [`ChannelMonitor`] being monitored.
	///
	/// Note that [`ChannelMonitor`]s are not removed when a channel is closed as they are always
	/// monitoring for on-chain state resolutions.
	pub fn list_monitors(&self) -> Vec<OutPoint> {
		self.monitors.read().unwrap().keys().map(|outpoint| *outpoint).collect()
	}

	#[cfg(not(c_bindings))]
	/// Lists the pending updates for each [`ChannelMonitor`] (by `OutPoint` being monitored).
	pub fn list_pending_monitor_updates(&self) -> HashMap<OutPoint, Vec<MonitorUpdateId>> {
		self.monitors.read().unwrap().iter().map(|(outpoint, holder)| {
			(*outpoint, holder.pending_monitor_updates.lock().unwrap().clone())
		}).collect()
	}

	#[cfg(c_bindings)]
	/// Lists the pending updates for each [`ChannelMonitor`] (by `OutPoint` being monitored).
	pub fn list_pending_monitor_updates(&self) -> Vec<(OutPoint, Vec<MonitorUpdateId>)> {
		self.monitors.read().unwrap().iter().map(|(outpoint, holder)| {
			(*outpoint, holder.pending_monitor_updates.lock().unwrap().clone())
		}).collect()
	}

	pub fn remove_monitor(&self, funding_txo: &OutPoint) -> ChannelMonitor<ChannelSigner> {
		self.monitors.write().unwrap().remove(funding_txo).unwrap().monitor
	}

	/// Indicates the persistence of a [`ChannelMonitor`] has completed after
	/// [`ChannelMonitorUpdateStatus::InProgress`] was returned from an update operation.
	///
	/// Thus, the anticipated use is, at a high level:
	///  1) This [`ChainMonitor`] calls [`Persist::update_persisted_channel`] which stores the
	///     update to disk and begins updating any remote (e.g. watchtower/backup) copies,
	///     returning [`ChannelMonitorUpdateStatus::InProgress`],
	///  2) once all remote copies are updated, you call this function with the
	///     `completed_update_id` that completed, and once all pending updates have completed the
	///     channel will be re-enabled.
	//
	// Note that we re-enable only after `UpdateOrigin::OffChain` updates complete; we don't
	// care about `UpdateOrigin::ChainSync` updates for the channel state being updated. We
	// only care about `UpdateOrigin::ChainSync` for returning `MonitorEvent`s.
	///
	/// Returns an [`APIError::APIMisuseError`] if `funding_txo` does not match any currently
	/// registered [`ChannelMonitor`]s.
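	///
	/// A sketch of the completion side of an asynchronous [`Persist`] implementation follows;
	/// the `background_persist` helper and its completion queue are hypothetical:
	///
	/// ```ignore
	/// // In `Persist::update_persisted_channel`, hand the work off and report it as in-flight.
	/// background_persist.enqueue(channel_id, data.encode(), update_id);
	/// return ChannelMonitorUpdateStatus::InProgress;
	///
	/// // Later, once the background write has been fsync'd, un-block the channel.
	/// for (funding_txo, update_id) in background_persist.drain_completed() {
	/// 	chain_monitor.channel_monitor_updated(funding_txo, update_id)?;
	/// }
	/// ```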
	pub fn channel_monitor_updated(&self, funding_txo: OutPoint, completed_update_id: MonitorUpdateId) -> Result<(), APIError> {
		let monitors = self.monitors.read().unwrap();
		let monitor_data = if let Some(mon) = monitors.get(&funding_txo) { mon } else {
			return Err(APIError::APIMisuseError { err: format!("No ChannelMonitor matching funding outpoint {:?} found", funding_txo) });
		};
		let mut pending_monitor_updates = monitor_data.pending_monitor_updates.lock().unwrap();
		pending_monitor_updates.retain(|update_id| *update_id != completed_update_id);

		match completed_update_id {
			MonitorUpdateId { contents: UpdateOrigin::OffChain(_) } => {
				// Note that we only check for `UpdateOrigin::OffChain` failures here - if
				// we're being told that a `UpdateOrigin::OffChain` monitor update completed,
				// we only care about ensuring we don't tell the `ChannelManager` to restore
				// the channel to normal operation until all `UpdateOrigin::OffChain` updates
				// have completed.
				// If there's some `UpdateOrigin::ChainSync` update still pending that's okay
				// - we can still update our channel state, just as long as we don't return
				// `MonitorEvent`s from the monitor back to the `ChannelManager` until they
				// have completed as well.
				let monitor_is_pending_updates = monitor_data.has_pending_offchain_updates(&pending_monitor_updates);
				if monitor_is_pending_updates {
					// If there are still monitor updates pending, we cannot yet construct a
					// Completed event.
					return Ok(());
				}
				self.pending_monitor_events.lock().unwrap().push((funding_txo, vec![MonitorEvent::Completed {
					funding_txo,
					monitor_update_id: monitor_data.monitor.get_latest_update_id(),
				}], monitor_data.monitor.get_counterparty_node_id()));
			},
			MonitorUpdateId { contents: UpdateOrigin::ChainSync(_) } => {
				if !monitor_data.has_pending_chainsync_updates(&pending_monitor_updates) {
					monitor_data.last_chain_persist_height.store(self.highest_chain_height.load(Ordering::Acquire), Ordering::Release);
					// The next time release_pending_monitor_events is called, any events for this
					// ChannelMonitor will be returned.
				}
			},
		}
		self.event_notifier.notify();
		Ok(())
	}

	/// This wrapper avoids having to update some of our tests for now as they assume the direct
	/// chain::Watch API wherein we mark a monitor fully-updated by just calling
	/// channel_monitor_updated once with the highest ID.
	#[cfg(any(test, fuzzing))]
	pub fn force_channel_monitor_updated(&self, funding_txo: OutPoint, monitor_update_id: u64) {
		let monitors = self.monitors.read().unwrap();
		let counterparty_node_id = monitors.get(&funding_txo).and_then(|m| m.monitor.get_counterparty_node_id());
		self.pending_monitor_events.lock().unwrap().push((funding_txo, vec![MonitorEvent::Completed {
			funding_txo,
			monitor_update_id,
		}], counterparty_node_id));
		self.event_notifier.notify();
	}

	#[cfg(any(test, feature = "_test_utils"))]
	pub fn get_and_clear_pending_events(&self) -> Vec<events::Event> {
		use crate::events::EventsProvider;
		let events = core::cell::RefCell::new(Vec::new());
		let event_handler = |event: events::Event| events.borrow_mut().push(event);
		self.process_pending_events(&event_handler);
		events.into_inner()
	}

	/// Processes any events asynchronously in the order they were generated since the last call
	/// using the given event handler.
	///
	/// See the trait-level documentation of [`EventsProvider`] for requirements.
	///
	/// [`EventsProvider`]: crate::events::EventsProvider
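	///
	/// For example, from an async task (a sketch; `handle_event` is your own async
	/// event-handling routine and `chain_monitor` is assumed to be in scope):
	///
	/// ```ignore
	/// let event_handler = |event| async {
	/// 	// `handle_event` is a hypothetical async routine of yours.
	/// 	handle_event(event).await
	/// };
	/// chain_monitor.process_pending_events_async(&event_handler).await;
	/// ```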
	pub async fn process_pending_events_async<Future: core::future::Future, H: Fn(Event) -> Future>(
		// Sadly we can't hold the monitors read lock through an async call. Thus we have to do a
		// crazy dance to process a monitor's events then only remove them once we've done so.
		let mons_to_process = self.monitors.read().unwrap().keys().cloned().collect::<Vec<_>>();
		for funding_txo in mons_to_process {
			super::channelmonitor::process_events_body!(
				self.monitors.read().unwrap().get(&funding_txo).map(|m| &m.monitor), ev, handler(ev).await);
		}
	}

	/// Gets a [`Future`] that completes when an event is available either via
	/// [`chain::Watch::release_pending_monitor_events`] or
	/// [`EventsProvider::process_pending_events`].
	///
	/// Note that callbacks registered on the [`Future`] MUST NOT call back into this
	/// [`ChainMonitor`] and should instead register actions to be taken later.
	///
	/// [`EventsProvider::process_pending_events`]: crate::events::EventsProvider::process_pending_events
	pub fn get_update_future(&self) -> Future {
		self.event_notifier.get_future()
	}

	/// Triggers rebroadcasts/fee-bumps of pending claims from a force-closed channel. This is
	/// crucial in preventing certain classes of pinning attacks, detecting substantial mempool
	/// feerate changes between blocks, and ensuring reliability if broadcasting fails. We recommend
	/// invoking this every 30 seconds, or more often if running in an environment with spotty
	/// connections, like on mobile.
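	///
	/// For example, from a dedicated background task (a sketch; the 30-second interval mirrors
	/// the recommendation above and `chain_monitor` is assumed to be an `Arc<ChainMonitor<...>>`):
	///
	/// ```ignore
	/// let monitor = Arc::clone(&chain_monitor);
	/// std::thread::spawn(move || loop {
	/// 	std::thread::sleep(std::time::Duration::from_secs(30));
	/// 	monitor.rebroadcast_pending_claims();
	/// });
	/// ```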
	pub fn rebroadcast_pending_claims(&self) {
		let monitors = self.monitors.read().unwrap();
		for (_, monitor_holder) in &*monitors {
			monitor_holder.monitor.rebroadcast_pending_claims(
				&*self.broadcaster, &*self.fee_estimator, &*self.logger
			)
		}
	}
}

impl<ChannelSigner: WriteableEcdsaChannelSigner, C: Deref, T: Deref, F: Deref, L: Deref, P: Deref>
chain::Listen for ChainMonitor<ChannelSigner, C, T, F, L, P>
	where C::Target: chain::Filter,
		T::Target: BroadcasterInterface,
		F::Target: FeeEstimator,
		P::Target: Persist<ChannelSigner>,
{
	fn filtered_block_connected(&self, header: &BlockHeader, txdata: &TransactionData, height: u32) {
		log_debug!(self.logger, "New best block {} at height {} provided via block_connected", header.block_hash(), height);
		self.process_chain_data(header, Some(height), &txdata, |monitor, txdata| {
			monitor.block_connected(
				header, txdata, height, &*self.broadcaster, &*self.fee_estimator, &*self.logger)
		});
	}

	fn block_disconnected(&self, header: &BlockHeader, height: u32) {
		let monitor_states = self.monitors.read().unwrap();
		log_debug!(self.logger, "Latest block {} at height {} removed via block_disconnected", header.block_hash(), height);
		for monitor_state in monitor_states.values() {
			monitor_state.monitor.block_disconnected(
				header, height, &*self.broadcaster, &*self.fee_estimator, &*self.logger);
		}
	}
}

impl<ChannelSigner: WriteableEcdsaChannelSigner, C: Deref, T: Deref, F: Deref, L: Deref, P: Deref>
chain::Confirm for ChainMonitor<ChannelSigner, C, T, F, L, P>
	where C::Target: chain::Filter,
		T::Target: BroadcasterInterface,
		F::Target: FeeEstimator,
		P::Target: Persist<ChannelSigner>,
{
	fn transactions_confirmed(&self, header: &BlockHeader, txdata: &TransactionData, height: u32) {
		log_debug!(self.logger, "{} provided transactions confirmed at height {} in block {}", txdata.len(), height, header.block_hash());
		self.process_chain_data(header, None, txdata, |monitor, txdata| {
			monitor.transactions_confirmed(
				header, txdata, height, &*self.broadcaster, &*self.fee_estimator, &*self.logger)
		});
	}

	fn transaction_unconfirmed(&self, txid: &Txid) {
		log_debug!(self.logger, "Transaction {} reorganized out of chain", txid);
		let monitor_states = self.monitors.read().unwrap();
		for monitor_state in monitor_states.values() {
			monitor_state.monitor.transaction_unconfirmed(txid, &*self.broadcaster, &*self.fee_estimator, &*self.logger);
		}
	}

	fn best_block_updated(&self, header: &BlockHeader, height: u32) {
		log_debug!(self.logger, "New best block {} at height {} provided via best_block_updated", header.block_hash(), height);
		self.process_chain_data(header, Some(height), &[], |monitor, txdata| {
			// While in practice there shouldn't be any recursive calls when given empty txdata,
			// it's still possible if a chain::Filter implementation returns a transaction.
			debug_assert!(txdata.is_empty());
			monitor.best_block_updated(
				header, height, &*self.broadcaster, &*self.fee_estimator, &*self.logger)
		});
	}

	fn get_relevant_txids(&self) -> Vec<(Txid, Option<BlockHash>)> {
		let mut txids = Vec::new();
		let monitor_states = self.monitors.read().unwrap();
		for monitor_state in monitor_states.values() {
			txids.append(&mut monitor_state.monitor.get_relevant_txids());
		}

		txids.sort_unstable();
		txids.dedup();
		txids
	}
}

impl<ChannelSigner: WriteableEcdsaChannelSigner, C: Deref, T: Deref, F: Deref, L: Deref, P: Deref>
chain::Watch<ChannelSigner> for ChainMonitor<ChannelSigner, C, T, F, L, P>
	where C::Target: chain::Filter,
		T::Target: BroadcasterInterface,
		F::Target: FeeEstimator,
		P::Target: Persist<ChannelSigner>,
{
	/// Adds the monitor that watches the channel referred to by the given outpoint.
	///
	/// Calls back to [`chain::Filter`] with the funding transaction and outputs to watch.
	///
	/// Note that we persist the given `ChannelMonitor` while holding the `ChainMonitor`
	/// monitors lock.
	fn watch_channel(&self, funding_outpoint: OutPoint, monitor: ChannelMonitor<ChannelSigner>) -> Result<ChannelMonitorUpdateStatus, ()> {
		let mut monitors = self.monitors.write().unwrap();
		let entry = match monitors.entry(funding_outpoint) {
			hash_map::Entry::Occupied(_) => {
				log_error!(self.logger, "Failed to add new channel data: channel monitor for given outpoint is already present");
				return Err(());
			},
			hash_map::Entry::Vacant(e) => e,
		};
		log_trace!(self.logger, "Got new ChannelMonitor for channel {}", log_funding_info!(monitor));
		let update_id = MonitorUpdateId::from_new_monitor(&monitor);
		let mut pending_monitor_updates = Vec::new();
		let persist_res = self.persister.persist_new_channel(funding_outpoint, &monitor, update_id);
		match persist_res {
			ChannelMonitorUpdateStatus::InProgress => {
				log_info!(self.logger, "Persistence of new ChannelMonitor for channel {} in progress", log_funding_info!(monitor));
				pending_monitor_updates.push(update_id);
			},
			ChannelMonitorUpdateStatus::Completed => {
				log_info!(self.logger, "Persistence of new ChannelMonitor for channel {} completed", log_funding_info!(monitor));
			},
		}
		if let Some(ref chain_source) = self.chain_source {
			monitor.load_outputs_to_watch(chain_source);
		}
		entry.insert(MonitorHolder {
			monitor,
			pending_monitor_updates: Mutex::new(pending_monitor_updates),
			last_chain_persist_height: AtomicUsize::new(self.highest_chain_height.load(Ordering::Acquire)),
		});
		Ok(persist_res)
	}

	/// Note that we persist the given `ChannelMonitor` update while holding the
	/// `ChainMonitor` monitors lock.
	fn update_channel(&self, funding_txo: OutPoint, update: &ChannelMonitorUpdate) -> ChannelMonitorUpdateStatus {
		// Update the monitor that watches the channel referred to by the given outpoint.
		let monitors = self.monitors.read().unwrap();
		match monitors.get(&funding_txo) {
			None => {
				log_error!(self.logger, "Failed to update channel monitor: no such monitor registered");

				// We should never ever trigger this from within ChannelManager. Technically a
				// user could use this object with some proxying in between which makes this
				// possible, but in tests and fuzzing, this should be a panic.
				#[cfg(debug_assertions)]
				panic!("ChannelManager generated a channel update for a channel that was not yet registered!");
				#[cfg(not(debug_assertions))]
				ChannelMonitorUpdateStatus::InProgress
			},
			Some(monitor_state) => {
				let monitor = &monitor_state.monitor;
				log_trace!(self.logger, "Updating ChannelMonitor for channel {}", log_funding_info!(monitor));
				let update_res = monitor.update_monitor(update, &self.broadcaster, &*self.fee_estimator, &self.logger);
				if update_res.is_err() {
					log_error!(self.logger, "Failed to update ChannelMonitor for channel {}.", log_funding_info!(monitor));
				}
				// Even if updating the monitor returns an error, the monitor's state will
				// still be changed. So, persist the updated monitor despite the error.
				let update_id = MonitorUpdateId::from_monitor_update(update);
				let mut pending_monitor_updates = monitor_state.pending_monitor_updates.lock().unwrap();
				let persist_res = self.persister.update_persisted_channel(funding_txo, Some(update), monitor, update_id);
				match persist_res {
					ChannelMonitorUpdateStatus::InProgress => {
						pending_monitor_updates.push(update_id);
						log_debug!(self.logger, "Persistence of ChannelMonitorUpdate for channel {} in progress", log_funding_info!(monitor));
					},
					ChannelMonitorUpdateStatus::Completed => {
						log_debug!(self.logger, "Persistence of ChannelMonitorUpdate for channel {} completed", log_funding_info!(monitor));
					},
				}
				if update_res.is_err() {
					ChannelMonitorUpdateStatus::InProgress
				} else {
					persist_res
				}
			},
		}
	}

	fn release_pending_monitor_events(&self) -> Vec<(OutPoint, Vec<MonitorEvent>, Option<PublicKey>)> {
		let mut pending_monitor_events = self.pending_monitor_events.lock().unwrap().split_off(0);
		for monitor_state in self.monitors.read().unwrap().values() {
			let is_pending_monitor_update = monitor_state.has_pending_chainsync_updates(&monitor_state.pending_monitor_updates.lock().unwrap());
			if is_pending_monitor_update &&
					monitor_state.last_chain_persist_height.load(Ordering::Acquire) + LATENCY_GRACE_PERIOD_BLOCKS as usize
						> self.highest_chain_height.load(Ordering::Acquire)
			{
				log_debug!(self.logger, "A Channel Monitor sync is still in progress, refusing to provide monitor events!");
			} else {
				if is_pending_monitor_update {
					log_error!(self.logger, "A ChannelMonitor sync took longer than {} blocks to complete.", LATENCY_GRACE_PERIOD_BLOCKS);
					log_error!(self.logger, " To avoid funds-loss, we are allowing monitor updates to be released.");
					log_error!(self.logger, " This may cause duplicate payment events to be generated.");
				}
				let monitor_events = monitor_state.monitor.get_and_clear_pending_monitor_events();
				if monitor_events.len() > 0 {
					let monitor_outpoint = monitor_state.monitor.get_funding_txo().0;
					let counterparty_node_id = monitor_state.monitor.get_counterparty_node_id();
					pending_monitor_events.push((monitor_outpoint, monitor_events, counterparty_node_id));
				}
			}
		}
		pending_monitor_events
	}
}

impl<ChannelSigner: WriteableEcdsaChannelSigner, C: Deref, T: Deref, F: Deref, L: Deref, P: Deref> events::EventsProvider for ChainMonitor<ChannelSigner, C, T, F, L, P>
	where C::Target: chain::Filter,
		T::Target: BroadcasterInterface,
		F::Target: FeeEstimator,
		P::Target: Persist<ChannelSigner>,
{
	/// Processes [`SpendableOutputs`] events produced from each [`ChannelMonitor`] upon maturity.
	///
	/// For channels featuring anchor outputs, this method will also process [`BumpTransaction`]
	/// events produced from each [`ChannelMonitor`] while there is a balance to claim onchain
	/// within each channel. As the confirmation of a commitment transaction may be critical to the
	/// safety of funds, we recommend invoking this every 30 seconds, or more often if running in an
	/// environment with spotty connections, like on mobile.
	///
	/// An [`EventHandler`] may safely call back to the provider, though this shouldn't be needed in
	/// order to handle these events.
	///
	/// [`SpendableOutputs`]: events::Event::SpendableOutputs
	/// [`BumpTransaction`]: events::Event::BumpTransaction
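	///
	/// For example, events can be drained into your own handler (a sketch; `handle_event` is a
	/// hypothetical function of yours):
	///
	/// ```ignore
	/// let event_handler = |event: Event| handle_event(event);
	/// chain_monitor.process_pending_events(&event_handler);
	/// ```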
	fn process_pending_events<H: Deref>(&self, handler: H) where H::Target: EventHandler {
		for monitor_state in self.monitors.read().unwrap().values() {
			monitor_state.monitor.process_pending_events(&handler);
		}
	}
}

#[cfg(test)]
mod tests {
	use crate::check_added_monitors;
	use crate::{expect_payment_claimed, expect_payment_path_successful, get_event_msg};
	use crate::{get_htlc_update_msgs, get_local_commitment_txn, get_revoke_commit_msgs, get_route_and_payment_hash, unwrap_send_err};
	use crate::chain::{ChannelMonitorUpdateStatus, Confirm, Watch};
	use crate::chain::channelmonitor::LATENCY_GRACE_PERIOD_BLOCKS;
	use crate::events::{Event, MessageSendEvent, MessageSendEventsProvider};
	use crate::ln::channelmanager::{PaymentSendFailure, PaymentId, RecipientOnionFields};
	use crate::ln::functional_test_utils::*;
	use crate::ln::msgs::ChannelMessageHandler;
	use crate::util::errors::APIError;

	#[test]
	fn test_async_ooo_offchain_updates() {
		// Test that if we have multiple offchain updates being persisted and they complete
		// out-of-order, the ChainMonitor waits until all have completed before informing the
		// ChannelManager.
		let chanmon_cfgs = create_chanmon_cfgs(2);
		let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
		let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
		let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
		create_announced_chan_between_nodes(&nodes, 0, 1);

		// Route two payments to be claimed at the same time.
		let (payment_preimage_1, payment_hash_1, ..) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000);
		let (payment_preimage_2, payment_hash_2, ..) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000);

		chanmon_cfgs[1].persister.offchain_monitor_updates.lock().unwrap().clear();
		chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
		chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);

		nodes[1].node.claim_funds(payment_preimage_1);
		check_added_monitors!(nodes[1], 1);
		nodes[1].node.claim_funds(payment_preimage_2);
		check_added_monitors!(nodes[1], 1);

		let persistences = chanmon_cfgs[1].persister.offchain_monitor_updates.lock().unwrap().clone();
		assert_eq!(persistences.len(), 1);
		let (funding_txo, updates) = persistences.iter().next().unwrap();
		assert_eq!(updates.len(), 2);

		// Note that updates is a HashMap so the ordering here is actually random. This shouldn't
		// fail either way, but if it fails intermittently it depends on the ordering of updates.
		let mut update_iter = updates.iter();
		let next_update = update_iter.next().unwrap().clone();
		// The pending updates listed should contain the still-pending next_update.
		#[cfg(not(c_bindings))]
		assert!(nodes[1].chain_monitor.chain_monitor.list_pending_monitor_updates().get(funding_txo)
			.unwrap().contains(&next_update));
		#[cfg(c_bindings)]
		assert!(nodes[1].chain_monitor.chain_monitor.list_pending_monitor_updates().iter()
			.find(|(txo, _)| txo == funding_txo).unwrap().1.contains(&next_update));
		nodes[1].chain_monitor.chain_monitor.channel_monitor_updated(*funding_txo, next_update.clone()).unwrap();
		// The pending updates listed should no longer contain the now-completed next_update.
		#[cfg(not(c_bindings))]
		assert!(!nodes[1].chain_monitor.chain_monitor.list_pending_monitor_updates().get(funding_txo)
			.unwrap().contains(&next_update));
		#[cfg(c_bindings)]
		assert!(!nodes[1].chain_monitor.chain_monitor.list_pending_monitor_updates().iter()
			.find(|(txo, _)| txo == funding_txo).unwrap().1.contains(&next_update));
		assert!(nodes[1].chain_monitor.release_pending_monitor_events().is_empty());
		assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
		assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
		nodes[1].chain_monitor.chain_monitor.channel_monitor_updated(*funding_txo, update_iter.next().unwrap().clone()).unwrap();

		let claim_events = nodes[1].node.get_and_clear_pending_events();
		assert_eq!(claim_events.len(), 2);
		match claim_events[0] {
			Event::PaymentClaimed { ref payment_hash, amount_msat: 1_000_000, .. } => {
				assert_eq!(payment_hash_1, *payment_hash);
			},
			_ => panic!("Unexpected event"),
		}
		match claim_events[1] {
			Event::PaymentClaimed { ref payment_hash, amount_msat: 1_000_000, .. } => {
				assert_eq!(payment_hash_2, *payment_hash);
			},
			_ => panic!("Unexpected event"),
		}

		// Now manually walk the commitment signed dance - because we claimed two payments
		// back-to-back it doesn't fit into the neat walk commitment_signed_dance does.

		let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
		nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]);
		expect_payment_sent(&nodes[0], payment_preimage_1, None, false, false);
		nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &updates.commitment_signed);
		check_added_monitors!(nodes[0], 1);
		let (as_first_raa, as_first_update) = get_revoke_commit_msgs!(nodes[0], nodes[1].node.get_our_node_id());

		nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_first_raa);
		check_added_monitors!(nodes[1], 1);
		let bs_second_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
		nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_first_update);
		check_added_monitors!(nodes[1], 1);
		let bs_first_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());

		nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_second_updates.update_fulfill_htlcs[0]);
		expect_payment_sent(&nodes[0], payment_preimage_2, None, false, false);
		nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_second_updates.commitment_signed);
		check_added_monitors!(nodes[0], 1);
		nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_first_raa);
		expect_payment_path_successful!(nodes[0]);
		check_added_monitors!(nodes[0], 1);
		let (as_second_raa, as_second_update) = get_revoke_commit_msgs!(nodes[0], nodes[1].node.get_our_node_id());

		nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_second_raa);
		check_added_monitors!(nodes[1], 1);
		nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_second_update);
		check_added_monitors!(nodes[1], 1);
		let bs_second_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());

		nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_second_raa);
		expect_payment_path_successful!(nodes[0]);
		check_added_monitors!(nodes[0], 1);
	}

	fn do_chainsync_pauses_events(block_timeout: bool) {
		// When a chainsync monitor update occurs, any MonitorUpdates should be held before being
		// passed upstream to a `ChannelManager` via `Watch::release_pending_monitor_events`. This
		// tests that behavior, as well as some ways it might go wrong.
		let chanmon_cfgs = create_chanmon_cfgs(2);
		let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
		let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
		let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
		let channel = create_announced_chan_between_nodes(&nodes, 0, 1);

		// Get a route for later and rebalance the channel somewhat
		send_payment(&nodes[0], &[&nodes[1]], 10_000_000);
		let (route, second_payment_hash, _, second_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 100_000);

		// First route a payment that we will claim on chain and give the recipient the preimage.
		let (payment_preimage, payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000);
		nodes[1].node.claim_funds(payment_preimage);
		expect_payment_claimed!(nodes[1], payment_hash, 1_000_000);
		nodes[1].node.get_and_clear_pending_msg_events();
		check_added_monitors!(nodes[1], 1);
		let remote_txn = get_local_commitment_txn!(nodes[1], channel.2);
		assert_eq!(remote_txn.len(), 2);

		// Temp-fail the block connection which will hold the channel-closed event
		chanmon_cfgs[0].persister.chain_sync_monitor_persistences.lock().unwrap().clear();
		chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);

		// Connect B's commitment transaction, but only to the ChainMonitor/ChannelMonitor. The
		// channel is now closed, but the ChannelManager doesn't know that yet.
		let new_header = create_dummy_header(nodes[0].best_block_info().0, 0);
		nodes[0].chain_monitor.chain_monitor.transactions_confirmed(&new_header,
			&[(0, &remote_txn[0]), (1, &remote_txn[1])], nodes[0].best_block_info().1 + 1);
		assert!(nodes[0].chain_monitor.release_pending_monitor_events().is_empty());
		nodes[0].chain_monitor.chain_monitor.best_block_updated(&new_header, nodes[0].best_block_info().1 + 1);
		assert!(nodes[0].chain_monitor.release_pending_monitor_events().is_empty());

		// If the ChannelManager tries to update the channel, however, the ChainMonitor will pass
		// the update through to the ChannelMonitor which will refuse it (as the channel is closed).
		chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
		unwrap_send_err!(nodes[0].node.send_payment_with_route(&route, second_payment_hash,
				RecipientOnionFields::secret_only(second_payment_secret), PaymentId(second_payment_hash.0)
			), false, APIError::MonitorUpdateInProgress, {});
		check_added_monitors!(nodes[0], 1);

		// However, as the ChainMonitor is still waiting for the original persistence to complete,
		// it won't yet release the MonitorEvents.
		assert!(nodes[0].chain_monitor.release_pending_monitor_events().is_empty());

		if block_timeout {
			// After three blocks, pending MonitorEvents should be released either way.
			let latest_header = create_dummy_header(nodes[0].best_block_info().0, 0);
			nodes[0].chain_monitor.chain_monitor.best_block_updated(&latest_header, nodes[0].best_block_info().1 + LATENCY_GRACE_PERIOD_BLOCKS);
		} else {
			let persistences = chanmon_cfgs[0].persister.chain_sync_monitor_persistences.lock().unwrap().clone();
			for (funding_outpoint, update_ids) in persistences {
				for update_id in update_ids {
					nodes[0].chain_monitor.chain_monitor.channel_monitor_updated(funding_outpoint, update_id).unwrap();
				}
			}
		}

		expect_payment_sent(&nodes[0], payment_preimage, None, true, false);
	}

	#[test]
	fn chainsync_pauses_events() {
		do_chainsync_pauses_events(false);
		do_chainsync_pauses_events(true);
	}
}