1 // This file is Copyright its original authors, visible in version control
4 // This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
5 // or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
6 // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
7 // You may not use this file except in accordance with one or both of these
10 //! Logic to connect off-chain channel management with on-chain transaction monitoring.
12 //! [`ChainMonitor`] is an implementation of [`chain::Watch`] used both to process blocks and to
13 //! update [`ChannelMonitor`]s accordingly. If any on-chain events need further processing, it will
14 //! make those available as [`MonitorEvent`]s to be consumed.
16 //! [`ChainMonitor`] is parameterized by an optional chain source, which must implement the
17 //! [`chain::Filter`] trait. This provides a mechanism to signal new relevant outputs back to light
18 //! clients, such that transactions spending those outputs are included in block data.
20 //! [`ChainMonitor`] may be used directly to monitor channels locally or as a part of a distributed
21 //! setup to monitor channels remotely. In the latter case, a custom [`chain::Watch`] implementation
22 //! would be responsible for routing each update to a remote server and for retrieving monitor
23 //! events. The remote server would make use of [`ChainMonitor`] for block processing and for
24 //! servicing [`ChannelMonitor`] updates from the client.
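//!
//! A rough sketch of driving a local [`ChainMonitor`] with full blocks via [`chain::Listen`]
//! (the `chain_monitor`, `block`, and `height` values are assumed to already be in scope):
//!
//! ```ignore
//! use lightning::chain::Listen;
//!
//! // On each new block, let the ChainMonitor dispatch the block to every ChannelMonitor so
//! // they can update their on-chain view and surface any resulting MonitorEvents.
//! chain_monitor.block_connected(&block, height);
//!
//! // On a reorg, report disconnected blocks in reverse order before connecting the new tip.
//! chain_monitor.block_disconnected(&block.header, height);
//! ```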
26 use bitcoin::blockdata::block::BlockHeader;
27 use bitcoin::hash_types::Txid;
30 use chain::{ChannelMonitorUpdateErr, Filter, WatchedOutput};
31 use chain::chaininterface::{BroadcasterInterface, FeeEstimator};
32 use chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, Balance, MonitorEvent, TransactionOutputs, LATENCY_GRACE_PERIOD_BLOCKS};
33 use chain::transaction::{OutPoint, TransactionData};
34 use chain::keysinterface::Sign;
35 use util::atomic_counter::AtomicCounter;
36 use util::logger::Logger;
37 use util::errors::APIError;
39 use util::events::EventHandler;
40 use ln::channelmanager::ChannelDetails;
43 use sync::{RwLock, RwLockReadGuard, Mutex, MutexGuard};
45 use core::sync::atomic::{AtomicBool, AtomicUsize, Ordering};
46 use bitcoin::secp256k1::PublicKey;
48 #[derive(Clone, Copy, Hash, PartialEq, Eq)]
49 /// A specific update's ID stored in a `MonitorUpdateId`, separated out to make the contents opaque.
52 /// An update that was generated by the `ChannelManager` (via our `chain::Watch`
53 /// implementation). This corresponds to an actual [`ChannelMonitorUpdate::update_id`] field
54 /// and [`ChannelMonitor::get_latest_update_id`].
56 /// An update that was generated during blockchain processing. The ID here is specific to the
57 /// generating [`ChainMonitor`] and does *not* correspond to any on-disk IDs.
61 /// An opaque identifier describing a specific [`Persist`] method call.
62 #[derive(Clone, Copy, Hash, PartialEq, Eq)]
63 pub struct MonitorUpdateId {
64 contents: UpdateOrigin,
67 impl MonitorUpdateId {
68 pub(crate) fn from_monitor_update(update: &ChannelMonitorUpdate) -> Self {
69 Self { contents: UpdateOrigin::OffChain(update.update_id) }
71 pub(crate) fn from_new_monitor<ChannelSigner: Sign>(monitor: &ChannelMonitor<ChannelSigner>) -> Self {
72 Self { contents: UpdateOrigin::OffChain(monitor.get_latest_update_id()) }
76 /// `Persist` defines behavior for persisting channel monitors: this could mean
77 /// writing once to disk, and/or uploading to one or more backup services.
79 /// Each method can return three possible values:
80 /// * If persistence (including any relevant `fsync()` calls) happens immediately, the
81 /// implementation should return `Ok(())`, indicating normal channel operation should continue.
82 /// * If persistence happens asynchronously, implementations should first ensure the
83 /// [`ChannelMonitor`] or [`ChannelMonitorUpdate`] are written durably to disk, and then return
84 /// `Err(ChannelMonitorUpdateErr::TemporaryFailure)` while the update continues in the
85 /// background. Once the update completes, [`ChainMonitor::channel_monitor_updated`] should be
86 /// called with the corresponding [`MonitorUpdateId`].
88 /// Note that unlike the direct [`chain::Watch`] interface,
89 /// [`ChainMonitor::channel_monitor_updated`] must be called once for *each* update which occurs.
91 /// * If persistence fails for some reason, implementations should return
92 /// `Err(ChannelMonitorUpdateErr::PermanentFailure)`, in which case the channel will likely be
93 /// closed without broadcasting the latest state. See
94 /// [`ChannelMonitorUpdateErr::PermanentFailure`] for more details.
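///
/// A minimal synchronous sketch which rewrites the full serialized monitor on every call; the
/// `MyStore` type and its `write_file` helper are hypothetical stand-ins for whatever storage
/// backend is used:
///
/// ```ignore
/// struct MyStore { path: std::path::PathBuf }
///
/// impl<ChannelSigner: Sign> Persist<ChannelSigner> for MyStore {
///     fn persist_new_channel(&self, channel_id: OutPoint, data: &ChannelMonitor<ChannelSigner>,
///         _update_id: MonitorUpdateId) -> Result<(), ChannelMonitorUpdateErr>
///     {
///         // Key the monitor by its funding outpoint and write the full serialized monitor.
///         let key = format!("{}_{}", channel_id.txid, channel_id.index);
///         self.write_file(&key, &data.encode())
///             .map_err(|_| ChannelMonitorUpdateErr::PermanentFailure)
///     }
///
///     fn update_persisted_channel(&self, channel_id: OutPoint, _update: &Option<ChannelMonitorUpdate>,
///         data: &ChannelMonitor<ChannelSigner>, _update_id: MonitorUpdateId) -> Result<(), ChannelMonitorUpdateErr>
///     {
///         // Rewrite the full monitor rather than persisting the individual update.
///         let key = format!("{}_{}", channel_id.txid, channel_id.index);
///         self.write_file(&key, &data.encode())
///             .map_err(|_| ChannelMonitorUpdateErr::PermanentFailure)
///     }
/// }
/// ```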
95 pub trait Persist<ChannelSigner: Sign> {
96 /// Persist a new channel's data in response to a [`chain::Watch::watch_channel`] call. This is
97 /// called by [`ChannelManager`] for new channels, or may be called directly, e.g. on startup.
99 /// The data can be stored any way you want, but the identifier provided by LDK is the
100 /// channel's outpoint (and it is up to you to maintain a correct mapping between the outpoint
101 /// and the stored channel data). Note that you **must** persist every new monitor to disk.
103 /// The `update_id` is used to identify this call to [`ChainMonitor::channel_monitor_updated`],
104 /// if you return [`ChannelMonitorUpdateErr::TemporaryFailure`].
106 /// See [`Writeable::write`] on [`ChannelMonitor`] for writing out a `ChannelMonitor`
107 /// and [`ChannelMonitorUpdateErr`] for requirements when returning errors.
109 /// [`ChannelManager`]: crate::ln::channelmanager::ChannelManager
110 /// [`Writeable::write`]: crate::util::ser::Writeable::write
111 fn persist_new_channel(&self, channel_id: OutPoint, data: &ChannelMonitor<ChannelSigner>, update_id: MonitorUpdateId) -> Result<(), ChannelMonitorUpdateErr>;
113 /// Update one channel's data. The provided [`ChannelMonitor`] has already applied the given update.
116 /// Note that on every update, you **must** persist either the [`ChannelMonitorUpdate`] or the
117 /// updated monitor itself to disk/backups. See the [`Persist`] trait documentation for more details.
120 /// During blockchain synchronization operations, this may be called with no
121 /// [`ChannelMonitorUpdate`], in which case the full [`ChannelMonitor`] needs to be persisted.
122 /// Note that after the full [`ChannelMonitor`] is persisted any previous
123 /// [`ChannelMonitorUpdate`]s which were persisted should be discarded - they can no longer be
124 /// applied to the persisted [`ChannelMonitor`] as they were already applied.
126 /// If an implementer chooses to persist the updates only, they need to make
127 /// sure that all the updates are applied to the `ChannelMonitors` *before*
128 /// the set of channel monitors is given to the `ChannelManager`
129 /// deserialization routine. See [`ChannelMonitor::update_monitor`] for
130 /// applying a monitor update to a monitor. If full `ChannelMonitors` are
131 /// persisted, then there is no need to persist individual updates.
133 /// Note that there could be a performance tradeoff between persisting complete
134 /// channel monitors on every update vs. persisting only updates and applying
135 /// them in batches. The size of each monitor grows `O(number of state updates)`
136 /// whereas updates are small and `O(1)`.
138 /// The `update_id` is used to identify this call to [`ChainMonitor::channel_monitor_updated`],
139 /// if you return [`ChannelMonitorUpdateErr::TemporaryFailure`].
141 /// See [`Writeable::write`] on [`ChannelMonitor`] for writing out a `ChannelMonitor`,
142 /// [`Writeable::write`] on [`ChannelMonitorUpdate`] for writing out an update, and
143 /// [`ChannelMonitorUpdateErr`] for requirements when returning errors.
145 /// [`Writeable::write`]: crate::util::ser::Writeable::write
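///
/// If only updates are persisted, a rough startup sketch for replaying them might look like the
/// following (the `monitors` map, the `read_updates_for` helper, and the `broadcaster`,
/// `fee_estimator`, and `logger` values are hypothetical/assumed; argument types follow
/// [`ChannelMonitor::update_monitor`]):
///
/// ```ignore
/// for (funding_txo, monitor) in monitors.iter() {
///     // Apply every stored update *before* the monitor set is handed to the
///     // ChannelManager deserialization routine.
///     for update in read_updates_for(funding_txo) {
///         monitor.update_monitor(&update, &broadcaster, &fee_estimator, &logger)
///             .expect("stored updates were previously accepted by this monitor");
///     }
/// }
/// ```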
146 fn update_persisted_channel(&self, channel_id: OutPoint, update: &Option<ChannelMonitorUpdate>, data: &ChannelMonitor<ChannelSigner>, update_id: MonitorUpdateId) -> Result<(), ChannelMonitorUpdateErr>;
149 struct MonitorHolder<ChannelSigner: Sign> {
150 monitor: ChannelMonitor<ChannelSigner>,
151 /// The full set of pending monitor updates for this Channel.
153 /// Note that this lock must be held during updates to prevent a race where we call
154 /// update_persisted_channel, the user returns a TemporaryFailure, and then calls
155 /// channel_monitor_updated immediately, racing our insertion of the pending update into the contained `Vec`.
158 /// Beyond the synchronization of updates themselves, we cannot handle user events until after
159 /// any chain updates have been stored on disk. Thus, we scan this list when returning updates
160 /// to the ChannelManager, refusing to return any updates for a ChannelMonitor which is still
161 /// being persisted fully to disk after a chain update.
163 /// This avoids the possibility of handling, e.g. an on-chain claim, generating a claim monitor
164 /// event, resulting in the relevant ChannelManager generating a PaymentSent event and dropping
165 /// the pending payment entry, and then reloading before the monitor is persisted, resulting in
166 /// the ChannelManager re-adding the same payment entry, before the same block is replayed,
167 /// resulting in a duplicate PaymentSent event.
168 pending_monitor_updates: Mutex<Vec<MonitorUpdateId>>,
169 /// When the user returns a PermanentFailure error from an update_persisted_channel call during
170 /// block processing, we inform the ChannelManager that the channel should be closed
171 /// asynchronously. In order to ensure no further changes happen before the ChannelManager has
172 /// processed the closure event, we set this to true and return PermanentFailure for any other
173 /// chain::Watch events.
174 channel_perm_failed: AtomicBool,
175 /// The last block height at which no [`UpdateOrigin::ChainSync`] monitor updates were present
176 /// in `pending_monitor_updates`.
177 /// If it's been more than [`LATENCY_GRACE_PERIOD_BLOCKS`] since we started waiting on a chain
178 /// sync event, we let monitor events return to `ChannelManager` because we cannot hold them up
179 /// forever or we'll end up with HTLC preimages waiting to feed back into an upstream channel
180 /// forever, risking funds loss.
181 last_chain_persist_height: AtomicUsize,
184 impl<ChannelSigner: Sign> MonitorHolder<ChannelSigner> {
185 fn has_pending_offchain_updates(&self, pending_monitor_updates_lock: &MutexGuard<Vec<MonitorUpdateId>>) -> bool {
186 pending_monitor_updates_lock.iter().any(|update_id|
187 if let UpdateOrigin::OffChain(_) = update_id.contents { true } else { false })
189 fn has_pending_chainsync_updates(&self, pending_monitor_updates_lock: &MutexGuard<Vec<MonitorUpdateId>>) -> bool {
190 pending_monitor_updates_lock.iter().any(|update_id|
191 if let UpdateOrigin::ChainSync(_) = update_id.contents { true } else { false })
195 /// A read-only reference to a current ChannelMonitor.
197 /// Note that this holds a mutex in [`ChainMonitor`] and may block other events until it is released.
199 pub struct LockedChannelMonitor<'a, ChannelSigner: Sign> {
200 lock: RwLockReadGuard<'a, HashMap<OutPoint, MonitorHolder<ChannelSigner>>>,
201 funding_txo: OutPoint,
204 impl<ChannelSigner: Sign> Deref for LockedChannelMonitor<'_, ChannelSigner> {
205 type Target = ChannelMonitor<ChannelSigner>;
206 fn deref(&self) -> &ChannelMonitor<ChannelSigner> {
207 &self.lock.get(&self.funding_txo).expect("Checked at construction").monitor
211 /// An implementation of [`chain::Watch`] for monitoring channels.
213 /// Connected and disconnected blocks must be provided to `ChainMonitor` as documented by
214 /// [`chain::Watch`]. May be used in conjunction with [`ChannelManager`] to monitor channels locally
215 /// or used independently to monitor channels remotely. See the [module-level documentation] for details.
218 /// [`ChannelManager`]: crate::ln::channelmanager::ChannelManager
219 /// [module-level documentation]: crate::chain::chainmonitor
220 pub struct ChainMonitor<ChannelSigner: Sign, C: Deref, T: Deref, F: Deref, L: Deref, P: Deref>
221 where C::Target: chain::Filter,
222 T::Target: BroadcasterInterface,
223 F::Target: FeeEstimator,
225 P::Target: Persist<ChannelSigner>,
227 monitors: RwLock<HashMap<OutPoint, MonitorHolder<ChannelSigner>>>,
228 /// When we generate a [`MonitorUpdateId`] for a chain-event monitor persistence, we need a
229 /// unique ID, which we calculate by simply getting the next value from this counter. Note that
230 /// the ID is never persisted so it's ok that it resets on restart.
231 sync_persistence_id: AtomicCounter,
232 chain_source: Option<C>,
237 /// "User-provided" (ie persistence-completion/-failed) [`MonitorEvent`]s. These came directly
238 /// from the user and not from a [`ChannelMonitor`].
239 pending_monitor_events: Mutex<Vec<(OutPoint, Vec<MonitorEvent>, Option<PublicKey>)>>,
240 /// The best block height seen, used as a proxy for the passage of time.
241 highest_chain_height: AtomicUsize,
244 impl<ChannelSigner: Sign, C: Deref, T: Deref, F: Deref, L: Deref, P: Deref> ChainMonitor<ChannelSigner, C, T, F, L, P>
245 where C::Target: chain::Filter,
246 T::Target: BroadcasterInterface,
247 F::Target: FeeEstimator,
249 P::Target: Persist<ChannelSigner>,
251 /// Dispatches to per-channel monitors, which are responsible for updating their on-chain view
252 /// of a channel and reacting accordingly based on transactions in the given chain data. See
253 /// [`ChannelMonitor::block_connected`] for details. Any HTLCs that were resolved on chain will
254 /// be returned by [`chain::Watch::release_pending_monitor_events`].
256 /// Calls back to [`chain::Filter`] if any monitor indicated new outputs to watch. Subsequent
257 /// calls must not exclude any transactions matching the new outputs nor any in-block
258 /// descendants of such transactions. It is not necessary to re-fetch the block to obtain
259 /// updated `txdata`.
261 /// Calls which represent a new blockchain tip height should set `best_height`.
262 fn process_chain_data<FN>(&self, header: &BlockHeader, best_height: Option<u32>, txdata: &TransactionData, process: FN)
264 FN: Fn(&ChannelMonitor<ChannelSigner>, &TransactionData) -> Vec<TransactionOutputs>
266 let mut dependent_txdata = Vec::new();
268 let monitor_states = self.monitors.write().unwrap();
269 if let Some(height) = best_height {
270 // If the best block height is being updated, update highest_chain_height under the
271 // monitors write lock.
272 let old_height = self.highest_chain_height.load(Ordering::Acquire);
273 let new_height = height as usize;
274 if new_height > old_height {
275 self.highest_chain_height.store(new_height, Ordering::Release);
279 for (funding_outpoint, monitor_state) in monitor_states.iter() {
280 let monitor = &monitor_state.monitor;
283 txn_outputs = process(monitor, txdata);
284 let update_id = MonitorUpdateId {
285 contents: UpdateOrigin::ChainSync(self.sync_persistence_id.get_increment()),
287 let mut pending_monitor_updates = monitor_state.pending_monitor_updates.lock().unwrap();
288 if let Some(height) = best_height {
289 if !monitor_state.has_pending_chainsync_updates(&pending_monitor_updates) {
290 // If there are no ChainSync persists awaiting completion, go ahead and
291 // set last_chain_persist_height here - we wouldn't want the first
292 // TemporaryFailure to always immediately be considered "overly delayed".
293 monitor_state.last_chain_persist_height.store(height as usize, Ordering::Release);
297 log_trace!(self.logger, "Syncing Channel Monitor for channel {}", log_funding_info!(monitor));
298 match self.persister.update_persisted_channel(*funding_outpoint, &None, monitor, update_id) {
300 log_trace!(self.logger, "Finished syncing Channel Monitor for channel {}", log_funding_info!(monitor)),
301 Err(ChannelMonitorUpdateErr::PermanentFailure) => {
302 monitor_state.channel_perm_failed.store(true, Ordering::Release);
303 self.pending_monitor_events.lock().unwrap().push((*funding_outpoint, vec![MonitorEvent::UpdateFailed(*funding_outpoint)], monitor.get_counterparty_node_id()));
305 Err(ChannelMonitorUpdateErr::TemporaryFailure) => {
306 log_debug!(self.logger, "Channel Monitor sync for channel {} in progress, holding events until completion!", log_funding_info!(monitor));
307 pending_monitor_updates.push(update_id);
312 // Register any new outputs with the chain source for filtering, storing any dependent
313 // transactions from within the block that previously had not been included in txdata.
314 if let Some(ref chain_source) = self.chain_source {
315 let block_hash = header.block_hash();
316 for (txid, mut outputs) in txn_outputs.drain(..) {
317 for (idx, output) in outputs.drain(..) {
318 // Register any new outputs with the chain source for filtering and recurse
319 // if it indicates that there are dependent transactions within the block
320 // that had not been previously included in txdata.
321 let output = WatchedOutput {
322 block_hash: Some(block_hash),
323 outpoint: OutPoint { txid, index: idx as u16 },
324 script_pubkey: output.script_pubkey,
326 if let Some(tx) = chain_source.register_output(output) {
327 dependent_txdata.push(tx);
335 // Recursively call for any dependent transactions that were identified by the chain source.
336 if !dependent_txdata.is_empty() {
337 dependent_txdata.sort_unstable_by_key(|(index, _tx)| *index);
338 dependent_txdata.dedup_by_key(|(index, _tx)| *index);
339 let txdata: Vec<_> = dependent_txdata.iter().map(|(index, tx)| (*index, tx)).collect();
340 self.process_chain_data(header, None, &txdata, process); // We skip the best height the second go-around
344 /// Creates a new `ChainMonitor` used to watch on-chain activity pertaining to channels.
346 /// When an optional chain source implementing [`chain::Filter`] is provided, the chain monitor
347 /// will call back to it indicating transactions and outputs of interest. This allows clients to
348 /// pre-filter blocks or only fetch blocks matching a compact filter. Otherwise, clients may
349 /// always need to fetch full blocks absent another means for determining which blocks contain
350 /// transactions relevant to the watched channels.
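///
/// A loose construction sketch (the `chain_source`, `tx_broadcaster`, `logger`, `fee_estimator`,
/// and `persister` values are assumed to already exist and implement the corresponding traits):
///
/// ```ignore
/// // With a chain source, the ChainMonitor will register outputs of interest back to it so
/// // pre-filtered blocks remain sufficient.
/// let chain_monitor = ChainMonitor::new(Some(&chain_source), &tx_broadcaster, &logger,
///     &fee_estimator, &persister);
/// ```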
351 pub fn new(chain_source: Option<C>, broadcaster: T, logger: L, feeest: F, persister: P) -> Self {
353 monitors: RwLock::new(HashMap::new()),
354 sync_persistence_id: AtomicCounter::new(),
358 fee_estimator: feeest,
360 pending_monitor_events: Mutex::new(Vec::new()),
361 highest_chain_height: AtomicUsize::new(0),
365 /// Gets the balances in the contained [`ChannelMonitor`]s which are claimable on-chain or
366 /// claims which are awaiting confirmation.
368 /// Includes the balances from each [`ChannelMonitor`] *except* those included in
369 /// `ignored_channels`, allowing you to filter out balances from channels which are still open
370 /// (and whose balance should likely be pulled from the [`ChannelDetails`]).
372 /// See [`ChannelMonitor::get_claimable_balances`] for more details on the exact criteria for
373 /// inclusion in the return value.
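///
/// A usage sketch, assuming `chain_monitor` and `channel_manager` are in scope:
///
/// ```ignore
/// // Ignore channels the ChannelManager still considers open; their balances are better read
/// // from the corresponding ChannelDetails.
/// let open_channels = channel_manager.list_channels();
/// let ignored: Vec<&ChannelDetails> = open_channels.iter().collect();
/// let balances = chain_monitor.get_claimable_balances(&ignored);
/// ```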
374 pub fn get_claimable_balances(&self, ignored_channels: &[&ChannelDetails]) -> Vec<Balance> {
375 let mut ret = Vec::new();
376 let monitor_states = self.monitors.read().unwrap();
377 for (_, monitor_state) in monitor_states.iter().filter(|(funding_outpoint, _)| {
378 for chan in ignored_channels {
379 if chan.funding_txo.as_ref() == Some(funding_outpoint) {
385 ret.append(&mut monitor_state.monitor.get_claimable_balances());
390 /// Gets the [`LockedChannelMonitor`] for a given funding outpoint, returning an `Err` if no
391 /// such [`ChannelMonitor`] is currently being monitored for.
393 /// Note that the result holds a mutex over our monitor set, and should not be held indefinitely.
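///
/// A usage sketch, assuming `chain_monitor` and a known `funding_txo` are in scope:
///
/// ```ignore
/// if let Ok(monitor) = chain_monitor.get_monitor(funding_txo) {
///     // The returned guard derefs to the ChannelMonitor; drop it promptly when done.
///     let balances = monitor.get_claimable_balances();
/// }
/// ```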
395 pub fn get_monitor(&self, funding_txo: OutPoint) -> Result<LockedChannelMonitor<'_, ChannelSigner>, ()> {
396 let lock = self.monitors.read().unwrap();
397 if lock.get(&funding_txo).is_some() {
398 Ok(LockedChannelMonitor { lock, funding_txo })
404 /// Lists the funding outpoint of each [`ChannelMonitor`] being monitored.
406 /// Note that [`ChannelMonitor`]s are not removed when a channel is closed as they are always
407 /// monitoring for on-chain state resolutions.
408 pub fn list_monitors(&self) -> Vec<OutPoint> {
409 self.monitors.read().unwrap().keys().map(|outpoint| *outpoint).collect()
413 pub fn remove_monitor(&self, funding_txo: &OutPoint) -> ChannelMonitor<ChannelSigner> {
414 self.monitors.write().unwrap().remove(funding_txo).unwrap().monitor
417 /// Indicates the persistence of a [`ChannelMonitor`] has completed after
418 /// [`ChannelMonitorUpdateErr::TemporaryFailure`] was returned from an update operation.
420 /// Thus, the anticipated use is, at a high level:
421 /// 1) This [`ChainMonitor`] calls [`Persist::update_persisted_channel`] which stores the
422 /// update to disk and begins updating any remote (e.g. watchtower/backup) copies,
423 /// returning [`ChannelMonitorUpdateErr::TemporaryFailure`],
424 /// 2) once all remote copies are updated, you call this function with the
425 /// `completed_update_id` that completed, and once all pending updates have completed the
426 /// channel will be re-enabled.
427 // Note that we re-enable only after `UpdateOrigin::OffChain` updates complete, we don't
428 // care about `UpdateOrigin::ChainSync` updates for the channel state being updated. We
429 // only care about `UpdateOrigin::ChainSync` for returning `MonitorEvent`s.
431 /// Returns an [`APIError::APIMisuseError`] if `funding_txo` does not match any currently
432 /// registered [`ChannelMonitor`]s.
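///
/// A rough sketch of the asynchronous flow; the `remote_backup` handle is hypothetical, and
/// `update_id` is the [`MonitorUpdateId`] previously handed to [`Persist::update_persisted_channel`]:
///
/// ```ignore
/// // The Persist implementation returned TemporaryFailure and kicked off a background upload.
/// remote_backup.wait_until_uploaded(funding_txo, update_id);
/// // Inform the ChainMonitor that persistence completed so the channel can resume operation.
/// chain_monitor.channel_monitor_updated(funding_txo, update_id)?;
/// ```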
433 pub fn channel_monitor_updated(&self, funding_txo: OutPoint, completed_update_id: MonitorUpdateId) -> Result<(), APIError> {
434 let monitors = self.monitors.read().unwrap();
435 let monitor_data = if let Some(mon) = monitors.get(&funding_txo) { mon } else {
436 return Err(APIError::APIMisuseError { err: format!("No ChannelMonitor matching funding outpoint {:?} found", funding_txo) });
438 let mut pending_monitor_updates = monitor_data.pending_monitor_updates.lock().unwrap();
439 pending_monitor_updates.retain(|update_id| *update_id != completed_update_id);
441 match completed_update_id {
442 MonitorUpdateId { contents: UpdateOrigin::OffChain(_) } => {
443 // Note that we only check for `UpdateOrigin::OffChain` failures here - if
444 // we're being told that a `UpdateOrigin::OffChain` monitor update completed,
445 // we only care about ensuring we don't tell the `ChannelManager` to restore
446 // the channel to normal operation until all `UpdateOrigin::OffChain` updates complete.
448 // If there's some `UpdateOrigin::ChainSync` update still pending that's okay
449 // - we can still update our channel state, just as long as we don't return
450 // `MonitorEvent`s from the monitor back to the `ChannelManager` until they complete.
452 let monitor_is_pending_updates = monitor_data.has_pending_offchain_updates(&pending_monitor_updates);
453 if monitor_is_pending_updates || monitor_data.channel_perm_failed.load(Ordering::Acquire) {
454 // If there are still monitor updates pending (or an old monitor update
455 // finished after a later one perm-failed), we cannot yet construct an
456 // UpdateCompleted event.
459 self.pending_monitor_events.lock().unwrap().push((funding_txo, vec![MonitorEvent::UpdateCompleted {
461 monitor_update_id: monitor_data.monitor.get_latest_update_id(),
462 }], monitor_data.monitor.get_counterparty_node_id()));
464 MonitorUpdateId { contents: UpdateOrigin::ChainSync(_) } => {
465 if !monitor_data.has_pending_chainsync_updates(&pending_monitor_updates) {
466 monitor_data.last_chain_persist_height.store(self.highest_chain_height.load(Ordering::Acquire), Ordering::Release);
467 // The next time release_pending_monitor_events is called, any events for this
468 // ChannelMonitor will be returned.
475 /// This wrapper avoids having to update some of our tests for now as they assume the direct
476 /// chain::Watch API wherein we mark a monitor fully-updated by just calling
477 /// channel_monitor_updated once with the highest ID.
478 #[cfg(any(test, fuzzing))]
479 pub fn force_channel_monitor_updated(&self, funding_txo: OutPoint, monitor_update_id: u64) {
480 let monitors = self.monitors.read().unwrap();
481 let counterparty_node_id = monitors.get(&funding_txo).and_then(|m| m.monitor.get_counterparty_node_id());
482 self.pending_monitor_events.lock().unwrap().push((funding_txo, vec![MonitorEvent::UpdateCompleted {
485 }], counterparty_node_id));
488 #[cfg(any(test, fuzzing, feature = "_test_utils"))]
489 pub fn get_and_clear_pending_events(&self) -> Vec<events::Event> {
490 use util::events::EventsProvider;
491 let events = core::cell::RefCell::new(Vec::new());
492 let event_handler = |event: &events::Event| events.borrow_mut().push(event.clone());
493 self.process_pending_events(&event_handler);
498 impl<ChannelSigner: Sign, C: Deref, T: Deref, F: Deref, L: Deref, P: Deref>
499 chain::Listen for ChainMonitor<ChannelSigner, C, T, F, L, P>
501 C::Target: chain::Filter,
502 T::Target: BroadcasterInterface,
503 F::Target: FeeEstimator,
505 P::Target: Persist<ChannelSigner>,
507 fn filtered_block_connected(&self, header: &BlockHeader, txdata: &TransactionData, height: u32) {
508 log_debug!(self.logger, "New best block {} at height {} provided via block_connected", header.block_hash(), height);
509 self.process_chain_data(header, Some(height), &txdata, |monitor, txdata| {
510 monitor.block_connected(
511 header, txdata, height, &*self.broadcaster, &*self.fee_estimator, &*self.logger)
515 fn block_disconnected(&self, header: &BlockHeader, height: u32) {
516 let monitor_states = self.monitors.read().unwrap();
517 log_debug!(self.logger, "Latest block {} at height {} removed via block_disconnected", header.block_hash(), height);
518 for monitor_state in monitor_states.values() {
519 monitor_state.monitor.block_disconnected(
520 header, height, &*self.broadcaster, &*self.fee_estimator, &*self.logger);
525 impl<ChannelSigner: Sign, C: Deref, T: Deref, F: Deref, L: Deref, P: Deref>
526 chain::Confirm for ChainMonitor<ChannelSigner, C, T, F, L, P>
528 C::Target: chain::Filter,
529 T::Target: BroadcasterInterface,
530 F::Target: FeeEstimator,
532 P::Target: Persist<ChannelSigner>,
534 fn transactions_confirmed(&self, header: &BlockHeader, txdata: &TransactionData, height: u32) {
535 log_debug!(self.logger, "{} provided transactions confirmed at height {} in block {}", txdata.len(), height, header.block_hash());
536 self.process_chain_data(header, None, txdata, |monitor, txdata| {
537 monitor.transactions_confirmed(
538 header, txdata, height, &*self.broadcaster, &*self.fee_estimator, &*self.logger)
542 fn transaction_unconfirmed(&self, txid: &Txid) {
543 log_debug!(self.logger, "Transaction {} reorganized out of chain", txid);
544 let monitor_states = self.monitors.read().unwrap();
545 for monitor_state in monitor_states.values() {
546 monitor_state.monitor.transaction_unconfirmed(txid, &*self.broadcaster, &*self.fee_estimator, &*self.logger);
550 fn best_block_updated(&self, header: &BlockHeader, height: u32) {
551 log_debug!(self.logger, "New best block {} at height {} provided via best_block_updated", header.block_hash(), height);
552 self.process_chain_data(header, Some(height), &[], |monitor, txdata| {
553 // While in practice there shouldn't be any recursive calls when given empty txdata,
554 // it's still possible if a chain::Filter implementation returns a transaction.
555 debug_assert!(txdata.is_empty());
556 monitor.best_block_updated(
557 header, height, &*self.broadcaster, &*self.fee_estimator, &*self.logger)
561 fn get_relevant_txids(&self) -> Vec<Txid> {
562 let mut txids = Vec::new();
563 let monitor_states = self.monitors.read().unwrap();
564 for monitor_state in monitor_states.values() {
565 txids.append(&mut monitor_state.monitor.get_relevant_txids());
568 txids.sort_unstable();
574 impl<ChannelSigner: Sign, C: Deref , T: Deref , F: Deref , L: Deref , P: Deref >
575 chain::Watch<ChannelSigner> for ChainMonitor<ChannelSigner, C, T, F, L, P>
576 where C::Target: chain::Filter,
577 T::Target: BroadcasterInterface,
578 F::Target: FeeEstimator,
580 P::Target: Persist<ChannelSigner>,
582 /// Adds the monitor that watches the channel referred to by the given outpoint.
584 /// Calls back to [`chain::Filter`] with the funding transaction and outputs to watch.
586 /// Note that we persist the given `ChannelMonitor` while holding the `ChainMonitor` monitors lock.
588 fn watch_channel(&self, funding_outpoint: OutPoint, monitor: ChannelMonitor<ChannelSigner>) -> Result<(), ChannelMonitorUpdateErr> {
589 let mut monitors = self.monitors.write().unwrap();
590 let entry = match monitors.entry(funding_outpoint) {
591 hash_map::Entry::Occupied(_) => {
592 log_error!(self.logger, "Failed to add new channel data: channel monitor for given outpoint is already present");
593 return Err(ChannelMonitorUpdateErr::PermanentFailure)},
594 hash_map::Entry::Vacant(e) => e,
596 log_trace!(self.logger, "Got new ChannelMonitor for channel {}", log_funding_info!(monitor));
597 let update_id = MonitorUpdateId::from_new_monitor(&monitor);
598 let mut pending_monitor_updates = Vec::new();
599 let persist_res = self.persister.persist_new_channel(funding_outpoint, &monitor, update_id);
600 if persist_res.is_err() {
601 log_error!(self.logger, "Failed to persist new ChannelMonitor for channel {}: {:?}", log_funding_info!(monitor), persist_res);
603 log_trace!(self.logger, "Finished persisting new ChannelMonitor for channel {}", log_funding_info!(monitor));
605 if persist_res == Err(ChannelMonitorUpdateErr::PermanentFailure) {
607 } else if persist_res.is_err() {
608 pending_monitor_updates.push(update_id);
610 if let Some(ref chain_source) = self.chain_source {
611 monitor.load_outputs_to_watch(chain_source);
613 entry.insert(MonitorHolder {
615 pending_monitor_updates: Mutex::new(pending_monitor_updates),
616 channel_perm_failed: AtomicBool::new(false),
617 last_chain_persist_height: AtomicUsize::new(self.highest_chain_height.load(Ordering::Acquire)),
622 /// Note that we persist the given `ChannelMonitor` update while holding the
623 /// `ChainMonitor` monitors lock.
624 fn update_channel(&self, funding_txo: OutPoint, update: ChannelMonitorUpdate) -> Result<(), ChannelMonitorUpdateErr> {
625 // Update the monitor that watches the channel referred to by the given outpoint.
626 let monitors = self.monitors.read().unwrap();
627 match monitors.get(&funding_txo) {
629 log_error!(self.logger, "Failed to update channel monitor: no such monitor registered");
631 // We should never ever trigger this from within ChannelManager. Technically a
632 // user could use this object with some proxying in between which makes this
633 // possible, but in tests and fuzzing, this should be a panic.
634 #[cfg(any(test, fuzzing))]
635 panic!("ChannelManager generated a channel update for a channel that was not yet registered!");
636 #[cfg(not(any(test, fuzzing)))]
637 Err(ChannelMonitorUpdateErr::PermanentFailure)
639 Some(monitor_state) => {
640 let monitor = &monitor_state.monitor;
641 log_trace!(self.logger, "Updating ChannelMonitor for channel {}", log_funding_info!(monitor));
642 let update_res = monitor.update_monitor(&update, &self.broadcaster, &*self.fee_estimator, &self.logger);
643 if update_res.is_err() {
644 log_error!(self.logger, "Failed to update ChannelMonitor for channel {}.", log_funding_info!(monitor));
646 // Even if updating the monitor returns an error, the monitor's state will
647 // still be changed. So, persist the updated monitor despite the error.
648 let update_id = MonitorUpdateId::from_monitor_update(&update);
649 let mut pending_monitor_updates = monitor_state.pending_monitor_updates.lock().unwrap();
650 let persist_res = self.persister.update_persisted_channel(funding_txo, &Some(update), monitor, update_id);
651 if let Err(e) = persist_res {
652 if e == ChannelMonitorUpdateErr::TemporaryFailure {
653 pending_monitor_updates.push(update_id);
655 monitor_state.channel_perm_failed.store(true, Ordering::Release);
657 log_error!(self.logger, "Failed to persist ChannelMonitor update for channel {}: {:?}", log_funding_info!(monitor), e);
659 log_trace!(self.logger, "Finished persisting ChannelMonitor update for channel {}", log_funding_info!(monitor));
661 if update_res.is_err() {
662 Err(ChannelMonitorUpdateErr::PermanentFailure)
663 } else if monitor_state.channel_perm_failed.load(Ordering::Acquire) {
664 Err(ChannelMonitorUpdateErr::PermanentFailure)
672 fn release_pending_monitor_events(&self) -> Vec<(OutPoint, Vec<MonitorEvent>, Option<PublicKey>)> {
673 let mut pending_monitor_events = self.pending_monitor_events.lock().unwrap().split_off(0);
674 for monitor_state in self.monitors.read().unwrap().values() {
675 let is_pending_monitor_update = monitor_state.has_pending_chainsync_updates(&monitor_state.pending_monitor_updates.lock().unwrap());
676 if is_pending_monitor_update &&
677 monitor_state.last_chain_persist_height.load(Ordering::Acquire) + LATENCY_GRACE_PERIOD_BLOCKS as usize
678 > self.highest_chain_height.load(Ordering::Acquire)
680 log_info!(self.logger, "A Channel Monitor sync is still in progress, refusing to provide monitor events!");
682 if monitor_state.channel_perm_failed.load(Ordering::Acquire) {
683 // If an `UpdateOrigin::ChainSync` persistence failed with `PermanentFailure`,
684 // we don't really know if the latest `ChannelMonitor` state is on disk or not.
685 // We're supposed to hold monitor updates until the latest state is on disk to
686 // avoid duplicate events, but the user told us persistence is screw-y and may
687 // not complete. We can't hold events forever because we may learn some payment
688 // preimage, so instead we just log and hope the user complied with the
689 // `PermanentFailure` requirements of having at least the local-disk copy updated.
691 log_info!(self.logger, "A Channel Monitor sync returned PermanentFailure. Returning monitor events but duplicate events may appear after reload!");
693 if is_pending_monitor_update {
694 log_error!(self.logger, "A ChannelMonitor sync took longer than {} blocks to complete.", LATENCY_GRACE_PERIOD_BLOCKS);
695 log_error!(self.logger, " To avoid funds-loss, we are allowing monitor updates to be released.");
696 log_error!(self.logger, " This may cause duplicate payment events to be generated.");
698 let monitor_events = monitor_state.monitor.get_and_clear_pending_monitor_events();
699 if monitor_events.len() > 0 {
700 let monitor_outpoint = monitor_state.monitor.get_funding_txo().0;
701 let counterparty_node_id = monitor_state.monitor.get_counterparty_node_id();
702 pending_monitor_events.push((monitor_outpoint, monitor_events, counterparty_node_id));
706 pending_monitor_events
710 impl<ChannelSigner: Sign, C: Deref, T: Deref, F: Deref, L: Deref, P: Deref> events::EventsProvider for ChainMonitor<ChannelSigner, C, T, F, L, P>
711 where C::Target: chain::Filter,
712 T::Target: BroadcasterInterface,
713 F::Target: FeeEstimator,
715 P::Target: Persist<ChannelSigner>,
717 /// Processes [`SpendableOutputs`] events produced from each [`ChannelMonitor`] upon maturity.
719 /// An [`EventHandler`] may safely call back to the provider, though this shouldn't be needed in
720 /// order to handle these events.
722 /// [`SpendableOutputs`]: events::Event::SpendableOutputs
723 fn process_pending_events<H: Deref>(&self, handler: H) where H::Target: EventHandler {
724 let mut pending_events = Vec::new();
725 for monitor_state in self.monitors.read().unwrap().values() {
726 pending_events.append(&mut monitor_state.monitor.get_and_clear_pending_events());
728 for event in pending_events.drain(..) {
729 handler.handle_event(&event);
736 use bitcoin::BlockHeader;
737 use ::{check_added_monitors, check_closed_broadcast, check_closed_event};
738 use ::{expect_payment_sent, expect_payment_claimed, expect_payment_sent_without_paths, expect_payment_path_successful, get_event_msg};
739 use ::{get_htlc_update_msgs, get_local_commitment_txn, get_revoke_commit_msgs, get_route_and_payment_hash, unwrap_send_err};
740 use chain::{ChannelMonitorUpdateErr, Confirm, Watch};
741 use chain::channelmonitor::LATENCY_GRACE_PERIOD_BLOCKS;
742 use ln::channelmanager::PaymentSendFailure;
743 use ln::features::InitFeatures;
744 use ln::functional_test_utils::*;
745 use ln::msgs::ChannelMessageHandler;
746 use util::errors::APIError;
747 use util::events::{ClosureReason, MessageSendEvent, MessageSendEventsProvider};
748 use util::test_utils::{OnRegisterOutput, TxOutReference};
750 /// Tests that in-block dependent transactions are processed by `block_connected` when not
751 /// included in `txdata` but returned by [`chain::Filter::register_output`]. For instance,
752 /// a (non-anchor) commitment transaction's HTLC output may be spent in the same block as the
753 /// commitment transaction itself. An Electrum client may filter the commitment transaction but
754 /// needs to return the HTLC transaction so it can be processed.
756 fn connect_block_checks_dependent_transactions() {
757 let chanmon_cfgs = create_chanmon_cfgs(2);
758 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
759 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
760 let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
761 let channel = create_announced_chan_between_nodes(
762 &nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
764 // Send a payment, saving nodes[0]'s revoked commitment and HTLC-Timeout transactions.
765 let (commitment_tx, htlc_tx) = {
766 let payment_preimage = route_payment(&nodes[0], &vec!(&nodes[1])[..], 5_000_000).0;
767 let mut txn = get_local_commitment_txn!(nodes[0], channel.2);
768 claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage);
770 assert_eq!(txn.len(), 2);
771 (txn.remove(0), txn.remove(0))
774 // Set expectations on nodes[1]'s chain source to return dependent transactions.
775 let htlc_output = TxOutReference(commitment_tx.clone(), 0);
776 let to_local_output = TxOutReference(commitment_tx.clone(), 1);
777 let htlc_timeout_output = TxOutReference(htlc_tx.clone(), 0);
778 nodes[1].chain_source
779 .expect(OnRegisterOutput { with: htlc_output, returns: Some((1, htlc_tx)) })
780 .expect(OnRegisterOutput { with: to_local_output, returns: None })
781 .expect(OnRegisterOutput { with: htlc_timeout_output, returns: None });
783 // Notify nodes[1] that nodes[0]'s revoked commitment transaction was mined. The chain
784 // source should return the dependent HTLC transaction when the HTLC output is registered.
785 mine_transaction(&nodes[1], &commitment_tx);
787 // Clean up so uninteresting assertions don't fail.
788 check_added_monitors!(nodes[1], 1);
789 nodes[1].node.get_and_clear_pending_msg_events();
790 nodes[1].node.get_and_clear_pending_events();
794 fn test_async_ooo_offchain_updates() {
795 // Test that if we have multiple offchain updates being persisted and they complete
796 // out-of-order, the ChainMonitor waits until all have completed before informing the ChannelManager.
798 let chanmon_cfgs = create_chanmon_cfgs(2);
799 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
800 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
801 let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
802 create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
804 // Route two payments to be claimed at the same time.
805 let (payment_preimage_1, payment_hash_1, _) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000);
806 let (payment_preimage_2, payment_hash_2, _) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000);
808 chanmon_cfgs[1].persister.offchain_monitor_updates.lock().unwrap().clear();
809 chanmon_cfgs[1].persister.set_update_ret(Err(ChannelMonitorUpdateErr::TemporaryFailure));
811 nodes[1].node.claim_funds(payment_preimage_1);
812 check_added_monitors!(nodes[1], 1);
813 expect_payment_claimed!(nodes[1], payment_hash_1, 1_000_000);
814 nodes[1].node.claim_funds(payment_preimage_2);
815 check_added_monitors!(nodes[1], 1);
816 expect_payment_claimed!(nodes[1], payment_hash_2, 1_000_000);
818 chanmon_cfgs[1].persister.set_update_ret(Ok(()));
820 let persistences = chanmon_cfgs[1].persister.offchain_monitor_updates.lock().unwrap().clone();
821 assert_eq!(persistences.len(), 1);
822 let (funding_txo, updates) = persistences.iter().next().unwrap();
823 assert_eq!(updates.len(), 2);
825 // Note that updates is a HashMap so the ordering here is actually random. This shouldn't
826 // fail either way but if it fails intermittently it depends on the ordering of updates.
827 let mut update_iter = updates.iter();
828 nodes[1].chain_monitor.chain_monitor.channel_monitor_updated(*funding_txo, update_iter.next().unwrap().clone()).unwrap();
829 assert!(nodes[1].chain_monitor.release_pending_monitor_events().is_empty());
830 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
831 nodes[1].chain_monitor.chain_monitor.channel_monitor_updated(*funding_txo, update_iter.next().unwrap().clone()).unwrap();
833 // Now manually walk the commitment signed dance - because we claimed two payments
834 // back-to-back it doesn't fit into the neat walk commitment_signed_dance does.
836 let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
837 nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]);
838 expect_payment_sent_without_paths!(nodes[0], payment_preimage_1);
839 nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &updates.commitment_signed);
840 check_added_monitors!(nodes[0], 1);
841 let (as_first_raa, as_first_update) = get_revoke_commit_msgs!(nodes[0], nodes[1].node.get_our_node_id());
843 nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_first_raa);
844 check_added_monitors!(nodes[1], 1);
845 let bs_second_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
846 nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_first_update);
847 check_added_monitors!(nodes[1], 1);
848 let bs_first_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
850 nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_second_updates.update_fulfill_htlcs[0]);
851 expect_payment_sent_without_paths!(nodes[0], payment_preimage_2);
852 nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_second_updates.commitment_signed);
853 check_added_monitors!(nodes[0], 1);
854 nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_first_raa);
855 expect_payment_path_successful!(nodes[0]);
856 check_added_monitors!(nodes[0], 1);
857 let (as_second_raa, as_second_update) = get_revoke_commit_msgs!(nodes[0], nodes[1].node.get_our_node_id());
859 nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_second_raa);
860 check_added_monitors!(nodes[1], 1);
861 nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_second_update);
862 check_added_monitors!(nodes[1], 1);
863 let bs_second_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
865 nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_second_raa);
866 expect_payment_path_successful!(nodes[0]);
867 check_added_monitors!(nodes[0], 1);
870 fn do_chainsync_pauses_events(block_timeout: bool) {
871 // When a chainsync monitor update occurs, any `MonitorEvent`s should be held before being
872 // passed upstream to a `ChannelManager` via `Watch::release_pending_monitor_events`. This
873 // tests that behavior, as well as some ways it might go wrong.
874 let chanmon_cfgs = create_chanmon_cfgs(2);
875 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
876 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
877 let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
878 let channel = create_announced_chan_between_nodes(
879 &nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
881 // Get a route for later and rebalance the channel somewhat
882 send_payment(&nodes[0], &[&nodes[1]], 10_000_000);
883 let (route, second_payment_hash, _, second_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 100_000);
885 // First route a payment that we will claim on chain and give the recipient the preimage.
886 let (payment_preimage, payment_hash, _) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000);
887 nodes[1].node.claim_funds(payment_preimage);
888 expect_payment_claimed!(nodes[1], payment_hash, 1_000_000);
889 nodes[1].node.get_and_clear_pending_msg_events();
890 check_added_monitors!(nodes[1], 1);
891 let remote_txn = get_local_commitment_txn!(nodes[1], channel.2);
892 assert_eq!(remote_txn.len(), 2);
894 // Temp-fail the block connection which will hold the channel-closed event
895 chanmon_cfgs[0].persister.chain_sync_monitor_persistences.lock().unwrap().clear();
896 chanmon_cfgs[0].persister.set_update_ret(Err(ChannelMonitorUpdateErr::TemporaryFailure));
898 // Connect B's commitment transaction, but only to the ChainMonitor/ChannelMonitor. The
899 // channel is now closed, but the ChannelManager doesn't know that yet.
900 let new_header = BlockHeader {
901 version: 2, time: 0, bits: 0, nonce: 0,
902 prev_blockhash: nodes[0].best_block_info().0,
903 merkle_root: Default::default() };
904 nodes[0].chain_monitor.chain_monitor.transactions_confirmed(&new_header,
905 &[(0, &remote_txn[0]), (1, &remote_txn[1])], nodes[0].best_block_info().1 + 1);
906 assert!(nodes[0].chain_monitor.release_pending_monitor_events().is_empty());
907 nodes[0].chain_monitor.chain_monitor.best_block_updated(&new_header, nodes[0].best_block_info().1 + 1);
908 assert!(nodes[0].chain_monitor.release_pending_monitor_events().is_empty());
910 // If the ChannelManager tries to update the channel, however, the ChainMonitor will pass
911 // the update through to the ChannelMonitor which will refuse it (as the channel is closed).
912 chanmon_cfgs[0].persister.set_update_ret(Ok(()));
913 unwrap_send_err!(nodes[0].node.send_payment(&route, second_payment_hash, &Some(second_payment_secret)),
914 true, APIError::ChannelUnavailable { ref err },
915 assert!(err.contains("ChannelMonitor storage failure")));
916 check_added_monitors!(nodes[0], 2); // After the failure we generate a close-channel monitor update
917 check_closed_broadcast!(nodes[0], true);
918 check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: "ChannelMonitor storage failure".to_string() });
920 // However, as the ChainMonitor is still waiting for the original persistence to complete,
921 // it won't yet release the MonitorEvents.
922 assert!(nodes[0].chain_monitor.release_pending_monitor_events().is_empty());
925 // After three blocks, pending MonitorEvents should be released either way.
926 let latest_header = BlockHeader {
927 version: 2, time: 0, bits: 0, nonce: 0,
928 prev_blockhash: nodes[0].best_block_info().0,
929 merkle_root: Default::default() };
930 nodes[0].chain_monitor.chain_monitor.best_block_updated(&latest_header, nodes[0].best_block_info().1 + LATENCY_GRACE_PERIOD_BLOCKS);
932 let persistences = chanmon_cfgs[0].persister.chain_sync_monitor_persistences.lock().unwrap().clone();
933 for (funding_outpoint, update_ids) in persistences {
934 for update_id in update_ids {
935 nodes[0].chain_monitor.chain_monitor.channel_monitor_updated(funding_outpoint, update_id).unwrap();
940 expect_payment_sent!(nodes[0], payment_preimage);
944 fn chainsync_pauses_events() {
945 do_chainsync_pauses_events(false);
946 do_chainsync_pauses_events(true);
950 fn update_during_chainsync_fails_channel() {
951 let chanmon_cfgs = create_chanmon_cfgs(2);
952 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
953 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
954 let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
955 create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
957 chanmon_cfgs[0].persister.chain_sync_monitor_persistences.lock().unwrap().clear();
958 chanmon_cfgs[0].persister.set_update_ret(Err(ChannelMonitorUpdateErr::PermanentFailure));
960 connect_blocks(&nodes[0], 1);
961 // Before processing events, the ChannelManager will still think the Channel is open and
962 // there won't be any ChannelMonitorUpdates
963 assert_eq!(nodes[0].node.list_channels().len(), 1);
964 check_added_monitors!(nodes[0], 0);
965 // ... however once we process events, the channel will close, creating a channel-closed
966 // ChannelMonitorUpdate.
967 check_closed_broadcast!(nodes[0], true);
968 check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: "Failed to persist ChannelMonitor update during chain sync".to_string() });
969 check_added_monitors!(nodes[0], 1);