// This file is Copyright its original authors, visible in version control
// history.
//
// This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
// or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
// You may not use this file except in accordance with one or both of these
// licenses.
//! Logic to connect off-chain channel management with on-chain transaction monitoring.
//!
//! [`ChainMonitor`] is an implementation of [`chain::Watch`] used both to process blocks and to
//! update [`ChannelMonitor`]s accordingly. If any on-chain events need further processing, it will
//! make those available as [`MonitorEvent`]s to be consumed.
//!
//! [`ChainMonitor`] is parameterized by an optional chain source, which must implement the
//! [`chain::Filter`] trait. This provides a mechanism to signal new relevant outputs back to light
//! clients, such that transactions spending those outputs are included in block data.
//!
//! [`ChainMonitor`] may be used directly to monitor channels locally or as a part of a distributed
//! setup to monitor channels remotely. In the latter case, a custom [`chain::Watch`] implementation
//! would be responsible for routing each update to a remote server and for retrieving monitor
//! events. The remote server would make use of [`ChainMonitor`] for block processing and for
//! servicing [`ChannelMonitor`] updates from the client.
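//!
//! As a rough usage sketch (not compiled; `chain_source`, `broadcaster`, `logger`,
//! `fee_estimator`, `persister`, `block`, and `height` are assumed to be user-provided values
//! satisfying the relevant trait bounds):
//!
//! ```ignore
//! use lightning::chain::Listen;
//! use lightning::chain::chainmonitor::ChainMonitor;
//!
//! let chain_monitor = ChainMonitor::new(
//! 	Some(chain_source), broadcaster, logger, fee_estimator, persister);
//! // Feed connected blocks in order; each contained ChannelMonitor reacts to any
//! // relevant transactions and surfaces MonitorEvents for consumption.
//! chain_monitor.block_connected(&block, height);
//! ```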
use bitcoin::blockdata::block::{Block, BlockHeader};
use bitcoin::hash_types::Txid;

use chain;
use chain::{ChannelMonitorUpdateErr, Filter, WatchedOutput};
use chain::chaininterface::{BroadcasterInterface, FeeEstimator};
use chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, Balance, MonitorEvent, TransactionOutputs};
use chain::transaction::{OutPoint, TransactionData};
use chain::keysinterface::Sign;
use util::logger::Logger;
use util::errors::APIError;
use util::events;
use util::events::EventHandler;
use ln::channelmanager::ChannelDetails;

use prelude::*;
use sync::{RwLock, RwLockReadGuard, Mutex, MutexGuard};
use core::ops::Deref;

/// The origin of a specific update, kept internal to make `MonitorUpdateId` entirely opaque.
#[derive(Clone, Copy, Hash, PartialEq, Eq)]
enum UpdateOrigin {
	/// An update generated by the `ChannelManager` (via our `chain::Watch` implementation). This
	/// corresponds to an actual [`ChannelMonitorUpdate::update_id`] field and
	/// [`ChannelMonitor::get_latest_update_id`].
	OffChain(u64),
}

/// An opaque identifier describing a specific [`Persist`] method call.
#[derive(Clone, Copy, Hash, PartialEq, Eq)]
pub struct MonitorUpdateId {
	contents: UpdateOrigin,
}

impl MonitorUpdateId {
	pub(crate) fn from_monitor_update(update: &ChannelMonitorUpdate) -> Self {
		Self { contents: UpdateOrigin::OffChain(update.update_id) }
	}
	pub(crate) fn from_new_monitor<ChannelSigner: Sign>(monitor: &ChannelMonitor<ChannelSigner>) -> Self {
		Self { contents: UpdateOrigin::OffChain(monitor.get_latest_update_id()) }
	}
}

/// `Persist` defines behavior for persisting channel monitors: this could mean
/// writing once to disk, and/or uploading to one or more backup services.
///
/// Each method can return three possible values:
///  * If persistence (including any relevant `fsync()` calls) happens immediately, the
///    implementation should return `Ok(())`, indicating normal channel operation should continue.
///  * If persistence happens asynchronously, implementations should first ensure the
///    [`ChannelMonitor`] or [`ChannelMonitorUpdate`] are written durably to disk, and then return
///    `Err(ChannelMonitorUpdateErr::TemporaryFailure)` while the update continues in the
///    background. Once the update completes, [`ChainMonitor::channel_monitor_updated`] should be
///    called with the corresponding [`MonitorUpdateId`].
///
///    Note that unlike the direct [`chain::Watch`] interface,
///    [`ChainMonitor::channel_monitor_updated`] must be called once for *each* update which occurs.
///
///  * If persistence fails for some reason, implementations should return
///    `Err(ChannelMonitorUpdateErr::PermanentFailure)`, in which case the channel will likely be
///    closed without broadcasting the latest state. See
///    [`ChannelMonitorUpdateErr::PermanentFailure`] for more details.
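///
/// A minimal synchronous sketch (not compiled; `MemoryPersister` and its in-memory store are
/// illustrative assumptions, not LDK APIs):
///
/// ```ignore
/// struct MemoryPersister {
/// 	store: std::sync::Mutex<std::collections::HashMap<OutPoint, Vec<u8>>>,
/// }
///
/// impl<ChannelSigner: Sign> Persist<ChannelSigner> for MemoryPersister {
/// 	fn persist_new_channel(&self, channel_id: OutPoint, data: &ChannelMonitor<ChannelSigner>,
/// 		_update_id: MonitorUpdateId) -> Result<(), ChannelMonitorUpdateErr>
/// 	{
/// 		// Serialize the full monitor (`Writeable::encode` writes it to a `Vec<u8>`) and
/// 		// store it; since storage completes before returning, report success.
/// 		self.store.lock().unwrap().insert(channel_id, data.encode());
/// 		Ok(())
/// 	}
///
/// 	fn update_persisted_channel(&self, channel_id: OutPoint, _update: &ChannelMonitorUpdate,
/// 		data: &ChannelMonitor<ChannelSigner>, update_id: MonitorUpdateId)
/// 		-> Result<(), ChannelMonitorUpdateErr>
/// 	{
/// 		// Simply re-persist the full, already-updated monitor on every update.
/// 		self.persist_new_channel(channel_id, data, update_id)
/// 	}
/// }
/// ```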
pub trait Persist<ChannelSigner: Sign> {
	/// Persist a new channel's data. The data can be stored any way you want, but the identifier
	/// provided by LDK is the channel's outpoint (and it is up to you to maintain a correct
	/// mapping between the outpoint and the stored channel data). Note that you **must** persist
	/// every new monitor to disk.
	///
	/// The `update_id` is used to identify this call to [`ChainMonitor::channel_monitor_updated`],
	/// if you return [`ChannelMonitorUpdateErr::TemporaryFailure`].
	///
	/// See [`Writeable::write`] on [`ChannelMonitor`] for writing out a `ChannelMonitor`
	/// and [`ChannelMonitorUpdateErr`] for requirements when returning errors.
	///
	/// [`Writeable::write`]: crate::util::ser::Writeable::write
	fn persist_new_channel(&self, channel_id: OutPoint, data: &ChannelMonitor<ChannelSigner>, update_id: MonitorUpdateId) -> Result<(), ChannelMonitorUpdateErr>;

	/// Update one channel's data. The provided [`ChannelMonitor`] has already applied the given
	/// update.
	///
	/// Note that on every update, you **must** persist either the [`ChannelMonitorUpdate`] or the
	/// updated monitor itself to disk/backups. See the [`Persist`] trait documentation for more
	/// details.
	///
	/// If an implementer chooses to persist the updates only, they need to make
	/// sure that all the updates are applied to the `ChannelMonitors` *before*
	/// the set of channel monitors is given to the `ChannelManager`
	/// deserialization routine, as sketched below. See [`ChannelMonitor::update_monitor`] for
	/// applying a monitor update to a monitor. If full `ChannelMonitors` are
	/// persisted, then there is no need to persist individual updates.
	///
	/// Note that there could be a performance tradeoff between persisting complete
	/// channel monitors on every update vs. persisting only updates and applying
	/// them in batches. The size of each monitor grows `O(number of state updates)`
	/// whereas updates are small and `O(1)`.
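	///
	/// A rough sketch of update-only persistence being replayed at startup (not compiled;
	/// `read_monitor_from_storage` and `read_updates_from_storage` are illustrative assumptions):
	///
	/// ```ignore
	/// // On startup, load the last full monitor plus any updates persisted after it, and
	/// // apply the updates in order before handing the monitor to the ChannelManager.
	/// let mut monitor = read_monitor_from_storage(funding_txo);
	/// for update in read_updates_from_storage(funding_txo) {
	/// 	monitor.update_monitor(&update, &broadcaster, &fee_estimator, &logger)
	/// 		.expect("stored updates should re-apply cleanly");
	/// }
	/// ```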
	///
	/// The `update_id` is used to identify this call to [`ChainMonitor::channel_monitor_updated`],
	/// if you return [`ChannelMonitorUpdateErr::TemporaryFailure`].
	///
	/// See [`Writeable::write`] on [`ChannelMonitor`] for writing out a `ChannelMonitor`,
	/// [`Writeable::write`] on [`ChannelMonitorUpdate`] for writing out an update, and
	/// [`ChannelMonitorUpdateErr`] for requirements when returning errors.
	///
	/// [`Writeable::write`]: crate::util::ser::Writeable::write
	fn update_persisted_channel(&self, channel_id: OutPoint, update: &ChannelMonitorUpdate, data: &ChannelMonitor<ChannelSigner>, update_id: MonitorUpdateId) -> Result<(), ChannelMonitorUpdateErr>;
}

struct MonitorHolder<ChannelSigner: Sign> {
	monitor: ChannelMonitor<ChannelSigner>,
	/// The full set of pending monitor updates for this Channel.
	///
	/// Note that this lock must be held during updates to prevent a race where we call
	/// update_persisted_channel, the user returns a TemporaryFailure, and then calls
	/// channel_monitor_updated immediately, racing our insertion of the pending update into the
	/// set of pending updates.
	pending_monitor_updates: Mutex<Vec<MonitorUpdateId>>,
}

impl<ChannelSigner: Sign> MonitorHolder<ChannelSigner> {
	fn has_pending_offchain_updates(&self, pending_monitor_updates_lock: &MutexGuard<Vec<MonitorUpdateId>>) -> bool {
		pending_monitor_updates_lock.iter().any(|update_id|
			if let UpdateOrigin::OffChain(_) = update_id.contents { true } else { false })
	}
}

/// A read-only reference to a current ChannelMonitor.
///
/// Note that this holds a mutex in [`ChainMonitor`] and may block other events until it is
/// released.
pub struct LockedChannelMonitor<'a, ChannelSigner: Sign> {
	lock: RwLockReadGuard<'a, HashMap<OutPoint, MonitorHolder<ChannelSigner>>>,
	funding_txo: OutPoint,
}

impl<ChannelSigner: Sign> Deref for LockedChannelMonitor<'_, ChannelSigner> {
	type Target = ChannelMonitor<ChannelSigner>;
	fn deref(&self) -> &ChannelMonitor<ChannelSigner> {
		&self.lock.get(&self.funding_txo).expect("Checked at construction").monitor
	}
}

/// An implementation of [`chain::Watch`] for monitoring channels.
///
/// Connected and disconnected blocks must be provided to `ChainMonitor` as documented by
/// [`chain::Watch`]. May be used in conjunction with [`ChannelManager`] to monitor channels locally
/// or used independently to monitor channels remotely. See the [module-level documentation] for
/// details.
///
/// [`ChannelManager`]: crate::ln::channelmanager::ChannelManager
/// [module-level documentation]: crate::chain::chainmonitor
pub struct ChainMonitor<ChannelSigner: Sign, C: Deref, T: Deref, F: Deref, L: Deref, P: Deref>
	where C::Target: chain::Filter,
	      T::Target: BroadcasterInterface,
	      F::Target: FeeEstimator,
	      L::Target: Logger,
	      P::Target: Persist<ChannelSigner>,
{
	monitors: RwLock<HashMap<OutPoint, MonitorHolder<ChannelSigner>>>,
	chain_source: Option<C>,
	broadcaster: T,
	logger: L,
	fee_estimator: F,
	persister: P,
	/// [`MonitorEvent`]s (e.g. persistence completions) pending return to the `ChannelManager`.
	pending_monitor_events: Mutex<Vec<MonitorEvent>>,
}

impl<ChannelSigner: Sign, C: Deref, T: Deref, F: Deref, L: Deref, P: Deref> ChainMonitor<ChannelSigner, C, T, F, L, P>
	where C::Target: chain::Filter,
	      T::Target: BroadcasterInterface,
	      F::Target: FeeEstimator,
	      L::Target: Logger,
	      P::Target: Persist<ChannelSigner>,
{
	/// Dispatches to per-channel monitors, which are responsible for updating their on-chain view
	/// of a channel and reacting accordingly based on transactions in the given chain data. See
	/// [`ChannelMonitor::block_connected`] for details. Any HTLCs that were resolved on chain will
	/// be returned by [`chain::Watch::release_pending_monitor_events`].
	///
	/// Calls back to [`chain::Filter`] if any monitor indicated new outputs to watch. Subsequent
	/// calls must not exclude any transactions matching the new outputs nor any in-block
	/// descendants of such transactions. It is not necessary to re-fetch the block to obtain
	/// updated `txdata`.
	fn process_chain_data<FN>(&self, header: &BlockHeader, txdata: &TransactionData, process: FN)
	where
		FN: Fn(&ChannelMonitor<ChannelSigner>, &TransactionData) -> Vec<TransactionOutputs>
	{
		let mut dependent_txdata = Vec::new();
		let monitor_states = self.monitors.read().unwrap();
		for monitor_state in monitor_states.values() {
			let mut txn_outputs = process(&monitor_state.monitor, txdata);

			// Register any new outputs with the chain source for filtering, storing any dependent
			// transactions from within the block that previously had not been included in txdata.
			if let Some(ref chain_source) = self.chain_source {
				let block_hash = header.block_hash();
				for (txid, mut outputs) in txn_outputs.drain(..) {
					for (idx, output) in outputs.drain(..) {
						// Register any new outputs with the chain source for filtering and recurse
						// if it indicates that there are dependent transactions within the block
						// that had not been previously included in txdata.
						let output = WatchedOutput {
							block_hash: Some(block_hash),
							outpoint: OutPoint { txid, index: idx as u16 },
							script_pubkey: output.script_pubkey,
						};
						if let Some(tx) = chain_source.register_output(output) {
							dependent_txdata.push(tx);
						}
					}
				}
			}
		}

		// Recursively call for any dependent transactions that were identified by the chain source.
		if !dependent_txdata.is_empty() {
			dependent_txdata.sort_unstable_by_key(|(index, _tx)| *index);
			dependent_txdata.dedup_by_key(|(index, _tx)| *index);
			let txdata: Vec<_> = dependent_txdata.iter().map(|(index, tx)| (*index, tx)).collect();
			self.process_chain_data(header, &txdata, process);
		}
	}

	/// Creates a new `ChainMonitor` used to watch on-chain activity pertaining to channels.
	///
	/// When an optional chain source implementing [`chain::Filter`] is provided, the chain monitor
	/// will call back to it indicating transactions and outputs of interest. This allows clients to
	/// pre-filter blocks or only fetch blocks matching a compact filter. Otherwise, clients may
	/// always need to fetch full blocks absent another means for determining which blocks contain
	/// transactions relevant to the watched channels.
	pub fn new(chain_source: Option<C>, broadcaster: T, logger: L, feeest: F, persister: P) -> Self {
		Self {
			monitors: RwLock::new(HashMap::new()),
			chain_source,
			broadcaster,
			logger,
			fee_estimator: feeest,
			persister,
			pending_monitor_events: Mutex::new(Vec::new()),
		}
	}

	/// Gets the balances in the contained [`ChannelMonitor`]s which are claimable on-chain or
	/// claims which are awaiting confirmation.
	///
	/// Includes the balances from each [`ChannelMonitor`] *except* those included in
	/// `ignored_channels`, allowing you to filter out balances from channels which are still open
	/// (and whose balance should likely be pulled from the [`ChannelDetails`]).
	///
	/// See [`ChannelMonitor::get_claimable_balances`] for more details on the exact criteria for
	/// inclusion in the return value.
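	///
	/// A brief usage sketch (not compiled; `chain_monitor` and `channel_manager` are assumed to
	/// be existing values):
	///
	/// ```ignore
	/// // Ignore channels which are still open; their balances come from ChannelDetails instead.
	/// let open_channels = channel_manager.list_channels();
	/// let ignored: Vec<&ChannelDetails> = open_channels.iter().collect();
	/// let balances = chain_monitor.get_claimable_balances(&ignored);
	/// ```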
	pub fn get_claimable_balances(&self, ignored_channels: &[&ChannelDetails]) -> Vec<Balance> {
		let mut ret = Vec::new();
		let monitor_states = self.monitors.read().unwrap();
		for (_, monitor_state) in monitor_states.iter().filter(|(funding_outpoint, _)| {
			for chan in ignored_channels {
				if chan.funding_txo.as_ref() == Some(funding_outpoint) {
					return false;
				}
			}
			true
		}) {
			ret.append(&mut monitor_state.monitor.get_claimable_balances());
		}
		ret
	}

	/// Gets the [`LockedChannelMonitor`] for a given funding outpoint, returning an `Err` if no
	/// such [`ChannelMonitor`] is currently being monitored for.
	///
	/// Note that the result holds a mutex over our monitor set, and should not be held
	/// indefinitely.
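	///
	/// A brief usage sketch (not compiled; `chain_monitor` and `funding_txo` are assumed):
	///
	/// ```ignore
	/// // The returned guard derefs to the ChannelMonitor; drop it promptly.
	/// if let Ok(monitor) = chain_monitor.get_monitor(funding_txo) {
	/// 	let balances = monitor.get_claimable_balances();
	/// }
	/// ```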
	pub fn get_monitor(&self, funding_txo: OutPoint) -> Result<LockedChannelMonitor<'_, ChannelSigner>, ()> {
		let lock = self.monitors.read().unwrap();
		if lock.get(&funding_txo).is_some() {
			Ok(LockedChannelMonitor { lock, funding_txo })
		} else {
			Err(())
		}
	}

	/// Lists the funding outpoint of each [`ChannelMonitor`] being monitored.
	///
	/// Note that [`ChannelMonitor`]s are not removed when a channel is closed as they are always
	/// monitoring for on-chain state resolutions.
	pub fn list_monitors(&self) -> Vec<OutPoint> {
		self.monitors.read().unwrap().keys().map(|outpoint| *outpoint).collect()
	}

	#[cfg(test)]
	pub fn remove_monitor(&self, funding_txo: &OutPoint) -> ChannelMonitor<ChannelSigner> {
		self.monitors.write().unwrap().remove(funding_txo).unwrap().monitor
	}

	/// Indicates the persistence of a [`ChannelMonitor`] has completed after
	/// [`ChannelMonitorUpdateErr::TemporaryFailure`] was returned from an update operation.
	///
	/// Thus, the anticipated use is, at a high level:
	///  1) This [`ChainMonitor`] calls [`Persist::update_persisted_channel`] which stores the
	///     update to disk and begins updating any remote (e.g. watchtower/backup) copies,
	///     returning [`ChannelMonitorUpdateErr::TemporaryFailure`],
	///  2) once all remote copies are updated, you call this function with the
	///     `completed_update_id` that completed, and once all pending updates have completed the
	///     channel will be re-enabled.
	// Note that we re-enable only after `UpdateOrigin::OffChain` updates complete, we don't
	// care about `UpdateOrigin::ChainSync` updates for the channel state being updated. We
	// only care about `UpdateOrigin::ChainSync` for returning `MonitorEvent`s.
	///
	/// Returns an [`APIError::APIMisuseError`] if `funding_txo` does not match any currently
	/// registered [`ChannelMonitor`]s.
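	///
	/// A rough sketch of the flow (not compiled; `MyPersister` and its `write_update_durably` and
	/// `spawn_remote_upload` helpers are illustrative assumptions):
	///
	/// ```ignore
	/// impl<ChannelSigner: Sign> Persist<ChannelSigner> for MyPersister {
	/// 	fn update_persisted_channel(&self, channel_id: OutPoint, update: &ChannelMonitorUpdate,
	/// 		data: &ChannelMonitor<ChannelSigner>, update_id: MonitorUpdateId)
	/// 		-> Result<(), ChannelMonitorUpdateErr>
	/// 	{
	/// 		// Durably write the update locally first, then finish the remote (e.g.
	/// 		// watchtower/backup) copies in the background while the channel is paused.
	/// 		self.write_update_durably(channel_id, update);
	/// 		self.spawn_remote_upload(channel_id, data, update_id);
	/// 		Err(ChannelMonitorUpdateErr::TemporaryFailure)
	/// 	}
	/// }
	///
	/// // Later, once the background upload completes, unblock the channel:
	/// chain_monitor.channel_monitor_updated(channel_id, update_id)?;
	/// ```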
	pub fn channel_monitor_updated(&self, funding_txo: OutPoint, completed_update_id: MonitorUpdateId) -> Result<(), APIError> {
		let monitors = self.monitors.read().unwrap();
		let monitor_data = if let Some(mon) = monitors.get(&funding_txo) { mon } else {
			return Err(APIError::APIMisuseError { err: format!("No ChannelMonitor matching funding outpoint {:?} found", funding_txo) });
		};
		let mut pending_monitor_updates = monitor_data.pending_monitor_updates.lock().unwrap();
		pending_monitor_updates.retain(|update_id| *update_id != completed_update_id);

		match completed_update_id {
			MonitorUpdateId { .. } => {
				// Note that we only check for `UpdateOrigin::OffChain` failures here - if
				// we're being told that a `UpdateOrigin::OffChain` monitor update completed,
				// we only care about ensuring we don't tell the `ChannelManager` to restore
				// the channel to normal operation until all `UpdateOrigin::OffChain` updates
				// complete.
				// If there's some `UpdateOrigin::ChainSync` update still pending that's okay
				// - we can still update our channel state, just as long as we don't return
				// `MonitorEvent`s from the monitor back to the `ChannelManager` until they
				// complete.
				let monitor_is_pending_updates = monitor_data.has_pending_offchain_updates(&pending_monitor_updates);
				if monitor_is_pending_updates {
					// If there are still monitor updates pending, we cannot yet construct an
					// UpdateCompleted event.
					return Ok(());
				}
				self.pending_monitor_events.lock().unwrap().push(MonitorEvent::UpdateCompleted {
					funding_txo,
					monitor_update_id: monitor_data.monitor.get_latest_update_id(),
				});
			},
		}
		Ok(())
	}

	/// This wrapper avoids having to update some of our tests for now as they assume the direct
	/// chain::Watch API wherein we mark a monitor fully-updated by just calling
	/// channel_monitor_updated once with the highest ID.
	#[cfg(any(test, feature = "fuzztarget"))]
	pub fn force_channel_monitor_updated(&self, funding_txo: OutPoint, monitor_update_id: u64) {
		self.pending_monitor_events.lock().unwrap().push(MonitorEvent::UpdateCompleted {
			funding_txo,
			monitor_update_id,
		});
	}

	#[cfg(any(test, feature = "fuzztarget", feature = "_test_utils"))]
	pub fn get_and_clear_pending_events(&self) -> Vec<events::Event> {
		use util::events::EventsProvider;
		let events = core::cell::RefCell::new(Vec::new());
		let event_handler = |event: &events::Event| events.borrow_mut().push(event.clone());
		self.process_pending_events(&event_handler);
		events.into_inner()
	}
}

impl<ChannelSigner: Sign, C: Deref, T: Deref, F: Deref, L: Deref, P: Deref>
chain::Listen for ChainMonitor<ChannelSigner, C, T, F, L, P>
where
	C::Target: chain::Filter,
	T::Target: BroadcasterInterface,
	F::Target: FeeEstimator,
	L::Target: Logger,
	P::Target: Persist<ChannelSigner>,
{
	fn block_connected(&self, block: &Block, height: u32) {
		let header = &block.header;
		let txdata: Vec<_> = block.txdata.iter().enumerate().collect();
		log_debug!(self.logger, "New best block {} at height {} provided via block_connected", header.block_hash(), height);
		self.process_chain_data(header, &txdata, |monitor, txdata| {
			monitor.block_connected(
				header, txdata, height, &*self.broadcaster, &*self.fee_estimator, &*self.logger)
		});
	}

	fn block_disconnected(&self, header: &BlockHeader, height: u32) {
		let monitor_states = self.monitors.read().unwrap();
		log_debug!(self.logger, "Latest block {} at height {} removed via block_disconnected", header.block_hash(), height);
		for monitor_state in monitor_states.values() {
			monitor_state.monitor.block_disconnected(
				header, height, &*self.broadcaster, &*self.fee_estimator, &*self.logger);
		}
	}
}

impl<ChannelSigner: Sign, C: Deref, T: Deref, F: Deref, L: Deref, P: Deref>
chain::Confirm for ChainMonitor<ChannelSigner, C, T, F, L, P>
where
	C::Target: chain::Filter,
	T::Target: BroadcasterInterface,
	F::Target: FeeEstimator,
	L::Target: Logger,
	P::Target: Persist<ChannelSigner>,
{
	fn transactions_confirmed(&self, header: &BlockHeader, txdata: &TransactionData, height: u32) {
		log_debug!(self.logger, "{} provided transactions confirmed at height {} in block {}", txdata.len(), height, header.block_hash());
		self.process_chain_data(header, txdata, |monitor, txdata| {
			monitor.transactions_confirmed(
				header, txdata, height, &*self.broadcaster, &*self.fee_estimator, &*self.logger)
		});
	}

	fn transaction_unconfirmed(&self, txid: &Txid) {
		log_debug!(self.logger, "Transaction {} reorganized out of chain", txid);
		let monitor_states = self.monitors.read().unwrap();
		for monitor_state in monitor_states.values() {
			monitor_state.monitor.transaction_unconfirmed(txid, &*self.broadcaster, &*self.fee_estimator, &*self.logger);
		}
	}

	fn best_block_updated(&self, header: &BlockHeader, height: u32) {
		log_debug!(self.logger, "New best block {} at height {} provided via best_block_updated", header.block_hash(), height);
		self.process_chain_data(header, &[], |monitor, txdata| {
			// While in practice there shouldn't be any recursive calls when given empty txdata,
			// it's still possible if a chain::Filter implementation returns a transaction.
			debug_assert!(txdata.is_empty());
			monitor.best_block_updated(
				header, height, &*self.broadcaster, &*self.fee_estimator, &*self.logger)
		});
	}

	fn get_relevant_txids(&self) -> Vec<Txid> {
		let mut txids = Vec::new();
		let monitor_states = self.monitors.read().unwrap();
		for monitor_state in monitor_states.values() {
			txids.append(&mut monitor_state.monitor.get_relevant_txids());
		}

		txids.sort_unstable();
		txids.dedup();
		txids
	}
}

impl<ChannelSigner: Sign, C: Deref, T: Deref, F: Deref, L: Deref, P: Deref>
chain::Watch<ChannelSigner> for ChainMonitor<ChannelSigner, C, T, F, L, P>
	where C::Target: chain::Filter,
	      T::Target: BroadcasterInterface,
	      F::Target: FeeEstimator,
	      L::Target: Logger,
	      P::Target: Persist<ChannelSigner>,
{
	/// Adds the monitor that watches the channel referred to by the given outpoint.
	///
	/// Calls back to [`chain::Filter`] with the funding transaction and outputs to watch.
	///
	/// Note that we persist the given `ChannelMonitor` while holding the `ChainMonitor`
	/// monitors lock.
	fn watch_channel(&self, funding_outpoint: OutPoint, monitor: ChannelMonitor<ChannelSigner>) -> Result<(), ChannelMonitorUpdateErr> {
		let mut monitors = self.monitors.write().unwrap();
		let entry = match monitors.entry(funding_outpoint) {
			hash_map::Entry::Occupied(_) => {
				log_error!(self.logger, "Failed to add new channel data: channel monitor for given outpoint is already present");
				return Err(ChannelMonitorUpdateErr::PermanentFailure)},
			hash_map::Entry::Vacant(e) => e,
		};
		let update_id = MonitorUpdateId::from_new_monitor(&monitor);
		let mut pending_monitor_updates = Vec::new();
		let persist_res = self.persister.persist_new_channel(funding_outpoint, &monitor, update_id);
		if persist_res.is_err() {
			log_error!(self.logger, "Failed to persist new channel data: {:?}", persist_res);
		}
		if persist_res == Err(ChannelMonitorUpdateErr::PermanentFailure) {
			return persist_res;
		} else if persist_res.is_err() {
			pending_monitor_updates.push(update_id);
		}

		let funding_txo = monitor.get_funding_txo();
		log_trace!(self.logger, "Got new Channel Monitor for channel {}", log_bytes!(funding_txo.0.to_channel_id()[..]));

		if let Some(ref chain_source) = self.chain_source {
			monitor.load_outputs_to_watch(chain_source);
		}

		entry.insert(MonitorHolder { monitor, pending_monitor_updates: Mutex::new(pending_monitor_updates) });
		persist_res
	}

	/// Note that we persist the given `ChannelMonitor` update while holding the
	/// `ChainMonitor` monitors lock.
	fn update_channel(&self, funding_txo: OutPoint, update: ChannelMonitorUpdate) -> Result<(), ChannelMonitorUpdateErr> {
		// Update the monitor that watches the channel referred to by the given outpoint.
		let monitors = self.monitors.read().unwrap();
		match monitors.get(&funding_txo) {
			None => {
				log_error!(self.logger, "Failed to update channel monitor: no such monitor registered");

				// We should never ever trigger this from within ChannelManager. Technically a
				// user could use this object with some proxying in between which makes this
				// possible, but in tests and fuzzing, this should be a panic.
				#[cfg(any(test, feature = "fuzztarget"))]
				panic!("ChannelManager generated a channel update for a channel that was not yet registered!");
				#[cfg(not(any(test, feature = "fuzztarget")))]
				Err(ChannelMonitorUpdateErr::PermanentFailure)
			},
			Some(monitor_state) => {
				let monitor = &monitor_state.monitor;
				log_trace!(self.logger, "Updating Channel Monitor for channel {}", log_funding_info!(monitor));
				let update_res = monitor.update_monitor(&update, &self.broadcaster, &self.fee_estimator, &self.logger);
				if let Err(e) = &update_res {
					log_error!(self.logger, "Failed to update channel monitor: {:?}", e);
				}
				// Even if updating the monitor returns an error, the monitor's state will
				// still be changed. So, persist the updated monitor despite the error.
				let update_id = MonitorUpdateId::from_monitor_update(&update);
				let mut pending_monitor_updates = monitor_state.pending_monitor_updates.lock().unwrap();
				let persist_res = self.persister.update_persisted_channel(funding_txo, &update, monitor, update_id);
				if let Err(e) = persist_res {
					if e == ChannelMonitorUpdateErr::TemporaryFailure {
						pending_monitor_updates.push(update_id);
					}
					log_error!(self.logger, "Failed to persist channel monitor update: {:?}", e);
				}
				if update_res.is_err() {
					Err(ChannelMonitorUpdateErr::PermanentFailure)
				} else {
					persist_res
				}
			}
		}
	}

	fn release_pending_monitor_events(&self) -> Vec<MonitorEvent> {
		let mut pending_monitor_events = self.pending_monitor_events.lock().unwrap().split_off(0);
		for monitor_state in self.monitors.read().unwrap().values() {
			pending_monitor_events.append(&mut monitor_state.monitor.get_and_clear_pending_monitor_events());
		}
		pending_monitor_events
	}
}

impl<ChannelSigner: Sign, C: Deref, T: Deref, F: Deref, L: Deref, P: Deref> events::EventsProvider for ChainMonitor<ChannelSigner, C, T, F, L, P>
	where C::Target: chain::Filter,
	      T::Target: BroadcasterInterface,
	      F::Target: FeeEstimator,
	      L::Target: Logger,
	      P::Target: Persist<ChannelSigner>,
{
	/// Processes [`SpendableOutputs`] events produced from each [`ChannelMonitor`] upon maturity.
	///
	/// An [`EventHandler`] may safely call back to the provider, though this shouldn't be needed in
	/// order to handle these events.
	///
	/// [`SpendableOutputs`]: events::Event::SpendableOutputs
	fn process_pending_events<H: Deref>(&self, handler: H) where H::Target: EventHandler {
		let mut pending_events = Vec::new();
		for monitor_state in self.monitors.read().unwrap().values() {
			pending_events.append(&mut monitor_state.monitor.get_and_clear_pending_events());
		}
		for event in pending_events.drain(..) {
			handler.handle_event(&event);
		}
	}
}

#[cfg(test)]
mod tests {
	use ::{check_added_monitors, get_local_commitment_txn};
	use ln::features::InitFeatures;
	use ln::functional_test_utils::*;
	use util::events::MessageSendEventsProvider;
	use util::test_utils::{OnRegisterOutput, TxOutReference};

	/// Tests that in-block dependent transactions are processed by `block_connected` when not
	/// included in `txdata` but returned by [`chain::Filter::register_output`]. For instance,
	/// a (non-anchor) commitment transaction's HTLC output may be spent in the same block as the
	/// commitment transaction itself. An Electrum client may filter the commitment transaction but
	/// needs to return the HTLC transaction so it can be processed.
	#[test]
	fn connect_block_checks_dependent_transactions() {
		let chanmon_cfgs = create_chanmon_cfgs(2);
		let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
		let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
		let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
		let channel = create_announced_chan_between_nodes(
			&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());

		// Send a payment, saving nodes[0]'s revoked commitment and HTLC-Timeout transactions.
		let (commitment_tx, htlc_tx) = {
			let payment_preimage = route_payment(&nodes[0], &vec!(&nodes[1])[..], 5_000_000).0;
			let mut txn = get_local_commitment_txn!(nodes[0], channel.2);
			claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage);

			assert_eq!(txn.len(), 2);
			(txn.remove(0), txn.remove(0))
		};

		// Set expectations on nodes[1]'s chain source to return dependent transactions.
		let htlc_output = TxOutReference(commitment_tx.clone(), 0);
		let to_local_output = TxOutReference(commitment_tx.clone(), 1);
		let htlc_timeout_output = TxOutReference(htlc_tx.clone(), 0);
		nodes[1].chain_source
			.expect(OnRegisterOutput { with: htlc_output, returns: Some((1, htlc_tx)) })
			.expect(OnRegisterOutput { with: to_local_output, returns: None })
			.expect(OnRegisterOutput { with: htlc_timeout_output, returns: None });

		// Notify nodes[1] that nodes[0]'s revoked commitment transaction was mined. The chain
		// source should return the dependent HTLC transaction when the HTLC output is registered.
		mine_transaction(&nodes[1], &commitment_tx);

		// Clean up so uninteresting assertions don't fail.
		check_added_monitors!(nodes[1], 1);
		nodes[1].node.get_and_clear_pending_msg_events();
		nodes[1].node.get_and_clear_pending_events();
	}
}