X-Git-Url: http://git.bitcoin.ninja/index.cgi?a=blobdiff_plain;f=lightning%2Fsrc%2Fchain%2Fchainmonitor.rs;h=58bf5f8002d27cc7ab092fc8cc288a8e52377acf;hb=a9dcfaf952584ed835d733cb4688d5f96e86349d;hp=0b7e13f24b0218235cf690bb8652322d7f7db520;hpb=f07f4b90f8de76d594328e11e36d094cdb936097;p=rust-lightning diff --git a/lightning/src/chain/chainmonitor.rs b/lightning/src/chain/chainmonitor.rs index 0b7e13f2..58bf5f80 100644 --- a/lightning/src/chain/chainmonitor.rs +++ b/lightning/src/chain/chainmonitor.rs @@ -29,64 +29,23 @@ use bitcoin::hash_types::{Txid, BlockHash}; use crate::chain; use crate::chain::{ChannelMonitorUpdateStatus, Filter, WatchedOutput}; use crate::chain::chaininterface::{BroadcasterInterface, FeeEstimator}; -use crate::chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, Balance, MonitorEvent, TransactionOutputs, LATENCY_GRACE_PERIOD_BLOCKS}; +use crate::chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, Balance, MonitorEvent, TransactionOutputs, WithChannelMonitor}; use crate::chain::transaction::{OutPoint, TransactionData}; -use crate::sign::ecdsa::WriteableEcdsaChannelSigner; +use crate::ln::types::ChannelId; +use crate::sign::ecdsa::EcdsaChannelSigner; use crate::events; use crate::events::{Event, EventHandler}; -use crate::util::atomic_counter::AtomicCounter; -use crate::util::logger::Logger; +use crate::util::logger::{Logger, WithContext}; use crate::util::errors::APIError; use crate::util::wakers::{Future, Notifier}; -use crate::ln::channelmanager::ChannelDetails; +use crate::ln::channel_state::ChannelDetails; use crate::prelude::*; use crate::sync::{RwLock, RwLockReadGuard, Mutex, MutexGuard}; -use core::iter::FromIterator; use core::ops::Deref; use core::sync::atomic::{AtomicUsize, Ordering}; use bitcoin::secp256k1::PublicKey; -mod update_origin { - #[derive(Debug, Clone, Copy, Hash, PartialEq, Eq)] - /// A specific update's ID stored in a `MonitorUpdateId`, separated out to make the contents - /// entirely opaque. - pub(crate) enum UpdateOrigin { - /// An update that was generated by the `ChannelManager` (via our [`crate::chain::Watch`] - /// implementation). This corresponds to an actual [ChannelMonitorUpdate::update_id] field - /// and [ChannelMonitor::get_latest_update_id]. - /// - /// [ChannelMonitor::get_latest_update_id]: crate::chain::channelmonitor::ChannelMonitor::get_latest_update_id - /// [ChannelMonitorUpdate::update_id]: crate::chain::channelmonitor::ChannelMonitorUpdate::update_id - OffChain(u64), - /// An update that was generated during blockchain processing. The ID here is specific to the - /// generating [ChannelMonitor] and does *not* correspond to any on-disk IDs. - /// - /// [ChannelMonitor]: crate::chain::channelmonitor::ChannelMonitor - ChainSync(u64), - } -} - -#[cfg(any(feature = "_test_utils", test))] -pub(crate) use update_origin::UpdateOrigin; -#[cfg(not(any(feature = "_test_utils", test)))] -use update_origin::UpdateOrigin; - -/// An opaque identifier describing a specific [`Persist`] method call. 
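(Aside, not part of the patch: with `MonitorUpdateId` and `UpdateOrigin` removed, an asynchronous persister only needs to track the plain `u64` ids exposed by `ChannelMonitorUpdate::update_id` / `ChannelMonitor::get_latest_update_id`; chain-sync persists no longer need tracking at all. A minimal bookkeeping sketch follows — `InFlightMonitorWrites` and its methods are illustrative assumptions, not LDK APIs.)

use std::collections::{HashMap, HashSet};

use lightning::chain::transaction::OutPoint;

// Hypothetical bookkeeping for an async persister: the set of update ids handed to a
// background writer but not yet reported back via `ChainMonitor::channel_monitor_updated`.
#[derive(Default)]
struct InFlightMonitorWrites {
	pending: HashMap<OutPoint, HashSet<u64>>,
}

impl InFlightMonitorWrites {
	fn begin(&mut self, funding_txo: OutPoint, update_id: u64) {
		self.pending.entry(funding_txo).or_default().insert(update_id);
	}
	// Returns true once no writes remain in flight for this channel, i.e. after the
	// persister has completed every update id it was tracking for it.
	fn finish(&mut self, funding_txo: OutPoint, update_id: u64) -> bool {
		match self.pending.get_mut(&funding_txo) {
			Some(ids) => { ids.remove(&update_id); ids.is_empty() },
			None => true,
		}
	}
}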
-#[derive(Debug, Clone, Copy, Hash, PartialEq, Eq)] -pub struct MonitorUpdateId { - pub(crate) contents: UpdateOrigin, -} - -impl MonitorUpdateId { - pub(crate) fn from_monitor_update(update: &ChannelMonitorUpdate) -> Self { - Self { contents: UpdateOrigin::OffChain(update.update_id) } - } - pub(crate) fn from_new_monitor(monitor: &ChannelMonitor) -> Self { - Self { contents: UpdateOrigin::OffChain(monitor.get_latest_update_id()) } - } -} - /// `Persist` defines behavior for persisting channel monitors: this could mean /// writing once to disk, and/or uploading to one or more backup services. /// @@ -119,7 +78,7 @@ impl MonitorUpdateId { /// All calls should generally spawn a background task and immediately return /// [`ChannelMonitorUpdateStatus::InProgress`]. Once the update completes, /// [`ChainMonitor::channel_monitor_updated`] should be called with the corresponding -/// [`MonitorUpdateId`]. +/// [`ChannelMonitor::get_latest_update_id`] or [`ChannelMonitorUpdate::update_id`]. /// /// Note that unlike the direct [`chain::Watch`] interface, /// [`ChainMonitor::channel_monitor_updated`] must be called once for *each* update which occurs. @@ -142,7 +101,7 @@ impl MonitorUpdateId { /// /// [`TrustedCommitmentTransaction::revokeable_output_index`]: crate::ln::chan_utils::TrustedCommitmentTransaction::revokeable_output_index /// [`TrustedCommitmentTransaction::build_to_local_justice_tx`]: crate::ln::chan_utils::TrustedCommitmentTransaction::build_to_local_justice_tx -pub trait Persist { +pub trait Persist { /// Persist a new channel's data in response to a [`chain::Watch::watch_channel`] call. This is /// called by [`ChannelManager`] for new channels, or may be called directly, e.g. on startup. /// @@ -150,15 +109,16 @@ pub trait Persist { /// channel's outpoint (and it is up to you to maintain a correct mapping between the outpoint /// and the stored channel data). Note that you **must** persist every new monitor to disk. /// - /// The `update_id` is used to identify this call to [`ChainMonitor::channel_monitor_updated`], - /// if you return [`ChannelMonitorUpdateStatus::InProgress`]. + /// The [`ChannelMonitor::get_latest_update_id`] uniquely links this call to [`ChainMonitor::channel_monitor_updated`]. + /// For [`Persist::persist_new_channel`], it is only necessary to call [`ChainMonitor::channel_monitor_updated`] + /// when you return [`ChannelMonitorUpdateStatus::InProgress`]. /// /// See [`Writeable::write`] on [`ChannelMonitor`] for writing out a `ChannelMonitor` /// and [`ChannelMonitorUpdateStatus`] for requirements when returning errors. /// /// [`ChannelManager`]: crate::ln::channelmanager::ChannelManager /// [`Writeable::write`]: crate::util::ser::Writeable::write - fn persist_new_channel(&self, channel_id: OutPoint, data: &ChannelMonitor, update_id: MonitorUpdateId) -> ChannelMonitorUpdateStatus; + fn persist_new_channel(&self, channel_funding_outpoint: OutPoint, monitor: &ChannelMonitor) -> ChannelMonitorUpdateStatus; /// Update one channel's data. The provided [`ChannelMonitor`] has already applied the given /// update. @@ -185,18 +145,25 @@ pub trait Persist { /// them in batches. The size of each monitor grows `O(number of state updates)` /// whereas updates are small and `O(1)`. /// - /// The `update_id` is used to identify this call to [`ChainMonitor::channel_monitor_updated`], - /// if you return [`ChannelMonitorUpdateStatus::InProgress`]. 
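(Aside, not part of the patch: the docs above require persisted monitors to be keyed by the channel's funding outpoint, with the persister maintaining the outpoint-to-data mapping. One possible key derivation is sketched below; the `monitor_storage_key` helper is an assumption for illustration only.)

use lightning::chain::transaction::OutPoint;

// One way (an assumption, not mandated by LDK) to derive a stable storage key from the
// funding outpoint a monitor is keyed by, e.g. "7f9a..03e3_0".
fn monitor_storage_key(funding_txo: &OutPoint) -> String {
	format!("{}_{}", funding_txo.txid, funding_txo.index)
}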
+ /// The [`ChannelMonitorUpdate::update_id`] or [`ChannelMonitor::get_latest_update_id`] uniquely + /// links this call to [`ChainMonitor::channel_monitor_updated`]. + /// For [`Persist::update_persisted_channel`], it is only necessary to call [`ChainMonitor::channel_monitor_updated`] + /// when a [`ChannelMonitorUpdate`] is provided and when you return [`ChannelMonitorUpdateStatus::InProgress`]. /// /// See [`Writeable::write`] on [`ChannelMonitor`] for writing out a `ChannelMonitor`, /// [`Writeable::write`] on [`ChannelMonitorUpdate`] for writing out an update, and /// [`ChannelMonitorUpdateStatus`] for requirements when returning errors. /// /// [`Writeable::write`]: crate::util::ser::Writeable::write - fn update_persisted_channel(&self, channel_id: OutPoint, update: Option<&ChannelMonitorUpdate>, data: &ChannelMonitor, update_id: MonitorUpdateId) -> ChannelMonitorUpdateStatus; + fn update_persisted_channel(&self, channel_funding_outpoint: OutPoint, monitor_update: Option<&ChannelMonitorUpdate>, monitor: &ChannelMonitor) -> ChannelMonitorUpdateStatus; + /// Prevents the channel monitor from being loaded on startup. + /// + /// Archiving the data in a backup location (rather than deleting it fully) is useful for + /// hedging against data loss in case of unexpected failure. + fn archive_persisted_channel(&self, channel_funding_outpoint: OutPoint); } -struct MonitorHolder { +struct MonitorHolder { monitor: ChannelMonitor, /// The full set of pending monitor updates for this Channel. /// @@ -204,35 +171,12 @@ struct MonitorHolder { /// update_persisted_channel, the user returns a /// [`ChannelMonitorUpdateStatus::InProgress`], and then calls channel_monitor_updated /// immediately, racing our insertion of the pending update into the contained Vec. - /// - /// Beyond the synchronization of updates themselves, we cannot handle user events until after - /// any chain updates have been stored on disk. Thus, we scan this list when returning updates - /// to the ChannelManager, refusing to return any updates for a ChannelMonitor which is still - /// being persisted fully to disk after a chain update. - /// - /// This avoids the possibility of handling, e.g. an on-chain claim, generating a claim monitor - /// event, resulting in the relevant ChannelManager generating a PaymentSent event and dropping - /// the pending payment entry, and then reloading before the monitor is persisted, resulting in - /// the ChannelManager re-adding the same payment entry, before the same block is replayed, - /// resulting in a duplicate PaymentSent event. - pending_monitor_updates: Mutex>, - /// The last block height at which no [`UpdateOrigin::ChainSync`] monitor updates were present - /// in `pending_monitor_updates`. - /// If it's been more than [`LATENCY_GRACE_PERIOD_BLOCKS`] since we started waiting on a chain - /// sync event, we let monitor events return to `ChannelManager` because we cannot hold them up - /// forever or we'll end up with HTLC preimages waiting to feed back into an upstream channel - /// forever, risking funds loss. 
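(Aside, not part of the patch: a minimal, synchronous sketch of the reshaped `Persist` trait, assuming a simple directory-backed store. `DirPersister`, its fields and paths are illustrative assumptions. Because every call here returns `Completed`, no `ChainMonitor::channel_monitor_updated` calls are needed; `archive_persisted_channel` moves the data aside rather than deleting it, as the new docs suggest.)

use std::fs;
use std::path::{Path, PathBuf};

use lightning::chain::ChannelMonitorUpdateStatus;
use lightning::chain::chainmonitor::Persist;
use lightning::chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate};
use lightning::chain::transaction::OutPoint;
use lightning::sign::ecdsa::EcdsaChannelSigner;
use lightning::util::ser::Writeable;

struct DirPersister {
	monitor_dir: PathBuf,
	archive_dir: PathBuf,
}

impl DirPersister {
	fn path_for(&self, dir: &Path, funding_txo: OutPoint) -> PathBuf {
		dir.join(format!("{}_{}", funding_txo.txid, funding_txo.index))
	}
}

impl<Signer: EcdsaChannelSigner> Persist<Signer> for DirPersister {
	fn persist_new_channel(&self, funding_txo: OutPoint, monitor: &ChannelMonitor<Signer>)
		-> ChannelMonitorUpdateStatus
	{
		// Write the full monitor; returning `Completed` means no later
		// `channel_monitor_updated` call is required for this persist.
		match fs::write(self.path_for(&self.monitor_dir, funding_txo), monitor.encode()) {
			Ok(()) => ChannelMonitorUpdateStatus::Completed,
			Err(_) => ChannelMonitorUpdateStatus::UnrecoverableError,
		}
	}

	fn update_persisted_channel(
		&self, funding_txo: OutPoint, _update: Option<&ChannelMonitorUpdate>,
		monitor: &ChannelMonitor<Signer>,
	) -> ChannelMonitorUpdateStatus {
		// This sketch always rewrites the whole monitor; a real persister could instead
		// write the individual `ChannelMonitorUpdate` when one is provided.
		match fs::write(self.path_for(&self.monitor_dir, funding_txo), monitor.encode()) {
			Ok(()) => ChannelMonitorUpdateStatus::Completed,
			Err(_) => ChannelMonitorUpdateStatus::UnrecoverableError,
		}
	}

	fn archive_persisted_channel(&self, funding_txo: OutPoint) {
		// Move (rather than delete) the data, hedging against unexpected data loss.
		let _ = fs::rename(
			self.path_for(&self.monitor_dir, funding_txo),
			self.path_for(&self.archive_dir, funding_txo),
		);
	}
}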
- last_chain_persist_height: AtomicUsize, + pending_monitor_updates: Mutex>, } -impl MonitorHolder { - fn has_pending_offchain_updates(&self, pending_monitor_updates_lock: &MutexGuard>) -> bool { - pending_monitor_updates_lock.iter().any(|update_id| - if let UpdateOrigin::OffChain(_) = update_id.contents { true } else { false }) - } - fn has_pending_chainsync_updates(&self, pending_monitor_updates_lock: &MutexGuard>) -> bool { - pending_monitor_updates_lock.iter().any(|update_id| - if let UpdateOrigin::ChainSync(_) = update_id.contents { true } else { false }) +impl MonitorHolder { + fn has_pending_updates(&self, pending_monitor_updates_lock: &MutexGuard>) -> bool { + !pending_monitor_updates_lock.is_empty() } } @@ -240,12 +184,12 @@ impl MonitorHolder { /// /// Note that this holds a mutex in [`ChainMonitor`] and may block other events until it is /// released. -pub struct LockedChannelMonitor<'a, ChannelSigner: WriteableEcdsaChannelSigner> { +pub struct LockedChannelMonitor<'a, ChannelSigner: EcdsaChannelSigner> { lock: RwLockReadGuard<'a, HashMap>>, funding_txo: OutPoint, } -impl Deref for LockedChannelMonitor<'_, ChannelSigner> { +impl Deref for LockedChannelMonitor<'_, ChannelSigner> { type Target = ChannelMonitor; fn deref(&self) -> &ChannelMonitor { &self.lock.get(&self.funding_txo).expect("Checked at construction").monitor @@ -268,7 +212,7 @@ impl Deref for LockedChannelMonitor< /// [`ChannelManager`]: crate::ln::channelmanager::ChannelManager /// [module-level documentation]: crate::chain::chainmonitor /// [`rebroadcast_pending_claims`]: Self::rebroadcast_pending_claims -pub struct ChainMonitor +pub struct ChainMonitor where C::Target: chain::Filter, T::Target: BroadcasterInterface, F::Target: FeeEstimator, @@ -276,10 +220,6 @@ pub struct ChainMonitor, { monitors: RwLock>>, - /// When we generate a [`MonitorUpdateId`] for a chain-event monitor persistence, we need a - /// unique ID, which we calculate by simply getting the next value from this counter. Note that - /// the ID is never persisted so it's ok that they reset on restart. - sync_persistence_id: AtomicCounter, chain_source: Option, broadcaster: T, logger: L, @@ -287,14 +227,16 @@ pub struct ChainMonitor, Option)>>, + pending_monitor_events: Mutex, Option)>>, /// The best block height seen, used as a proxy for the passage of time. highest_chain_height: AtomicUsize, + /// A [`Notifier`] used to wake up the background processor in case we have any [`Event`]s for + /// it to give to users (or [`MonitorEvent`]s for `ChannelManager` to process). event_notifier: Notifier, } -impl ChainMonitor +impl ChainMonitor where C::Target: chain::Filter, T::Target: BroadcasterInterface, F::Target: FeeEstimator, @@ -317,11 +259,11 @@ where C::Target: chain::Filter, FN: Fn(&ChannelMonitor, &TransactionData) -> Vec { let err_str = "ChannelMonitor[Update] persistence failed unrecoverably. 
This indicates we cannot continue normal operation and must shut down."; - let funding_outpoints: HashSet = HashSet::from_iter(self.monitors.read().unwrap().keys().cloned()); + let funding_outpoints = hash_set_from_iter(self.monitors.read().unwrap().keys().cloned()); for funding_outpoint in funding_outpoints.iter() { let monitor_lock = self.monitors.read().unwrap(); if let Some(monitor_state) = monitor_lock.get(funding_outpoint) { - if self.update_monitor_with_chain_data(header, best_height, txdata, &process, funding_outpoint, &monitor_state).is_err() { + if self.update_monitor_with_chain_data(header, txdata, &process, funding_outpoint, &monitor_state).is_err() { // Take the monitors lock for writing so that we poison it and any future // operations going forward fail immediately. core::mem::drop(monitor_lock); @@ -336,7 +278,7 @@ where C::Target: chain::Filter, let monitor_states = self.monitors.write().unwrap(); for (funding_outpoint, monitor_state) in monitor_states.iter() { if !funding_outpoints.contains(funding_outpoint) { - if self.update_monitor_with_chain_data(header, best_height, txdata, &process, funding_outpoint, &monitor_state).is_err() { + if self.update_monitor_with_chain_data(header, txdata, &process, funding_outpoint, &monitor_state).is_err() { log_error!(self.logger, "{}", err_str); panic!("{}", err_str); } @@ -355,33 +297,22 @@ where C::Target: chain::Filter, } fn update_monitor_with_chain_data( - &self, header: &Header, best_height: Option, txdata: &TransactionData, - process: FN, funding_outpoint: &OutPoint, monitor_state: &MonitorHolder + &self, header: &Header, txdata: &TransactionData, process: FN, funding_outpoint: &OutPoint, + monitor_state: &MonitorHolder ) -> Result<(), ()> where FN: Fn(&ChannelMonitor, &TransactionData) -> Vec { let monitor = &monitor_state.monitor; + let logger = WithChannelMonitor::from(&self.logger, &monitor, None); let mut txn_outputs; { txn_outputs = process(monitor, txdata); - let update_id = MonitorUpdateId { - contents: UpdateOrigin::ChainSync(self.sync_persistence_id.get_increment()), - }; - let mut pending_monitor_updates = monitor_state.pending_monitor_updates.lock().unwrap(); - if let Some(height) = best_height { - if !monitor_state.has_pending_chainsync_updates(&pending_monitor_updates) { - // If there are not ChainSync persists awaiting completion, go ahead and - // set last_chain_persist_height here - we wouldn't want the first - // InProgress to always immediately be considered "overly delayed". 
- monitor_state.last_chain_persist_height.store(height as usize, Ordering::Release); - } - } - - log_trace!(self.logger, "Syncing Channel Monitor for channel {}", log_funding_info!(monitor)); - match self.persister.update_persisted_channel(*funding_outpoint, None, monitor, update_id) { + log_trace!(logger, "Syncing Channel Monitor for channel {}", log_funding_info!(monitor)); + match self.persister.update_persisted_channel(*funding_outpoint, None, monitor) { ChannelMonitorUpdateStatus::Completed => - log_trace!(self.logger, "Finished syncing Channel Monitor for channel {}", log_funding_info!(monitor)), + log_trace!(logger, "Finished syncing Channel Monitor for channel {} for block-data", + log_funding_info!(monitor) + ), ChannelMonitorUpdateStatus::InProgress => { - log_debug!(self.logger, "Channel Monitor sync for channel {} in progress, holding events until completion!", log_funding_info!(monitor)); - pending_monitor_updates.push(update_id); + log_trace!(logger, "Channel Monitor sync for channel {} in progress.", log_funding_info!(monitor)); }, ChannelMonitorUpdateStatus::UnrecoverableError => { return Err(()); @@ -401,7 +332,8 @@ where C::Target: chain::Filter, outpoint: OutPoint { txid, index: idx as u16 }, script_pubkey: output.script_pubkey, }; - chain_source.register_output(output) + log_trace!(logger, "Adding monitoring for spends of outpoint {} to the filter", output.outpoint); + chain_source.register_output(output); } } } @@ -417,8 +349,7 @@ where C::Target: chain::Filter, /// transactions relevant to the watched channels. pub fn new(chain_source: Option, broadcaster: T, logger: L, feeest: F, persister: P) -> Self { Self { - monitors: RwLock::new(HashMap::new()), - sync_persistence_id: AtomicCounter::new(), + monitors: RwLock::new(new_hash_map()), chain_source, broadcaster, logger, @@ -469,25 +400,34 @@ where C::Target: chain::Filter, } } - /// Lists the funding outpoint of each [`ChannelMonitor`] being monitored. + /// Lists the funding outpoint and channel ID of each [`ChannelMonitor`] being monitored. /// /// Note that [`ChannelMonitor`]s are not removed when a channel is closed as they are always /// monitoring for on-chain state resolutions. - pub fn list_monitors(&self) -> Vec { - self.monitors.read().unwrap().keys().map(|outpoint| *outpoint).collect() + pub fn list_monitors(&self) -> Vec<(OutPoint, ChannelId)> { + self.monitors.read().unwrap().iter().map(|(outpoint, monitor_holder)| { + let channel_id = monitor_holder.monitor.channel_id(); + (*outpoint, channel_id) + }).collect() } #[cfg(not(c_bindings))] /// Lists the pending updates for each [`ChannelMonitor`] (by `OutPoint` being monitored). - pub fn list_pending_monitor_updates(&self) -> HashMap> { - self.monitors.read().unwrap().iter().map(|(outpoint, holder)| { + /// Each `Vec` contains `update_id`s from [`ChannelMonitor::get_latest_update_id`] for updates + /// that have not yet been fully persisted. Note that if a full monitor is persisted all the pending + /// monitor updates must be individually marked completed by calling [`ChainMonitor::channel_monitor_updated`]. + pub fn list_pending_monitor_updates(&self) -> HashMap> { + hash_map_from_iter(self.monitors.read().unwrap().iter().map(|(outpoint, holder)| { (*outpoint, holder.pending_monitor_updates.lock().unwrap().clone()) - }).collect() + })) } #[cfg(c_bindings)] /// Lists the pending updates for each [`ChannelMonitor`] (by `OutPoint` being monitored). 
- pub fn list_pending_monitor_updates(&self) -> Vec<(OutPoint, Vec)> { + /// Each `Vec` contains `update_id`s from [`ChannelMonitor::get_latest_update_id`] for updates + /// that have not yet been fully persisted. Note that if a full monitor is persisted all the pending + /// monitor updates must be individually marked completed by calling [`ChainMonitor::channel_monitor_updated`]. + pub fn list_pending_monitor_updates(&self) -> Vec<(OutPoint, Vec)> { self.monitors.read().unwrap().iter().map(|(outpoint, holder)| { (*outpoint, holder.pending_monitor_updates.lock().unwrap().clone()) }).collect() @@ -506,16 +446,20 @@ where C::Target: chain::Filter, /// 1) This [`ChainMonitor`] calls [`Persist::update_persisted_channel`] which stores the /// update to disk and begins updating any remote (e.g. watchtower/backup) copies, /// returning [`ChannelMonitorUpdateStatus::InProgress`], - /// 2) once all remote copies are updated, you call this function with the - /// `completed_update_id` that completed, and once all pending updates have completed the - /// channel will be re-enabled. - // Note that we re-enable only after `UpdateOrigin::OffChain` updates complete, we don't - // care about `UpdateOrigin::ChainSync` updates for the channel state being updated. We - // only care about `UpdateOrigin::ChainSync` for returning `MonitorEvent`s. + /// 2) once all remote copies are updated, you call this function with [`ChannelMonitor::get_latest_update_id`] + /// or [`ChannelMonitorUpdate::update_id`] as the `completed_update_id`, and once all pending + /// updates have completed the channel will be re-enabled. + /// + /// It is only necessary to call [`ChainMonitor::channel_monitor_updated`] when you return [`ChannelMonitorUpdateStatus::InProgress`] + /// from [`Persist`] and either: + /// 1. A new [`ChannelMonitor`] was added in [`Persist::persist_new_channel`], or + /// 2. A [`ChannelMonitorUpdate`] was provided as part of [`Persist::update_persisted_channel`]. + /// Note that we don't care about calls to [`Persist::update_persisted_channel`] where no + /// [`ChannelMonitorUpdate`] was provided. /// /// Returns an [`APIError::APIMisuseError`] if `funding_txo` does not match any currently /// registered [`ChannelMonitor`]s. - pub fn channel_monitor_updated(&self, funding_txo: OutPoint, completed_update_id: MonitorUpdateId) -> Result<(), APIError> { + pub fn channel_monitor_updated(&self, funding_txo: OutPoint, completed_update_id: u64) -> Result<(), APIError> { let monitors = self.monitors.read().unwrap(); let monitor_data = if let Some(mon) = monitors.get(&funding_txo) { mon } else { return Err(APIError::APIMisuseError { err: format!("No ChannelMonitor matching funding outpoint {:?} found", funding_txo) }); @@ -523,36 +467,28 @@ where C::Target: chain::Filter, let mut pending_monitor_updates = monitor_data.pending_monitor_updates.lock().unwrap(); pending_monitor_updates.retain(|update_id| *update_id != completed_update_id); - match completed_update_id { - MonitorUpdateId { contents: UpdateOrigin::OffChain(_) } => { - // Note that we only check for `UpdateOrigin::OffChain` failures here - if - // we're being told that a `UpdateOrigin::OffChain` monitor update completed, - // we only care about ensuring we don't tell the `ChannelManager` to restore - // the channel to normal operation until all `UpdateOrigin::OffChain` updates - // complete. 
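(Aside, not part of the patch: the asynchronous completion flow described above — return `InProgress`, persist in the background, then report the finished `u64` update id so the channel can be re-enabled. `persist_async`, `write_to_remote_storage` and `mark_completed` are assumptions for illustration; `mark_completed` stands in for a call to `ChainMonitor::channel_monitor_updated(funding_txo, update_id)` once all copies are durable.)

use std::thread;

use lightning::chain::ChannelMonitorUpdateStatus;
use lightning::chain::transaction::OutPoint;

// Stand-in for whatever background storage backend the node uses (illustrative only).
fn write_to_remote_storage(_key: String, _bytes: Vec<u8>) { /* ... */ }

// Kick off an asynchronous persist and immediately return `InProgress`.
fn persist_async<F: Fn(OutPoint, u64) + Send + 'static>(
	funding_txo: OutPoint, update_id: u64, encoded_monitor: Vec<u8>, mark_completed: F,
) -> ChannelMonitorUpdateStatus {
	thread::spawn(move || {
		write_to_remote_storage(
			format!("{}_{}", funding_txo.txid, funding_txo.index), encoded_monitor);
		// The write is durable: report the completed update id back to the ChainMonitor.
		mark_completed(funding_txo, update_id);
	});
	ChannelMonitorUpdateStatus::InProgress
}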
- // If there's some `UpdateOrigin::ChainSync` update still pending that's okay - // - we can still update our channel state, just as long as we don't return - // `MonitorEvent`s from the monitor back to the `ChannelManager` until they - // complete. - let monitor_is_pending_updates = monitor_data.has_pending_offchain_updates(&pending_monitor_updates); - if monitor_is_pending_updates { - // If there are still monitor updates pending, we cannot yet construct a - // Completed event. - return Ok(()); - } - self.pending_monitor_events.lock().unwrap().push((funding_txo, vec![MonitorEvent::Completed { - funding_txo, - monitor_update_id: monitor_data.monitor.get_latest_update_id(), - }], monitor_data.monitor.get_counterparty_node_id())); - }, - MonitorUpdateId { contents: UpdateOrigin::ChainSync(_) } => { - if !monitor_data.has_pending_chainsync_updates(&pending_monitor_updates) { - monitor_data.last_chain_persist_height.store(self.highest_chain_height.load(Ordering::Acquire), Ordering::Release); - // The next time release_pending_monitor_events is called, any events for this - // ChannelMonitor will be returned. - } - }, + // Note that we only check for pending non-chainsync monitor updates and we don't track monitor + // updates resulting from chainsync in `pending_monitor_updates`. + let monitor_is_pending_updates = monitor_data.has_pending_updates(&pending_monitor_updates); + log_debug!(self.logger, "Completed off-chain monitor update {} for channel with funding outpoint {:?}, {}", + completed_update_id, + funding_txo, + if monitor_is_pending_updates { + "still have pending off-chain updates" + } else { + "all off-chain updates complete, returning a MonitorEvent" + }); + if monitor_is_pending_updates { + // If there are still monitor updates pending, we cannot yet construct a + // Completed event. + return Ok(()); } + let channel_id = monitor_data.monitor.channel_id(); + self.pending_monitor_events.lock().unwrap().push((funding_txo, channel_id, vec![MonitorEvent::Completed { + funding_txo, channel_id, + monitor_update_id: monitor_data.monitor.get_latest_update_id(), + }], monitor_data.monitor.get_counterparty_node_id())); + self.event_notifier.notify(); Ok(()) } @@ -563,9 +499,14 @@ where C::Target: chain::Filter, #[cfg(any(test, fuzzing))] pub fn force_channel_monitor_updated(&self, funding_txo: OutPoint, monitor_update_id: u64) { let monitors = self.monitors.read().unwrap(); - let counterparty_node_id = monitors.get(&funding_txo).and_then(|m| m.monitor.get_counterparty_node_id()); - self.pending_monitor_events.lock().unwrap().push((funding_txo, vec![MonitorEvent::Completed { + let (counterparty_node_id, channel_id) = if let Some(m) = monitors.get(&funding_txo) { + (m.monitor.get_counterparty_node_id(), m.monitor.channel_id()) + } else { + (None, ChannelId::v1_from_funding_outpoint(funding_txo)) + }; + self.pending_monitor_events.lock().unwrap().push((funding_txo, channel_id, vec![MonitorEvent::Completed { funding_txo, + channel_id, monitor_update_id, }], counterparty_node_id)); self.event_notifier.notify(); @@ -620,13 +561,69 @@ where C::Target: chain::Filter, let monitors = self.monitors.read().unwrap(); for (_, monitor_holder) in &*monitors { monitor_holder.monitor.rebroadcast_pending_claims( - &*self.broadcaster, &*self.fee_estimator, &*self.logger + &*self.broadcaster, &*self.fee_estimator, &self.logger ) } } + + /// Triggers rebroadcasts of pending claims from force-closed channels after a transaction + /// signature generation failure. 
+ /// + /// `monitor_opt` can be used as a filter to only trigger them for a specific channel monitor. + pub fn signer_unblocked(&self, monitor_opt: Option) { + let monitors = self.monitors.read().unwrap(); + if let Some(funding_txo) = monitor_opt { + if let Some(monitor_holder) = monitors.get(&funding_txo) { + monitor_holder.monitor.signer_unblocked( + &*self.broadcaster, &*self.fee_estimator, &self.logger + ) + } + } else { + for (_, monitor_holder) in &*monitors { + monitor_holder.monitor.signer_unblocked( + &*self.broadcaster, &*self.fee_estimator, &self.logger + ) + } + } + } + + /// Archives fully resolved channel monitors by calling [`Persist::archive_persisted_channel`]. + /// + /// This is useful for pruning fully resolved monitors from the monitor set and primary + /// storage so they are not kept in memory and reloaded on restart. + /// + /// Should be called occasionally (once every handful of blocks or on startup). + /// + /// Depending on the implementation of [`Persist::archive_persisted_channel`] the monitor + /// data could be moved to an archive location or removed entirely. + pub fn archive_fully_resolved_channel_monitors(&self) { + let mut have_monitors_to_prune = false; + for (_, monitor_holder) in self.monitors.read().unwrap().iter() { + let logger = WithChannelMonitor::from(&self.logger, &monitor_holder.monitor, None); + if monitor_holder.monitor.is_fully_resolved(&logger) { + have_monitors_to_prune = true; + } + } + if have_monitors_to_prune { + let mut monitors = self.monitors.write().unwrap(); + monitors.retain(|funding_txo, monitor_holder| { + let logger = WithChannelMonitor::from(&self.logger, &monitor_holder.monitor, None); + if monitor_holder.monitor.is_fully_resolved(&logger) { + log_info!(logger, + "Archiving fully resolved ChannelMonitor for funding txo {}", + funding_txo + ); + self.persister.archive_persisted_channel(*funding_txo); + false + } else { + true + } + }); + } + } } -impl +impl chain::Listen for ChainMonitor where C::Target: chain::Filter, @@ -639,8 +636,10 @@ where log_debug!(self.logger, "New best block {} at height {} provided via block_connected", header.block_hash(), height); self.process_chain_data(header, Some(height), &txdata, |monitor, txdata| { monitor.block_connected( - header, txdata, height, &*self.broadcaster, &*self.fee_estimator, &*self.logger) + header, txdata, height, &*self.broadcaster, &*self.fee_estimator, &self.logger) }); + // Assume we may have some new events and wake the event processor + self.event_notifier.notify(); } fn block_disconnected(&self, header: &Header, height: u32) { @@ -648,12 +647,12 @@ where log_debug!(self.logger, "Latest block {} at height {} removed via block_disconnected", header.block_hash(), height); for monitor_state in monitor_states.values() { monitor_state.monitor.block_disconnected( - header, height, &*self.broadcaster, &*self.fee_estimator, &*self.logger); + header, height, &*self.broadcaster, &*self.fee_estimator, &self.logger); } } } -impl +impl chain::Confirm for ChainMonitor where C::Target: chain::Filter, @@ -666,15 +665,17 @@ where log_debug!(self.logger, "{} provided transactions confirmed at height {} in block {}", txdata.len(), height, header.block_hash()); self.process_chain_data(header, None, txdata, |monitor, txdata| { monitor.transactions_confirmed( - header, txdata, height, &*self.broadcaster, &*self.fee_estimator, &*self.logger) + header, txdata, height, &*self.broadcaster, &*self.fee_estimator, &self.logger) }); + // Assume we may have some new events and wake the event 
processor + self.event_notifier.notify(); } fn transaction_unconfirmed(&self, txid: &Txid) { log_debug!(self.logger, "Transaction {} reorganized out of chain", txid); let monitor_states = self.monitors.read().unwrap(); for monitor_state in monitor_states.values() { - monitor_state.monitor.transaction_unconfirmed(txid, &*self.broadcaster, &*self.fee_estimator, &*self.logger); + monitor_state.monitor.transaction_unconfirmed(txid, &*self.broadcaster, &*self.fee_estimator, &self.logger); } } @@ -685,8 +686,11 @@ where // it's still possible if a chain::Filter implementation returns a transaction. debug_assert!(txdata.is_empty()); monitor.best_block_updated( - header, height, &*self.broadcaster, &*self.fee_estimator, &*self.logger) + header, height, &*self.broadcaster, &*self.fee_estimator, &self.logger + ) }); + // Assume we may have some new events and wake the event processor + self.event_notifier.notify(); } fn get_relevant_txids(&self) -> Vec<(Txid, u32, Option)> { @@ -702,7 +706,7 @@ where } } -impl +impl chain::Watch for ChainMonitor where C::Target: chain::Filter, T::Target: BroadcasterInterface, @@ -711,49 +715,53 @@ where C::Target: chain::Filter, P::Target: Persist, { fn watch_channel(&self, funding_outpoint: OutPoint, monitor: ChannelMonitor) -> Result { + let logger = WithChannelMonitor::from(&self.logger, &monitor, None); let mut monitors = self.monitors.write().unwrap(); let entry = match monitors.entry(funding_outpoint) { hash_map::Entry::Occupied(_) => { - log_error!(self.logger, "Failed to add new channel data: channel monitor for given outpoint is already present"); + log_error!(logger, "Failed to add new channel data: channel monitor for given outpoint is already present"); return Err(()); }, hash_map::Entry::Vacant(e) => e, }; - log_trace!(self.logger, "Got new ChannelMonitor for channel {}", log_funding_info!(monitor)); - let update_id = MonitorUpdateId::from_new_monitor(&monitor); + log_trace!(logger, "Got new ChannelMonitor for channel {}", log_funding_info!(monitor)); + let update_id = monitor.get_latest_update_id(); let mut pending_monitor_updates = Vec::new(); - let persist_res = self.persister.persist_new_channel(funding_outpoint, &monitor, update_id); + let persist_res = self.persister.persist_new_channel(funding_outpoint, &monitor); match persist_res { ChannelMonitorUpdateStatus::InProgress => { - log_info!(self.logger, "Persistence of new ChannelMonitor for channel {} in progress", log_funding_info!(monitor)); + log_info!(logger, "Persistence of new ChannelMonitor for channel {} in progress", log_funding_info!(monitor)); pending_monitor_updates.push(update_id); }, ChannelMonitorUpdateStatus::Completed => { - log_info!(self.logger, "Persistence of new ChannelMonitor for channel {} completed", log_funding_info!(monitor)); + log_info!(logger, "Persistence of new ChannelMonitor for channel {} completed", log_funding_info!(monitor)); }, ChannelMonitorUpdateStatus::UnrecoverableError => { let err_str = "ChannelMonitor[Update] persistence failed unrecoverably. 
This indicates we cannot continue normal operation and must shut down."; - log_error!(self.logger, "{}", err_str); + log_error!(logger, "{}", err_str); panic!("{}", err_str); }, } if let Some(ref chain_source) = self.chain_source { - monitor.load_outputs_to_watch(chain_source); + monitor.load_outputs_to_watch(chain_source , &self.logger); } entry.insert(MonitorHolder { monitor, pending_monitor_updates: Mutex::new(pending_monitor_updates), - last_chain_persist_height: AtomicUsize::new(self.highest_chain_height.load(Ordering::Acquire)), }); Ok(persist_res) } fn update_channel(&self, funding_txo: OutPoint, update: &ChannelMonitorUpdate) -> ChannelMonitorUpdateStatus { + // `ChannelMonitorUpdate`'s `channel_id` is `None` prior to 0.0.121 and all channels in those + // versions are V1-established. For 0.0.121+ the `channel_id` fields is always `Some`. + let channel_id = update.channel_id.unwrap_or(ChannelId::v1_from_funding_outpoint(funding_txo)); // Update the monitor that watches the channel referred to by the given outpoint. let monitors = self.monitors.read().unwrap(); - let ret = match monitors.get(&funding_txo) { + match monitors.get(&funding_txo) { None => { - log_error!(self.logger, "Failed to update channel monitor: no such monitor registered"); + let logger = WithContext::from(&self.logger, update.counterparty_node_id, Some(channel_id), None); + log_error!(logger, "Failed to update channel monitor: no such monitor registered"); // We should never ever trigger this from within ChannelManager. Technically a // user could use this object with some proxying in between which makes this @@ -765,10 +773,11 @@ where C::Target: chain::Filter, }, Some(monitor_state) => { let monitor = &monitor_state.monitor; - log_trace!(self.logger, "Updating ChannelMonitor for channel {}", log_funding_info!(monitor)); + let logger = WithChannelMonitor::from(&self.logger, &monitor, None); + log_trace!(logger, "Updating ChannelMonitor to id {} for channel {}", update.update_id, log_funding_info!(monitor)); let update_res = monitor.update_monitor(update, &self.broadcaster, &self.fee_estimator, &self.logger); - let update_id = MonitorUpdateId::from_monitor_update(update); + let update_id = update.update_id; let mut pending_monitor_updates = monitor_state.pending_monitor_updates.lock().unwrap(); let persist_res = if update_res.is_err() { // Even if updating the monitor returns an error, the monitor's state will @@ -776,20 +785,37 @@ where C::Target: chain::Filter, // We don't want to persist a `monitor_update` which results in a failure to apply later // while reading `channel_monitor` with updates from storage. Instead, we should persist // the entire `channel_monitor` here. - log_warn!(self.logger, "Failed to update ChannelMonitor for channel {}. Going ahead and persisting the entire ChannelMonitor", log_funding_info!(monitor)); - self.persister.update_persisted_channel(funding_txo, None, monitor, update_id) + log_warn!(logger, "Failed to update ChannelMonitor for channel {}. 
Going ahead and persisting the entire ChannelMonitor", log_funding_info!(monitor)); + self.persister.update_persisted_channel(funding_txo, None, monitor) } else { - self.persister.update_persisted_channel(funding_txo, Some(update), monitor, update_id) + self.persister.update_persisted_channel(funding_txo, Some(update), monitor) }; match persist_res { ChannelMonitorUpdateStatus::InProgress => { pending_monitor_updates.push(update_id); - log_debug!(self.logger, "Persistence of ChannelMonitorUpdate for channel {} in progress", log_funding_info!(monitor)); + log_debug!(logger, + "Persistence of ChannelMonitorUpdate id {:?} for channel {} in progress", + update_id, + log_funding_info!(monitor) + ); }, ChannelMonitorUpdateStatus::Completed => { - log_debug!(self.logger, "Persistence of ChannelMonitorUpdate for channel {} completed", log_funding_info!(monitor)); + log_debug!(logger, + "Persistence of ChannelMonitorUpdate id {:?} for channel {} completed", + update_id, + log_funding_info!(monitor) + ); + }, + ChannelMonitorUpdateStatus::UnrecoverableError => { + // Take the monitors lock for writing so that we poison it and any future + // operations going forward fail immediately. + core::mem::drop(pending_monitor_updates); + core::mem::drop(monitors); + let _poison = self.monitors.write().unwrap(); + let err_str = "ChannelMonitor[Update] persistence failed unrecoverably. This indicates we cannot continue normal operation and must shut down."; + log_error!(logger, "{}", err_str); + panic!("{}", err_str); }, - ChannelMonitorUpdateStatus::UnrecoverableError => { /* we'll panic in a moment */ }, } if update_res.is_err() { ChannelMonitorUpdateStatus::InProgress @@ -797,42 +823,25 @@ where C::Target: chain::Filter, persist_res } } - }; - if let ChannelMonitorUpdateStatus::UnrecoverableError = ret { - // Take the monitors lock for writing so that we poison it and any future - // operations going forward fail immediately. - core::mem::drop(monitors); - let _poison = self.monitors.write().unwrap(); - let err_str = "ChannelMonitor[Update] persistence failed unrecoverably. 
This indicates we cannot continue normal operation and must shut down.";
-			log_error!(self.logger, "{}", err_str);
-			panic!("{}", err_str);
-		}
-		ret
 	}
 
-	fn release_pending_monitor_events(&self) -> Vec<(OutPoint, Vec<MonitorEvent>, Option<PublicKey>)> {
+	fn release_pending_monitor_events(&self) -> Vec<(OutPoint, ChannelId, Vec<MonitorEvent>, Option<PublicKey>)> {
 		let mut pending_monitor_events = self.pending_monitor_events.lock().unwrap().split_off(0);
 		for monitor_state in self.monitors.read().unwrap().values() {
-			let is_pending_monitor_update = monitor_state.has_pending_chainsync_updates(&monitor_state.pending_monitor_updates.lock().unwrap());
-			if !is_pending_monitor_update || monitor_state.last_chain_persist_height.load(Ordering::Acquire) + LATENCY_GRACE_PERIOD_BLOCKS as usize <= self.highest_chain_height.load(Ordering::Acquire) {
-				if is_pending_monitor_update {
-					log_error!(self.logger, "A ChannelMonitor sync took longer than {} blocks to complete.", LATENCY_GRACE_PERIOD_BLOCKS);
-					log_error!(self.logger, "   To avoid funds-loss, we are allowing monitor updates to be released.");
-					log_error!(self.logger, "   This may cause duplicate payment events to be generated.");
-				}
-				let monitor_events = monitor_state.monitor.get_and_clear_pending_monitor_events();
-				if monitor_events.len() > 0 {
-					let monitor_outpoint = monitor_state.monitor.get_funding_txo().0;
-					let counterparty_node_id = monitor_state.monitor.get_counterparty_node_id();
-					pending_monitor_events.push((monitor_outpoint, monitor_events, counterparty_node_id));
-				}
+			let monitor_events = monitor_state.monitor.get_and_clear_pending_monitor_events();
+			if monitor_events.len() > 0 {
+				let monitor_outpoint = monitor_state.monitor.get_funding_txo().0;
+				let monitor_channel_id = monitor_state.monitor.channel_id();
+				let counterparty_node_id = monitor_state.monitor.get_counterparty_node_id();
+				pending_monitor_events.push((monitor_outpoint, monitor_channel_id, monitor_events, counterparty_node_id));
 			}
 		}
 		pending_monitor_events
 	}
 }
 
-impl<ChannelSigner: WriteableEcdsaChannelSigner, C: Deref, T: Deref, F: Deref, L: Deref, P: Deref> events::EventsProvider for ChainMonitor<ChannelSigner, C, T, F, L, P>
+impl<ChannelSigner: EcdsaChannelSigner, C: Deref, T: Deref, F: Deref, L: Deref, P: Deref> events::EventsProvider for ChainMonitor<ChannelSigner, C, T, F, L, P>
 	where C::Target: chain::Filter,
 	      T::Target: BroadcasterInterface,
 	      F::Target: FeeEstimator,
@@ -862,15 +871,12 @@ impl
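(Aside, not part of the patch: a sketch of the periodic maintenance the new docs recommend — calling `archive_fully_resolved_channel_monitors` every handful of blocks or on startup, alongside `rebroadcast_pending_claims`. The `run_monitor_maintenance` helper is an assumption; the generic bounds simply mirror the `ChainMonitor` parameters as they appear in this diff.)

use std::ops::Deref;

use lightning::chain::Filter;
use lightning::chain::chaininterface::{BroadcasterInterface, FeeEstimator};
use lightning::chain::chainmonitor::{ChainMonitor, Persist};
use lightning::sign::ecdsa::EcdsaChannelSigner;
use lightning::util::logger::Logger;

fn run_monitor_maintenance<ChannelSigner, C, T, F, L, P>(
	chain_monitor: &ChainMonitor<ChannelSigner, C, T, F, L, P>,
) where
	ChannelSigner: EcdsaChannelSigner,
	C: Deref, T: Deref, F: Deref, L: Deref, P: Deref,
	C::Target: Filter,
	T::Target: BroadcasterInterface,
	F::Target: FeeEstimator,
	L::Target: Logger,
	P::Target: Persist<ChannelSigner>,
{
	// Prune monitors for channels whose on-chain claims are fully resolved. Whether the
	// data is moved to a backup location or removed entirely is up to the persister's
	// archive_persisted_channel implementation.
	chain_monitor.archive_fully_resolved_channel_monitors();
	// Rebroadcast (and, where relevant, fee-bump) any still-pending force-close claims.
	chain_monitor.rebroadcast_pending_claims();
}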