//! events. The remote server would make use of [`ChainMonitor`] for block processing and for
//! servicing [`ChannelMonitor`] updates from the client.
-use bitcoin::blockdata::block::BlockHeader;
+use bitcoin::blockdata::block::Header;
use bitcoin::hash_types::{Txid, BlockHash};
use crate::chain;
use crate::chain::{ChannelMonitorUpdateStatus, Filter, WatchedOutput};
use crate::chain::chaininterface::{BroadcasterInterface, FeeEstimator};
-use crate::chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, Balance, MonitorEvent, TransactionOutputs, LATENCY_GRACE_PERIOD_BLOCKS};
+use crate::chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, Balance, MonitorEvent, TransactionOutputs, WithChannelMonitor};
use crate::chain::transaction::{OutPoint, TransactionData};
-use crate::chain::keysinterface::Sign;
-use crate::util::atomic_counter::AtomicCounter;
-use crate::util::logger::Logger;
+use crate::ln::types::ChannelId;
+use crate::sign::ecdsa::EcdsaChannelSigner;
+use crate::events;
+use crate::events::{Event, EventHandler};
+use crate::util::logger::{Logger, WithContext};
use crate::util::errors::APIError;
-use crate::util::events;
-use crate::util::events::{Event, EventHandler};
+use crate::util::wakers::{Future, Notifier};
use crate::ln::channelmanager::ChannelDetails;
use crate::prelude::*;
use crate::sync::{RwLock, RwLockReadGuard, Mutex, MutexGuard};
use core::ops::Deref;
-use core::sync::atomic::{AtomicBool, AtomicUsize, Ordering};
+use core::sync::atomic::{AtomicUsize, Ordering};
use bitcoin::secp256k1::PublicKey;
-#[derive(Clone, Copy, Hash, PartialEq, Eq)]
-/// A specific update's ID stored in a `MonitorUpdateId`, separated out to make the contents
-/// entirely opaque.
-enum UpdateOrigin {
- /// An update that was generated by the `ChannelManager` (via our `chain::Watch`
- /// implementation). This corresponds to an actual [`ChannelMonitorUpdate::update_id`] field
- /// and [`ChannelMonitor::get_latest_update_id`].
- OffChain(u64),
- /// An update that was generated during blockchain processing. The ID here is specific to the
- /// generating [`ChainMonitor`] and does *not* correspond to any on-disk IDs.
- ChainSync(u64),
-}
-
-/// An opaque identifier describing a specific [`Persist`] method call.
-#[derive(Clone, Copy, Hash, PartialEq, Eq)]
-pub struct MonitorUpdateId {
- contents: UpdateOrigin,
-}
-
-impl MonitorUpdateId {
- pub(crate) fn from_monitor_update(update: &ChannelMonitorUpdate) -> Self {
- Self { contents: UpdateOrigin::OffChain(update.update_id) }
- }
- pub(crate) fn from_new_monitor<ChannelSigner: Sign>(monitor: &ChannelMonitor<ChannelSigner>) -> Self {
- Self { contents: UpdateOrigin::OffChain(monitor.get_latest_update_id()) }
- }
-}
-
/// `Persist` defines behavior for persisting channel monitors: this could mean
/// writing once to disk, and/or uploading to one or more backup services.
///
-/// Each method can return three possible values:
-/// * If persistence (including any relevant `fsync()` calls) happens immediately, the
-/// implementation should return [`ChannelMonitorUpdateStatus::Completed`], indicating normal
-/// channel operation should continue.
-/// * If persistence happens asynchronously, implementations should first ensure the
-/// [`ChannelMonitor`] or [`ChannelMonitorUpdate`] are written durably to disk, and then return
-/// [`ChannelMonitorUpdateStatus::InProgress`] while the update continues in the background.
-/// Once the update completes, [`ChainMonitor::channel_monitor_updated`] should be called with
-/// the corresponding [`MonitorUpdateId`].
+/// Persistence can happen in one of two ways - synchronously completing before the trait method
+/// calls return or asynchronously in the background.
+///
+/// # For those implementing synchronous persistence
+///
+/// * If persistence completes fully (including any relevant `fsync()` calls), the implementation
+/// should return [`ChannelMonitorUpdateStatus::Completed`], indicating normal channel operation
+/// should continue.
+///
+/// * If persistence fails for some reason, implementations should consider returning
+/// [`ChannelMonitorUpdateStatus::InProgress`] and retry all pending persistence operations in
+/// the background with [`ChainMonitor::list_pending_monitor_updates`] and
+/// [`ChainMonitor::get_monitor`].
+///
+/// Once a full [`ChannelMonitor`] has been persisted, all pending updates for that channel can
+/// be marked as complete via [`ChainMonitor::channel_monitor_updated`].
+///
+/// If at some point no further progress can be made towards persisting the pending updates, the
+/// node should simply shut down.
+///
+/// * If the persistence has failed and cannot be retried further (e.g. because of an outage),
+/// [`ChannelMonitorUpdateStatus::UnrecoverableError`] can be used, though this will result in
+/// an immediate panic and future operations in LDK generally failing.
+///
+/// # For those implementing asynchronous persistence
+///
+/// All calls should generally spawn a background task and immediately return
+/// [`ChannelMonitorUpdateStatus::InProgress`]. Once the update completes,
+/// [`ChainMonitor::channel_monitor_updated`] should be called with the corresponding
+/// [`ChannelMonitor::get_latest_update_id`] or [`ChannelMonitorUpdate::update_id`].
+///
+/// Note that unlike the direct [`chain::Watch`] interface,
+/// [`ChainMonitor::channel_monitor_updated`] must be called once for *each* update which occurs.
+///
+/// If at some point no further progress can be made towards persisting a pending update, the node
+/// should simply shut down. Until then, the background task should either loop indefinitely, or
+/// persistence should be regularly retried with [`ChainMonitor::list_pending_monitor_updates`]
+/// and [`ChainMonitor::get_monitor`] (note that if a full monitor is persisted all pending
+/// monitor updates may be marked completed).
///
-/// Note that unlike the direct [`chain::Watch`] interface,
-/// [`ChainMonitor::channel_monitor_updated`] must be called once for *each* update which occurs.
+/// # Using remote watchtowers
///
-/// * If persistence fails for some reason, implementations should return
-/// [`ChannelMonitorUpdateStatus::PermanentFailure`], in which case the channel will likely be
-/// closed without broadcasting the latest state. See
-/// [`ChannelMonitorUpdateStatus::PermanentFailure`] for more details.
-pub trait Persist<ChannelSigner: Sign> {
+/// Watchtowers may be updated as a part of an implementation of this trait, utilizing the async
+/// update process described above while the watchtower is being updated. The following methods are
+/// provided for building transactions for a watchtower:
+/// [`ChannelMonitor::initial_counterparty_commitment_tx`],
+/// [`ChannelMonitor::counterparty_commitment_txs_from_update`],
+/// [`ChannelMonitor::sign_to_local_justice_tx`], [`TrustedCommitmentTransaction::revokeable_output_index`],
+/// [`TrustedCommitmentTransaction::build_to_local_justice_tx`].
+///
+/// [`TrustedCommitmentTransaction::revokeable_output_index`]: crate::ln::chan_utils::TrustedCommitmentTransaction::revokeable_output_index
+/// [`TrustedCommitmentTransaction::build_to_local_justice_tx`]: crate::ln::chan_utils::TrustedCommitmentTransaction::build_to_local_justice_tx
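+///
+/// # Example
+///
+/// The following is an illustrative sketch of a synchronous implementation, not an LDK-provided
+/// persister. It assumes a hypothetical `MyStore` type which durably writes and archives byte
+/// blobs under a string key:
+///
+/// ```ignore
+/// struct MyPersister { store: MyStore }
+///
+/// impl<ChannelSigner: EcdsaChannelSigner> Persist<ChannelSigner> for MyPersister {
+///     fn persist_new_channel(&self, funding_outpoint: OutPoint,
+///         monitor: &ChannelMonitor<ChannelSigner>
+///     ) -> ChannelMonitorUpdateStatus {
+///         let key = format!("{}_{}", funding_outpoint.txid, funding_outpoint.index);
+///         // Write (and fsync) the full monitor before returning `Completed`.
+///         match self.store.write(&key, &monitor.encode()) {
+///             Ok(()) => ChannelMonitorUpdateStatus::Completed,
+///             // On failure, retry in the background and mark the update completed later via
+///             // `ChainMonitor::channel_monitor_updated`.
+///             Err(_) => ChannelMonitorUpdateStatus::InProgress,
+///         }
+///     }
+///
+///     fn update_persisted_channel(&self, funding_outpoint: OutPoint,
+///         _update: Option<&ChannelMonitorUpdate>, monitor: &ChannelMonitor<ChannelSigner>
+///     ) -> ChannelMonitorUpdateStatus {
+///         // For simplicity this sketch always rewrites the full monitor rather than persisting
+///         // the individual update.
+///         let key = format!("{}_{}", funding_outpoint.txid, funding_outpoint.index);
+///         match self.store.write(&key, &monitor.encode()) {
+///             Ok(()) => ChannelMonitorUpdateStatus::Completed,
+///             Err(_) => ChannelMonitorUpdateStatus::InProgress,
+///         }
+///     }
+///
+///     fn archive_persisted_channel(&self, funding_outpoint: OutPoint) {
+///         // Move the monitor to backup storage rather than deleting it outright.
+///         let key = format!("{}_{}", funding_outpoint.txid, funding_outpoint.index);
+///         let _ = self.store.archive(&key);
+///     }
+/// }
+/// ```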
+pub trait Persist<ChannelSigner: EcdsaChannelSigner> {
/// Persist a new channel's data in response to a [`chain::Watch::watch_channel`] call. This is
/// called by [`ChannelManager`] for new channels, or may be called directly, e.g. on startup.
///
/// channel's outpoint (and it is up to you to maintain a correct mapping between the outpoint
/// and the stored channel data). Note that you **must** persist every new monitor to disk.
///
- /// The `update_id` is used to identify this call to [`ChainMonitor::channel_monitor_updated`],
- /// if you return [`ChannelMonitorUpdateStatus::InProgress`].
+ /// The [`ChannelMonitor::get_latest_update_id`] uniquely links this call to [`ChainMonitor::channel_monitor_updated`].
+ /// For [`Persist::persist_new_channel`], it is only necessary to call [`ChainMonitor::channel_monitor_updated`]
+ /// when you return [`ChannelMonitorUpdateStatus::InProgress`].
///
/// See [`Writeable::write`] on [`ChannelMonitor`] for writing out a `ChannelMonitor`
/// and [`ChannelMonitorUpdateStatus`] for requirements when returning errors.
///
/// [`ChannelManager`]: crate::ln::channelmanager::ChannelManager
/// [`Writeable::write`]: crate::util::ser::Writeable::write
- fn persist_new_channel(&self, channel_id: OutPoint, data: &ChannelMonitor<ChannelSigner>, update_id: MonitorUpdateId) -> ChannelMonitorUpdateStatus;
+ fn persist_new_channel(&self, channel_funding_outpoint: OutPoint, monitor: &ChannelMonitor<ChannelSigner>) -> ChannelMonitorUpdateStatus;
/// Update one channel's data. The provided [`ChannelMonitor`] has already applied the given
/// update.
/// updated monitor itself to disk/backups. See the [`Persist`] trait documentation for more
/// details.
///
- /// During blockchain synchronization operations, this may be called with no
- /// [`ChannelMonitorUpdate`], in which case the full [`ChannelMonitor`] needs to be persisted.
+ /// During blockchain synchronization operations, and in some rare cases, this may be called with
+ /// no [`ChannelMonitorUpdate`], in which case the full [`ChannelMonitor`] needs to be persisted.
/// Note that after the full [`ChannelMonitor`] is persisted any previous
/// [`ChannelMonitorUpdate`]s which were persisted should be discarded - they can no longer be
/// applied to the persisted [`ChannelMonitor`] as they were already applied.
/// them in batches. The size of each monitor grows `O(number of state updates)`
/// whereas updates are small and `O(1)`.
///
- /// The `update_id` is used to identify this call to [`ChainMonitor::channel_monitor_updated`],
- /// if you return [`ChannelMonitorUpdateStatus::InProgress`].
+ /// The [`ChannelMonitorUpdate::update_id`] or [`ChannelMonitor::get_latest_update_id`] uniquely
+ /// links this call to [`ChainMonitor::channel_monitor_updated`].
+ /// For [`Persist::update_persisted_channel`], it is only necessary to call [`ChainMonitor::channel_monitor_updated`]
+ /// when a [`ChannelMonitorUpdate`] is provided and when you return [`ChannelMonitorUpdateStatus::InProgress`].
///
/// See [`Writeable::write`] on [`ChannelMonitor`] for writing out a `ChannelMonitor`,
/// [`Writeable::write`] on [`ChannelMonitorUpdate`] for writing out an update, and
/// [`ChannelMonitorUpdateStatus`] for requirements when returning errors.
///
/// [`Writeable::write`]: crate::util::ser::Writeable::write
- fn update_persisted_channel(&self, channel_id: OutPoint, update: Option<&ChannelMonitorUpdate>, data: &ChannelMonitor<ChannelSigner>, update_id: MonitorUpdateId) -> ChannelMonitorUpdateStatus;
+ fn update_persisted_channel(&self, channel_funding_outpoint: OutPoint, monitor_update: Option<&ChannelMonitorUpdate>, monitor: &ChannelMonitor<ChannelSigner>) -> ChannelMonitorUpdateStatus;
+ /// Prevents the channel monitor from being loaded on startup.
+ ///
+ /// Archiving the data in a backup location (rather than deleting it fully) is useful for
+ /// hedging against data loss in case of unexpected failure.
+ fn archive_persisted_channel(&self, channel_funding_outpoint: OutPoint);
}
-struct MonitorHolder<ChannelSigner: Sign> {
+struct MonitorHolder<ChannelSigner: EcdsaChannelSigner> {
monitor: ChannelMonitor<ChannelSigner>,
/// The full set of pending monitor updates for this Channel.
///
/// update_persisted_channel, the user returns a
/// [`ChannelMonitorUpdateStatus::InProgress`], and then calls channel_monitor_updated
/// immediately, racing our insertion of the pending update into the contained Vec.
- ///
- /// Beyond the synchronization of updates themselves, we cannot handle user events until after
- /// any chain updates have been stored on disk. Thus, we scan this list when returning updates
- /// to the ChannelManager, refusing to return any updates for a ChannelMonitor which is still
- /// being persisted fully to disk after a chain update.
- ///
- /// This avoids the possibility of handling, e.g. an on-chain claim, generating a claim monitor
- /// event, resulting in the relevant ChannelManager generating a PaymentSent event and dropping
- /// the pending payment entry, and then reloading before the monitor is persisted, resulting in
- /// the ChannelManager re-adding the same payment entry, before the same block is replayed,
- /// resulting in a duplicate PaymentSent event.
- pending_monitor_updates: Mutex<Vec<MonitorUpdateId>>,
- /// When the user returns a PermanentFailure error from an update_persisted_channel call during
- /// block processing, we inform the ChannelManager that the channel should be closed
- /// asynchronously. In order to ensure no further changes happen before the ChannelManager has
- /// processed the closure event, we set this to true and return PermanentFailure for any other
- /// chain::Watch events.
- channel_perm_failed: AtomicBool,
- /// The last block height at which no [`UpdateOrigin::ChainSync`] monitor updates were present
- /// in `pending_monitor_updates`.
- /// If it's been more than [`LATENCY_GRACE_PERIOD_BLOCKS`] since we started waiting on a chain
- /// sync event, we let monitor events return to `ChannelManager` because we cannot hold them up
- /// forever or we'll end up with HTLC preimages waiting to feed back into an upstream channel
- /// forever, risking funds loss.
- last_chain_persist_height: AtomicUsize,
+ pending_monitor_updates: Mutex<Vec<u64>>,
}
-impl<ChannelSigner: Sign> MonitorHolder<ChannelSigner> {
- fn has_pending_offchain_updates(&self, pending_monitor_updates_lock: &MutexGuard<Vec<MonitorUpdateId>>) -> bool {
- pending_monitor_updates_lock.iter().any(|update_id|
- if let UpdateOrigin::OffChain(_) = update_id.contents { true } else { false })
- }
- fn has_pending_chainsync_updates(&self, pending_monitor_updates_lock: &MutexGuard<Vec<MonitorUpdateId>>) -> bool {
- pending_monitor_updates_lock.iter().any(|update_id|
- if let UpdateOrigin::ChainSync(_) = update_id.contents { true } else { false })
+impl<ChannelSigner: EcdsaChannelSigner> MonitorHolder<ChannelSigner> {
+ fn has_pending_updates(&self, pending_monitor_updates_lock: &MutexGuard<Vec<u64>>) -> bool {
+ !pending_monitor_updates_lock.is_empty()
}
}
///
/// Note that this holds a mutex in [`ChainMonitor`] and may block other events until it is
/// released.
-pub struct LockedChannelMonitor<'a, ChannelSigner: Sign> {
+pub struct LockedChannelMonitor<'a, ChannelSigner: EcdsaChannelSigner> {
lock: RwLockReadGuard<'a, HashMap<OutPoint, MonitorHolder<ChannelSigner>>>,
funding_txo: OutPoint,
}
-impl<ChannelSigner: Sign> Deref for LockedChannelMonitor<'_, ChannelSigner> {
+impl<ChannelSigner: EcdsaChannelSigner> Deref for LockedChannelMonitor<'_, ChannelSigner> {
type Target = ChannelMonitor<ChannelSigner>;
fn deref(&self) -> &ChannelMonitor<ChannelSigner> {
&self.lock.get(&self.funding_txo).expect("Checked at construction").monitor
/// or used independently to monitor channels remotely. See the [module-level documentation] for
/// details.
///
+/// Note that `ChainMonitor` should regularly trigger rebroadcasts/fee bumps of pending claims from
+/// a force-closed channel. This is crucial in preventing certain classes of pinning attacks,
+/// detecting substantial mempool feerate changes between blocks, and ensuring reliability if
+/// broadcasting fails. We recommend invoking [`rebroadcast_pending_claims`] every 30 seconds, or
+/// more often if running in an environment with spotty connections, like on mobile.
+///
/// [`ChannelManager`]: crate::ln::channelmanager::ChannelManager
/// [module-level documentation]: crate::chain::chainmonitor
-pub struct ChainMonitor<ChannelSigner: Sign, C: Deref, T: Deref, F: Deref, L: Deref, P: Deref>
+/// [`rebroadcast_pending_claims`]: Self::rebroadcast_pending_claims
+pub struct ChainMonitor<ChannelSigner: EcdsaChannelSigner, C: Deref, T: Deref, F: Deref, L: Deref, P: Deref>
where C::Target: chain::Filter,
T::Target: BroadcasterInterface,
F::Target: FeeEstimator,
P::Target: Persist<ChannelSigner>,
{
monitors: RwLock<HashMap<OutPoint, MonitorHolder<ChannelSigner>>>,
- /// When we generate a [`MonitorUpdateId`] for a chain-event monitor persistence, we need a
- /// unique ID, which we calculate by simply getting the next value from this counter. Note that
- /// the ID is never persisted so it's ok that they reset on restart.
- sync_persistence_id: AtomicCounter,
chain_source: Option<C>,
broadcaster: T,
logger: L,
persister: P,
/// "User-provided" (ie persistence-completion/-failed) [`MonitorEvent`]s. These came directly
/// from the user and not from a [`ChannelMonitor`].
- pending_monitor_events: Mutex<Vec<(OutPoint, Vec<MonitorEvent>, Option<PublicKey>)>>,
+ pending_monitor_events: Mutex<Vec<(OutPoint, ChannelId, Vec<MonitorEvent>, Option<PublicKey>)>>,
/// The best block height seen, used as a proxy for the passage of time.
highest_chain_height: AtomicUsize,
+
+ /// A [`Notifier`] used to wake up the background processor in case we have any [`Event`]s for
+ /// it to give to users (or [`MonitorEvent`]s for `ChannelManager` to process).
+ event_notifier: Notifier,
}
-impl<ChannelSigner: Sign, C: Deref, T: Deref, F: Deref, L: Deref, P: Deref> ChainMonitor<ChannelSigner, C, T, F, L, P>
+impl<ChannelSigner: EcdsaChannelSigner, C: Deref, T: Deref, F: Deref, L: Deref, P: Deref> ChainMonitor<ChannelSigner, C, T, F, L, P>
where C::Target: chain::Filter,
T::Target: BroadcasterInterface,
F::Target: FeeEstimator,
/// updated `txdata`.
///
/// Calls which represent a new blockchain tip height should set `best_height`.
- fn process_chain_data<FN>(&self, header: &BlockHeader, best_height: Option<u32>, txdata: &TransactionData, process: FN)
+ fn process_chain_data<FN>(&self, header: &Header, best_height: Option<u32>, txdata: &TransactionData, process: FN)
where
FN: Fn(&ChannelMonitor<ChannelSigner>, &TransactionData) -> Vec<TransactionOutputs>
{
+ let err_str = "ChannelMonitor[Update] persistence failed unrecoverably. This indicates we cannot continue normal operation and must shut down.";
+ let funding_outpoints = hash_set_from_iter(self.monitors.read().unwrap().keys().cloned());
+ for funding_outpoint in funding_outpoints.iter() {
+ let monitor_lock = self.monitors.read().unwrap();
+ if let Some(monitor_state) = monitor_lock.get(funding_outpoint) {
+ if self.update_monitor_with_chain_data(header, txdata, &process, funding_outpoint, &monitor_state).is_err() {
+ // Take the monitors lock for writing so that we poison it and any future
+ // operations going forward fail immediately.
+ core::mem::drop(monitor_lock);
+ let _poison = self.monitors.write().unwrap();
+ log_error!(self.logger, "{}", err_str);
+ panic!("{}", err_str);
+ }
+ }
+ }
+
+ // Do some follow-up cleanup if any funding outpoints were added in between iterations.
let monitor_states = self.monitors.write().unwrap();
+ for (funding_outpoint, monitor_state) in monitor_states.iter() {
+ if !funding_outpoints.contains(funding_outpoint) {
+ if self.update_monitor_with_chain_data(header, txdata, &process, funding_outpoint, &monitor_state).is_err() {
+ log_error!(self.logger, "{}", err_str);
+ panic!("{}", err_str);
+ }
+ }
+ }
+
if let Some(height) = best_height {
// If the best block height is being updated, update highest_chain_height under the
// monitors write lock.
self.highest_chain_height.store(new_height, Ordering::Release);
}
}
+ }
- for (funding_outpoint, monitor_state) in monitor_states.iter() {
- let monitor = &monitor_state.monitor;
- let mut txn_outputs;
- {
- txn_outputs = process(monitor, txdata);
- let update_id = MonitorUpdateId {
- contents: UpdateOrigin::ChainSync(self.sync_persistence_id.get_increment()),
- };
- let mut pending_monitor_updates = monitor_state.pending_monitor_updates.lock().unwrap();
- if let Some(height) = best_height {
- if !monitor_state.has_pending_chainsync_updates(&pending_monitor_updates) {
- // If there are not ChainSync persists awaiting completion, go ahead and
- // set last_chain_persist_height here - we wouldn't want the first
- // InProgress to always immediately be considered "overly delayed".
- monitor_state.last_chain_persist_height.store(height as usize, Ordering::Release);
- }
- }
-
- log_trace!(self.logger, "Syncing Channel Monitor for channel {}", log_funding_info!(monitor));
- match self.persister.update_persisted_channel(*funding_outpoint, None, monitor, update_id) {
- ChannelMonitorUpdateStatus::Completed =>
- log_trace!(self.logger, "Finished syncing Channel Monitor for channel {}", log_funding_info!(monitor)),
- ChannelMonitorUpdateStatus::PermanentFailure => {
- monitor_state.channel_perm_failed.store(true, Ordering::Release);
- self.pending_monitor_events.lock().unwrap().push((*funding_outpoint, vec![MonitorEvent::UpdateFailed(*funding_outpoint)], monitor.get_counterparty_node_id()));
- },
- ChannelMonitorUpdateStatus::InProgress => {
- log_debug!(self.logger, "Channel Monitor sync for channel {} in progress, holding events until completion!", log_funding_info!(monitor));
- pending_monitor_updates.push(update_id);
- },
- }
+ fn update_monitor_with_chain_data<FN>(
+ &self, header: &Header, txdata: &TransactionData, process: FN, funding_outpoint: &OutPoint,
+ monitor_state: &MonitorHolder<ChannelSigner>
+ ) -> Result<(), ()> where FN: Fn(&ChannelMonitor<ChannelSigner>, &TransactionData) -> Vec<TransactionOutputs> {
+ let monitor = &monitor_state.monitor;
+ let logger = WithChannelMonitor::from(&self.logger, &monitor, None);
+ let mut txn_outputs;
+ {
+ txn_outputs = process(monitor, txdata);
+ log_trace!(logger, "Syncing Channel Monitor for channel {}", log_funding_info!(monitor));
+ match self.persister.update_persisted_channel(*funding_outpoint, None, monitor) {
+ ChannelMonitorUpdateStatus::Completed =>
+ log_trace!(logger, "Finished syncing Channel Monitor for channel {} for block-data",
+ log_funding_info!(monitor)
+ ),
+ ChannelMonitorUpdateStatus::InProgress => {
+ log_trace!(logger, "Channel Monitor sync for channel {} in progress.", log_funding_info!(monitor));
+ },
+ ChannelMonitorUpdateStatus::UnrecoverableError => {
+ return Err(());
+ },
}
+ }
- // Register any new outputs with the chain source for filtering, storing any dependent
- // transactions from within the block that previously had not been included in txdata.
- if let Some(ref chain_source) = self.chain_source {
- let block_hash = header.block_hash();
- for (txid, mut outputs) in txn_outputs.drain(..) {
- for (idx, output) in outputs.drain(..) {
- // Register any new outputs with the chain source for filtering
- let output = WatchedOutput {
- block_hash: Some(block_hash),
- outpoint: OutPoint { txid, index: idx as u16 },
- script_pubkey: output.script_pubkey,
- };
- chain_source.register_output(output)
- }
+ // Register any new outputs with the chain source for filtering, storing any dependent
+ // transactions from within the block that previously had not been included in txdata.
+ if let Some(ref chain_source) = self.chain_source {
+ let block_hash = header.block_hash();
+ for (txid, mut outputs) in txn_outputs.drain(..) {
+ for (idx, output) in outputs.drain(..) {
+ // Register any new outputs with the chain source for filtering
+ let output = WatchedOutput {
+ block_hash: Some(block_hash),
+ outpoint: OutPoint { txid, index: idx as u16 },
+ script_pubkey: output.script_pubkey,
+ };
+ log_trace!(logger, "Adding monitoring for spends of outpoint {} to the filter", output.outpoint);
+ chain_source.register_output(output);
}
}
}
+ Ok(())
}
/// Creates a new `ChainMonitor` used to watch on-chain activity pertaining to channels.
/// transactions relevant to the watched channels.
pub fn new(chain_source: Option<C>, broadcaster: T, logger: L, feeest: F, persister: P) -> Self {
Self {
- monitors: RwLock::new(HashMap::new()),
- sync_persistence_id: AtomicCounter::new(),
+ monitors: RwLock::new(new_hash_map()),
chain_source,
broadcaster,
logger,
persister,
pending_monitor_events: Mutex::new(Vec::new()),
highest_chain_height: AtomicUsize::new(0),
+ event_notifier: Notifier::new(),
}
}
}
}
- /// Lists the funding outpoint of each [`ChannelMonitor`] being monitored.
+ /// Lists the funding outpoint and channel ID of each [`ChannelMonitor`] being monitored.
///
/// Note that [`ChannelMonitor`]s are not removed when a channel is closed as they are always
/// monitoring for on-chain state resolutions.
- pub fn list_monitors(&self) -> Vec<OutPoint> {
- self.monitors.read().unwrap().keys().map(|outpoint| *outpoint).collect()
+ pub fn list_monitors(&self) -> Vec<(OutPoint, ChannelId)> {
+ self.monitors.read().unwrap().iter().map(|(outpoint, monitor_holder)| {
+ let channel_id = monitor_holder.monitor.channel_id();
+ (*outpoint, channel_id)
+ }).collect()
}
#[cfg(not(c_bindings))]
/// Lists the pending updates for each [`ChannelMonitor`] (by `OutPoint` being monitored).
- pub fn list_pending_monitor_updates(&self) -> HashMap<OutPoint, Vec<MonitorUpdateId>> {
- self.monitors.read().unwrap().iter().map(|(outpoint, holder)| {
+ /// Each `Vec<u64>` contains `update_id`s from [`ChannelMonitor::get_latest_update_id`] for updates
+ /// that have not yet been fully persisted. Note that if a full monitor is persisted all the pending
+ /// monitor updates must be individually marked completed by calling [`ChainMonitor::channel_monitor_updated`].
+ pub fn list_pending_monitor_updates(&self) -> HashMap<OutPoint, Vec<u64>> {
+ hash_map_from_iter(self.monitors.read().unwrap().iter().map(|(outpoint, holder)| {
(*outpoint, holder.pending_monitor_updates.lock().unwrap().clone())
- }).collect()
+ }))
}
#[cfg(c_bindings)]
/// Lists the pending updates for each [`ChannelMonitor`] (by `OutPoint` being monitored).
- pub fn list_pending_monitor_updates(&self) -> Vec<(OutPoint, Vec<MonitorUpdateId>)> {
+ /// Each `Vec<u64>` contains `update_id`s from [`ChannelMonitor::get_latest_update_id`] for updates
+ /// that have not yet been fully persisted. Note that if a full monitor is persisted all the pending
+ /// monitor updates must be individually marked completed by calling [`ChainMonitor::channel_monitor_updated`].
+ pub fn list_pending_monitor_updates(&self) -> Vec<(OutPoint, Vec<u64>)> {
self.monitors.read().unwrap().iter().map(|(outpoint, holder)| {
(*outpoint, holder.pending_monitor_updates.lock().unwrap().clone())
}).collect()
/// 1) This [`ChainMonitor`] calls [`Persist::update_persisted_channel`] which stores the
/// update to disk and begins updating any remote (e.g. watchtower/backup) copies,
/// returning [`ChannelMonitorUpdateStatus::InProgress`],
- /// 2) once all remote copies are updated, you call this function with the
- /// `completed_update_id` that completed, and once all pending updates have completed the
- /// channel will be re-enabled.
- // Note that we re-enable only after `UpdateOrigin::OffChain` updates complete, we don't
- // care about `UpdateOrigin::ChainSync` updates for the channel state being updated. We
- // only care about `UpdateOrigin::ChainSync` for returning `MonitorEvent`s.
+ /// 2) once all remote copies are updated, you call this function with [`ChannelMonitor::get_latest_update_id`]
+ /// or [`ChannelMonitorUpdate::update_id`] as the `completed_update_id`, and once all pending
+ /// updates have completed the channel will be re-enabled.
+ ///
+ /// It is only necessary to call [`ChainMonitor::channel_monitor_updated`] when you return [`ChannelMonitorUpdateStatus::InProgress`]
+ /// from [`Persist`] and either:
+ /// 1. A new [`ChannelMonitor`] was added in [`Persist::persist_new_channel`], or
+ /// 2. A [`ChannelMonitorUpdate`] was provided as part of [`Persist::update_persisted_channel`].
+ /// Note that we don't care about calls to [`Persist::update_persisted_channel`] where no
+ /// [`ChannelMonitorUpdate`] was provided.
///
/// Returns an [`APIError::APIMisuseError`] if `funding_txo` does not match any currently
/// registered [`ChannelMonitor`]s.
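+ ///
+ /// For example, once a background persistence task finishes, an illustrative sketch (how the
+ /// `funding_txo` and `update_id` were captured when [`Persist`] returned
+ /// [`ChannelMonitorUpdateStatus::InProgress`] is up to the implementation):
+ ///
+ /// ```ignore
+ /// chain_monitor.channel_monitor_updated(funding_txo, update_id)
+ ///     .expect("the monitor for this channel is still registered");
+ /// ```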
- pub fn channel_monitor_updated(&self, funding_txo: OutPoint, completed_update_id: MonitorUpdateId) -> Result<(), APIError> {
+ pub fn channel_monitor_updated(&self, funding_txo: OutPoint, completed_update_id: u64) -> Result<(), APIError> {
let monitors = self.monitors.read().unwrap();
let monitor_data = if let Some(mon) = monitors.get(&funding_txo) { mon } else {
return Err(APIError::APIMisuseError { err: format!("No ChannelMonitor matching funding outpoint {:?} found", funding_txo) });
let mut pending_monitor_updates = monitor_data.pending_monitor_updates.lock().unwrap();
pending_monitor_updates.retain(|update_id| *update_id != completed_update_id);
- match completed_update_id {
- MonitorUpdateId { contents: UpdateOrigin::OffChain(_) } => {
- // Note that we only check for `UpdateOrigin::OffChain` failures here - if
- // we're being told that a `UpdateOrigin::OffChain` monitor update completed,
- // we only care about ensuring we don't tell the `ChannelManager` to restore
- // the channel to normal operation until all `UpdateOrigin::OffChain` updates
- // complete.
- // If there's some `UpdateOrigin::ChainSync` update still pending that's okay
- // - we can still update our channel state, just as long as we don't return
- // `MonitorEvent`s from the monitor back to the `ChannelManager` until they
- // complete.
- let monitor_is_pending_updates = monitor_data.has_pending_offchain_updates(&pending_monitor_updates);
- if monitor_is_pending_updates || monitor_data.channel_perm_failed.load(Ordering::Acquire) {
- // If there are still monitor updates pending (or an old monitor update
- // finished after a later one perm-failed), we cannot yet construct an
- // Completed event.
- return Ok(());
- }
- self.pending_monitor_events.lock().unwrap().push((funding_txo, vec![MonitorEvent::Completed {
- funding_txo,
- monitor_update_id: monitor_data.monitor.get_latest_update_id(),
- }], monitor_data.monitor.get_counterparty_node_id()));
- },
- MonitorUpdateId { contents: UpdateOrigin::ChainSync(_) } => {
- if !monitor_data.has_pending_chainsync_updates(&pending_monitor_updates) {
- monitor_data.last_chain_persist_height.store(self.highest_chain_height.load(Ordering::Acquire), Ordering::Release);
- // The next time release_pending_monitor_events is called, any events for this
- // ChannelMonitor will be returned.
- }
- },
+ // Note that we only check for pending non-chainsync monitor updates and we don't track monitor
+ // updates resulting from chainsync in `pending_monitor_updates`.
+ let monitor_is_pending_updates = monitor_data.has_pending_updates(&pending_monitor_updates);
+ log_debug!(self.logger, "Completed off-chain monitor update {} for channel with funding outpoint {:?}, {}",
+ completed_update_id,
+ funding_txo,
+ if monitor_is_pending_updates {
+ "still have pending off-chain updates"
+ } else {
+ "all off-chain updates complete, returning a MonitorEvent"
+ });
+ if monitor_is_pending_updates {
+ // If there are still monitor updates pending, we cannot yet construct a
+ // Completed event.
+ return Ok(());
}
+ let channel_id = monitor_data.monitor.channel_id();
+ self.pending_monitor_events.lock().unwrap().push((funding_txo, channel_id, vec![MonitorEvent::Completed {
+ funding_txo, channel_id,
+ monitor_update_id: monitor_data.monitor.get_latest_update_id(),
+ }], monitor_data.monitor.get_counterparty_node_id()));
+
+ self.event_notifier.notify();
Ok(())
}
#[cfg(any(test, fuzzing))]
pub fn force_channel_monitor_updated(&self, funding_txo: OutPoint, monitor_update_id: u64) {
let monitors = self.monitors.read().unwrap();
- let counterparty_node_id = monitors.get(&funding_txo).and_then(|m| m.monitor.get_counterparty_node_id());
- self.pending_monitor_events.lock().unwrap().push((funding_txo, vec![MonitorEvent::Completed {
+ let (counterparty_node_id, channel_id) = if let Some(m) = monitors.get(&funding_txo) {
+ (m.monitor.get_counterparty_node_id(), m.monitor.channel_id())
+ } else {
+ (None, ChannelId::v1_from_funding_outpoint(funding_txo))
+ };
+ self.pending_monitor_events.lock().unwrap().push((funding_txo, channel_id, vec![MonitorEvent::Completed {
funding_txo,
+ channel_id,
monitor_update_id,
}], counterparty_node_id));
+ self.event_notifier.notify();
}
- #[cfg(any(test, fuzzing, feature = "_test_utils"))]
+ #[cfg(any(test, feature = "_test_utils"))]
pub fn get_and_clear_pending_events(&self) -> Vec<events::Event> {
- use crate::util::events::EventsProvider;
+ use crate::events::EventsProvider;
let events = core::cell::RefCell::new(Vec::new());
let event_handler = |event: events::Event| events.borrow_mut().push(event);
self.process_pending_events(&event_handler);
///
/// See the trait-level documentation of [`EventsProvider`] for requirements.
///
- /// [`EventsProvider`]: crate::util::events::EventsProvider
+ /// [`EventsProvider`]: crate::events::EventsProvider
pub async fn process_pending_events_async<Future: core::future::Future, H: Fn(Event) -> Future>(
&self, handler: H
) {
- let mut pending_events = Vec::new();
- for monitor_state in self.monitors.read().unwrap().values() {
- pending_events.append(&mut monitor_state.monitor.get_and_clear_pending_events());
+ // Sadly we can't hold the monitors read lock through an async call. Thus we have to do a
+ // crazy dance to process a monitor's events then only remove them once we've done so.
+ let mons_to_process = self.monitors.read().unwrap().keys().cloned().collect::<Vec<_>>();
+ for funding_txo in mons_to_process {
+ let mut ev;
+ super::channelmonitor::process_events_body!(
+ self.monitors.read().unwrap().get(&funding_txo).map(|m| &m.monitor), ev, handler(ev).await);
+ }
+ }
+
+ /// Gets a [`Future`] that completes when an event is available either via
+ /// [`chain::Watch::release_pending_monitor_events`] or
+ /// [`EventsProvider::process_pending_events`].
+ ///
+ /// Note that callbacks registered on the [`Future`] MUST NOT call back into this
+ /// [`ChainMonitor`] and should instead register actions to be taken later.
+ ///
+ /// [`EventsProvider::process_pending_events`]: crate::events::EventsProvider::process_pending_events
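+ ///
+ /// For example, in an async background task, an illustrative sketch (`chain_monitor` is assumed
+ /// to be an `Arc` wrapping this [`ChainMonitor`] and `handle_event` a caller-provided async
+ /// event handler):
+ ///
+ /// ```ignore
+ /// loop {
+ ///     chain_monitor.get_update_future().await;
+ ///     chain_monitor.process_pending_events_async(|event| handle_event(event)).await;
+ /// }
+ /// ```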
+ pub fn get_update_future(&self) -> Future {
+ self.event_notifier.get_future()
+ }
+
+ /// Triggers rebroadcasts/fee-bumps of pending claims from a force-closed channel. This is
+ /// crucial in preventing certain classes of pinning attacks, detecting substantial mempool
+ /// feerate changes between blocks, and ensuring reliability if broadcasting fails. We recommend
+ /// invoking this every 30 seconds, or more often if running in an environment with spotty
+ /// connections, like on mobile.
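+ ///
+ /// For example, from a dedicated background thread, an illustrative sketch (`chain_monitor` is
+ /// assumed to be an `Arc` wrapping this [`ChainMonitor`]):
+ ///
+ /// ```ignore
+ /// std::thread::spawn(move || loop {
+ ///     std::thread::sleep(std::time::Duration::from_secs(30));
+ ///     chain_monitor.rebroadcast_pending_claims();
+ /// });
+ /// ```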
+ pub fn rebroadcast_pending_claims(&self) {
+ let monitors = self.monitors.read().unwrap();
+ for (_, monitor_holder) in &*monitors {
+ monitor_holder.monitor.rebroadcast_pending_claims(
+ &*self.broadcaster, &*self.fee_estimator, &self.logger
+ )
+ }
+ }
+
+ /// Triggers rebroadcasts of pending claims from force-closed channels after a transaction
+ /// signature generation failure.
+ ///
+ /// `monitor_opt` can be used as a filter to only trigger them for a specific channel monitor.
+ pub fn signer_unblocked(&self, monitor_opt: Option<OutPoint>) {
+ let monitors = self.monitors.read().unwrap();
+ if let Some(funding_txo) = monitor_opt {
+ if let Some(monitor_holder) = monitors.get(&funding_txo) {
+ monitor_holder.monitor.signer_unblocked(
+ &*self.broadcaster, &*self.fee_estimator, &self.logger
+ )
+ }
+ } else {
+ for (_, monitor_holder) in &*monitors {
+ monitor_holder.monitor.signer_unblocked(
+ &*self.broadcaster, &*self.fee_estimator, &self.logger
+ )
+ }
+ }
+ }
+
+ /// Archives fully resolved channel monitors by calling [`Persist::archive_persisted_channel`].
+ ///
+ /// This is useful for pruning fully resolved monitors from the monitor set and primary
+ /// storage so they are not kept in memory and reloaded on restart.
+ ///
+ /// Should be called occasionally (once every handful of blocks or on startup).
+ ///
+ /// Depending on the implementation of [`Persist::archive_persisted_channel`] the monitor
+ /// data could be moved to an archive location or removed entirely.
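+ ///
+ /// For example, an illustrative sketch calling this every six blocks (`chain_monitor` is assumed
+ /// to be an `Arc` wrapping this [`ChainMonitor`] and `height` the newly connected block height):
+ ///
+ /// ```ignore
+ /// if height % 6 == 0 {
+ ///     chain_monitor.archive_fully_resolved_channel_monitors();
+ /// }
+ /// ```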
+ pub fn archive_fully_resolved_channel_monitors(&self) {
+ let mut have_monitors_to_prune = false;
+ for (_, monitor_holder) in self.monitors.read().unwrap().iter() {
+ let logger = WithChannelMonitor::from(&self.logger, &monitor_holder.monitor, None);
+ if monitor_holder.monitor.is_fully_resolved(&logger) {
+ have_monitors_to_prune = true;
+ }
}
- for event in pending_events {
- handler(event).await;
+ if have_monitors_to_prune {
+ let mut monitors = self.monitors.write().unwrap();
+ monitors.retain(|funding_txo, monitor_holder| {
+ let logger = WithChannelMonitor::from(&self.logger, &monitor_holder.monitor, None);
+ if monitor_holder.monitor.is_fully_resolved(&logger) {
+ log_info!(logger,
+ "Archiving fully resolved ChannelMonitor for funding txo {}",
+ funding_txo
+ );
+ self.persister.archive_persisted_channel(*funding_txo);
+ false
+ } else {
+ true
+ }
+ });
}
}
}
-impl<ChannelSigner: Sign, C: Deref, T: Deref, F: Deref, L: Deref, P: Deref>
+impl<ChannelSigner: EcdsaChannelSigner, C: Deref, T: Deref, F: Deref, L: Deref, P: Deref>
chain::Listen for ChainMonitor<ChannelSigner, C, T, F, L, P>
where
C::Target: chain::Filter,
L::Target: Logger,
P::Target: Persist<ChannelSigner>,
{
- fn filtered_block_connected(&self, header: &BlockHeader, txdata: &TransactionData, height: u32) {
+ fn filtered_block_connected(&self, header: &Header, txdata: &TransactionData, height: u32) {
log_debug!(self.logger, "New best block {} at height {} provided via block_connected", header.block_hash(), height);
self.process_chain_data(header, Some(height), &txdata, |monitor, txdata| {
monitor.block_connected(
- header, txdata, height, &*self.broadcaster, &*self.fee_estimator, &*self.logger)
+ header, txdata, height, &*self.broadcaster, &*self.fee_estimator, &self.logger)
});
+ // Assume we may have some new events and wake the event processor
+ self.event_notifier.notify();
}
- fn block_disconnected(&self, header: &BlockHeader, height: u32) {
+ fn block_disconnected(&self, header: &Header, height: u32) {
let monitor_states = self.monitors.read().unwrap();
log_debug!(self.logger, "Latest block {} at height {} removed via block_disconnected", header.block_hash(), height);
for monitor_state in monitor_states.values() {
monitor_state.monitor.block_disconnected(
- header, height, &*self.broadcaster, &*self.fee_estimator, &*self.logger);
+ header, height, &*self.broadcaster, &*self.fee_estimator, &self.logger);
}
}
}
-impl<ChannelSigner: Sign, C: Deref, T: Deref, F: Deref, L: Deref, P: Deref>
+impl<ChannelSigner: EcdsaChannelSigner, C: Deref, T: Deref, F: Deref, L: Deref, P: Deref>
chain::Confirm for ChainMonitor<ChannelSigner, C, T, F, L, P>
where
C::Target: chain::Filter,
L::Target: Logger,
P::Target: Persist<ChannelSigner>,
{
- fn transactions_confirmed(&self, header: &BlockHeader, txdata: &TransactionData, height: u32) {
+ fn transactions_confirmed(&self, header: &Header, txdata: &TransactionData, height: u32) {
log_debug!(self.logger, "{} provided transactions confirmed at height {} in block {}", txdata.len(), height, header.block_hash());
self.process_chain_data(header, None, txdata, |monitor, txdata| {
monitor.transactions_confirmed(
- header, txdata, height, &*self.broadcaster, &*self.fee_estimator, &*self.logger)
+ header, txdata, height, &*self.broadcaster, &*self.fee_estimator, &self.logger)
});
+ // Assume we may have some new events and wake the event processor
+ self.event_notifier.notify();
}
fn transaction_unconfirmed(&self, txid: &Txid) {
log_debug!(self.logger, "Transaction {} reorganized out of chain", txid);
let monitor_states = self.monitors.read().unwrap();
for monitor_state in monitor_states.values() {
- monitor_state.monitor.transaction_unconfirmed(txid, &*self.broadcaster, &*self.fee_estimator, &*self.logger);
+ monitor_state.monitor.transaction_unconfirmed(txid, &*self.broadcaster, &*self.fee_estimator, &self.logger);
}
}
- fn best_block_updated(&self, header: &BlockHeader, height: u32) {
+ fn best_block_updated(&self, header: &Header, height: u32) {
log_debug!(self.logger, "New best block {} at height {} provided via best_block_updated", header.block_hash(), height);
self.process_chain_data(header, Some(height), &[], |monitor, txdata| {
// While in practice there shouldn't be any recursive calls when given empty txdata,
// it's still possible if a chain::Filter implementation returns a transaction.
debug_assert!(txdata.is_empty());
monitor.best_block_updated(
- header, height, &*self.broadcaster, &*self.fee_estimator, &*self.logger)
+ header, height, &*self.broadcaster, &*self.fee_estimator, &self.logger
+ )
});
+ // Assume we may have some new events and wake the event processor
+ self.event_notifier.notify();
}
- fn get_relevant_txids(&self) -> Vec<(Txid, Option<BlockHash>)> {
+ fn get_relevant_txids(&self) -> Vec<(Txid, u32, Option<BlockHash>)> {
let mut txids = Vec::new();
let monitor_states = self.monitors.read().unwrap();
for monitor_state in monitor_states.values() {
txids.append(&mut monitor_state.monitor.get_relevant_txids());
}
- txids.sort_unstable();
- txids.dedup();
+ txids.sort_unstable_by(|a, b| a.0.cmp(&b.0).then(b.1.cmp(&a.1)));
+ txids.dedup_by_key(|(txid, _, _)| *txid);
txids
}
}
-impl<ChannelSigner: Sign, C: Deref , T: Deref , F: Deref , L: Deref , P: Deref >
+impl<ChannelSigner: EcdsaChannelSigner, C: Deref , T: Deref , F: Deref , L: Deref , P: Deref >
chain::Watch<ChannelSigner> for ChainMonitor<ChannelSigner, C, T, F, L, P>
where C::Target: chain::Filter,
T::Target: BroadcasterInterface,
L::Target: Logger,
P::Target: Persist<ChannelSigner>,
{
- /// Adds the monitor that watches the channel referred to by the given outpoint.
- ///
- /// Calls back to [`chain::Filter`] with the funding transaction and outputs to watch.
- ///
- /// Note that we persist the given `ChannelMonitor` while holding the `ChainMonitor`
- /// monitors lock.
- fn watch_channel(&self, funding_outpoint: OutPoint, monitor: ChannelMonitor<ChannelSigner>) -> ChannelMonitorUpdateStatus {
+ fn watch_channel(&self, funding_outpoint: OutPoint, monitor: ChannelMonitor<ChannelSigner>) -> Result<ChannelMonitorUpdateStatus, ()> {
+ let logger = WithChannelMonitor::from(&self.logger, &monitor, None);
let mut monitors = self.monitors.write().unwrap();
let entry = match monitors.entry(funding_outpoint) {
hash_map::Entry::Occupied(_) => {
- log_error!(self.logger, "Failed to add new channel data: channel monitor for given outpoint is already present");
- return ChannelMonitorUpdateStatus::PermanentFailure
+ log_error!(logger, "Failed to add new channel data: channel monitor for given outpoint is already present");
+ return Err(());
},
hash_map::Entry::Vacant(e) => e,
};
- log_trace!(self.logger, "Got new ChannelMonitor for channel {}", log_funding_info!(monitor));
- let update_id = MonitorUpdateId::from_new_monitor(&monitor);
+ log_trace!(logger, "Got new ChannelMonitor for channel {}", log_funding_info!(monitor));
+ let update_id = monitor.get_latest_update_id();
let mut pending_monitor_updates = Vec::new();
- let persist_res = self.persister.persist_new_channel(funding_outpoint, &monitor, update_id);
+ let persist_res = self.persister.persist_new_channel(funding_outpoint, &monitor);
match persist_res {
ChannelMonitorUpdateStatus::InProgress => {
- log_info!(self.logger, "Persistence of new ChannelMonitor for channel {} in progress", log_funding_info!(monitor));
+ log_info!(logger, "Persistence of new ChannelMonitor for channel {} in progress", log_funding_info!(monitor));
pending_monitor_updates.push(update_id);
},
- ChannelMonitorUpdateStatus::PermanentFailure => {
- log_error!(self.logger, "Persistence of new ChannelMonitor for channel {} failed", log_funding_info!(monitor));
- return persist_res;
- },
ChannelMonitorUpdateStatus::Completed => {
- log_info!(self.logger, "Persistence of new ChannelMonitor for channel {} completed", log_funding_info!(monitor));
- }
+ log_info!(logger, "Persistence of new ChannelMonitor for channel {} completed", log_funding_info!(monitor));
+ },
+ ChannelMonitorUpdateStatus::UnrecoverableError => {
+ let err_str = "ChannelMonitor[Update] persistence failed unrecoverably. This indicates we cannot continue normal operation and must shut down.";
+ log_error!(logger, "{}", err_str);
+ panic!("{}", err_str);
+ },
}
if let Some(ref chain_source) = self.chain_source {
- monitor.load_outputs_to_watch(chain_source);
+ monitor.load_outputs_to_watch(chain_source, &self.logger);
}
entry.insert(MonitorHolder {
monitor,
pending_monitor_updates: Mutex::new(pending_monitor_updates),
- channel_perm_failed: AtomicBool::new(false),
- last_chain_persist_height: AtomicUsize::new(self.highest_chain_height.load(Ordering::Acquire)),
});
- persist_res
+ Ok(persist_res)
}
- /// Note that we persist the given `ChannelMonitor` update while holding the
- /// `ChainMonitor` monitors lock.
fn update_channel(&self, funding_txo: OutPoint, update: &ChannelMonitorUpdate) -> ChannelMonitorUpdateStatus {
+ // `ChannelMonitorUpdate`'s `channel_id` is `None` prior to 0.0.121 and all channels in those
+ // versions are V1-established. For 0.0.121+ the `channel_id` field is always `Some`.
+ let channel_id = update.channel_id.unwrap_or(ChannelId::v1_from_funding_outpoint(funding_txo));
// Update the monitor that watches the channel referred to by the given outpoint.
let monitors = self.monitors.read().unwrap();
match monitors.get(&funding_txo) {
None => {
- log_error!(self.logger, "Failed to update channel monitor: no such monitor registered");
+ let logger = WithContext::from(&self.logger, update.counterparty_node_id, Some(channel_id), None);
+ log_error!(logger, "Failed to update channel monitor: no such monitor registered");
// We should never ever trigger this from within ChannelManager. Technically a
// user could use this object with some proxying in between which makes this
// possible, but in tests and fuzzing, this should be a panic.
- #[cfg(any(test, fuzzing))]
+ #[cfg(debug_assertions)]
panic!("ChannelManager generated a channel update for a channel that was not yet registered!");
- #[cfg(not(any(test, fuzzing)))]
- ChannelMonitorUpdateStatus::PermanentFailure
+ #[cfg(not(debug_assertions))]
+ ChannelMonitorUpdateStatus::InProgress
},
Some(monitor_state) => {
let monitor = &monitor_state.monitor;
- log_trace!(self.logger, "Updating ChannelMonitor for channel {}", log_funding_info!(monitor));
- let update_res = monitor.update_monitor(update, &self.broadcaster, &*self.fee_estimator, &self.logger);
- if update_res.is_err() {
- log_error!(self.logger, "Failed to update ChannelMonitor for channel {}.", log_funding_info!(monitor));
- }
- // Even if updating the monitor returns an error, the monitor's state will
- // still be changed. So, persist the updated monitor despite the error.
- let update_id = MonitorUpdateId::from_monitor_update(update);
+ let logger = WithChannelMonitor::from(&self.logger, &monitor, None);
+ log_trace!(logger, "Updating ChannelMonitor to id {} for channel {}", update.update_id, log_funding_info!(monitor));
+ let update_res = monitor.update_monitor(update, &self.broadcaster, &self.fee_estimator, &self.logger);
+
+ let update_id = update.update_id;
let mut pending_monitor_updates = monitor_state.pending_monitor_updates.lock().unwrap();
- let persist_res = self.persister.update_persisted_channel(funding_txo, Some(update), monitor, update_id);
+ let persist_res = if update_res.is_err() {
+ // Even if updating the monitor returns an error, the monitor's state will
+ // still be changed. Therefore, we should persist the updated monitor despite the error.
+ // We don't want to persist a `monitor_update` which results in a failure to apply later
+ // while reading `channel_monitor` with updates from storage. Instead, we should persist
+ // the entire `channel_monitor` here.
+ log_warn!(logger, "Failed to update ChannelMonitor for channel {}. Going ahead and persisting the entire ChannelMonitor", log_funding_info!(monitor));
+ self.persister.update_persisted_channel(funding_txo, None, monitor)
+ } else {
+ self.persister.update_persisted_channel(funding_txo, Some(update), monitor)
+ };
match persist_res {
ChannelMonitorUpdateStatus::InProgress => {
pending_monitor_updates.push(update_id);
- log_debug!(self.logger, "Persistence of ChannelMonitorUpdate for channel {} in progress", log_funding_info!(monitor));
- },
- ChannelMonitorUpdateStatus::PermanentFailure => {
- monitor_state.channel_perm_failed.store(true, Ordering::Release);
- log_error!(self.logger, "Persistence of ChannelMonitorUpdate for channel {} failed", log_funding_info!(monitor));
+ log_debug!(logger,
+ "Persistence of ChannelMonitorUpdate id {:?} for channel {} in progress",
+ update_id,
+ log_funding_info!(monitor)
+ );
},
ChannelMonitorUpdateStatus::Completed => {
- log_debug!(self.logger, "Persistence of ChannelMonitorUpdate for channel {} completed", log_funding_info!(monitor));
+ log_debug!(logger,
+ "Persistence of ChannelMonitorUpdate id {:?} for channel {} completed",
+ update_id,
+ log_funding_info!(monitor)
+ );
+ },
+ ChannelMonitorUpdateStatus::UnrecoverableError => {
+ // Take the monitors lock for writing so that we poison it and any future
+ // operations going forward fail immediately.
+ core::mem::drop(pending_monitor_updates);
+ core::mem::drop(monitors);
+ let _poison = self.monitors.write().unwrap();
+ let err_str = "ChannelMonitor[Update] persistence failed unrecoverably. This indicates we cannot continue normal operation and must shut down.";
+ log_error!(logger, "{}", err_str);
+ panic!("{}", err_str);
},
}
if update_res.is_err() {
- ChannelMonitorUpdateStatus::PermanentFailure
- } else if monitor_state.channel_perm_failed.load(Ordering::Acquire) {
- ChannelMonitorUpdateStatus::PermanentFailure
+ ChannelMonitorUpdateStatus::InProgress
} else {
persist_res
}
}
}
- fn release_pending_monitor_events(&self) -> Vec<(OutPoint, Vec<MonitorEvent>, Option<PublicKey>)> {
+ fn release_pending_monitor_events(&self) -> Vec<(OutPoint, ChannelId, Vec<MonitorEvent>, Option<PublicKey>)> {
let mut pending_monitor_events = self.pending_monitor_events.lock().unwrap().split_off(0);
for monitor_state in self.monitors.read().unwrap().values() {
- let is_pending_monitor_update = monitor_state.has_pending_chainsync_updates(&monitor_state.pending_monitor_updates.lock().unwrap());
- if is_pending_monitor_update &&
- monitor_state.last_chain_persist_height.load(Ordering::Acquire) + LATENCY_GRACE_PERIOD_BLOCKS as usize
- > self.highest_chain_height.load(Ordering::Acquire)
- {
- log_info!(self.logger, "A Channel Monitor sync is still in progress, refusing to provide monitor events!");
- } else {
- if monitor_state.channel_perm_failed.load(Ordering::Acquire) {
- // If a `UpdateOrigin::ChainSync` persistence failed with `PermanantFailure`,
- // we don't really know if the latest `ChannelMonitor` state is on disk or not.
- // We're supposed to hold monitor updates until the latest state is on disk to
- // avoid duplicate events, but the user told us persistence is screw-y and may
- // not complete. We can't hold events forever because we may learn some payment
- // preimage, so instead we just log and hope the user complied with the
- // `PermanentFailure` requirements of having at least the local-disk copy
- // updated.
- log_info!(self.logger, "A Channel Monitor sync returned PermanentFailure. Returning monitor events but duplicate events may appear after reload!");
- }
- if is_pending_monitor_update {
- log_error!(self.logger, "A ChannelMonitor sync took longer than {} blocks to complete.", LATENCY_GRACE_PERIOD_BLOCKS);
- log_error!(self.logger, " To avoid funds-loss, we are allowing monitor updates to be released.");
- log_error!(self.logger, " This may cause duplicate payment events to be generated.");
- }
- let monitor_events = monitor_state.monitor.get_and_clear_pending_monitor_events();
- if monitor_events.len() > 0 {
- let monitor_outpoint = monitor_state.monitor.get_funding_txo().0;
- let counterparty_node_id = monitor_state.monitor.get_counterparty_node_id();
- pending_monitor_events.push((monitor_outpoint, monitor_events, counterparty_node_id));
- }
+ let monitor_events = monitor_state.monitor.get_and_clear_pending_monitor_events();
+ if monitor_events.len() > 0 {
+ let monitor_outpoint = monitor_state.monitor.get_funding_txo().0;
+ let monitor_channel_id = monitor_state.monitor.channel_id();
+ let counterparty_node_id = monitor_state.monitor.get_counterparty_node_id();
+ pending_monitor_events.push((monitor_outpoint, monitor_channel_id, monitor_events, counterparty_node_id));
}
}
pending_monitor_events
}
}
-impl<ChannelSigner: Sign, C: Deref, T: Deref, F: Deref, L: Deref, P: Deref> events::EventsProvider for ChainMonitor<ChannelSigner, C, T, F, L, P>
+impl<ChannelSigner: EcdsaChannelSigner, C: Deref, T: Deref, F: Deref, L: Deref, P: Deref> events::EventsProvider for ChainMonitor<ChannelSigner, C, T, F, L, P>
where C::Target: chain::Filter,
T::Target: BroadcasterInterface,
F::Target: FeeEstimator,
L::Target: Logger,
P::Target: Persist<ChannelSigner>,
{
- #[cfg(not(anchors))]
- /// Processes [`SpendableOutputs`] events produced from each [`ChannelMonitor`] upon maturity.
- ///
- /// An [`EventHandler`] may safely call back to the provider, though this shouldn't be needed in
- /// order to handle these events.
- ///
- /// [`SpendableOutputs`]: events::Event::SpendableOutputs
- fn process_pending_events<H: Deref>(&self, handler: H) where H::Target: EventHandler {
- let mut pending_events = Vec::new();
- for monitor_state in self.monitors.read().unwrap().values() {
- pending_events.append(&mut monitor_state.monitor.get_and_clear_pending_events());
- }
- for event in pending_events {
- handler.handle_event(event);
- }
- }
- #[cfg(anchors)]
/// Processes [`SpendableOutputs`] events produced from each [`ChannelMonitor`] upon maturity.
///
/// For channels featuring anchor outputs, this method will also process [`BumpTransaction`]
/// events produced from each [`ChannelMonitor`] while there is a balance to claim onchain
/// within each channel. As the confirmation of a commitment transaction may be critical to the
- /// safety of funds, this method must be invoked frequently, ideally once for every chain tip
- /// update (block connected or disconnected).
+ /// safety of funds, we recommend invoking this every 30 seconds, or more often if running in an
+ /// environment with spotty connections, like on mobile.
///
/// An [`EventHandler`] may safely call back to the provider, though this shouldn't be needed in
/// order to handle these events.
/// [`SpendableOutputs`]: events::Event::SpendableOutputs
/// [`BumpTransaction`]: events::Event::BumpTransaction
fn process_pending_events<H: Deref>(&self, handler: H) where H::Target: EventHandler {
- let mut pending_events = Vec::new();
for monitor_state in self.monitors.read().unwrap().values() {
- pending_events.append(&mut monitor_state.monitor.get_and_clear_pending_events());
- }
- for event in pending_events {
- handler.handle_event(event);
+ monitor_state.monitor.process_pending_events(&handler);
}
}
}
#[cfg(test)]
mod tests {
- use bitcoin::{BlockHeader, TxMerkleNode};
- use bitcoin::hashes::Hash;
- use crate::{check_added_monitors, check_closed_broadcast, check_closed_event};
- use crate::{expect_payment_sent, expect_payment_claimed, expect_payment_sent_without_paths, expect_payment_path_successful, get_event_msg};
- use crate::{get_htlc_update_msgs, get_local_commitment_txn, get_revoke_commit_msgs, get_route_and_payment_hash, unwrap_send_err};
- use crate::chain::{ChannelMonitorUpdateStatus, Confirm, Watch};
- use crate::chain::channelmonitor::LATENCY_GRACE_PERIOD_BLOCKS;
- use crate::ln::channelmanager::{PaymentSendFailure, PaymentId};
+ use crate::check_added_monitors;
+ use crate::{expect_payment_path_successful, get_event_msg};
+ use crate::{get_htlc_update_msgs, get_revoke_commit_msgs};
+ use crate::chain::{ChannelMonitorUpdateStatus, Watch};
+ use crate::events::{Event, MessageSendEvent, MessageSendEventsProvider};
use crate::ln::functional_test_utils::*;
use crate::ln::msgs::ChannelMessageHandler;
- use crate::util::errors::APIError;
- use crate::util::events::{ClosureReason, MessageSendEvent, MessageSendEventsProvider};
#[test]
fn test_async_ooo_offchain_updates() {
create_announced_chan_between_nodes(&nodes, 0, 1);
// Route two payments to be claimed at the same time.
- let (payment_preimage_1, payment_hash_1, _) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000);
- let (payment_preimage_2, payment_hash_2, _) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000);
+ let (payment_preimage_1, payment_hash_1, ..) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000);
+ let (payment_preimage_2, payment_hash_2, ..) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000);
chanmon_cfgs[1].persister.offchain_monitor_updates.lock().unwrap().clear();
chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
nodes[1].node.claim_funds(payment_preimage_1);
check_added_monitors!(nodes[1], 1);
- expect_payment_claimed!(nodes[1], payment_hash_1, 1_000_000);
nodes[1].node.claim_funds(payment_preimage_2);
check_added_monitors!(nodes[1], 1);
- expect_payment_claimed!(nodes[1], payment_hash_2, 1_000_000);
let persistences = chanmon_cfgs[1].persister.offchain_monitor_updates.lock().unwrap().clone();
assert_eq!(persistences.len(), 1);
.find(|(txo, _)| txo == funding_txo).unwrap().1.contains(&next_update));
assert!(nodes[1].chain_monitor.release_pending_monitor_events().is_empty());
assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+ assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
nodes[1].chain_monitor.chain_monitor.channel_monitor_updated(*funding_txo, update_iter.next().unwrap().clone()).unwrap();
+ let claim_events = nodes[1].node.get_and_clear_pending_events();
+ assert_eq!(claim_events.len(), 2);
+ match claim_events[0] {
+ Event::PaymentClaimed { ref payment_hash, amount_msat: 1_000_000, .. } => {
+ assert_eq!(payment_hash_1, *payment_hash);
+ },
+ _ => panic!("Unexpected event"),
+ }
+ match claim_events[1] {
+ Event::PaymentClaimed { ref payment_hash, amount_msat: 1_000_000, .. } => {
+ assert_eq!(payment_hash_2, *payment_hash);
+ },
+ _ => panic!("Unexpected event"),
+ }
+
// Now manually walk the commitment signed dance - because we claimed two payments
// back-to-back it doesn't fit into the neat walk commitment_signed_dance does.
let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]);
- expect_payment_sent_without_paths!(nodes[0], payment_preimage_1);
+ expect_payment_sent(&nodes[0], payment_preimage_1, None, false, false);
nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &updates.commitment_signed);
check_added_monitors!(nodes[0], 1);
let (as_first_raa, as_first_update) = get_revoke_commit_msgs!(nodes[0], nodes[1].node.get_our_node_id());
let bs_first_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_second_updates.update_fulfill_htlcs[0]);
- expect_payment_sent_without_paths!(nodes[0], payment_preimage_2);
+ expect_payment_sent(&nodes[0], payment_preimage_2, None, false, false);
nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_second_updates.commitment_signed);
check_added_monitors!(nodes[0], 1);
nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_first_raa);
check_added_monitors!(nodes[0], 1);
}
- fn do_chainsync_pauses_events(block_timeout: bool) {
- // When a chainsync monitor update occurs, any MonitorUpdates should be held before being
- // passed upstream to a `ChannelManager` via `Watch::release_pending_monitor_events`. This
- // tests that behavior, as well as some ways it might go wrong.
- let chanmon_cfgs = create_chanmon_cfgs(2);
- let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
- let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
- let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
- let channel = create_announced_chan_between_nodes(&nodes, 0, 1);
-
- // Get a route for later and rebalance the channel somewhat
- send_payment(&nodes[0], &[&nodes[1]], 10_000_000);
- let (route, second_payment_hash, _, second_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 100_000);
-
- // First route a payment that we will claim on chain and give the recipient the preimage.
- let (payment_preimage, payment_hash, _) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000);
- nodes[1].node.claim_funds(payment_preimage);
- expect_payment_claimed!(nodes[1], payment_hash, 1_000_000);
- nodes[1].node.get_and_clear_pending_msg_events();
- check_added_monitors!(nodes[1], 1);
- let remote_txn = get_local_commitment_txn!(nodes[1], channel.2);
- assert_eq!(remote_txn.len(), 2);
-
- // Temp-fail the block connection which will hold the channel-closed event
- chanmon_cfgs[0].persister.chain_sync_monitor_persistences.lock().unwrap().clear();
- chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
-
- // Connect B's commitment transaction, but only to the ChainMonitor/ChannelMonitor. The
- // channel is now closed, but the ChannelManager doesn't know that yet.
- let new_header = BlockHeader {
- version: 2, time: 0, bits: 0, nonce: 0,
- prev_blockhash: nodes[0].best_block_info().0,
- merkle_root: TxMerkleNode::all_zeros() };
- nodes[0].chain_monitor.chain_monitor.transactions_confirmed(&new_header,
- &[(0, &remote_txn[0]), (1, &remote_txn[1])], nodes[0].best_block_info().1 + 1);
- assert!(nodes[0].chain_monitor.release_pending_monitor_events().is_empty());
- nodes[0].chain_monitor.chain_monitor.best_block_updated(&new_header, nodes[0].best_block_info().1 + 1);
- assert!(nodes[0].chain_monitor.release_pending_monitor_events().is_empty());
-
- // If the ChannelManager tries to update the channel, however, the ChainMonitor will pass
- // the update through to the ChannelMonitor which will refuse it (as the channel is closed).
- chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
- unwrap_send_err!(nodes[0].node.send_payment(&route, second_payment_hash, &Some(second_payment_secret), PaymentId(second_payment_hash.0)),
- true, APIError::ChannelUnavailable { ref err },
- assert!(err.contains("ChannelMonitor storage failure")));
- check_added_monitors!(nodes[0], 2); // After the failure we generate a close-channel monitor update
- check_closed_broadcast!(nodes[0], true);
- check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: "ChannelMonitor storage failure".to_string() });
-
- // However, as the ChainMonitor is still waiting for the original persistence to complete,
- // it won't yet release the MonitorEvents.
- assert!(nodes[0].chain_monitor.release_pending_monitor_events().is_empty());
-
- if block_timeout {
- // After three blocks, pending MontiorEvents should be released either way.
- let latest_header = BlockHeader {
- version: 2, time: 0, bits: 0, nonce: 0,
- prev_blockhash: nodes[0].best_block_info().0,
- merkle_root: TxMerkleNode::all_zeros() };
- nodes[0].chain_monitor.chain_monitor.best_block_updated(&latest_header, nodes[0].best_block_info().1 + LATENCY_GRACE_PERIOD_BLOCKS);
- } else {
- let persistences = chanmon_cfgs[0].persister.chain_sync_monitor_persistences.lock().unwrap().clone();
- for (funding_outpoint, update_ids) in persistences {
- for update_id in update_ids {
- nodes[0].chain_monitor.chain_monitor.channel_monitor_updated(funding_outpoint, update_id).unwrap();
- }
- }
- }
-
- expect_payment_sent!(nodes[0], payment_preimage);
- }
-
- #[test]
- fn chainsync_pauses_events() {
- do_chainsync_pauses_events(false);
- do_chainsync_pauses_events(true);
- }
-
#[test]
- fn update_during_chainsync_fails_channel() {
+ #[cfg(feature = "std")]
+ fn update_during_chainsync_poisons_channel() {
let chanmon_cfgs = create_chanmon_cfgs(2);
let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
create_announced_chan_between_nodes(&nodes, 0, 1);
- chanmon_cfgs[0].persister.chain_sync_monitor_persistences.lock().unwrap().clear();
- chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::PermanentFailure);
-
- connect_blocks(&nodes[0], 1);
- // Before processing events, the ChannelManager will still think the Channel is open and
- // there won't be any ChannelMonitorUpdates
- assert_eq!(nodes[0].node.list_channels().len(), 1);
- check_added_monitors!(nodes[0], 0);
- // ... however once we get events once, the channel will close, creating a channel-closed
- // ChannelMonitorUpdate.
- check_closed_broadcast!(nodes[0], true);
- check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: "Failed to persist ChannelMonitor update during chain sync".to_string() });
- check_added_monitors!(nodes[0], 1);
+ chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::UnrecoverableError);
+
+ assert!(std::panic::catch_unwind(|| {
+ // Returning an UnrecoverableError should always panic immediately
+ connect_blocks(&nodes[0], 1);
+ }).is_err());
+ assert!(std::panic::catch_unwind(|| {
+ // ...and also poison our locks causing later use to panic as well
+ core::mem::drop(nodes);
+ }).is_err());
}
}
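
For background on why the rewritten test expects panics: `UnrecoverableError` is the status a persister returns when it knows the monitor can never be written, so LDK aborts rather than running without durable channel state. A minimal sketch of that decision follows; `status_for_write` and its parameters are hypothetical, and only the `ChannelMonitorUpdateStatus` variants and `channel_monitor_updated` mentioned in the comments are LDK API.

    use lightning::chain::ChannelMonitorUpdateStatus;

    // Map a hypothetical storage outcome to the status a `Persist` implementation
    // would return. The inputs are assumptions made for this illustration.
    fn status_for_write(write_result: Result<(), std::io::Error>, store_permanently_lost: bool) -> ChannelMonitorUpdateStatus {
        if store_permanently_lost {
            // The ChainMonitor panics (and poisons its locks, as exercised by
            // `update_during_chainsync_poisons_channel` above), taking the node down
            // instead of letting it operate without persisted monitors.
            ChannelMonitorUpdateStatus::UnrecoverableError
        } else if write_result.is_ok() {
            // Durably written; normal channel operation continues.
            ChannelMonitorUpdateStatus::Completed
        } else {
            // Transient failure: retry in the background and call
            // ChainMonitor::channel_monitor_updated once the write completes.
            ChannelMonitorUpdateStatus::InProgress
        }
    }
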
+