X-Git-Url: http://git.bitcoin.ninja/index.cgi?a=blobdiff_plain;f=lightning%2Fsrc%2Fchain%2Fmod.rs;h=89e0b155cf68b2db3f24c1a0b23f1e01aacb67b1;hb=44e87b86f27a1a6ff3860e8ab87a52e756aa7cc8;hp=01eae488700605b2f23c50d5fdfdc0c2319859bc;hpb=ccf92157620da45032d75f06b5972eaf142c1ce3;p=rust-lightning

diff --git a/lightning/src/chain/mod.rs b/lightning/src/chain/mod.rs
index 01eae488..89e0b155 100644
--- a/lightning/src/chain/mod.rs
+++ b/lightning/src/chain/mod.rs
@@ -12,13 +12,12 @@
 use bitcoin::blockdata::block::{Block, BlockHeader};
 use bitcoin::blockdata::constants::genesis_block;
 use bitcoin::blockdata::script::Script;
-use bitcoin::blockdata::transaction::TxOut;
 use bitcoin::hash_types::{BlockHash, Txid};
 use bitcoin::network::constants::Network;
 use bitcoin::secp256k1::PublicKey;
 
 use crate::chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, MonitorEvent};
-use crate::chain::keysinterface::WriteableEcdsaChannelSigner;
+use crate::sign::WriteableEcdsaChannelSigner;
 use crate::chain::transaction::{OutPoint, TransactionData};
 use crate::prelude::*;
 
@@ -27,7 +26,6 @@ pub mod chaininterface;
 pub mod chainmonitor;
 pub mod channelmonitor;
 pub mod transaction;
-pub mod keysinterface;
 pub(crate) mod onchaintx;
 pub(crate) mod package;
 
@@ -41,7 +39,7 @@ pub struct BestBlock {
 impl BestBlock {
	/// Constructs a `BestBlock` that represents the genesis block at height 0 of the given
	/// network.
-	pub fn from_genesis(network: Network) -> Self {
+	pub fn from_network(network: Network) -> Self {
		BestBlock {
			block_hash: genesis_block(network).header.block_hash(),
			height: 0,
@@ -60,26 +58,6 @@ impl BestBlock {
	pub fn height(&self) -> u32 { self.height }
 }
 
-/// An error when accessing the chain via [`Access`].
-#[derive(Clone, Debug)]
-pub enum AccessError {
-	/// The requested chain is unknown.
-	UnknownChain,
-
-	/// The requested transaction doesn't exist or hasn't confirmed.
-	UnknownTx,
-}
-
-/// The `Access` trait defines behavior for accessing chain data and state, such as blocks and
-/// UTXOs.
-pub trait Access {
-	/// Returns the transaction output of a funding transaction encoded by [`short_channel_id`].
-	/// Returns an error if `genesis_hash` is for a different chain or if such a transaction output
-	/// is unknown.
-	///
-	/// [`short_channel_id`]: https://github.com/lightning/bolts/blob/master/07-routing-gossip.md#definition-of-short_channel_id
-	fn get_utxo(&self, genesis_hash: &BlockHash, short_channel_id: u64) -> Result<TxOut, AccessError>;
-}
 
 /// The `Listen` trait is used to notify when blocks have been connected or disconnected from the
 /// chain.
@@ -176,6 +154,9 @@ pub trait Confirm {
	/// Returns transactions that must be monitored for reorganization out of the chain along
	/// with the hash of the block as part of which it had been previously confirmed.
	///
+	/// Note that the returned `Option` might be `None` for channels created with LDK
+	/// 0.0.112 and prior, in which case you need to manually track previous confirmations.
+	///
	/// Will include any transactions passed to [`transactions_confirmed`] that have insufficient
	/// confirmations to be safe from a chain reorganization. Will not include any transactions
	/// passed to [`transaction_unconfirmed`], unless later reconfirmed.
@@ -195,6 +176,25 @@
 }
 
 /// An enum representing the status of a channel monitor update persistence.
+///
+/// These are generally used as the return value for an implementation of [`Persist`] which is used
+/// as the storage layer for a [`ChainMonitor`]. See the docs on [`Persist`] for a high-level
+/// explanation of how to handle different cases.
+///
+/// While `UnrecoverableError` is provided as a failure variant, it is not truly "handled" on the
+/// calling side, and generally results in an immediate panic. For those who prefer to avoid
+/// panics, `InProgress` can be used and you can retry the update operation in the background or
+/// shut down cleanly.
+///
+/// Note that channels should generally *not* be force-closed after a persistence failure.
+/// Force-closing with the latest [`ChannelMonitorUpdate`] applied may result in a transaction
+/// being broadcast which can only be spent by the latest [`ChannelMonitor`]! Thus, if the
+/// latest [`ChannelMonitor`] is not durably persisted anywhere and exists only in memory, naively
+/// calling [`ChannelManager::force_close_broadcasting_latest_txn`] *may result in loss of funds*!
+///
+/// [`Persist`]: chainmonitor::Persist
+/// [`ChainMonitor`]: chainmonitor::ChainMonitor
+/// [`ChannelManager::force_close_broadcasting_latest_txn`]: crate::ln::channelmanager::ChannelManager::force_close_broadcasting_latest_txn
 #[derive(Clone, Copy, Debug, PartialEq, Eq)]
 pub enum ChannelMonitorUpdateStatus {
	/// The update has been durably persisted and all copies of the relevant [`ChannelMonitor`]
@@ -203,17 +203,13 @@ pub enum ChannelMonitorUpdateStatus {
	/// This includes performing any `fsync()` calls required to ensure the update is guaranteed to
	/// be available on restart even if the application crashes.
	Completed,
-	/// Used to indicate a temporary failure (eg connection to a watchtower or remote backup of
-	/// our state failed, but is expected to succeed at some point in the future).
+	/// Indicates that the update will happen asynchronously in the background or that a transient
+	/// failure occurred which is being retried in the background and will eventually complete.
	///
-	/// Such a failure will "freeze" a channel, preventing us from revoking old states or
-	/// submitting new commitment transactions to the counterparty. Once the update(s) which failed
-	/// have been successfully applied, a [`MonitorEvent::Completed`] can be used to restore the
-	/// channel to an operational state.
-	///
-	/// Note that a given [`ChannelManager`] will *never* re-generate a [`ChannelMonitorUpdate`].
-	/// If you return this error you must ensure that it is written to disk safely before writing
-	/// the latest [`ChannelManager`] state, or you should return [`PermanentFailure`] instead.
+	/// This will "freeze" a channel, preventing us from revoking old states or submitting a new
+	/// commitment transaction to the counterparty. Once the update(s) which are `InProgress` have
+	/// been completed, a [`MonitorEvent::Completed`] can be used to restore the channel to an
+	/// operational state.
	///
	/// Even when a channel has been "frozen", updates to the [`ChannelMonitor`] can continue to
	/// occur (e.g. if an inbound HTLC which we forwarded was claimed upstream, resulting in us
@@ -223,74 +219,40 @@ pub enum ChannelMonitorUpdateStatus {
	/// until a [`MonitorEvent::Completed`] is provided, even if you return no error on a later
	/// monitor update for the same channel.
	///
-	/// For deployments where a copy of ChannelMonitors and other local state are backed up in a
-	/// remote location (with local copies persisted immediately), it is anticipated that all
+	/// For deployments where a copy of [`ChannelMonitor`]s and other local state are backed up in
+	/// a remote location (with local copies persisted immediately), it is anticipated that all
	/// updates will return [`InProgress`] until the remote copies could be updated.
	///
-	/// [`PermanentFailure`]: ChannelMonitorUpdateStatus::PermanentFailure
+	/// Note that while fully asynchronous persistence of [`ChannelMonitor`] data is generally
+	/// reliable, this feature is considered beta, and a handful of edge-cases remain. Until the
+	/// remaining cases are fixed, in rare cases, *using this feature may lead to funds loss*.
+	///
	/// [`InProgress`]: ChannelMonitorUpdateStatus::InProgress
-	/// [`ChannelManager`]: crate::ln::channelmanager::ChannelManager
	InProgress,
-	/// Used to indicate no further channel monitor updates will be allowed (likely a disk failure
-	/// or a remote copy of this [`ChannelMonitor`] is no longer reachable and thus not updatable).
+	/// Indicates that an update has failed and will not complete at any point in the future.
	///
-	/// When this is returned, [`ChannelManager`] will force-close the channel but *not* broadcast
-	/// our current commitment transaction. This avoids a dangerous case where a local disk failure
-	/// (e.g. the Linux-default remounting of the disk as read-only) causes [`PermanentFailure`]s
-	/// for all monitor updates. If we were to broadcast our latest commitment transaction and then
-	/// restart, we could end up reading a previous [`ChannelMonitor`] and [`ChannelManager`],
-	/// revoking our now-broadcasted state before seeing it confirm and losing all our funds.
+	/// Currently returning this variant will cause LDK to immediately panic to encourage immediate
+	/// shutdown. In the future this may be updated to disconnect peers and refuse to continue
+	/// normal operation without a panic.
	///
-	/// Note that this is somewhat of a tradeoff - if the disk is really gone and we may have lost
-	/// the data permanently, we really should broadcast immediately. If the data can be recovered
-	/// with manual intervention, we'd rather close the channel, rejecting future updates to it,
-	/// and broadcast the latest state only if we have HTLCs to claim which are timing out (which
-	/// we do as long as blocks are connected).
+	/// Applications which wish to perform an orderly shutdown after failure should consider
+	/// returning [`InProgress`] instead and simply shut down without ever marking the update
+	/// complete.
	///
-	/// In order to broadcast the latest local commitment transaction, you'll need to call
-	/// [`ChannelMonitor::get_latest_holder_commitment_txn`] and broadcast the resulting
-	/// transactions once you've safely ensured no further channel updates can be generated by your
-	/// [`ChannelManager`].
-	///
-	/// Note that at least one final [`ChannelMonitorUpdate`] may still be provided, which must
-	/// still be processed by a running [`ChannelMonitor`]. This final update will mark the
-	/// [`ChannelMonitor`] as finalized, ensuring no further updates (e.g. revocation of the latest
-	/// commitment transaction) are allowed.
-	///
-	/// Note that even if you return a [`PermanentFailure`] due to unavailability of secondary
-	/// [`ChannelMonitor`] copies, you should still make an attempt to store the update where
-	/// possible to ensure you can claim HTLC outputs on the latest commitment transaction
-	/// broadcasted later.
-	///
-	/// In case of distributed watchtowers deployment, the new version must be written to disk, as
-	/// state may have been stored but rejected due to a block forcing a commitment broadcast. This
-	/// storage is used to claim outputs of rejected state confirmed onchain by another watchtower,
-	/// lagging behind on block processing.
-	///
-	/// [`PermanentFailure`]: ChannelMonitorUpdateStatus::PermanentFailure
-	/// [`ChannelManager`]: crate::ln::channelmanager::ChannelManager
-	PermanentFailure,
+	/// [`InProgress`]: ChannelMonitorUpdateStatus::InProgress
+	UnrecoverableError,
 }
 
 /// The `Watch` trait defines behavior for watching on-chain activity pertaining to channels as
 /// blocks are connected and disconnected.
 ///
 /// Each channel is associated with a [`ChannelMonitor`]. Implementations of this trait are
-/// responsible for maintaining a set of monitors such that they can be updated accordingly as
-/// channel state changes and HTLCs are resolved. See method documentation for specific
-/// requirements.
-///
-/// Implementations **must** ensure that updates are successfully applied and persisted upon method
-/// completion. If an update fails with a [`PermanentFailure`], then it must immediately shut down
-/// without taking any further action such as persisting the current state.
+/// responsible for maintaining a set of monitors such that they can be updated as channel state
+/// changes. On each update, *all copies* of a [`ChannelMonitor`] must be updated and the update
+/// persisted to disk to ensure that the latest [`ChannelMonitor`] state can be reloaded if the
+/// application crashes.
 ///
-/// If an implementation maintains multiple instances of a channel's monitor (e.g., by storing
-/// backup copies), then it must ensure that updates are applied across all instances. Otherwise, it
-/// could result in a revoked transaction being broadcast, allowing the counterparty to claim all
-/// funds in the channel. See [`ChannelMonitorUpdateStatus`] for more details about how to handle
-/// multiple instances.
-///
-/// [`PermanentFailure`]: ChannelMonitorUpdateStatus::PermanentFailure
+/// See method documentation and [`ChannelMonitorUpdateStatus`] for specific requirements.
 pub trait Watch<ChannelSigner: WriteableEcdsaChannelSigner> {
	/// Watches a channel identified by `funding_txo` using `monitor`.
	///
@@ -298,20 +260,32 @@ pub trait Watch<ChannelSigner: WriteableEcdsaChannelSigner> {
	/// with any spends of outputs returned by [`get_outputs_to_watch`]. In practice, this means
	/// calling [`block_connected`] and [`block_disconnected`] on the monitor.
	///
-	/// Note: this interface MUST error with [`ChannelMonitorUpdateStatus::PermanentFailure`] if
-	/// the given `funding_txo` has previously been registered via `watch_channel`.
+	/// A return of `Err(())` indicates that the channel should immediately be force-closed without
+	/// broadcasting the funding transaction.
+	///
+	/// If the given `funding_txo` has previously been registered via `watch_channel`, `Err(())`
+	/// must be returned.
	///
	/// [`get_outputs_to_watch`]: channelmonitor::ChannelMonitor::get_outputs_to_watch
	/// [`block_connected`]: channelmonitor::ChannelMonitor::block_connected
	/// [`block_disconnected`]: channelmonitor::ChannelMonitor::block_disconnected
-	fn watch_channel(&self, funding_txo: OutPoint, monitor: ChannelMonitor<ChannelSigner>) -> ChannelMonitorUpdateStatus;
+	fn watch_channel(&self, funding_txo: OutPoint, monitor: ChannelMonitor<ChannelSigner>) -> Result<ChannelMonitorUpdateStatus, ()>;
 
	/// Updates a channel identified by `funding_txo` by applying `update` to its monitor.
	///
-	/// Implementations must call [`update_monitor`] with the given update. See
-	/// [`ChannelMonitorUpdateStatus`] for invariants around returning an error.
+	/// Implementations must call [`ChannelMonitor::update_monitor`] with the given update. This
+	/// may fail (returning an `Err(())`), in which case this should return
+	/// [`ChannelMonitorUpdateStatus::InProgress`] (and the update should never complete). This
+	/// generally implies the channel has been closed (either by the funding outpoint being spent
+	/// on-chain or the [`ChannelMonitor`] having decided to do so and broadcasted a transaction),
+	/// and the [`ChannelManager`] state will be updated once it sees the funding spend on-chain.
+	///
+	/// In general, persistence failures should be retried after returning
+	/// [`ChannelMonitorUpdateStatus::InProgress`] and eventually complete. If a failure truly
+	/// cannot be retried, the node should shut down immediately after returning
+	/// [`ChannelMonitorUpdateStatus::UnrecoverableError`], see its documentation for more info.
	///
-	/// [`update_monitor`]: channelmonitor::ChannelMonitor::update_monitor
+	/// [`ChannelManager`]: crate::ln::channelmanager::ChannelManager
	fn update_channel(&self, funding_txo: OutPoint, update: &ChannelMonitorUpdate) -> ChannelMonitorUpdateStatus;
 
	/// Returns any monitor events since the last call. Subsequent calls must only return new
@@ -408,3 +382,9 @@ where
		self.1.block_disconnected(header, height);
	}
 }
+
+/// A unique identifier to track each pending output claim within a [`ChannelMonitor`].
+///
+/// This is not exported to bindings users as we just use [u8; 32] directly.
+#[derive(Copy, Clone, Debug, Hash, PartialEq, Eq)]
+pub struct ClaimId(pub [u8; 32]);
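
As a brief aside on the `from_genesis` -> `from_network` rename in this diff, a minimal usage sketch (illustrative only, assuming the `bitcoin` and `lightning` crate versions matching this revision):

use bitcoin::network::constants::Network;
use lightning::chain::BestBlock;

fn main() {
	// Equivalent to the pre-rename `BestBlock::from_genesis(Network::Testnet)`.
	let best = BestBlock::from_network(Network::Testnet);
	assert_eq!(best.height(), 0);
	println!("testnet genesis hash: {}", best.block_hash());
}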
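
And a hedged sketch of how an application's storage layer (for example a [`Persist`] implementation behind a `ChainMonitor`) might choose between the three `ChannelMonitorUpdateStatus` variants documented above; the `durably_synced` and `backend_reachable` flags are hypothetical application-side signals, not LDK API:

use lightning::chain::ChannelMonitorUpdateStatus;

fn status_for_monitor_write(durably_synced: bool, backend_reachable: bool) -> ChannelMonitorUpdateStatus {
	if durably_synced {
		// Every copy of the ChannelMonitor is on disk, including any fsync() needed to
		// survive a crash.
		ChannelMonitorUpdateStatus::Completed
	} else if backend_reachable {
		// The write was handed off to a background task (or is being retried); the channel
		// stays frozen until a MonitorEvent::Completed is later provided for this update.
		ChannelMonitorUpdateStatus::InProgress
	} else {
		// Nothing can be persisted now or later. Returning this causes LDK to panic
		// immediately, so prefer InProgress plus an orderly shutdown where possible.
		ChannelMonitorUpdateStatus::UnrecoverableError
	}
}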