X-Git-Url: http://git.bitcoin.ninja/index.cgi?a=blobdiff_plain;f=lightning%2Fsrc%2Fchain%2Fmod.rs;h=8ac6c39360b9624043115756ce85461144e3fb8d;hb=080c70f98f8b96bae965ca90ca690f06cead4b3b;hp=25e5a97d288df42d4a2baf2263aae9e9d493f28e;hpb=081ce7c843713a42803097a6d2dcf3631778beda;p=rust-lightning

diff --git a/lightning/src/chain/mod.rs b/lightning/src/chain/mod.rs
index 25e5a97d..8ac6c393 100644
--- a/lightning/src/chain/mod.rs
+++ b/lightning/src/chain/mod.rs
@@ -12,15 +12,16 @@
 use bitcoin::blockdata::block::{Block, BlockHeader};
 use bitcoin::blockdata::constants::genesis_block;
 use bitcoin::blockdata::script::Script;
-use bitcoin::blockdata::transaction::{Transaction, TxOut};
+use bitcoin::blockdata::transaction::TxOut;
 use bitcoin::hash_types::{BlockHash, Txid};
 use bitcoin::network::constants::Network;
+use bitcoin::secp256k1::PublicKey;
 
-use chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, MonitorEvent};
-use chain::keysinterface::Sign;
-use chain::transaction::{OutPoint, TransactionData};
+use crate::chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, MonitorEvent};
+use crate::chain::keysinterface::Sign;
+use crate::chain::transaction::{OutPoint, TransactionData};
 
-use prelude::*;
+use crate::prelude::*;
 
 pub mod chaininterface;
 pub mod chainmonitor;
@@ -31,7 +32,7 @@ pub(crate) mod onchaintx;
 pub(crate) mod package;
 
 /// The best known block as identified by its hash and height.
-#[derive(Clone, Copy, PartialEq)]
+#[derive(Clone, Copy, PartialEq, Eq)]
 pub struct BestBlock {
 	block_hash: BlockHash,
 	height: u32,
@@ -60,7 +61,7 @@ impl BestBlock {
 }
 
 /// An error when accessing the chain via [`Access`].
-#[derive(Clone)]
+#[derive(Clone, Debug)]
 pub enum AccessError {
 	/// The requested chain is unknown.
 	UnknownChain,
@@ -76,7 +77,7 @@ pub trait Access {
 	/// Returns an error if `genesis_hash` is for a different chain or if such a transaction output
 	/// is unknown.
 	///
-	/// [`short_channel_id`]: https://github.com/lightningnetwork/lightning-rfc/blob/master/07-routing-gossip.md#definition-of-short_channel_id
+	/// [`short_channel_id`]: https://github.com/lightning/bolts/blob/master/07-routing-gossip.md#definition-of-short_channel_id
 	fn get_utxo(&self, genesis_hash: &BlockHash, short_channel_id: u64) -> Result<TxOut, AccessError>;
 }
 
@@ -87,9 +88,20 @@
 /// sourcing chain data using a block-oriented API should prefer this interface over [`Confirm`].
 /// Such clients fetch the entire header chain whereas clients using [`Confirm`] only fetch headers
 /// when needed.
+///
+/// By using [`Listen::filtered_block_connected`] this interface supports clients fetching the
+/// entire header chain and only blocks with matching transaction data using BIP 157 filters or
+/// other similar filtering.
 pub trait Listen {
+	/// Notifies the listener that a block was added at the given height, with the transaction data
+	/// possibly filtered.
+	fn filtered_block_connected(&self, header: &BlockHeader, txdata: &TransactionData, height: u32);
+
 	/// Notifies the listener that a block was added at the given height.
-	fn block_connected(&self, block: &Block, height: u32);
+	fn block_connected(&self, block: &Block, height: u32) {
+		let txdata: Vec<_> = block.txdata.iter().enumerate().collect();
+		self.filtered_block_connected(&block.header, &txdata, height);
+	}
 
 	/// Notifies the listener that a block was removed at the given height.
 	fn block_disconnected(&self, header: &BlockHeader, height: u32);
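With the default method introduced above, a client only has to implement `filtered_block_connected`; full blocks are funneled through it by `block_connected`. A minimal sketch of such an implementation (the `HeightTracker` type and its logging are illustrative, assuming a `lightning` version that includes this change):

use bitcoin::blockdata::block::BlockHeader;
use lightning::chain::{transaction::TransactionData, Listen};

struct HeightTracker;

impl Listen for HeightTracker {
	// With BIP 157/158 filtering only matching transactions are delivered here;
	// the default `block_connected` funnels full, unfiltered blocks through too.
	fn filtered_block_connected(&self, header: &BlockHeader, txdata: &TransactionData, height: u32) {
		println!("connected {} at height {} ({} matched txs)", header.block_hash(), height, txdata.len());
	}

	fn block_disconnected(&self, header: &BlockHeader, height: u32) {
		println!("disconnected {} at height {}", header.block_hash(), height);
	}
}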
@@ -139,15 +151,15 @@ pub trait Confirm {
 	/// in the event of a chain reorganization, it must not be called with a `header` that is no
 	/// longer in the chain as of the last call to [`best_block_updated`].
 	///
-	/// [chain order]: Confirm#Order
+	/// [chain order]: Confirm#order
 	/// [`best_block_updated`]: Self::best_block_updated
 	fn transactions_confirmed(&self, header: &BlockHeader, txdata: &TransactionData, height: u32);
 
 	/// Processes a transaction that is no longer confirmed as result of a chain reorganization.
 	///
 	/// Should be called for any transaction returned by [`get_relevant_txids`] if it has been
-	/// reorganized out of the best chain. Once called, the given transaction should not be returned
-	/// by [`get_relevant_txids`] unless it has been reconfirmed via [`transactions_confirmed`].
+	/// reorganized out of the best chain. Once called, the given transaction will not be returned
+	/// by [`get_relevant_txids`], unless it has been reconfirmed via [`transactions_confirmed`].
 	///
 	/// [`get_relevant_txids`]: Self::get_relevant_txids
 	/// [`transactions_confirmed`]: Self::transactions_confirmed
@@ -161,9 +173,9 @@ pub trait Confirm {
 
 	/// Returns transactions that should be monitored for reorganization out of the chain.
 	///
-	/// Should include any transactions passed to [`transactions_confirmed`] that have insufficient
-	/// confirmations to be safe from a chain reorganization. Should not include any transactions
-	/// passed to [`transaction_unconfirmed`] unless later reconfirmed.
+	/// Will include any transactions passed to [`transactions_confirmed`] that have insufficient
+	/// confirmations to be safe from a chain reorganization. Will not include any transactions
+	/// passed to [`transaction_unconfirmed`], unless later reconfirmed.
 	///
 	/// May be called to determine the subset of transactions that must still be monitored for
 	/// reorganization. Will be idempotent between calls but may change as a result of calls to the
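To make the `Confirm` contract concrete, an Electrum-style client might run a reorg-detection pass like the following sketch. The `Backend` trait and its `lookup_confirmation` helper are hypothetical stand-ins for the client's chain source; any `Confirm` implementor (e.g. a `ChannelManager` or `ChainMonitor`) can be driven this way:

use bitcoin::hash_types::Txid;
use lightning::chain::Confirm;

// Minimal, hypothetical stand-in for an Electrum-like chain source.
trait Backend {
	// Returns the confirmation height of `txid`, or `None` if unconfirmed.
	fn lookup_confirmation(&self, txid: &Txid) -> Option<u32>;
}

fn poll_reorgs<C: Confirm>(confirm: &C, backend: &impl Backend) {
	for txid in confirm.get_relevant_txids() {
		if backend.lookup_confirmation(&txid).is_none() {
			// Reorged out: after this call `txid` drops out of
			// `get_relevant_txids` until it is reconfirmed via
			// `transactions_confirmed`.
			confirm.transaction_unconfirmed(&txid);
		}
	}
}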
@@ -175,68 +187,81 @@
 	fn get_relevant_txids(&self) -> Vec<Txid>;
 }
 
-/// An error enum representing a failure to persist a channel monitor update.
-#[derive(Clone, Copy, Debug, PartialEq)]
-pub enum ChannelMonitorUpdateErr {
+/// An enum representing the status of a channel monitor update persistence.
+#[derive(Clone, Copy, Debug, PartialEq, Eq)]
+pub enum ChannelMonitorUpdateStatus {
+	/// The update has been durably persisted and all copies of the relevant [`ChannelMonitor`]
+	/// have been updated.
+	///
+	/// This includes performing any `fsync()` calls required to ensure the update is guaranteed to
+	/// be available on restart even if the application crashes.
+	Completed,
 	/// Used to indicate a temporary failure (eg connection to a watchtower or remote backup of
 	/// our state failed, but is expected to succeed at some point in the future).
 	///
 	/// Such a failure will "freeze" a channel, preventing us from revoking old states or
-	/// submitting new commitment transactions to the counterparty. Once the update(s) that failed
-	/// have been successfully applied, a [`MonitorEvent::UpdateCompleted`] event should be returned
-	/// via [`Watch::release_pending_monitor_events`] which will then restore the channel to an
-	/// operational state.
-	///
-	/// Note that a given ChannelManager will *never* re-generate a given ChannelMonitorUpdate. If
-	/// you return a TemporaryFailure you must ensure that it is written to disk safely before
-	/// writing out the latest ChannelManager state.
-	///
-	/// Even when a channel has been "frozen" updates to the ChannelMonitor can continue to occur
-	/// (eg if an inbound HTLC which we forwarded was claimed upstream resulting in us attempting
-	/// to claim it on this channel) and those updates must be applied wherever they can be. At
-	/// least one such updated ChannelMonitor must be persisted otherwise PermanentFailure should
-	/// be returned to get things on-chain ASAP using only the in-memory copy. Obviously updates to
-	/// the channel which would invalidate previous ChannelMonitors are not made when a channel has
-	/// been "frozen".
-	///
-	/// Note that even if updates made after TemporaryFailure succeed you must still provide a
-	/// [`MonitorEvent::UpdateCompleted`] to ensure you have the latest monitor and re-enable
-	/// normal channel operation. Note that this is normally generated through a call to
-	/// [`ChainMonitor::channel_monitor_updated`].
-	///
-	/// Note that the update being processed here will not be replayed for you when you return a
-	/// [`MonitorEvent::UpdateCompleted`] event via [`Watch::release_pending_monitor_events`], so
-	/// you must store the update itself on your own local disk prior to returning a
-	/// TemporaryFailure. You may, of course, employ a journaling approach, storing only the
-	/// ChannelMonitorUpdate on disk without updating the monitor itself, replaying the journal at
-	/// reload-time.
+	/// submitting new commitment transactions to the counterparty. Once the update(s) which failed
+	/// have been successfully applied, a [`MonitorEvent::Completed`] can be used to restore the
+	/// channel to an operational state.
+	///
+	/// Note that a given [`ChannelManager`] will *never* re-generate a [`ChannelMonitorUpdate`].
+	/// If you return this status you must ensure that it is written to disk safely before writing
+	/// the latest [`ChannelManager`] state, or you should return [`PermanentFailure`] instead.
+	///
+	/// Even when a channel has been "frozen", updates to the [`ChannelMonitor`] can continue to
+	/// occur (e.g. if an inbound HTLC which we forwarded was claimed upstream, resulting in us
+	/// attempting to claim it on this channel) and those updates must still be persisted.
+	///
+	/// No updates to the channel will be made which could invalidate other [`ChannelMonitor`]s
+	/// until a [`MonitorEvent::Completed`] is provided, even if you return no error on a later
+	/// monitor update for the same channel.
 	///
 	/// For deployments where a copy of ChannelMonitors and other local state are backed up in a
 	/// remote location (with local copies persisted immediately), it is anticipated that all
-	/// updates will return TemporaryFailure until the remote copies could be updated.
+	/// updates will return [`InProgress`] until the remote copies could be updated.
+	///
+	/// [`PermanentFailure`]: ChannelMonitorUpdateStatus::PermanentFailure
+	/// [`InProgress`]: ChannelMonitorUpdateStatus::InProgress
+	/// [`ChannelManager`]: crate::ln::channelmanager::ChannelManager
+	InProgress,
+	/// Used to indicate no further channel monitor updates will be allowed (likely a disk failure
+	/// or a remote copy of this [`ChannelMonitor`] is no longer reachable and thus not updatable).
 	///
-	/// [`ChainMonitor::channel_monitor_updated`]: chainmonitor::ChainMonitor::channel_monitor_updated
-	TemporaryFailure,
-	/// Used to indicate no further channel monitor updates will be allowed (eg we've moved on to a
-	/// different watchtower and cannot update with all watchtowers that were previously informed
-	/// of this channel).
+	/// When this is returned, [`ChannelManager`] will force-close the channel but *not* broadcast
+	/// our current commitment transaction. This avoids a dangerous case where a local disk failure
+	/// (e.g. the Linux-default remounting of the disk as read-only) causes [`PermanentFailure`]s
+	/// for all monitor updates. If we were to broadcast our latest commitment transaction and then
+	/// restart, we could end up reading a previous [`ChannelMonitor`] and [`ChannelManager`],
+	/// revoking our now-broadcasted state before seeing it confirm and losing all our funds.
 	///
-	/// At reception of this error, ChannelManager will force-close the channel and return at
-	/// least a final ChannelMonitorUpdate::ChannelForceClosed which must be delivered to at
-	/// least one ChannelMonitor copy. Revocation secret MUST NOT be released and offchain channel
-	/// update must be rejected.
+	/// Note that this is somewhat of a tradeoff - if the disk is really gone and we may have lost
+	/// the data permanently, we really should broadcast immediately. If the data can be recovered
+	/// with manual intervention, we'd rather close the channel, rejecting future updates to it,
+	/// and broadcast the latest state only if we have HTLCs to claim which are timing out (which
+	/// we do as long as blocks are connected).
 	///
-	/// This failure may also signal a failure to update the local persisted copy of one of
-	/// the channel monitor instance.
+	/// In order to broadcast the latest local commitment transaction, you'll need to call
+	/// [`ChannelMonitor::get_latest_holder_commitment_txn`] and broadcast the resulting
+	/// transactions once you've safely ensured no further channel updates can be generated by your
+	/// [`ChannelManager`].
 	///
-	/// Note that even when you fail a holder commitment transaction update, you must store the
-	/// update to ensure you can claim from it in case of a duplicate copy of this ChannelMonitor
-	/// broadcasts it (e.g distributed channel-monitor deployment)
+	/// Note that at least one final [`ChannelMonitorUpdate`] may still be provided, which must
+	/// still be processed by a running [`ChannelMonitor`]. This final update will mark the
+	/// [`ChannelMonitor`] as finalized, ensuring no further updates (e.g. revocation of the latest
+	/// commitment transaction) are allowed.
+	///
+	/// Note that even if you return a [`PermanentFailure`] due to unavailability of secondary
+	/// [`ChannelMonitor`] copies, you should still make an attempt to store the update where
+	/// possible to ensure you can claim HTLC outputs on the latest commitment transaction
+	/// broadcasted later.
 	///
 	/// In case of distributed watchtowers deployment, the new version must be written to disk, as
 	/// state may have been stored but rejected due to a block forcing a commitment broadcast. This
 	/// storage is used to claim outputs of rejected state confirmed onchain by another watchtower,
 	/// lagging behind on block processing.
+	///
+	/// [`PermanentFailure`]: ChannelMonitorUpdateStatus::PermanentFailure
+	/// [`ChannelManager`]: crate::ln::channelmanager::ChannelManager
 	PermanentFailure,
 }
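For persisters, the three variants map naturally onto the outcome of the storage operation. A rough sketch of such a mapping (the classification of `std::io` errors into each variant is hypothetical; a real implementation decides based on its own backup architecture):

use lightning::chain::ChannelMonitorUpdateStatus;

fn status_from_io(res: std::io::Result<()>) -> ChannelMonitorUpdateStatus {
	match res {
		// Durably on disk (including fsync): safe to report completion.
		Ok(()) => ChannelMonitorUpdateStatus::Completed,
		// Remote backup still pending: the channel stays frozen until a
		// MonitorEvent::Completed is surfaced later.
		Err(ref e) if e.kind() == std::io::ErrorKind::TimedOut =>
			ChannelMonitorUpdateStatus::InProgress,
		// Unrecoverable storage: refuse all further updates to the channel.
		Err(_) => ChannelMonitorUpdateStatus::PermanentFailure,
	}
}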
@@ -255,10 +280,10 @@ pub enum ChannelMonitorUpdateErr {
 /// If an implementation maintains multiple instances of a channel's monitor (e.g., by storing
 /// backup copies), then it must ensure that updates are applied across all instances. Otherwise, it
 /// could result in a revoked transaction being broadcast, allowing the counterparty to claim all
-/// funds in the channel. See [`ChannelMonitorUpdateErr`] for more details about how to handle
+/// funds in the channel. See [`ChannelMonitorUpdateStatus`] for more details about how to handle
 /// multiple instances.
 ///
-/// [`PermanentFailure`]: ChannelMonitorUpdateErr::PermanentFailure
+/// [`PermanentFailure`]: ChannelMonitorUpdateStatus::PermanentFailure
 pub trait Watch<ChannelSigner: Sign> {
 	/// Watches a channel identified by `funding_txo` using `monitor`.
 	///
@@ -266,21 +291,21 @@ pub trait Watch<ChannelSigner: Sign> {
 	/// with any spends of outputs returned by [`get_outputs_to_watch`]. In practice, this means
 	/// calling [`block_connected`] and [`block_disconnected`] on the monitor.
 	///
-	/// Note: this interface MUST error with `ChannelMonitorUpdateErr::PermanentFailure` if
+	/// Note: this interface MUST error with [`ChannelMonitorUpdateStatus::PermanentFailure`] if
 	/// the given `funding_txo` has previously been registered via `watch_channel`.
 	///
 	/// [`get_outputs_to_watch`]: channelmonitor::ChannelMonitor::get_outputs_to_watch
 	/// [`block_connected`]: channelmonitor::ChannelMonitor::block_connected
 	/// [`block_disconnected`]: channelmonitor::ChannelMonitor::block_disconnected
-	fn watch_channel(&self, funding_txo: OutPoint, monitor: ChannelMonitor<ChannelSigner>) -> Result<(), ChannelMonitorUpdateErr>;
+	fn watch_channel(&self, funding_txo: OutPoint, monitor: ChannelMonitor<ChannelSigner>) -> ChannelMonitorUpdateStatus;
 
 	/// Updates a channel identified by `funding_txo` by applying `update` to its monitor.
 	///
 	/// Implementations must call [`update_monitor`] with the given update. See
-	/// [`ChannelMonitorUpdateErr`] for invariants around returning an error.
+	/// [`ChannelMonitorUpdateStatus`] for invariants around returning an error.
 	///
 	/// [`update_monitor`]: channelmonitor::ChannelMonitor::update_monitor
-	fn update_channel(&self, funding_txo: OutPoint, update: ChannelMonitorUpdate) -> Result<(), ChannelMonitorUpdateErr>;
+	fn update_channel(&self, funding_txo: OutPoint, update: ChannelMonitorUpdate) -> ChannelMonitorUpdateStatus;
 
 	/// Returns any monitor events since the last call. Subsequent calls must only return new
 	/// events.
@@ -290,8 +315,8 @@ pub trait Watch<ChannelSigner: Sign> {
 	/// to disk.
 	///
 	/// For details on asynchronous [`ChannelMonitor`] updating and returning
-	/// [`MonitorEvent::UpdateCompleted`] here, see [`ChannelMonitorUpdateErr::TemporaryFailure`].
-	fn release_pending_monitor_events(&self) -> Vec<MonitorEvent>;
+	/// [`MonitorEvent::Completed`] here, see [`ChannelMonitorUpdateStatus::InProgress`].
+	fn release_pending_monitor_events(&self) -> Vec<(OutPoint, Vec<MonitorEvent>, Option<PublicKey>)>;
 }
 
 /// The `Filter` trait defines behavior for indicating chain activity of interest pertaining to
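A consumer of the new `release_pending_monitor_events` shape might look like the following sketch (the `drain_monitor_events` helper is hypothetical). Events are now grouped per funding outpoint, with what appears to be the counterparty's node id attached when known (an assumption, based on the `PublicKey` import added at the top of this diff):

use bitcoin::secp256k1::PublicKey;
use lightning::chain::channelmonitor::MonitorEvent;
use lightning::chain::transaction::OutPoint;

fn drain_monitor_events(pending: Vec<(OutPoint, Vec<MonitorEvent>, Option<PublicKey>)>) {
	for (funding_txo, events, counterparty_node_id) in pending {
		for event in events {
			// Each event would normally be handed back to the ChannelManager
			// for processing, keyed by the channel's funding outpoint.
			let _ = (&funding_txo, &event, &counterparty_node_id);
		}
	}
}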
@@ -309,9 +334,9 @@ pub trait Watch<ChannelSigner: Sign> {
 /// Note that use as part of a [`Watch`] implementation involves reentrancy. Therefore, the `Filter`
 /// should not block on I/O. Implementations should instead queue the newly monitored data to be
 /// processed later. Then, in order to block until the data has been processed, any [`Watch`]
-/// invocation that has called the `Filter` must return [`TemporaryFailure`].
+/// invocation that has called the `Filter` must return [`InProgress`].
 ///
-/// [`TemporaryFailure`]: ChannelMonitorUpdateErr::TemporaryFailure
+/// [`InProgress`]: ChannelMonitorUpdateStatus::InProgress
 /// [BIP 157]: https://github.com/bitcoin/bips/blob/master/bip-0157.mediawiki
 /// [BIP 158]: https://github.com/bitcoin/bips/blob/master/bip-0158.mediawiki
 pub trait Filter {
@@ -321,28 +346,25 @@
 
 	/// Registers interest in spends of a transaction output.
 	///
-	/// Optionally, when `output.block_hash` is set, should return any transaction spending the
-	/// output that is found in the corresponding block along with its index.
-	///
-	/// This return value is useful for Electrum clients in order to supply in-block descendant
-	/// transactions which otherwise were not included. This is not necessary for other clients if
-	/// such descendant transactions were already included (e.g., when a BIP 157 client provides the
-	/// full block).
-	fn register_output(&self, output: WatchedOutput) -> Option<(usize, Transaction)>;
+	/// Note that this method might be called during processing of a new block. You therefore need
+	/// to ensure that spends of dependent outputs within an already connected block are also
+	/// correctly handled, e.g., by re-scanning the block in question whenever new outputs have
+	/// been registered mid-processing.
+	fn register_output(&self, output: WatchedOutput);
 }
 
 /// A transaction output watched by a [`ChannelMonitor`] for spends on-chain.
 ///
 /// Used to convey to a [`Filter`] such an output with a given spending condition. Any transaction
 /// spending the output must be given to [`ChannelMonitor::block_connected`] either directly or via
-/// the return value of [`Filter::register_output`].
+/// [`Confirm::transactions_confirmed`].
 ///
 /// If `block_hash` is `Some`, this indicates the output was created in the corresponding block and
 /// may have been spent there. See [`Filter::register_output`] for details.
 ///
 /// [`ChannelMonitor`]: channelmonitor::ChannelMonitor
 /// [`ChannelMonitor::block_connected`]: channelmonitor::ChannelMonitor::block_connected
-#[derive(Clone, PartialEq, Hash)]
+#[derive(Clone, PartialEq, Eq, Hash)]
 pub struct WatchedOutput {
 	/// First block where the transaction output may have been spent.
 	pub block_hash: Option<BlockHash>,
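With `register_output` no longer returning a dependent spend, a `Filter` implementation reduces to bookkeeping. A toy sketch (the `ScriptRegistry` type is illustrative; a real client would forward these registrations to its Electrum or BIP 157 backend and re-scan as described above):

use std::sync::Mutex;
use bitcoin::blockdata::script::Script;
use bitcoin::hash_types::Txid;
use lightning::chain::{Filter, WatchedOutput};

#[derive(Default)]
struct ScriptRegistry {
	txs: Mutex<Vec<(Txid, Script)>>,
	outputs: Mutex<Vec<WatchedOutput>>,
}

impl Filter for ScriptRegistry {
	fn register_tx(&self, txid: &Txid, script_pubkey: &Script) {
		self.txs.lock().unwrap().push((*txid, script_pubkey.clone()));
	}

	fn register_output(&self, output: WatchedOutput) {
		// May be called mid-block-processing; see the re-scanning note above.
		self.outputs.lock().unwrap().push(output);
	}
}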
@@ -355,8 +377,8 @@ pub struct WatchedOutput {
 }
 
 impl<T: Listen> Listen for core::ops::Deref<Target = T> {
-	fn block_connected(&self, block: &Block, height: u32) {
-		(**self).block_connected(block, height);
+	fn filtered_block_connected(&self, header: &BlockHeader, txdata: &TransactionData, height: u32) {
+		(**self).filtered_block_connected(header, txdata, height);
 	}
 
 	fn block_disconnected(&self, header: &BlockHeader, height: u32) {
@@ -369,9 +391,9 @@
 where
 	T::Target: Listen,
 	U::Target: Listen,
 {
-	fn block_connected(&self, block: &Block, height: u32) {
-		self.0.block_connected(block, height);
-		self.1.block_connected(block, height);
+	fn filtered_block_connected(&self, header: &BlockHeader, txdata: &TransactionData, height: u32) {
+		self.0.filtered_block_connected(header, txdata, height);
+		self.1.filtered_block_connected(header, txdata, height);
 	}
 
 	fn block_disconnected(&self, header: &BlockHeader, height: u32) {
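The tuple implementation above makes it easy to fan a block out to two listeners at once, e.g. a `ChannelManager` and a `ChainMonitor`, both of which implement `Listen`. A sketch assuming any two `Listen` implementors (references implement `Deref`, so a tuple of references satisfies the bounds):

use bitcoin::blockdata::block::Block;
use lightning::chain::Listen;

fn connect_to_both<A: Listen, B: Listen>(a: &A, b: &B, block: &Block, height: u32) {
	// `(&A, &B)` meets the tuple impl's `Deref` bounds, so the pair itself
	// is a `Listen` and forwards the block to both components.
	let pair = (a, b);
	pair.block_connected(block, height);
}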