X-Git-Url: http://git.bitcoin.ninja/index.cgi?a=blobdiff_plain;f=lightning%2Fsrc%2Fln%2Fchannelmonitor.rs;h=b601f741ed978a2d3827adf7b9c4b804409e7428;hb=367834ca9039eb64d6f85b6bd4432c735e776b81;hp=91f81fa573664d6d0ceb64380181ee2d4d5adc86;hpb=6622ea724f9947a52600d5743cbd8d8f45d1e6e4;p=rust-lightning diff --git a/lightning/src/ln/channelmonitor.rs b/lightning/src/ln/channelmonitor.rs index 91f81fa5..b601f741 100644 --- a/lightning/src/ln/channelmonitor.rs +++ b/lightning/src/ln/channelmonitor.rs @@ -12,13 +12,15 @@ //! //! ChannelMonitor objects are generated by ChannelManager in response to relevant //! messages/actions, and MUST be persisted to disk (and, preferably, remotely) before progress can -//! be made in responding to certain messages, see ManyChannelMonitor for more. +//! be made in responding to certain messages, see [`chain::Watch`] for more. //! //! Note that ChannelMonitors are an important part of the lightning trust model and a copy of the //! latest ChannelMonitor must always be actively monitoring for chain updates (and no out-of-date //! ChannelMonitors should do so). Thus, if you're building rust-lightning into an HSM or other //! security-domain-separated system design, you should consider having multiple paths for //! ChannelMonitors to get out of the HSM and onto monitoring devices. +//! +//! [`chain::Watch`]: ../../chain/trait.Watch.html use bitcoin::blockdata::block::BlockHeader; use bitcoin::blockdata::transaction::{TxOut,Transaction}; @@ -40,7 +42,8 @@ use ln::chan_utils; use ln::chan_utils::{CounterpartyCommitmentSecrets, HTLCOutputInCommitment, HolderCommitmentTransaction, HTLCType}; use ln::channelmanager::{HTLCSource, PaymentPreimage, PaymentHash}; use ln::onchaintx::{OnchainTxHandler, InputDescriptors}; -use chain::chaininterface::{ChainListener, ChainWatchInterface, BroadcasterInterface, FeeEstimator}; +use chain; +use chain::chaininterface::{ChainListener, ChainWatchedUtil, BroadcasterInterface, FeeEstimator}; use chain::transaction::OutPoint; use chain::keysinterface::{SpendableOutputDescriptor, ChannelKeys}; use util::logger::Logger; @@ -48,9 +51,9 @@ use util::ser::{Readable, MaybeReadable, Writer, Writeable, U48}; use util::{byte_utils, events}; use util::events::Event; -use std::collections::{HashMap, hash_map}; +use std::collections::{HashMap, HashSet, hash_map}; use std::sync::Mutex; -use std::{hash,cmp, mem}; +use std::{cmp, mem}; use std::ops::Deref; use std::io::Error; @@ -146,6 +149,11 @@ pub enum ChannelMonitorUpdateErr { /// Note that even when you fail a holder commitment transaction update, you must store the /// update to ensure you can claim from it in case of a duplicate copy of this ChannelMonitor /// broadcasts it (e.g distributed channel-monitor deployment) + /// + /// In case of distributed watchtowers deployment, the new version must be written to disk, as + /// state may have been stored but rejected due to a block forcing a commitment broadcast. This + /// storage is used to claim outputs of rejected state confirmed onchain by another watchtower, + /// lagging behind on block processing. PermanentFailure, } @@ -167,8 +175,11 @@ pub enum MonitorEvent { CommitmentTxBroadcasted(OutPoint), } -/// Simple structure send back by ManyChannelMonitor in case of HTLC detected onchain from a -/// forward channel and from which info are needed to update HTLC in a backward channel. +/// Simple structure sent back by `chain::Watch` when an HTLC from a forward channel is detected on +/// chain. 
Used to update the corresponding HTLC in the backward channel. Failing to pass the +/// preimage claim backward will lead to loss of funds. +/// +/// [`chain::Watch`]: ../../chain/trait.Watch.html #[derive(Clone, PartialEq)] pub struct HTLCUpdate { pub(super) payment_hash: PaymentHash, @@ -177,50 +188,104 @@ pub struct HTLCUpdate { } impl_writeable!(HTLCUpdate, 0, { payment_hash, payment_preimage, source }); -/// A simple implementation of a ManyChannelMonitor and ChainListener. Can be used to create a -/// watchtower or watch our own channels. -/// -/// Note that you must provide your own key by which to refer to channels. +/// An implementation of a [`chain::Watch`] and ChainListener. /// -/// If you're accepting remote monitors (ie are implementing a watchtower), you must verify that -/// users cannot overwrite a given channel by providing a duplicate key. ie you should probably -/// index by a PublicKey which is required to sign any updates. +/// May be used in conjunction with [`ChannelManager`] to monitor channels locally or used +/// independently to monitor channels remotely. /// -/// If you're using this for local monitoring of your own channels, you probably want to use -/// `OutPoint` as the key, which will give you a ManyChannelMonitor implementation. -/// -/// (C-not exported) due to an unconstrained generic in `Key` -pub struct SimpleManyChannelMonitor +/// [`chain::Watch`]: ../../chain/trait.Watch.html +/// [`ChannelManager`]: ../channelmanager/struct.ChannelManager.html +pub struct ChainMonitor where T::Target: BroadcasterInterface, F::Target: FeeEstimator, L::Target: Logger, - C::Target: ChainWatchInterface, { /// The monitors - pub monitors: Mutex>>, - chain_monitor: C, + pub monitors: Mutex>>, + watch_events: Mutex, broadcaster: T, logger: L, fee_estimator: F } -impl - ChainListener for SimpleManyChannelMonitor +struct WatchEventQueue { + watched: ChainWatchedUtil, + events: Vec, +} + +impl WatchEventQueue { + fn new() -> Self { + Self { + watched: ChainWatchedUtil::new(), + events: Vec::new(), + } + } + + fn watch_tx(&mut self, txid: &Txid, script_pubkey: &Script) { + if self.watched.register_tx(txid, script_pubkey) { + self.events.push(chain::WatchEvent::WatchTransaction { + txid: *txid, + script_pubkey: script_pubkey.clone() + }); + } + } + + fn watch_output(&mut self, outpoint: (&Txid, usize), script_pubkey: &Script) { + let (txid, index) = outpoint; + if self.watched.register_outpoint((*txid, index as u32), script_pubkey) { + self.events.push(chain::WatchEvent::WatchOutput { + outpoint: OutPoint { + txid: *txid, + index: index as u16, + }, + script_pubkey: script_pubkey.clone(), + }); + } + } + + fn dequeue_events(&mut self) -> Vec { + let mut pending_events = Vec::with_capacity(self.events.len()); + pending_events.append(&mut self.events); + pending_events + } + + fn filter_block<'a>(&self, txdata: &[(usize, &'a Transaction)]) -> Vec<(usize, &'a Transaction)> { + let mut matched_txids = HashSet::new(); + txdata.iter().filter(|&&(_, tx)| { + // A tx matches the filter if it either matches the filter directly (via does_match_tx) + // or if it is a descendant of another matched transaction within the same block. 
+ let mut matched = self.watched.does_match_tx(tx); + for input in tx.input.iter() { + if matched || matched_txids.contains(&input.previous_output.txid) { + matched = true; + break; + } + } + if matched { + matched_txids.insert(tx.txid()); + } + matched + }).map(|e| *e).collect() + } +} + +impl + ChainListener for ChainMonitor where T::Target: BroadcasterInterface, F::Target: FeeEstimator, L::Target: Logger, - C::Target: ChainWatchInterface, { - fn block_connected(&self, header: &BlockHeader, height: u32, txn_matched: &[&Transaction], _indexes_of_txn_matched: &[usize]) { - let block_hash = header.block_hash(); + fn block_connected(&self, header: &BlockHeader, txdata: &[(usize, &Transaction)], height: u32) { + let mut watch_events = self.watch_events.lock().unwrap(); + let matched_txn = watch_events.filter_block(txdata); { let mut monitors = self.monitors.lock().unwrap(); for monitor in monitors.values_mut() { - let txn_outputs = monitor.block_connected(txn_matched, height, &block_hash, &*self.broadcaster, &*self.fee_estimator, &*self.logger); + let txn_outputs = monitor.block_connected(header, &matched_txn, height, &*self.broadcaster, &*self.fee_estimator, &*self.logger); for (ref txid, ref outputs) in txn_outputs { for (idx, output) in outputs.iter().enumerate() { - self.chain_monitor.install_watch_outpoint((txid.clone(), idx as u32), &output.script_pubkey); + watch_events.watch_output((txid, idx), &output.script_pubkey); } } } @@ -228,49 +293,46 @@ impl SimpleManyChannelMonitor +impl ChainMonitor where T::Target: BroadcasterInterface, F::Target: FeeEstimator, L::Target: Logger, - C::Target: ChainWatchInterface, { /// Creates a new object which can be used to monitor several channels given the chain /// interface with which to register to receive notifications. - pub fn new(chain_monitor: C, broadcaster: T, logger: L, feeest: F) -> SimpleManyChannelMonitor { - let res = SimpleManyChannelMonitor { + pub fn new(broadcaster: T, logger: L, feeest: F) -> Self { + Self { monitors: Mutex::new(HashMap::new()), - chain_monitor, + watch_events: Mutex::new(WatchEventQueue::new()), broadcaster, logger, fee_estimator: feeest, - }; - - res + } } - /// Adds or updates the monitor which monitors the channel referred to by the given key. - pub fn add_monitor_by_key(&self, key: Key, monitor: ChannelMonitor) -> Result<(), MonitorUpdateError> { + /// Adds the monitor that watches the channel referred to by the given outpoint. 
+ fn add_monitor(&self, outpoint: OutPoint, monitor: ChannelMonitor) -> Result<(), MonitorUpdateError> { + let mut watch_events = self.watch_events.lock().unwrap(); let mut monitors = self.monitors.lock().unwrap(); - let entry = match monitors.entry(key) { - hash_map::Entry::Occupied(_) => return Err(MonitorUpdateError("Channel monitor for given key is already present")), + let entry = match monitors.entry(outpoint) { + hash_map::Entry::Occupied(_) => return Err(MonitorUpdateError("Channel monitor for given outpoint is already present")), hash_map::Entry::Vacant(e) => e, }; { let funding_txo = monitor.get_funding_txo(); log_trace!(self.logger, "Got new Channel Monitor for channel {}", log_bytes!(funding_txo.0.to_channel_id()[..])); - self.chain_monitor.install_watch_tx(&funding_txo.0.txid, &funding_txo.1); - self.chain_monitor.install_watch_outpoint((funding_txo.0.txid, funding_txo.0.index as u32), &funding_txo.1); + watch_events.watch_tx(&funding_txo.0.txid, &funding_txo.1); + watch_events.watch_output((&funding_txo.0.txid, funding_txo.0.index as usize), &funding_txo.1); for (txid, outputs) in monitor.get_outputs_to_watch().iter() { for (idx, script) in outputs.iter().enumerate() { - self.chain_monitor.install_watch_outpoint((*txid, idx as u32), script); + watch_events.watch_output((txid, idx), script); } } } @@ -278,10 +340,10 @@ impl Result<(), MonitorUpdateError> { + /// Updates the monitor that watches the channel referred to by the given outpoint. + fn update_monitor(&self, outpoint: OutPoint, update: ChannelMonitorUpdate) -> Result<(), MonitorUpdateError> { let mut monitors = self.monitors.lock().unwrap(); - match monitors.get_mut(&key) { + match monitors.get_mut(&outpoint) { Some(orig_monitor) => { log_trace!(self.logger, "Updating Channel Monitor for channel {}", log_funding_info!(orig_monitor)); orig_monitor.update_monitor(update, &self.broadcaster, &self.logger) @@ -291,29 +353,28 @@ impl ManyChannelMonitor for SimpleManyChannelMonitor +impl chain::Watch for ChainMonitor where T::Target: BroadcasterInterface, F::Target: FeeEstimator, L::Target: Logger, - C::Target: ChainWatchInterface, { type Keys = ChanSigner; - fn add_monitor(&self, funding_txo: OutPoint, monitor: ChannelMonitor) -> Result<(), ChannelMonitorUpdateErr> { - match self.add_monitor_by_key(funding_txo, monitor) { + fn watch_channel(&self, funding_txo: OutPoint, monitor: ChannelMonitor) -> Result<(), ChannelMonitorUpdateErr> { + match self.add_monitor(funding_txo, monitor) { Ok(_) => Ok(()), Err(_) => Err(ChannelMonitorUpdateErr::PermanentFailure), } } - fn update_monitor(&self, funding_txo: OutPoint, update: ChannelMonitorUpdate) -> Result<(), ChannelMonitorUpdateErr> { - match self.update_monitor_by_key(funding_txo, update) { + fn update_channel(&self, funding_txo: OutPoint, update: ChannelMonitorUpdate) -> Result<(), ChannelMonitorUpdateErr> { + match self.update_monitor(funding_txo, update) { Ok(_) => Ok(()), Err(_) => Err(ChannelMonitorUpdateErr::PermanentFailure), } } - fn get_and_clear_pending_monitor_events(&self) -> Vec { + fn release_pending_monitor_events(&self) -> Vec { let mut pending_monitor_events = Vec::new(); for chan in self.monitors.lock().unwrap().values_mut() { pending_monitor_events.append(&mut chan.get_and_clear_pending_monitor_events()); @@ -322,11 +383,10 @@ impl events::EventsProvider for SimpleManyChannelMonitor +impl events::EventsProvider for ChainMonitor where T::Target: BroadcasterInterface, F::Target: FeeEstimator, L::Target: Logger, - C::Target: ChainWatchInterface, { fn 
get_and_clear_pending_events(&self) -> Vec { let mut pending_events = Vec::new(); @@ -337,6 +397,16 @@ impl chain::WatchEventProvider for ChainMonitor + where T::Target: BroadcasterInterface, + F::Target: FeeEstimator, + L::Target: Logger, +{ + fn release_pending_watch_events(&self) -> Vec { + self.watch_events.lock().unwrap().dequeue_events() + } +} + /// If an HTLC expires within this many blocks, don't try to claim it in a shared transaction, /// instead claiming it in its own individual transaction. pub(crate) const CLTV_SHARED_CLAIM_BUFFER: u32 = 12; @@ -847,75 +917,6 @@ pub struct ChannelMonitor { secp_ctx: Secp256k1, //TODO: dedup this a bit... } -/// Simple trait indicating ability to track a set of ChannelMonitors and multiplex events between -/// them. Generally should be implemented by keeping a local SimpleManyChannelMonitor and passing -/// events to it, while also taking any add/update_monitor events and passing them to some remote -/// server(s). -/// -/// In general, you must always have at least one local copy in memory, which must never fail to -/// update (as it is responsible for broadcasting the latest state in case the channel is closed), -/// and then persist it to various on-disk locations. If, for some reason, the in-memory copy fails -/// to update (eg out-of-memory or some other condition), you must immediately shut down without -/// taking any further action such as writing the current state to disk. This should likely be -/// accomplished via panic!() or abort(). -/// -/// Note that any updates to a channel's monitor *must* be applied to each instance of the -/// channel's monitor everywhere (including remote watchtowers) *before* this function returns. If -/// an update occurs and a remote watchtower is left with old state, it may broadcast transactions -/// which we have revoked, allowing our counterparty to claim all funds in the channel! -/// -/// User needs to notify implementors of ManyChannelMonitor when a new block is connected or -/// disconnected using their `block_connected` and `block_disconnected` methods. However, rather -/// than calling these methods directly, the user should register implementors as listeners to the -/// BlockNotifier and call the BlockNotifier's `block_(dis)connected` methods, which will notify -/// all registered listeners in one go. -pub trait ManyChannelMonitor: Send + Sync { - /// The concrete type which signs for transactions and provides access to our channel public - /// keys. - type Keys: ChannelKeys; - - /// Adds a monitor for the given `funding_txo`. - /// - /// Implementer must also ensure that the funding_txo txid *and* outpoint are registered with - /// any relevant ChainWatchInterfaces such that the provided monitor receives block_connected - /// callbacks with the funding transaction, or any spends of it. - /// - /// Further, the implementer must also ensure that each output returned in - /// monitor.get_outputs_to_watch() is registered to ensure that the provided monitor learns about - /// any spends of any of the outputs. - /// - /// Any spends of outputs which should have been registered which aren't passed to - /// ChannelMonitors via block_connected may result in FUNDS LOSS. - fn add_monitor(&self, funding_txo: OutPoint, monitor: ChannelMonitor) -> Result<(), ChannelMonitorUpdateErr>; - - /// Updates a monitor for the given `funding_txo`. 
- /// - /// Implementer must also ensure that the funding_txo txid *and* outpoint are registered with - /// any relevant ChainWatchInterfaces such that the provided monitor receives block_connected - /// callbacks with the funding transaction, or any spends of it. - /// - /// Further, the implementer must also ensure that each output returned in - /// monitor.get_watch_outputs() is registered to ensure that the provided monitor learns about - /// any spends of any of the outputs. - /// - /// Any spends of outputs which should have been registered which aren't passed to - /// ChannelMonitors via block_connected may result in FUNDS LOSS. - /// - /// In case of distributed watchtowers deployment, even if an Err is return, the new version - /// must be written to disk, as state may have been stored but rejected due to a block forcing - /// a commitment broadcast. This storage is used to claim outputs of rejected state confirmed - /// onchain by another watchtower, lagging behind on block processing. - fn update_monitor(&self, funding_txo: OutPoint, monitor: ChannelMonitorUpdate) -> Result<(), ChannelMonitorUpdateErr>; - - /// Used by ChannelManager to get list of HTLC resolved onchain and which needed to be updated - /// with success or failure. - /// - /// You should probably just call through to - /// ChannelMonitor::get_and_clear_pending_monitor_events() for each ChannelMonitor and return - /// the full list. - fn get_and_clear_pending_monitor_events(&self) -> Vec; -} - #[cfg(any(test, feature = "fuzztarget"))] /// Used only in testing and fuzztarget to check serialization roundtrips don't change the /// underlying object @@ -1451,7 +1452,9 @@ impl ChannelMonitor { } /// Get the list of HTLCs who's status has been updated on chain. This should be called by - /// ChannelManager via ManyChannelMonitor::get_and_clear_pending_monitor_events(). + /// ChannelManager via [`chain::Watch::release_pending_monitor_events`]. + /// + /// [`chain::Watch::release_pending_monitor_events`]: ../../chain/trait.Watch.html#tymethod.release_pending_monitor_events pub fn get_and_clear_pending_monitor_events(&mut self) -> Vec { let mut ret = Vec::new(); mem::swap(&mut ret, &mut self.pending_monitor_events); @@ -1461,7 +1464,7 @@ impl ChannelMonitor { /// Gets the list of pending events which were generated by previous actions, clearing the list /// in the process. /// - /// This is called by ManyChannelMonitor::get_and_clear_pending_events() and is equivalent to + /// This is called by ChainMonitor::get_and_clear_pending_events() and is equivalent to /// EventsProvider::get_and_clear_pending_events() except that it requires &mut self as we do /// no internal locking in ChannelMonitors. pub fn get_and_clear_pending_events(&mut self) -> Vec { @@ -1880,17 +1883,23 @@ impl ChannelMonitor { Vec::new() } - /// Called by SimpleManyChannelMonitor::block_connected, which implements - /// ChainListener::block_connected. - /// Eventually this should be pub and, roughly, implement ChainListener, however this requires - /// &mut self, as well as returns new spendable outputs and outpoints to watch for spending of - /// on-chain. 
-	fn block_connected<B: Deref, F: Deref, L: Deref>(&mut self, txn_matched: &[&Transaction], height: u32, block_hash: &BlockHash, broadcaster: B, fee_estimator: F, logger: L)-> Vec<(Txid, Vec<TxOut>)>
+	/// Processes transactions in a newly connected block, which may result in any of the following:
+	/// - update the monitor's state against resolved HTLCs
+	/// - punish the counterparty in the case of seeing a revoked commitment transaction
+	/// - force close the channel and claim/timeout incoming/outgoing HTLCs if near expiration
+	/// - detect settled outputs for later spending
+	/// - schedule and bump any in-flight claims
+	///
+	/// Returns any transaction outputs from `txn_matched` whose spends should be watched for.
+	/// After this call, these outputs are also available via [`get_outputs_to_watch`].
+	///
+	/// [`get_outputs_to_watch`]: #method.get_outputs_to_watch
+	pub fn block_connected<B: Deref, F: Deref, L: Deref>(&mut self, header: &BlockHeader, txn_matched: &[(usize, &Transaction)], height: u32, broadcaster: B, fee_estimator: F, logger: L)-> Vec<(Txid, Vec<TxOut>)>
 		where B::Target: BroadcasterInterface,
 		      F::Target: FeeEstimator,
 		      L::Target: Logger,
 	{
-		for tx in txn_matched {
+		for &(_, tx) in txn_matched {
 			let mut output_val = 0;
 			for out in tx.output.iter() {
 				if out.value > 21_000_000_0000_0000 { panic!("Value-overflowing transaction provided to block connected"); }
@@ -1899,10 +1908,12 @@ impl ChannelMonitor {
 			}
 		}
 
+		let block_hash = header.block_hash();
 		log_trace!(logger, "Block {} at height {} connected with {} txn matched", block_hash, height, txn_matched.len());
+
 		let mut watch_outputs = Vec::new();
 		let mut claimable_outpoints = Vec::new();
-		for tx in txn_matched {
+		for &(_, tx) in txn_matched {
 			if tx.input.len() == 1 {
 				// Assuming our keys were not leaked (in which case we're screwed no matter what),
 				// commitment transactions and HTLC transactions will all only ever have one input,
@@ -1979,7 +1990,7 @@ impl ChannelMonitor {
 
 		self.onchain_tx_handler.block_connected(txn_matched, claimable_outpoints, height, &*broadcaster, &*fee_estimator, &*logger);
 
-		self.last_block_hash = block_hash.clone();
+		self.last_block_hash = block_hash;
 		for &(ref txid, ref output_scripts) in watch_outputs.iter() {
 			self.outputs_to_watch.insert(txid.clone(), output_scripts.iter().map(|o| o.script_pubkey.clone()).collect());
 		}
@@ -1987,12 +1998,16 @@ impl ChannelMonitor {
 		watch_outputs
 	}
 
-	fn block_disconnected<B: Deref, F: Deref, L: Deref>(&mut self, height: u32, block_hash: &BlockHash, broadcaster: B, fee_estimator: F, logger: L)
+	/// Determines if the disconnected block contained any transactions of interest and updates
+	/// appropriately.
+	pub fn block_disconnected<B: Deref, F: Deref, L: Deref>(&mut self, header: &BlockHeader, height: u32, broadcaster: B, fee_estimator: F, logger: L)
 		where B::Target: BroadcasterInterface,
 		      F::Target: FeeEstimator,
 		      L::Target: Logger,
 	{
+		let block_hash = header.block_hash();
 		log_trace!(logger, "Block {} at height {} disconnected", block_hash, height);
+
 		if let Some(_) = self.onchain_events_waiting_threshold_conf.remove(&(height + ANTI_REORG_DELAY - 1)) {
 			//We may discard:
 			//- htlc update there as failure-trigger tx (revoked commitment tx, non-revoked commitment tx, HTLC-timeout tx) has been disconnected
@@ -2001,7 +2016,7 @@ impl ChannelMonitor {
 
 		self.onchain_tx_handler.block_disconnected(height, broadcaster, fee_estimator, logger);
 
-		self.last_block_hash = block_hash.clone();
	}
 
 	fn would_broadcast_at_height<L: Deref>(&self, height: u32, logger: &L) -> bool where L::Target: Logger {
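
The WatchEventQueue::filter_block hunk above keeps a transaction either when ChainWatchedUtil::does_match_tx matches it directly or when it spends an output of a transaction already kept earlier in the same block; a single pass with a HashSet of matched txids suffices because a valid block orders parents before children. Below is a minimal, self-contained sketch of that descendant-matching rule, using toy Txid/Tx stand-ins instead of the bitcoin crate's types and a caller-supplied closure in place of does_match_tx (both are illustrative assumptions, not part of this patch).

use std::collections::HashSet;

// Toy stand-ins for bitcoin::Txid / bitcoin::Transaction, just enough for the sketch.
#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
struct Txid(u32);

#[derive(Clone, Debug)]
struct Tx {
	txid: Txid,
	// Txids whose outputs this transaction spends.
	spends: Vec<Txid>,
}

// Keeps (index, tx) pairs that either match directly or spend an output of a
// transaction already kept from the same block.
fn filter_block_txs<'a>(txdata: &'a [(usize, Tx)], directly_matches: impl Fn(&Tx) -> bool) -> Vec<(usize, &'a Tx)> {
	let mut matched_txids = HashSet::new();
	txdata.iter().filter_map(|&(index, ref tx)| {
		let matched = directly_matches(tx) || tx.spends.iter().any(|prev| matched_txids.contains(prev));
		if matched {
			matched_txids.insert(tx.txid);
			Some((index, tx))
		} else {
			None
		}
	}).collect()
}

fn main() {
	let block = vec![
		(1, Tx { txid: Txid(10), spends: vec![Txid(1)] }),  // watched directly (e.g. the funding tx)
		(2, Tx { txid: Txid(20), spends: vec![Txid(10)] }), // spends the watched tx, kept as a descendant
		(3, Tx { txid: Txid(30), spends: vec![Txid(2)] }),  // unrelated, filtered out
	];
	let matched = filter_block_txs(&block, |tx| tx.txid == Txid(10));
	let positions: Vec<usize> = matched.iter().map(|&(i, _)| i).collect();
	assert_eq!(positions, vec![1, 2]);
	println!("matched positions within the block: {:?}", positions);
}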
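
On the caller side, ChainListener::block_connected now takes the block's transactions as indexed (position-within-block, transaction) pairs rather than a pre-filtered &[&Transaction] plus a parallel index slice. A hedged sketch of how that slice might be assembled from a full bitcoin::Block follows; block_to_txdata is a hypothetical helper shown for illustration, not something added by this patch.

use bitcoin::blockdata::block::Block;
use bitcoin::blockdata::transaction::Transaction;

// Pairs each transaction with its position in the block, the shape expected by the
// new block_connected signatures.
fn block_to_txdata(block: &Block) -> Vec<(usize, &Transaction)> {
	block.txdata.iter().enumerate().collect()
}

// Usage (assuming `chain_monitor` is a ChainMonitor and `height` is the block's height):
// chain_monitor.block_connected(&block.header, &block_to_txdata(&block), height);

Since ChainMonitor now runs filter_block over the full txdata itself, the caller no longer needs to pre-filter transactions against registered scripts before handing them in.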