From: Jeffrey Czyz Date: Fri, 7 Aug 2020 17:58:15 +0000 (-0700) Subject: Move channelmonitor.rs from ln to chain module X-Git-Url: http://git.bitcoin.ninja/?a=commitdiff_plain;h=abe35338a699b0f498f24dd5b41b393a097a621d;p=rust-lightning Move channelmonitor.rs from ln to chain module Given the chain::Watch interface is defined in terms of ChannelMonitor and ChannelMonitorUpdateErr, move channelmonitor.rs from the ln module to the chain module. --- diff --git a/fuzz/src/chanmon_consistency.rs b/fuzz/src/chanmon_consistency.rs index 7ef635923..a703abf6f 100644 --- a/fuzz/src/chanmon_consistency.rs +++ b/fuzz/src/chanmon_consistency.rs @@ -21,11 +21,11 @@ use bitcoin::hashes::sha256::Hash as Sha256; use bitcoin::hash_types::{BlockHash, WPubkeyHash}; use lightning::chain; +use lightning::chain::channelmonitor; +use lightning::chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdateErr, HTLCUpdate}; use lightning::chain::transaction::OutPoint; use lightning::chain::chaininterface::{BroadcasterInterface, ConfirmationTarget, FeeEstimator}; use lightning::chain::keysinterface::{KeysInterface, InMemoryChannelKeys}; -use lightning::ln::channelmonitor; -use lightning::ln::channelmonitor::{ChannelMonitor, ChannelMonitorUpdateErr, HTLCUpdate}; use lightning::ln::channelmanager::{ChannelManager, PaymentHash, PaymentPreimage, PaymentSecret, ChannelManagerReadArgs}; use lightning::ln::features::{ChannelFeatures, InitFeatures, NodeFeatures}; use lightning::ln::msgs::{CommitmentUpdate, ChannelMessageHandler, ErrorAction, UpdateAddHTLC, Init}; diff --git a/fuzz/src/chanmon_deser.rs b/fuzz/src/chanmon_deser.rs index 3f4ff5ad0..5a76340ff 100644 --- a/fuzz/src/chanmon_deser.rs +++ b/fuzz/src/chanmon_deser.rs @@ -3,8 +3,8 @@ use bitcoin::hash_types::BlockHash; +use lightning::chain::channelmonitor; use lightning::util::enforcing_trait_impls::EnforcingChannelKeys; -use lightning::ln::channelmonitor; use lightning::util::ser::{Readable, Writer}; use utils::test_logger; diff --git a/fuzz/src/full_stack.rs b/fuzz/src/full_stack.rs index 908e14599..13d61f9b1 100644 --- a/fuzz/src/full_stack.rs +++ b/fuzz/src/full_stack.rs @@ -19,9 +19,9 @@ use bitcoin::hash_types::{Txid, BlockHash, WPubkeyHash}; use lightning::chain; use lightning::chain::chaininterface::{BroadcasterInterface, ConfirmationTarget, FeeEstimator}; +use lightning::chain::channelmonitor; use lightning::chain::transaction::OutPoint; use lightning::chain::keysinterface::{InMemoryChannelKeys, KeysInterface}; -use lightning::ln::channelmonitor; use lightning::ln::channelmanager::{ChannelManager, PaymentHash, PaymentPreimage, PaymentSecret}; use lightning::ln::peer_handler::{MessageHandler,PeerManager,SocketDescriptor}; use lightning::routing::router::get_route; @@ -898,6 +898,6 @@ mod tests { assert_eq!(log_entries.get(&("lightning::ln::peer_handler".to_string(), "Handling UpdateHTLCs event in peer_handler for node 030200000000000000000000000000000000000000000000000000000000000000 with 1 adds, 0 fulfills, 0 fails for channel 3900000000000000000000000000000000000000000000000000000000000000".to_string())), Some(&3)); // 7 assert_eq!(log_entries.get(&("lightning::ln::peer_handler".to_string(), "Handling UpdateHTLCs event in peer_handler for node 030000000000000000000000000000000000000000000000000000000000000000 with 0 adds, 1 fulfills, 0 fails for channel 3d00000000000000000000000000000000000000000000000000000000000000".to_string())), Some(&1)); // 8 assert_eq!(log_entries.get(&("lightning::ln::peer_handler".to_string(), "Handling UpdateHTLCs event in 
peer_handler for node 030000000000000000000000000000000000000000000000000000000000000000 with 0 adds, 0 fulfills, 1 fails for channel 3d00000000000000000000000000000000000000000000000000000000000000".to_string())), Some(&2)); // 9 - assert_eq!(log_entries.get(&("lightning::ln::channelmonitor".to_string(), "Input spending remote commitment tx (00000000000000000000000000000000000000000000000000000000000000a1:0) in 0000000000000000000000000000000000000000000000000000000000000018 resolves outbound HTLC with payment hash ff00000000000000000000000000000000000000000000000000000000000000 with timeout".to_string())), Some(&1)); // 10 + assert_eq!(log_entries.get(&("lightning::chain::channelmonitor".to_string(), "Input spending remote commitment tx (00000000000000000000000000000000000000000000000000000000000000a1:0) in 0000000000000000000000000000000000000000000000000000000000000018 resolves outbound HTLC with payment hash ff00000000000000000000000000000000000000000000000000000000000000 with timeout".to_string())), Some(&1)); // 10 } } diff --git a/lightning-net-tokio/src/lib.rs b/lightning-net-tokio/src/lib.rs index b5282db88..86ed5c114 100644 --- a/lightning-net-tokio/src/lib.rs +++ b/lightning-net-tokio/src/lib.rs @@ -27,7 +27,7 @@ //! type Logger = dyn lightning::util::logger::Logger; //! type ChainAccess = dyn lightning::chain::Access; //! type ChainNotify = dyn lightning::chain::Notify; -//! type ChainMonitor = lightning::ln::channelmonitor::ChainMonitor, Arc, Arc, Arc>; +//! type ChainMonitor = lightning::chain::channelmonitor::ChainMonitor, Arc, Arc, Arc>; //! type ChannelManager = lightning::ln::channelmanager::SimpleArcChannelManager; //! type PeerManager = lightning::ln::peer_handler::SimpleArcPeerManager; //! diff --git a/lightning/src/chain/channelmonitor.rs b/lightning/src/chain/channelmonitor.rs new file mode 100644 index 000000000..f4b8e1979 --- /dev/null +++ b/lightning/src/chain/channelmonitor.rs @@ -0,0 +1,2826 @@ +//! The logic to monitor for on-chain transactions and create the relevant claim responses lives +//! here. +//! +//! ChannelMonitor objects are generated by ChannelManager in response to relevant +//! messages/actions, and MUST be persisted to disk (and, preferably, remotely) before progress can +//! be made in responding to certain messages, see [`chain::Watch`] for more. +//! +//! Note that ChannelMonitors are an important part of the lightning trust model and a copy of the +//! latest ChannelMonitor must always be actively monitoring for chain updates (and no out-of-date +//! ChannelMonitors should do so). Thus, if you're building rust-lightning into an HSM or other +//! security-domain-separated system design, you should consider having multiple paths for +//! ChannelMonitors to get out of the HSM and onto monitoring devices. +//! +//! 
[`chain::Watch`]: ../../chain/trait.Watch.html + +use bitcoin::blockdata::block::BlockHeader; +use bitcoin::blockdata::transaction::{TxOut,Transaction}; +use bitcoin::blockdata::transaction::OutPoint as BitcoinOutPoint; +use bitcoin::blockdata::script::{Script, Builder}; +use bitcoin::blockdata::opcodes; +use bitcoin::consensus::encode; +use bitcoin::util::hash::BitcoinHash; + +use bitcoin::hashes::Hash; +use bitcoin::hashes::sha256::Hash as Sha256; +use bitcoin::hash_types::{Txid, BlockHash, WPubkeyHash}; + +use bitcoin::secp256k1::{Secp256k1,Signature}; +use bitcoin::secp256k1::key::{SecretKey,PublicKey}; +use bitcoin::secp256k1; + +use ln::msgs::DecodeError; +use ln::chan_utils; +use ln::chan_utils::{CounterpartyCommitmentSecrets, HTLCOutputInCommitment, LocalCommitmentTransaction, HTLCType}; +use ln::channelmanager::{HTLCSource, PaymentPreimage, PaymentHash}; +use ln::onchaintx::{OnchainTxHandler, InputDescriptors}; +use chain; +use chain::Notify; +use chain::chaininterface::{ChainWatchedUtil, BroadcasterInterface, FeeEstimator}; +use chain::transaction::OutPoint; +use chain::keysinterface::{SpendableOutputDescriptor, ChannelKeys}; +use util::logger::Logger; +use util::ser::{Readable, MaybeReadable, Writer, Writeable, U48}; +use util::{byte_utils, events}; + +use std::collections::{HashMap, hash_map}; +use std::sync::Mutex; +use std::{cmp, mem}; +use std::ops::Deref; + +/// An update generated by the underlying Channel itself which contains some new information the +/// ChannelMonitor should be made aware of. +#[cfg_attr(test, derive(PartialEq))] +#[derive(Clone)] +#[must_use] +pub struct ChannelMonitorUpdate { + pub(crate) updates: Vec, + /// The sequence number of this update. Updates *must* be replayed in-order according to this + /// sequence number (and updates may panic if they are not). The update_id values are strictly + /// increasing and increase by one for each new update. + /// + /// This sequence number is also used to track up to which points updates which returned + /// ChannelMonitorUpdateErr::TemporaryFailure have been applied to all copies of a given + /// ChannelMonitor when ChannelManager::channel_monitor_updated is called. + pub update_id: u64, +} + +impl Writeable for ChannelMonitorUpdate { + fn write(&self, w: &mut W) -> Result<(), ::std::io::Error> { + self.update_id.write(w)?; + (self.updates.len() as u64).write(w)?; + for update_step in self.updates.iter() { + update_step.write(w)?; + } + Ok(()) + } +} +impl Readable for ChannelMonitorUpdate { + fn read(r: &mut R) -> Result { + let update_id: u64 = Readable::read(r)?; + let len: u64 = Readable::read(r)?; + let mut updates = Vec::with_capacity(cmp::min(len as usize, MAX_ALLOC_SIZE / ::std::mem::size_of::())); + for _ in 0..len { + updates.push(Readable::read(r)?); + } + Ok(Self { update_id, updates }) + } +} + +/// An error enum representing a failure to persist a channel monitor update. +#[derive(Clone)] +pub enum ChannelMonitorUpdateErr { + /// Used to indicate a temporary failure (eg connection to a watchtower or remote backup of + /// our state failed, but is expected to succeed at some point in the future). + /// + /// Such a failure will "freeze" a channel, preventing us from revoking old states or + /// submitting new commitment transactions to the remote party. Once the update(s) which failed + /// have been successfully applied, ChannelManager::channel_monitor_updated can be used to + /// restore the channel to an operational state. 
+ /// + /// Note that a given ChannelManager will *never* re-generate a given ChannelMonitorUpdate. If + /// you return a TemporaryFailure you must ensure that it is written to disk safely before + /// writing out the latest ChannelManager state. + /// + /// Even when a channel has been "frozen", updates to the ChannelMonitor can continue to occur + /// (eg if an inbound HTLC which we forwarded was claimed upstream, resulting in us attempting + /// to claim it on this channel) and those updates must be applied wherever they can be. At + /// least one such updated ChannelMonitor must be persisted, otherwise PermanentFailure should + /// be returned to get things on-chain ASAP using only the in-memory copy. Obviously updates to + /// the channel which would invalidate previous ChannelMonitors are not made when a channel has + /// been "frozen". + /// + /// Note that even if updates made after TemporaryFailure succeed you must still call + /// channel_monitor_updated to ensure you have the latest monitor and re-enable normal channel + /// operation. + /// + /// Note that the update being processed here will not be replayed for you when you call + /// ChannelManager::channel_monitor_updated, so you must store the update itself along + /// with the persisted ChannelMonitor on your own local disk prior to returning a + /// TemporaryFailure. You may, of course, employ a journaling approach, storing only the + /// ChannelMonitorUpdate on disk without updating the monitor itself, replaying the journal at + /// reload-time. + /// + /// For deployments where a copy of ChannelMonitors and other local state are backed up in a + /// remote location (with local copies persisted immediately), it is anticipated that all + /// updates will return TemporaryFailure until the remote copies can be updated. + TemporaryFailure, + /// Used to indicate no further channel monitor updates will be allowed (eg we've moved on to a + /// different watchtower and cannot update with all watchtowers that were previously informed + /// of this channel). This will force-close the channel in question (which will generate one + /// final ChannelMonitorUpdate which must be delivered to at least one ChannelMonitor copy). + /// + /// Should also be used to indicate a failure to update the local persisted copy of the channel + /// monitor. + PermanentFailure, +} + +/// General Err type for ChannelMonitor actions. Generally, this implies that the data provided is +/// inconsistent with the ChannelMonitor being called. eg for ChannelMonitor::update_monitor this +/// means you tried to update a monitor for a different channel or the ChannelMonitorUpdate was +/// corrupted. +/// Contains a human-readable error message. +#[derive(Debug)] +pub struct MonitorUpdateError(pub &'static str); + +/// Simple structure sent back by `chain::Watch` when an HTLC from a forward channel is detected +/// onchain, carrying the info needed to update the HTLC in the backward channel. +/// +/// [`chain::Watch`]: ../../chain/trait.Watch.html +#[derive(Clone, PartialEq)] +pub struct HTLCUpdate { + pub(crate) payment_hash: PaymentHash, + pub(crate) payment_preimage: Option, + pub(crate) source: HTLCSource +} +impl_writeable!(HTLCUpdate, 0, { payment_hash, payment_preimage, source }); + +/// An implementation of [`chain::Watch`]. +/// +/// May be used in conjunction with [`ChannelManager`] to monitor channels locally or used +/// independently to monitor channels remotely.
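For illustration, here is a minimal sketch (not part of this patch) of how a persister sitting behind `chain::Watch` might map failures onto the two error variants documented above; the `RemoteBackup` type, `BackupError` enum and `persist_update` method are hypothetical names used only for this example:

// Hypothetical persister glue: maps backup failures onto the
// ChannelMonitorUpdateErr variants documented above.
fn persist_or_fail(backup: &RemoteBackup, funding_txo: OutPoint, update: &ChannelMonitorUpdate)
	-> Result<(), ChannelMonitorUpdateErr>
{
	match backup.persist_update(funding_txo, update) {
		Ok(()) => Ok(()),
		// Watchtower unreachable but expected to recover: freeze the channel
		// and later call ChannelManager::channel_monitor_updated to thaw it.
		Err(BackupError::Unreachable) => Err(ChannelMonitorUpdateErr::TemporaryFailure),
		// Local persistence is broken: force-close and get funds on-chain ASAP.
		Err(BackupError::DiskFailure) => Err(ChannelMonitorUpdateErr::PermanentFailure),
	}
}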
+/// +/// [`chain::Watch`]: ../../chain/trait.Watch.html +pub struct ChainMonitor + where C::Target: chain::Notify, + T::Target: BroadcasterInterface, + F::Target: FeeEstimator, + L::Target: Logger, +{ + #[cfg(test)] // Used in ChannelManager tests to manipulate channels directly + pub monitors: Mutex>>, + #[cfg(not(test))] + monitors: Mutex>>, + watch_events: Mutex, + chain_source: Option, + broadcaster: T, + logger: L, + fee_estimator: F +} + +struct WatchEventCache { + watched: ChainWatchedUtil, + events: Vec, +} + +/// An event indicating on-chain activity to watch for pertaining to a channel. +enum WatchEvent { + /// Watch for a transaction with `txid` and having an output with `script_pubkey` as a spending + /// condition. + WatchTransaction { + /// Identifier of the transaction. + txid: Txid, + + /// Spending condition for an output of the transaction. + script_pubkey: Script, + }, + /// Watch for spends of a transaction output identified by `outpoint` having `script_pubkey` as + /// the spending condition. + WatchOutput { + /// Identifier for the output. + outpoint: OutPoint, + + /// Spending condition for the output. + script_pubkey: Script, + } +} + +impl WatchEventCache { + fn new() -> Self { + Self { + watched: ChainWatchedUtil::new(), + events: Vec::new(), + } + } + + fn watch_tx(&mut self, txid: &Txid, script_pubkey: &Script) { + if self.watched.register_tx(txid, script_pubkey) { + self.events.push(WatchEvent::WatchTransaction { + txid: *txid, + script_pubkey: script_pubkey.clone() + }); + } + } + + fn watch_output(&mut self, outpoint: (&Txid, usize), script_pubkey: &Script) { + let (txid, index) = outpoint; + if self.watched.register_outpoint((*txid, index as u32), script_pubkey) { + self.events.push(WatchEvent::WatchOutput { + outpoint: OutPoint { + txid: *txid, + index: index as u16, + }, + script_pubkey: script_pubkey.clone(), + }); + } + } + + fn flush_events(&mut self, chain_source: &Option) -> bool where C::Target: chain::Notify { + let num_events = self.events.len(); + match chain_source { + &None => self.events.clear(), + &Some(ref chain_source) => { + for event in self.events.drain(..) { + match event { + WatchEvent::WatchTransaction { txid, script_pubkey } => { + chain_source.register_tx(txid, script_pubkey) + }, + WatchEvent::WatchOutput { outpoint, script_pubkey } => { + chain_source.register_output(outpoint, script_pubkey) + }, + } + } + } + } + num_events > 0 + } +} + +impl ChainMonitor + where C::Target: chain::Notify, + T::Target: BroadcasterInterface, + F::Target: FeeEstimator, + L::Target: Logger, +{ + /// Delegates to [`ChannelMonitor::block_connected`] for each watched channel. Any HTLCs that + /// were resolved on chain will be retuned by [`chain::Watch::release_pending_htlc_updates`]. + /// + /// Calls back to [`chain::Notify`] if any monitor indicated new outputs to watch, returning + /// `true` if so. 
+ /// + /// [`ChannelMonitor::block_connected`]: struct.ChannelMonitor.html#method.block_connected + /// [`chain::Watch::release_pending_htlc_updates`]: ../../chain/trait.Watch.html#tymethod.release_pending_htlc_updates + /// [`chain::Notify`]: ../../chain/trait.Notify.html + pub fn block_connected(&self, header: &BlockHeader, txdata: &[(usize, &Transaction)], height: u32) -> bool { + let mut watch_events = self.watch_events.lock().unwrap(); + let matched_txn: Vec<_> = txdata.iter().filter(|&&(_, tx)| watch_events.watched.does_match_tx(tx)).map(|e| *e).collect(); + { + let mut monitors = self.monitors.lock().unwrap(); + for monitor in monitors.values_mut() { + let txn_outputs = monitor.block_connected(header, &matched_txn, height, &*self.broadcaster, &*self.fee_estimator, &*self.logger); + + for (ref txid, ref outputs) in txn_outputs { + for (idx, output) in outputs.iter().enumerate() { + watch_events.watch_output((txid, idx), &output.script_pubkey); + } + } + } + } + watch_events.flush_events(&self.chain_source) + } + + /// Delegates to [`ChannelMonitor::block_disconnected`] for each watched channel. + /// + /// [`ChannelMonitor::block_disconnected`]: struct.ChannelMonitor.html#method.block_disconnected + pub fn block_disconnected(&self, header: &BlockHeader, disconnected_height: u32) { + let mut monitors = self.monitors.lock().unwrap(); + for monitor in monitors.values_mut() { + monitor.block_disconnected(header, disconnected_height, &*self.broadcaster, &*self.fee_estimator, &*self.logger); + } + } +} + +impl ChainMonitor + where C::Target: chain::Notify, + T::Target: BroadcasterInterface, + F::Target: FeeEstimator, + L::Target: Logger, +{ + /// Creates a new object which can be used to monitor several channels given the chain + /// interface with which to register to receive notifications. + pub fn new(chain_source: Option, broadcaster: T, logger: L, feeest: F) -> Self { + Self { + monitors: Mutex::new(HashMap::new()), + watch_events: Mutex::new(WatchEventCache::new()), + chain_source, + broadcaster, + logger, + fee_estimator: feeest, + } + } + + /// Adds or updates the monitor which monitors the channel referred to by the given outpoint. + /// + /// Calls back to [`chain::Notify`] with the funding transaction and outputs to watch. + /// + /// [`chain::Notify`]: ../../chain/trait.Notify.html + pub fn add_monitor(&self, outpoint: OutPoint, monitor: ChannelMonitor) -> Result<(), MonitorUpdateError> { + let mut watch_events = self.watch_events.lock().unwrap(); + let mut monitors = self.monitors.lock().unwrap(); + let entry = match monitors.entry(outpoint) { + hash_map::Entry::Occupied(_) => return Err(MonitorUpdateError("Channel monitor for given outpoint is already present")), + hash_map::Entry::Vacant(e) => e, + }; + { + let funding_txo = monitor.get_funding_txo(); + log_trace!(self.logger, "Got new Channel Monitor for channel {}", log_bytes!(funding_txo.0.to_channel_id()[..])); + watch_events.watch_tx(&funding_txo.0.txid, &funding_txo.1); + watch_events.watch_output((&funding_txo.0.txid, funding_txo.0.index as usize), &funding_txo.1); + for (txid, outputs) in monitor.get_outputs_to_watch().iter() { + for (idx, script) in outputs.iter().enumerate() { + watch_events.watch_output((txid, idx), script); + } + } + } + entry.insert(monitor); + watch_events.flush_events(&self.chain_source); + Ok(()) + } + + /// Updates the monitor which monitors the channel referred to by the given outpoint. 
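As a usage sketch (not part of this patch, and assuming `chain_source`, `broadcaster`, `logger`, `fee_estimator`, `funding_txo` and `monitor` are already in scope with appropriate types), a ChainMonitor is constructed and handed monitors like so:

// Construct the monitor collection; the argument order matches new() above.
let chain_monitor = ChainMonitor::new(Some(chain_source), broadcaster, logger, fee_estimator);
// Registering a monitor also registers its funding transaction/output and any
// other outputs it watches with the chain::Notify source, via flush_events.
chain_monitor.add_monitor(funding_txo, monitor)
	.expect("a monitor for this outpoint was already registered");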
+ pub fn update_monitor(&self, outpoint: OutPoint, update: ChannelMonitorUpdate) -> Result<(), MonitorUpdateError> { + let mut monitors = self.monitors.lock().unwrap(); + match monitors.get_mut(&outpoint) { + Some(orig_monitor) => { + log_trace!(self.logger, "Updating Channel Monitor for channel {}", log_funding_info!(orig_monitor)); + orig_monitor.update_monitor(update, &self.broadcaster, &self.logger) + }, + None => Err(MonitorUpdateError("No such monitor registered")) + } + } +} + +impl chain::Watch for ChainMonitor + where C::Target: chain::Notify, + T::Target: BroadcasterInterface, + F::Target: FeeEstimator, + L::Target: Logger, +{ + type Keys = ChanSigner; + + fn watch_channel(&self, funding_txo: OutPoint, monitor: ChannelMonitor) -> Result<(), ChannelMonitorUpdateErr> { + match self.add_monitor(funding_txo, monitor) { + Ok(_) => Ok(()), + Err(_) => Err(ChannelMonitorUpdateErr::PermanentFailure), + } + } + + fn update_channel(&self, funding_txo: OutPoint, update: ChannelMonitorUpdate) -> Result<(), ChannelMonitorUpdateErr> { + match self.update_monitor(funding_txo, update) { + Ok(_) => Ok(()), + Err(_) => Err(ChannelMonitorUpdateErr::PermanentFailure), + } + } + + fn release_pending_htlc_updates(&self) -> Vec { + let mut pending_htlcs_updated = Vec::new(); + for chan in self.monitors.lock().unwrap().values_mut() { + pending_htlcs_updated.append(&mut chan.get_and_clear_pending_htlcs_updated()); + } + pending_htlcs_updated + } +} + +impl events::EventsProvider for ChainMonitor + where C::Target: chain::Notify, + T::Target: BroadcasterInterface, + F::Target: FeeEstimator, + L::Target: Logger, +{ + fn get_and_clear_pending_events(&self) -> Vec { + let mut pending_events = Vec::new(); + for chan in self.monitors.lock().unwrap().values_mut() { + pending_events.append(&mut chan.get_and_clear_pending_events()); + } + pending_events + } +} + +/// If an HTLC expires within this many blocks, don't try to claim it in a shared transaction, +/// instead claiming it in its own individual transaction. +pub(crate) const CLTV_SHARED_CLAIM_BUFFER: u32 = 12; +/// If an HTLC expires within this many blocks, force-close the channel to broadcast the +/// HTLC-Success transaction. +/// In other words, this is an upper bound on how many blocks we think it can take us to get a +/// transaction confirmed (and we use it in a few more, equivalent, places). +pub(crate) const CLTV_CLAIM_BUFFER: u32 = 6; +/// Number of blocks by which point we expect our counterparty to have seen new blocks on the +/// network and done a full update_fail_htlc/commitment_signed dance (+ we've updated all our +/// copies of ChannelMonitors, including watchtowers). We could enforce the contract by failing +/// at CLTV expiration height, but giving a grace period to our peer may be profitable for us if they +/// can provide an over-late preimage. Nevertheless, the grace period has to be accounted for in our +/// CLTV_EXPIRY_DELTA to be secure. Following this policy we may decrease the rate of channel failures +/// due to expiration but increase the cost of funds being locked longer in case of failure. +/// This delay also covers a low-power peer being slow to process blocks and thus being behind us on +/// the accurate block height. +/// In case of an onchain failure to be passed backward, we may see the last block of ANTI_REORG_DELAY +/// at worst with this delay, so we are not only using this value as mercy for them but also +/// as a safeguard for ourselves, leaving enough time to react.
+pub(crate) const LATENCY_GRACE_PERIOD_BLOCKS: u32 = 3; +/// Number of blocks we wait on seeing an HTLC output being solved before we fail corresponding inbound +/// HTLCs. This prevents us from failing backwards and then getting a reorg resulting in us losing money. +/// We also use this delay to be sure we can remove our in-flight claim txn from the bump candidates buffer. +/// It may cause spurious generation of bumped claim txn but that's all right given the outpoint is already +/// solved by a previous claim tx. What we want to avoid is a reorg evicting our claim tx and us failing to +/// keep bumping another claim tx to solve the outpoint. +pub(crate) const ANTI_REORG_DELAY: u32 = 6; +/// Number of blocks before confirmation at which we fail back an un-relayed HTLC or at which we +/// refuse to accept a new HTLC. +/// +/// This is used for a few separate purposes: +/// 1) if we've received an MPP HTLC to us and it expires within this many blocks and we are +/// waiting on additional parts (or waiting on the preimage for any HTLC from the user), we will +/// fail this HTLC, +/// 2) if we receive an HTLC within this many blocks of its expiry (plus one to avoid a race +/// condition with the above), we will fail this HTLC without telling the user we received it, +/// 3) if we are waiting on a connection or a channel state update to send an HTLC to a peer, and +/// that HTLC expires within this many blocks, we will simply fail the HTLC instead. +/// +/// (1) is all about protecting us - we need enough time to update the channel state before we hit +/// CLTV_CLAIM_BUFFER, at which point we'd go on chain to claim the HTLC with the preimage. +/// +/// (2) is the same, but with an additional buffer to avoid accepting an HTLC which is immediately +/// in a race condition between the user connecting a block (which would fail it) and the user +/// providing us the preimage (which would claim it). +/// +/// (3) is about our counterparty - we don't want to relay an HTLC to a counterparty when they may +/// end up force-closing the channel on us to claim it. +pub(crate) const HTLC_FAIL_BACK_BUFFER: u32 = CLTV_CLAIM_BUFFER + LATENCY_GRACE_PERIOD_BLOCKS; + +#[derive(Clone, PartialEq)] +struct LocalSignedTx { + /// txid of the transaction in tx, just used to make comparison faster + txid: Txid, + revocation_key: PublicKey, + a_htlc_key: PublicKey, + b_htlc_key: PublicKey, + delayed_payment_key: PublicKey, + per_commitment_point: PublicKey, + feerate_per_kw: u32, + htlc_outputs: Vec<(HTLCOutputInCommitment, Option, Option)>, +} + +/// We use this to track remote commitment transactions and HTLC outputs and +/// use them to generate any justice or 2nd-stage preimage/timeout transactions.
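As a worked example of how the buffers above compose (a sketch, not code from this patch): HTLC_FAIL_BACK_BUFFER is 6 + 3 = 9 blocks, so a hypothetical acceptance check over these constants might look like:

// Hypothetical helper: refuse an inbound HTLC that expires inside the buffer
// (purpose (2) above additionally adds one block to dodge the connection race).
fn should_accept_htlc(cltv_expiry: u32, best_block_height: u32) -> bool {
	// CLTV_CLAIM_BUFFER (6) + LATENCY_GRACE_PERIOD_BLOCKS (3) = 9 blocks of headroom.
	debug_assert_eq!(HTLC_FAIL_BACK_BUFFER, CLTV_CLAIM_BUFFER + LATENCY_GRACE_PERIOD_BLOCKS);
	cltv_expiry > best_block_height + HTLC_FAIL_BACK_BUFFER
}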
+#[derive(PartialEq)] +struct RemoteCommitmentTransaction { + remote_delayed_payment_base_key: PublicKey, + remote_htlc_base_key: PublicKey, + on_remote_tx_csv: u16, + per_htlc: HashMap> +} + +impl Writeable for RemoteCommitmentTransaction { + fn write(&self, w: &mut W) -> Result<(), ::std::io::Error> { + self.remote_delayed_payment_base_key.write(w)?; + self.remote_htlc_base_key.write(w)?; + w.write_all(&byte_utils::be16_to_array(self.on_remote_tx_csv))?; + w.write_all(&byte_utils::be64_to_array(self.per_htlc.len() as u64))?; + for (ref txid, ref htlcs) in self.per_htlc.iter() { + w.write_all(&txid[..])?; + w.write_all(&byte_utils::be64_to_array(htlcs.len() as u64))?; + for &ref htlc in htlcs.iter() { + htlc.write(w)?; + } + } + Ok(()) + } +} +impl Readable for RemoteCommitmentTransaction { + fn read(r: &mut R) -> Result { + let remote_commitment_transaction = { + let remote_delayed_payment_base_key = Readable::read(r)?; + let remote_htlc_base_key = Readable::read(r)?; + let on_remote_tx_csv: u16 = Readable::read(r)?; + let per_htlc_len: u64 = Readable::read(r)?; + let mut per_htlc = HashMap::with_capacity(cmp::min(per_htlc_len as usize, MAX_ALLOC_SIZE / 64)); + for _ in 0..per_htlc_len { + let txid: Txid = Readable::read(r)?; + let htlcs_count: u64 = Readable::read(r)?; + let mut htlcs = Vec::with_capacity(cmp::min(htlcs_count as usize, MAX_ALLOC_SIZE / 32)); + for _ in 0..htlcs_count { + let htlc = Readable::read(r)?; + htlcs.push(htlc); + } + if let Some(_) = per_htlc.insert(txid, htlcs) { + return Err(DecodeError::InvalidValue); + } + } + RemoteCommitmentTransaction { + remote_delayed_payment_base_key, + remote_htlc_base_key, + on_remote_tx_csv, + per_htlc, + } + }; + Ok(remote_commitment_transaction) + } +} + +/// When ChannelMonitor discovers an onchain outpoint that is a step of a channel and finds that it needs +/// to generate a tx to push the channel state forward, we cache the outpoint-solving tx material to build +/// a new bumped one in case of a lengthy confirmation delay +#[derive(Clone, PartialEq)] +pub(crate) enum InputMaterial { + Revoked { + per_commitment_point: PublicKey, + remote_delayed_payment_base_key: PublicKey, + remote_htlc_base_key: PublicKey, + per_commitment_key: SecretKey, + input_descriptor: InputDescriptors, + amount: u64, + htlc: Option, + on_remote_tx_csv: u16, + }, + RemoteHTLC { + per_commitment_point: PublicKey, + remote_delayed_payment_base_key: PublicKey, + remote_htlc_base_key: PublicKey, + preimage: Option, + htlc: HTLCOutputInCommitment + }, + LocalHTLC { + preimage: Option, + amount: u64, + }, + Funding { + funding_redeemscript: Script, + } +} + +impl Writeable for InputMaterial { + fn write(&self, writer: &mut W) -> Result<(), ::std::io::Error> { + match self { + &InputMaterial::Revoked { ref per_commitment_point, ref remote_delayed_payment_base_key, ref remote_htlc_base_key, ref per_commitment_key, ref input_descriptor, ref amount, ref htlc, ref on_remote_tx_csv} => { + writer.write_all(&[0; 1])?; + per_commitment_point.write(writer)?; + remote_delayed_payment_base_key.write(writer)?; + remote_htlc_base_key.write(writer)?; + writer.write_all(&per_commitment_key[..])?; + input_descriptor.write(writer)?; + writer.write_all(&byte_utils::be64_to_array(*amount))?; + htlc.write(writer)?; + on_remote_tx_csv.write(writer)?; + }, + &InputMaterial::RemoteHTLC { ref per_commitment_point, ref remote_delayed_payment_base_key, ref remote_htlc_base_key, ref preimage, ref htlc} => { + writer.write_all(&[1; 1])?; + per_commitment_point.write(writer)?; +
remote_delayed_payment_base_key.write(writer)?; + remote_htlc_base_key.write(writer)?; + preimage.write(writer)?; + htlc.write(writer)?; + }, + &InputMaterial::LocalHTLC { ref preimage, ref amount } => { + writer.write_all(&[2; 1])?; + preimage.write(writer)?; + writer.write_all(&byte_utils::be64_to_array(*amount))?; + }, + &InputMaterial::Funding { ref funding_redeemscript } => { + writer.write_all(&[3; 1])?; + funding_redeemscript.write(writer)?; + } + } + Ok(()) + } +} + +impl Readable for InputMaterial { + fn read(reader: &mut R) -> Result { + let input_material = match ::read(reader)? { + 0 => { + let per_commitment_point = Readable::read(reader)?; + let remote_delayed_payment_base_key = Readable::read(reader)?; + let remote_htlc_base_key = Readable::read(reader)?; + let per_commitment_key = Readable::read(reader)?; + let input_descriptor = Readable::read(reader)?; + let amount = Readable::read(reader)?; + let htlc = Readable::read(reader)?; + let on_remote_tx_csv = Readable::read(reader)?; + InputMaterial::Revoked { + per_commitment_point, + remote_delayed_payment_base_key, + remote_htlc_base_key, + per_commitment_key, + input_descriptor, + amount, + htlc, + on_remote_tx_csv + } + }, + 1 => { + let per_commitment_point = Readable::read(reader)?; + let remote_delayed_payment_base_key = Readable::read(reader)?; + let remote_htlc_base_key = Readable::read(reader)?; + let preimage = Readable::read(reader)?; + let htlc = Readable::read(reader)?; + InputMaterial::RemoteHTLC { + per_commitment_point, + remote_delayed_payment_base_key, + remote_htlc_base_key, + preimage, + htlc + } + }, + 2 => { + let preimage = Readable::read(reader)?; + let amount = Readable::read(reader)?; + InputMaterial::LocalHTLC { + preimage, + amount, + } + }, + 3 => { + InputMaterial::Funding { + funding_redeemscript: Readable::read(reader)?, + } + } + _ => return Err(DecodeError::InvalidValue), + }; + Ok(input_material) + } +} + +/// ClaimRequest is a descriptor structure to communicate between the detection +/// and reaction modules. They are generated by ChannelMonitor while parsing +/// onchain txn leaked from a channel and handed over to OnchainTxHandler which +/// is responsible for opportunistic aggregation, selecting and enforcing +/// bumping logic, and building and signing transactions. +pub(crate) struct ClaimRequest { + // Block height before which claiming is exclusive to one party, + // after reaching it, claiming may be contentious. + pub(crate) absolute_timelock: u32, + // Timeout tx must have nLocktime set which means aggregating multiple + // ones must take the higher nLocktime among them to satisfy all of them. + // Sadly it has a few pitfalls: a) it takes longer to get funds back, b) the CLTV_DELTA + // of a sooner HTLC could be swallowed by the highest nLocktime of the HTLC set. + // To simplify, we mark them as non-aggregable. + pub(crate) aggregable: bool, + // Basic bitcoin outpoint (txid, vout) + pub(crate) outpoint: BitcoinOutPoint, + // Following outpoint type, set of data needed to generate transaction digest + // and satisfy witness program. + pub(crate) witness_data: InputMaterial +} + +/// Upon discovery of some classes of onchain tx by ChannelMonitor, we may have to take actions on them +/// once they mature to enough confirmations (ANTI_REORG_DELAY) +#[derive(Clone, PartialEq)] +enum OnchainEvent { + /// HTLC output getting solved by a timeout; at maturation we pass the upstream payment source information needed to solve + /// the inbound HTLC in the backward channel.
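To see the aggregation pitfall described above concretely, a small sketch (not from this patch): a single transaction spending several timeout outputs can only set one nLockTime, which must cover the latest of them, delaying the soonest HTLC to the latest one's expiry.

// An aggregated timeout-claim tx would have to use the maximum locktime of
// its inputs; None means there is nothing to aggregate.
fn aggregated_nlocktime(cltv_expiries: &[u32]) -> Option<u32> {
	cltv_expiries.iter().copied().max()
}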
Note that in case of a preimage we pass the info upstream without delay, as we can + /// only win from it, so it's never an OnchainEvent + HTLCUpdate { + htlc_update: (HTLCSource, PaymentHash), + }, + MaturingOutput { + descriptor: SpendableOutputDescriptor, + }, +} + +const SERIALIZATION_VERSION: u8 = 1; +const MIN_SERIALIZATION_VERSION: u8 = 1; + +#[cfg_attr(test, derive(PartialEq))] +#[derive(Clone)] +pub(crate) enum ChannelMonitorUpdateStep { + LatestLocalCommitmentTXInfo { + commitment_tx: LocalCommitmentTransaction, + htlc_outputs: Vec<(HTLCOutputInCommitment, Option, Option)>, + }, + LatestRemoteCommitmentTXInfo { + unsigned_commitment_tx: Transaction, // TODO: We should actually only need the txid here + htlc_outputs: Vec<(HTLCOutputInCommitment, Option>)>, + commitment_number: u64, + their_revocation_point: PublicKey, + }, + PaymentPreimage { + payment_preimage: PaymentPreimage, + }, + CommitmentSecret { + idx: u64, + secret: [u8; 32], + }, + /// Used to indicate that no further updates will occur, and likely that the latest local + /// commitment transaction(s) should be broadcast, as the channel has been force-closed. + ChannelForceClosed { + /// If set to false, we shouldn't broadcast the latest local commitment transaction as we + /// think we've fallen behind! + should_broadcast: bool, + }, +} + +impl Writeable for ChannelMonitorUpdateStep { + fn write(&self, w: &mut W) -> Result<(), ::std::io::Error> { + match self { + &ChannelMonitorUpdateStep::LatestLocalCommitmentTXInfo { ref commitment_tx, ref htlc_outputs } => { + 0u8.write(w)?; + commitment_tx.write(w)?; + (htlc_outputs.len() as u64).write(w)?; + for &(ref output, ref signature, ref source) in htlc_outputs.iter() { + output.write(w)?; + signature.write(w)?; + source.write(w)?; + } + } + &ChannelMonitorUpdateStep::LatestRemoteCommitmentTXInfo { ref unsigned_commitment_tx, ref htlc_outputs, ref commitment_number, ref their_revocation_point } => { + 1u8.write(w)?; + unsigned_commitment_tx.write(w)?; + commitment_number.write(w)?; + their_revocation_point.write(w)?; + (htlc_outputs.len() as u64).write(w)?; + for &(ref output, ref source) in htlc_outputs.iter() { + output.write(w)?; + source.as_ref().map(|b| b.as_ref()).write(w)?; + } + }, + &ChannelMonitorUpdateStep::PaymentPreimage { ref payment_preimage } => { + 2u8.write(w)?; + payment_preimage.write(w)?; + }, + &ChannelMonitorUpdateStep::CommitmentSecret { ref idx, ref secret } => { + 3u8.write(w)?; + idx.write(w)?; + secret.write(w)?; + }, + &ChannelMonitorUpdateStep::ChannelForceClosed { ref should_broadcast } => { + 4u8.write(w)?; + should_broadcast.write(w)?; + }, + } + Ok(()) + } +} +impl Readable for ChannelMonitorUpdateStep { + fn read(r: &mut R) -> Result { + match Readable::read(r)?
{ + 0u8 => { + Ok(ChannelMonitorUpdateStep::LatestLocalCommitmentTXInfo { + commitment_tx: Readable::read(r)?, + htlc_outputs: { + let len: u64 = Readable::read(r)?; + let mut res = Vec::new(); + for _ in 0..len { + res.push((Readable::read(r)?, Readable::read(r)?, Readable::read(r)?)); + } + res + }, + }) + }, + 1u8 => { + Ok(ChannelMonitorUpdateStep::LatestRemoteCommitmentTXInfo { + unsigned_commitment_tx: Readable::read(r)?, + commitment_number: Readable::read(r)?, + their_revocation_point: Readable::read(r)?, + htlc_outputs: { + let len: u64 = Readable::read(r)?; + let mut res = Vec::new(); + for _ in 0..len { + res.push((Readable::read(r)?, as Readable>::read(r)?.map(|o| Box::new(o)))); + } + res + }, + }) + }, + 2u8 => { + Ok(ChannelMonitorUpdateStep::PaymentPreimage { + payment_preimage: Readable::read(r)?, + }) + }, + 3u8 => { + Ok(ChannelMonitorUpdateStep::CommitmentSecret { + idx: Readable::read(r)?, + secret: Readable::read(r)?, + }) + }, + 4u8 => { + Ok(ChannelMonitorUpdateStep::ChannelForceClosed { + should_broadcast: Readable::read(r)? + }) + }, + _ => Err(DecodeError::InvalidValue), + } + } +} + +/// A ChannelMonitor handles chain events (blocks connected and disconnected) and generates +/// on-chain transactions to ensure no loss of funds occurs. +/// +/// You MUST ensure that no ChannelMonitors for a given channel anywhere contain out-of-date +/// information and are actively monitoring the chain. +/// +/// Pending Events or updated HTLCs which have not yet been read out by +/// get_and_clear_pending_htlcs_updated or get_and_clear_pending_events are serialized to disk and +/// reloaded at deserialize-time. Thus, you must ensure that, when handling events, all events +/// gotten are fully handled before re-serializing the new state. +pub struct ChannelMonitor { + latest_update_id: u64, + commitment_transaction_number_obscure_factor: u64, + + destination_script: Script, + broadcasted_local_revokable_script: Option<(Script, PublicKey, PublicKey)>, + remote_payment_script: Script, + shutdown_script: Script, + + keys: ChanSigner, + funding_info: (OutPoint, Script), + current_remote_commitment_txid: Option, + prev_remote_commitment_txid: Option, + + remote_tx_cache: RemoteCommitmentTransaction, + funding_redeemscript: Script, + channel_value_satoshis: u64, + // first is the idx of the first of the two revocation points + their_cur_revocation_points: Option<(u64, PublicKey, Option)>, + + on_local_tx_csv: u16, + + commitment_secrets: CounterpartyCommitmentSecrets, + remote_claimable_outpoints: HashMap>)>>, + /// We cannot identify HTLC-Success or HTLC-Timeout transactions by themselves on the chain. + /// Nor can we figure out their commitment numbers without the commitment transaction they are + /// spending. Thus, in order to claim them via revocation key, we track all the remote + /// commitment transactions which we find on-chain, mapping them to the commitment number which + /// can be used to derive the revocation key and claim the transactions. + remote_commitment_txn_on_chain: HashMap)>, + /// Cache used to make pruning of payment_preimages faster. + /// Maps payment_hash values to commitment numbers for remote transactions for non-revoked + /// remote transactions (ie should remain pretty small). + /// Serialized to disk but should generally not be sent to Watchtowers. 
+ remote_hash_commitment_number: HashMap, + + // We store two local commitment transactions to avoid any race conditions where we may update + // some monitors (potentially on watchtowers) but then fail to update others, resulting in the + // various monitors for one channel being out of sync, and us broadcasting a local + // transaction for which we have deleted claim information on some watchtowers. + prev_local_signed_commitment_tx: Option, + current_local_commitment_tx: LocalSignedTx, + + // Used just for ChannelManager to make sure it has the latest channel data during + // deserialization + current_remote_commitment_number: u64, + // Used just for ChannelManager to make sure it has the latest channel data during + // deserialization + current_local_commitment_number: u64, + + payment_preimages: HashMap, + + pending_htlcs_updated: Vec, + pending_events: Vec, + + // Used to track onchain events, i.e. transactions that are part of channels and confirmed on chain, on which + // we have to take actions once they reach enough confs. Key is a block height timer, i.e. we enforce + // actions when we receive a block with the given height. Actions depend on OnchainEvent type. + onchain_events_waiting_threshold_conf: HashMap>, + + // If we get serialized out and re-read, we need to make sure that the chain monitoring + // interface knows about the TXOs that we want to be notified of spends of. We could probably + // be smart and derive them from the above storage fields, but it's much simpler and more + // Obviously Correct (tm) if we just keep track of them explicitly. + outputs_to_watch: HashMap>, + + #[cfg(test)] + pub onchain_tx_handler: OnchainTxHandler, + #[cfg(not(test))] + onchain_tx_handler: OnchainTxHandler, + + // This is set when the Channel[Manager] generated a ChannelMonitorUpdate which indicated the + // channel has been force-closed. After this is set, no further local commitment transaction + // updates may occur, and we panic!() if one is provided. + lockdown_from_offchain: bool, + + // Set once we've signed a local commitment transaction and handed it over to our + // OnchainTxHandler. After this is set, no future updates to our local commitment transactions + // may occur, and we fail any such monitor updates. + local_tx_signed: bool, + + // We simply modify last_block_hash in Channel's block_connected so that serialization is + // consistent but hopefully the users' copy handles block_connected in a consistent way. + // (we do *not*, however, update them in update_monitor to ensure any local user copies keep + // their last_block_hash from its state and not based on updated copies that didn't run through + // the full block_connected). + pub(crate) last_block_hash: BlockHash, + secp_ctx: Secp256k1, //TODO: dedup this a bit...
+} + +#[cfg(any(test, feature = "fuzztarget"))] +/// Used only in testing and fuzztarget to check serialization roundtrips don't change the +/// underlying object +impl PartialEq for ChannelMonitor { + fn eq(&self, other: &Self) -> bool { + if self.latest_update_id != other.latest_update_id || + self.commitment_transaction_number_obscure_factor != other.commitment_transaction_number_obscure_factor || + self.destination_script != other.destination_script || + self.broadcasted_local_revokable_script != other.broadcasted_local_revokable_script || + self.remote_payment_script != other.remote_payment_script || + self.keys.pubkeys() != other.keys.pubkeys() || + self.funding_info != other.funding_info || + self.current_remote_commitment_txid != other.current_remote_commitment_txid || + self.prev_remote_commitment_txid != other.prev_remote_commitment_txid || + self.remote_tx_cache != other.remote_tx_cache || + self.funding_redeemscript != other.funding_redeemscript || + self.channel_value_satoshis != other.channel_value_satoshis || + self.their_cur_revocation_points != other.their_cur_revocation_points || + self.on_local_tx_csv != other.on_local_tx_csv || + self.commitment_secrets != other.commitment_secrets || + self.remote_claimable_outpoints != other.remote_claimable_outpoints || + self.remote_commitment_txn_on_chain != other.remote_commitment_txn_on_chain || + self.remote_hash_commitment_number != other.remote_hash_commitment_number || + self.prev_local_signed_commitment_tx != other.prev_local_signed_commitment_tx || + self.current_remote_commitment_number != other.current_remote_commitment_number || + self.current_local_commitment_number != other.current_local_commitment_number || + self.current_local_commitment_tx != other.current_local_commitment_tx || + self.payment_preimages != other.payment_preimages || + self.pending_htlcs_updated != other.pending_htlcs_updated || + self.pending_events.len() != other.pending_events.len() || // We trust events to round-trip properly + self.onchain_events_waiting_threshold_conf != other.onchain_events_waiting_threshold_conf || + self.outputs_to_watch != other.outputs_to_watch || + self.lockdown_from_offchain != other.lockdown_from_offchain || + self.local_tx_signed != other.local_tx_signed + { + false + } else { + true + } + } +} + +impl ChannelMonitor { + /// Writes this monitor into the given writer, suitable for writing to disk. + /// + /// Note that the deserializer is only implemented for (Sha256dHash, ChannelMonitor), which + /// tells you the last block hash which was block_connect()ed. You MUST rescan any blocks along + /// the "reorg path" (ie disconnecting blocks until you find a common ancestor from both the + /// returned block hash and the current chain and then reconnecting blocks to get to the + /// best chain) upon deserializing the object! + pub fn write_for_disk(&self, writer: &mut W) -> Result<(), ::std::io::Error> { + //TODO: We still write out all the serialization here manually instead of using the fancy + //serialization framework we have, we should migrate things over to it.
+ writer.write_all(&[SERIALIZATION_VERSION; 1])?; + writer.write_all(&[MIN_SERIALIZATION_VERSION; 1])?; + + self.latest_update_id.write(writer)?; + + // Set in initial Channel-object creation, so should always be set by now: + U48(self.commitment_transaction_number_obscure_factor).write(writer)?; + + self.destination_script.write(writer)?; + if let Some(ref broadcasted_local_revokable_script) = self.broadcasted_local_revokable_script { + writer.write_all(&[0; 1])?; + broadcasted_local_revokable_script.0.write(writer)?; + broadcasted_local_revokable_script.1.write(writer)?; + broadcasted_local_revokable_script.2.write(writer)?; + } else { + writer.write_all(&[1; 1])?; + } + + self.remote_payment_script.write(writer)?; + self.shutdown_script.write(writer)?; + + self.keys.write(writer)?; + writer.write_all(&self.funding_info.0.txid[..])?; + writer.write_all(&byte_utils::be16_to_array(self.funding_info.0.index))?; + self.funding_info.1.write(writer)?; + self.current_remote_commitment_txid.write(writer)?; + self.prev_remote_commitment_txid.write(writer)?; + + self.remote_tx_cache.write(writer)?; + self.funding_redeemscript.write(writer)?; + self.channel_value_satoshis.write(writer)?; + + match self.their_cur_revocation_points { + Some((idx, pubkey, second_option)) => { + writer.write_all(&byte_utils::be48_to_array(idx))?; + writer.write_all(&pubkey.serialize())?; + match second_option { + Some(second_pubkey) => { + writer.write_all(&second_pubkey.serialize())?; + }, + None => { + writer.write_all(&[0; 33])?; + }, + } + }, + None => { + writer.write_all(&byte_utils::be48_to_array(0))?; + }, + } + + writer.write_all(&byte_utils::be16_to_array(self.on_local_tx_csv))?; + + self.commitment_secrets.write(writer)?; + + macro_rules! serialize_htlc_in_commitment { + ($htlc_output: expr) => { + writer.write_all(&[$htlc_output.offered as u8; 1])?; + writer.write_all(&byte_utils::be64_to_array($htlc_output.amount_msat))?; + writer.write_all(&byte_utils::be32_to_array($htlc_output.cltv_expiry))?; + writer.write_all(&$htlc_output.payment_hash.0[..])?; + $htlc_output.transaction_output_index.write(writer)?; + } + } + + writer.write_all(&byte_utils::be64_to_array(self.remote_claimable_outpoints.len() as u64))?; + for (ref txid, ref htlc_infos) in self.remote_claimable_outpoints.iter() { + writer.write_all(&txid[..])?; + writer.write_all(&byte_utils::be64_to_array(htlc_infos.len() as u64))?; + for &(ref htlc_output, ref htlc_source) in htlc_infos.iter() { + serialize_htlc_in_commitment!(htlc_output); + htlc_source.as_ref().map(|b| b.as_ref()).write(writer)?; + } + } + + writer.write_all(&byte_utils::be64_to_array(self.remote_commitment_txn_on_chain.len() as u64))?; + for (ref txid, &(commitment_number, ref txouts)) in self.remote_commitment_txn_on_chain.iter() { + writer.write_all(&txid[..])?; + writer.write_all(&byte_utils::be48_to_array(commitment_number))?; + (txouts.len() as u64).write(writer)?; + for script in txouts.iter() { + script.write(writer)?; + } + } + + writer.write_all(&byte_utils::be64_to_array(self.remote_hash_commitment_number.len() as u64))?; + for (ref payment_hash, commitment_number) in self.remote_hash_commitment_number.iter() { + writer.write_all(&payment_hash.0[..])?; + writer.write_all(&byte_utils::be48_to_array(*commitment_number))?; + } + + macro_rules! 
serialize_local_tx { + ($local_tx: expr) => { + $local_tx.txid.write(writer)?; + writer.write_all(&$local_tx.revocation_key.serialize())?; + writer.write_all(&$local_tx.a_htlc_key.serialize())?; + writer.write_all(&$local_tx.b_htlc_key.serialize())?; + writer.write_all(&$local_tx.delayed_payment_key.serialize())?; + writer.write_all(&$local_tx.per_commitment_point.serialize())?; + + writer.write_all(&byte_utils::be32_to_array($local_tx.feerate_per_kw))?; + writer.write_all(&byte_utils::be64_to_array($local_tx.htlc_outputs.len() as u64))?; + for &(ref htlc_output, ref sig, ref htlc_source) in $local_tx.htlc_outputs.iter() { + serialize_htlc_in_commitment!(htlc_output); + if let &Some(ref their_sig) = sig { + 1u8.write(writer)?; + writer.write_all(&their_sig.serialize_compact())?; + } else { + 0u8.write(writer)?; + } + htlc_source.write(writer)?; + } + } + } + + if let Some(ref prev_local_tx) = self.prev_local_signed_commitment_tx { + writer.write_all(&[1; 1])?; + serialize_local_tx!(prev_local_tx); + } else { + writer.write_all(&[0; 1])?; + } + + serialize_local_tx!(self.current_local_commitment_tx); + + writer.write_all(&byte_utils::be48_to_array(self.current_remote_commitment_number))?; + writer.write_all(&byte_utils::be48_to_array(self.current_local_commitment_number))?; + + writer.write_all(&byte_utils::be64_to_array(self.payment_preimages.len() as u64))?; + for payment_preimage in self.payment_preimages.values() { + writer.write_all(&payment_preimage.0[..])?; + } + + writer.write_all(&byte_utils::be64_to_array(self.pending_htlcs_updated.len() as u64))?; + for data in self.pending_htlcs_updated.iter() { + data.write(writer)?; + } + + writer.write_all(&byte_utils::be64_to_array(self.pending_events.len() as u64))?; + for event in self.pending_events.iter() { + event.write(writer)?; + } + + self.last_block_hash.write(writer)?; + + writer.write_all(&byte_utils::be64_to_array(self.onchain_events_waiting_threshold_conf.len() as u64))?; + for (ref target, ref events) in self.onchain_events_waiting_threshold_conf.iter() { + writer.write_all(&byte_utils::be32_to_array(**target))?; + writer.write_all(&byte_utils::be64_to_array(events.len() as u64))?; + for ev in events.iter() { + match *ev { + OnchainEvent::HTLCUpdate { ref htlc_update } => { + 0u8.write(writer)?; + htlc_update.0.write(writer)?; + htlc_update.1.write(writer)?; + }, + OnchainEvent::MaturingOutput { ref descriptor } => { + 1u8.write(writer)?; + descriptor.write(writer)?; + }, + } + } + } + + (self.outputs_to_watch.len() as u64).write(writer)?; + for (txid, output_scripts) in self.outputs_to_watch.iter() { + txid.write(writer)?; + (output_scripts.len() as u64).write(writer)?; + for script in output_scripts.iter() { + script.write(writer)?; + } + } + self.onchain_tx_handler.write(writer)?; + + self.lockdown_from_offchain.write(writer)?; + self.local_tx_signed.write(writer)?; + + Ok(()) + } +} + +impl ChannelMonitor { + pub(crate) fn new(keys: ChanSigner, shutdown_pubkey: &PublicKey, + on_remote_tx_csv: u16, destination_script: &Script, funding_info: (OutPoint, Script), + remote_htlc_base_key: &PublicKey, remote_delayed_payment_base_key: &PublicKey, + on_local_tx_csv: u16, funding_redeemscript: Script, channel_value_satoshis: u64, + commitment_transaction_number_obscure_factor: u64, + initial_local_commitment_tx: LocalCommitmentTransaction) -> ChannelMonitor { + + assert!(commitment_transaction_number_obscure_factor <= (1 << 48)); + let our_channel_close_key_hash = WPubkeyHash::hash(&shutdown_pubkey.serialize()); + let shutdown_script 
= Builder::new().push_opcode(opcodes::all::OP_PUSHBYTES_0).push_slice(&our_channel_close_key_hash[..]).into_script(); + let payment_key_hash = WPubkeyHash::hash(&keys.pubkeys().payment_point.serialize()); + let remote_payment_script = Builder::new().push_opcode(opcodes::all::OP_PUSHBYTES_0).push_slice(&payment_key_hash[..]).into_script(); + + let remote_tx_cache = RemoteCommitmentTransaction { remote_delayed_payment_base_key: *remote_delayed_payment_base_key, remote_htlc_base_key: *remote_htlc_base_key, on_remote_tx_csv, per_htlc: HashMap::new() }; + + let mut onchain_tx_handler = OnchainTxHandler::new(destination_script.clone(), keys.clone(), on_local_tx_csv); + + let local_tx_sequence = initial_local_commitment_tx.unsigned_tx.input[0].sequence as u64; + let local_tx_locktime = initial_local_commitment_tx.unsigned_tx.lock_time as u64; + let local_commitment_tx = LocalSignedTx { + txid: initial_local_commitment_tx.txid(), + revocation_key: initial_local_commitment_tx.local_keys.revocation_key, + a_htlc_key: initial_local_commitment_tx.local_keys.a_htlc_key, + b_htlc_key: initial_local_commitment_tx.local_keys.b_htlc_key, + delayed_payment_key: initial_local_commitment_tx.local_keys.a_delayed_payment_key, + per_commitment_point: initial_local_commitment_tx.local_keys.per_commitment_point, + feerate_per_kw: initial_local_commitment_tx.feerate_per_kw, + htlc_outputs: Vec::new(), // There are never any HTLCs in the initial commitment transactions + }; + // Returning a monitor error before updating tracking points means in case of using + // a concurrent watchtower implementation for the same channel, if this one doesn't + // reject the update as we do, you MAY have the latest local valid commitment tx onchain + // for which you want to spend outputs. We're NOT robust against this scenario right + // now but we should consider it later. + onchain_tx_handler.provide_latest_local_tx(initial_local_commitment_tx).unwrap(); + + ChannelMonitor { + latest_update_id: 0, + commitment_transaction_number_obscure_factor, + + destination_script: destination_script.clone(), + broadcasted_local_revokable_script: None, + remote_payment_script, + shutdown_script, + + keys, + funding_info, + current_remote_commitment_txid: None, + prev_remote_commitment_txid: None, + + remote_tx_cache, + funding_redeemscript, + channel_value_satoshis: channel_value_satoshis, + their_cur_revocation_points: None, + + on_local_tx_csv, + + commitment_secrets: CounterpartyCommitmentSecrets::new(), + remote_claimable_outpoints: HashMap::new(), + remote_commitment_txn_on_chain: HashMap::new(), + remote_hash_commitment_number: HashMap::new(), + + prev_local_signed_commitment_tx: None, + current_local_commitment_tx: local_commitment_tx, + current_remote_commitment_number: 1 << 48, + current_local_commitment_number: 0xffff_ffff_ffff - ((((local_tx_sequence & 0xffffff) << 3*8) | (local_tx_locktime as u64 & 0xffffff)) ^ commitment_transaction_number_obscure_factor), + + payment_preimages: HashMap::new(), + pending_htlcs_updated: Vec::new(), + pending_events: Vec::new(), + + onchain_events_waiting_threshold_conf: HashMap::new(), + outputs_to_watch: HashMap::new(), + + onchain_tx_handler, + + lockdown_from_offchain: false, + local_tx_signed: false, + + last_block_hash: Default::default(), + secp_ctx: Secp256k1::new(), + } + } + + /// Inserts a revocation secret into this channel monitor. Prunes old preimages if they are needed by + /// neither the local commitment transaction's HTLCs nor the remote ones.
Once we have seen the remote + /// commitment transaction's secret, they are de facto pruned (we can use the revocation key). + fn provide_secret(&mut self, idx: u64, secret: [u8; 32]) -> Result<(), MonitorUpdateError> { + if let Err(()) = self.commitment_secrets.provide_secret(idx, secret) { + return Err(MonitorUpdateError("Previous secret did not match new one")); + } + + // Prune HTLCs from the previous remote commitment tx so we don't generate failure/fulfill + // events for now-revoked/fulfilled HTLCs. + if let Some(txid) = self.prev_remote_commitment_txid.take() { + for &mut (_, ref mut source) in self.remote_claimable_outpoints.get_mut(&txid).unwrap() { + *source = None; + } + } + + if !self.payment_preimages.is_empty() { + let cur_local_signed_commitment_tx = &self.current_local_commitment_tx; + let prev_local_signed_commitment_tx = self.prev_local_signed_commitment_tx.as_ref(); + let min_idx = self.get_min_seen_secret(); + let remote_hash_commitment_number = &mut self.remote_hash_commitment_number; + + self.payment_preimages.retain(|&k, _| { + for &(ref htlc, _, _) in cur_local_signed_commitment_tx.htlc_outputs.iter() { + if k == htlc.payment_hash { + return true + } + } + if let Some(prev_local_commitment_tx) = prev_local_signed_commitment_tx { + for &(ref htlc, _, _) in prev_local_commitment_tx.htlc_outputs.iter() { + if k == htlc.payment_hash { + return true + } + } + } + let contains = if let Some(cn) = remote_hash_commitment_number.get(&k) { + if *cn < min_idx { + return true + } + true + } else { false }; + if contains { + remote_hash_commitment_number.remove(&k); + } + false + }); + } + + Ok(()) + } + + /// Informs this monitor of the latest remote (ie non-broadcastable) commitment transaction. + /// The monitor watches for it to be broadcasted and then uses the HTLC information (and + /// possibly future revocation/preimage information) to claim outputs where possible. + /// We also cache the hash:commitment-number mapping to lighten the pruning of old preimages by watchtowers. + pub(crate) fn provide_latest_remote_commitment_tx_info(&mut self, unsigned_commitment_tx: &Transaction, htlc_outputs: Vec<(HTLCOutputInCommitment, Option>)>, commitment_number: u64, their_revocation_point: PublicKey, logger: &L) where L::Target: Logger { + // TODO: Encrypt the htlc_outputs data with the single-hash of the commitment transaction + // so that a remote monitor doesn't learn anything unless there is a malicious close.
+    /// Informs this monitor of the latest remote (ie non-broadcastable) commitment transaction.
+    /// The monitor watches for it to be broadcasted and then uses the HTLC information (and
+    /// possibly future revocation/preimage information) to claim outputs where possible.
+    /// We also cache the mapping from payment hash to commitment number to ease pruning of old
+    /// preimages by watchtowers.
+    pub(crate) fn provide_latest_remote_commitment_tx_info<L: Deref>(&mut self, unsigned_commitment_tx: &Transaction, htlc_outputs: Vec<(HTLCOutputInCommitment, Option<Box<HTLCSource>>)>, commitment_number: u64, their_revocation_point: PublicKey, logger: &L) where L::Target: Logger {
+        // TODO: Encrypt the htlc_outputs data with the single-hash of the commitment transaction
+        // so that a remote monitor doesn't learn anything unless there is a malicious close.
+        // (only maybe, sadly we can't do the same for local info, as we need to be aware of
+        // timeouts)
+        for &(ref htlc, _) in &htlc_outputs {
+            self.remote_hash_commitment_number.insert(htlc.payment_hash, commitment_number);
+        }
+
+        let new_txid = unsigned_commitment_tx.txid();
+        log_trace!(logger, "Tracking new remote commitment transaction with txid {} at commitment number {} with {} HTLC outputs", new_txid, commitment_number, htlc_outputs.len());
+        log_trace!(logger, "New potential remote commitment transaction: {}", encode::serialize_hex(unsigned_commitment_tx));
+        self.prev_remote_commitment_txid = self.current_remote_commitment_txid.take();
+        self.current_remote_commitment_txid = Some(new_txid);
+        self.remote_claimable_outpoints.insert(new_txid, htlc_outputs.clone());
+        self.current_remote_commitment_number = commitment_number;
+        //TODO: Merge this into the other per-remote-transaction output storage stuff
+        match self.their_cur_revocation_points {
+            Some(old_points) => {
+                if old_points.0 == commitment_number + 1 {
+                    self.their_cur_revocation_points = Some((old_points.0, old_points.1, Some(their_revocation_point)));
+                } else if old_points.0 == commitment_number + 2 {
+                    if let Some(old_second_point) = old_points.2 {
+                        self.their_cur_revocation_points = Some((old_points.0 - 1, old_second_point, Some(their_revocation_point)));
+                    } else {
+                        self.their_cur_revocation_points = Some((commitment_number, their_revocation_point, None));
+                    }
+                } else {
+                    self.their_cur_revocation_points = Some((commitment_number, their_revocation_point, None));
+                }
+            },
+            None => {
+                self.their_cur_revocation_points = Some((commitment_number, their_revocation_point, None));
+            }
+        }
+        let mut htlcs = Vec::with_capacity(htlc_outputs.len());
+        for htlc in htlc_outputs {
+            if htlc.0.transaction_output_index.is_some() {
+                htlcs.push(htlc.0);
+            }
+        }
+        self.remote_tx_cache.per_htlc.insert(new_txid, htlcs);
+    }
+
+    /// Informs this monitor of the latest local (ie broadcastable) commitment transaction. The
+    /// monitor watches for timeouts and may broadcast it if we approach such a timeout. Thus, it
+    /// is important that any clones of this channel monitor (including remote clones) be kept
+    /// up-to-date as our local commitment transaction is updated.
+    /// Panics if set_on_local_tx_csv has never been called.
+    fn provide_latest_local_commitment_tx_info(&mut self, commitment_tx: LocalCommitmentTransaction, htlc_outputs: Vec<(HTLCOutputInCommitment, Option<Signature>, Option<HTLCSource>)>) -> Result<(), MonitorUpdateError> {
+        if self.local_tx_signed {
+            return Err(MonitorUpdateError("A local commitment tx has already been signed, no new local commitment txn can be sent to our counterparty"));
+        }
+        let txid = commitment_tx.txid();
+        let sequence = commitment_tx.unsigned_tx.input[0].sequence as u64;
+        let locktime = commitment_tx.unsigned_tx.lock_time as u64;
+        let mut new_local_commitment_tx = LocalSignedTx {
+            txid,
+            revocation_key: commitment_tx.local_keys.revocation_key,
+            a_htlc_key: commitment_tx.local_keys.a_htlc_key,
+            b_htlc_key: commitment_tx.local_keys.b_htlc_key,
+            delayed_payment_key: commitment_tx.local_keys.a_delayed_payment_key,
+            per_commitment_point: commitment_tx.local_keys.per_commitment_point,
+            feerate_per_kw: commitment_tx.feerate_per_kw,
+            htlc_outputs: htlc_outputs,
+        };
+        // Returning a monitor error before updating tracking points means in case of using
+        // a concurrent watchtower implementation for the same channel, if this one doesn't
+        // reject the update as we do, you MAY have the latest local valid commitment tx onchain
+        // for which you want to spend outputs. We're NOT robust against this scenario right
+        // now but we should consider it later.
+        if let Err(_) = self.onchain_tx_handler.provide_latest_local_tx(commitment_tx) {
+            return Err(MonitorUpdateError("The local commitment tx has already been signed, no further update of the LOCAL commitment transaction is allowed"));
+        }
+        self.current_local_commitment_number = 0xffff_ffff_ffff - ((((sequence & 0xffffff) << 3*8) | (locktime as u64 & 0xffffff)) ^ self.commitment_transaction_number_obscure_factor);
+        mem::swap(&mut new_local_commitment_tx, &mut self.current_local_commitment_tx);
+        self.prev_local_signed_commitment_tx = Some(new_local_commitment_tx);
+        Ok(())
+    }
+
+    /// Provides a payment_hash->payment_preimage mapping. Will be automatically pruned when all
+    /// commitment_tx_infos which contain the payment hash have been revoked.
+    pub(crate) fn provide_payment_preimage(&mut self, payment_hash: &PaymentHash, payment_preimage: &PaymentPreimage) {
+        self.payment_preimages.insert(payment_hash.clone(), payment_preimage.clone());
+    }
+
+    pub(crate) fn broadcast_latest_local_commitment_txn<B: Deref, L: Deref>(&mut self, broadcaster: &B, logger: &L)
+        where B::Target: BroadcasterInterface,
+              L::Target: Logger,
+    {
+        for tx in self.get_latest_local_commitment_txn(logger).iter() {
+            broadcaster.broadcast_transaction(tx);
+        }
+    }
+
+    /// Used in Channel to cheat wrt the update_ids since it plays games, will be removed soon!
+    pub(crate) fn update_monitor_ooo<L: Deref>(&mut self, mut updates: ChannelMonitorUpdate, logger: &L) -> Result<(), MonitorUpdateError> where L::Target: Logger {
+        for update in updates.updates.drain(..) {
+            match update {
+                ChannelMonitorUpdateStep::LatestLocalCommitmentTXInfo { commitment_tx, htlc_outputs } => {
+                    if self.lockdown_from_offchain { panic!(); }
+                    self.provide_latest_local_commitment_tx_info(commitment_tx, htlc_outputs)?
+                },
+                ChannelMonitorUpdateStep::LatestRemoteCommitmentTXInfo { unsigned_commitment_tx, htlc_outputs, commitment_number, their_revocation_point } =>
+                    self.provide_latest_remote_commitment_tx_info(&unsigned_commitment_tx, htlc_outputs, commitment_number, their_revocation_point, logger),
+                ChannelMonitorUpdateStep::PaymentPreimage { payment_preimage } =>
+                    self.provide_payment_preimage(&PaymentHash(Sha256::hash(&payment_preimage.0[..]).into_inner()), &payment_preimage),
+                ChannelMonitorUpdateStep::CommitmentSecret { idx, secret } =>
+                    self.provide_secret(idx, secret)?,
+                ChannelMonitorUpdateStep::ChannelForceClosed { .. } => {},
+            }
+        }
+        self.latest_update_id = updates.update_id;
+        Ok(())
+    }
+
+    /// Updates a ChannelMonitor on the basis of some new information provided by the Channel
+    /// itself.
+    ///
+    /// Panics if the given update is not the next update by update_id.
+    pub fn update_monitor<B: Deref, L: Deref>(&mut self, mut updates: ChannelMonitorUpdate, broadcaster: &B, logger: &L) -> Result<(), MonitorUpdateError>
+        where B::Target: BroadcasterInterface,
+              L::Target: Logger,
+    {
+        if self.latest_update_id + 1 != updates.update_id {
+            panic!("Attempted to apply ChannelMonitorUpdates out of order, check the update_id before passing an update to update_monitor!");
+        }
+        for update in updates.updates.drain(..) {
+            match update {
+                ChannelMonitorUpdateStep::LatestLocalCommitmentTXInfo { commitment_tx, htlc_outputs } => {
+                    if self.lockdown_from_offchain { panic!(); }
+                    self.provide_latest_local_commitment_tx_info(commitment_tx, htlc_outputs)?
+                },
+                ChannelMonitorUpdateStep::LatestRemoteCommitmentTXInfo { unsigned_commitment_tx, htlc_outputs, commitment_number, their_revocation_point } =>
+                    self.provide_latest_remote_commitment_tx_info(&unsigned_commitment_tx, htlc_outputs, commitment_number, their_revocation_point, logger),
+                ChannelMonitorUpdateStep::PaymentPreimage { payment_preimage } =>
+                    self.provide_payment_preimage(&PaymentHash(Sha256::hash(&payment_preimage.0[..]).into_inner()), &payment_preimage),
+                ChannelMonitorUpdateStep::CommitmentSecret { idx, secret } =>
+                    self.provide_secret(idx, secret)?,
+                ChannelMonitorUpdateStep::ChannelForceClosed { should_broadcast } => {
+                    self.lockdown_from_offchain = true;
+                    if should_broadcast {
+                        self.broadcast_latest_local_commitment_txn(broadcaster, logger);
+                    } else {
+                        log_error!(logger, "You have a toxic local commitment transaction available in channel monitor, read the comment in ChannelMonitor::get_latest_local_commitment_txn to be informed of the manual action to take");
+                    }
+                }
+            }
+        }
+        self.latest_update_id = updates.update_id;
+        Ok(())
+    }
+
+    /// Gets the update_id from the latest ChannelMonitorUpdate which was applied to this
+    /// ChannelMonitor.
+    pub fn get_latest_update_id(&self) -> u64 {
+        self.latest_update_id
+    }
+
+    /// Gets the funding transaction outpoint of the channel this ChannelMonitor is monitoring for.
+    pub fn get_funding_txo(&self) -> &(OutPoint, Script) {
+        &self.funding_info
+    }
+
+    /// Gets a list of txids, with their output scripts (in the order they appear in the
+    /// transaction), which we must learn about spends of via block_connected().
+    pub fn get_outputs_to_watch(&self) -> &HashMap<Txid, Vec<Script>> {
+        &self.outputs_to_watch
+    }
+
+    /// Gets the set of all outpoints which this ChannelMonitor expects to hear about spends of.
+    /// Generally useful when deserializing, as during normal operation the return values of
+    /// block_connected are sufficient to ensure all relevant outpoints are being monitored (note
+    /// that the get_funding_txo outpoint and transaction must also be monitored for!).
+    pub fn get_monitored_outpoints(&self) -> Vec<(Txid, u32, &Script)> {
+        let mut res = Vec::with_capacity(self.remote_commitment_txn_on_chain.len() * 2);
+        for (ref txid, &(_, ref outputs)) in self.remote_commitment_txn_on_chain.iter() {
+            for (idx, output) in outputs.iter().enumerate() {
+                res.push(((*txid).clone(), idx as u32, output));
+            }
+        }
+        res
+    }
+
+    /// Gets the list of HTLCs whose status has been updated on chain. This should be called by
+    /// ChannelManager via [`chain::Watch::release_pending_htlc_updates`].
+    ///
+    /// [`chain::Watch::release_pending_htlc_updates`]: ../../chain/trait.Watch.html#tymethod.release_pending_htlc_updates
+    pub fn get_and_clear_pending_htlcs_updated(&mut self) -> Vec<HTLCUpdate> {
+        let mut ret = Vec::new();
+        mem::swap(&mut ret, &mut self.pending_htlcs_updated);
+        ret
+    }
+
+    /// Gets the list of pending events which were generated by previous actions, clearing the list
+    /// in the process.
+    ///
+    /// This is called by ChainMonitor::get_and_clear_pending_events() and is equivalent to
+    /// EventsProvider::get_and_clear_pending_events() except that it requires &mut self as we do
+    /// no internal locking in ChannelMonitors.
+    pub fn get_and_clear_pending_events(&mut self) -> Vec<events::Event> {
+        let mut ret = Vec::new();
+        mem::swap(&mut ret, &mut self.pending_events);
+        ret
+    }
+
+    /// Can only fail if idx is < get_min_seen_secret
+    fn get_secret(&self, idx: u64) -> Option<[u8; 32]> {
+        self.commitment_secrets.get_secret(idx)
+    }
+
+    pub(crate) fn get_min_seen_secret(&self) -> u64 {
+        self.commitment_secrets.get_min_seen_secret()
+    }
+
+    pub(crate) fn get_cur_remote_commitment_number(&self) -> u64 {
+        self.current_remote_commitment_number
+    }
+
+    pub(crate) fn get_cur_local_commitment_number(&self) -> u64 {
+        self.current_local_commitment_number
+    }
+
+    /// Attempts to claim a remote commitment transaction's outputs using the revocation key and
+    /// data in remote_claimable_outpoints. Will directly claim any HTLC outputs which expire at a
+    /// height > height + CLTV_SHARED_CLAIM_BUFFER. In any case, will install monitoring for
+    /// HTLC-Success/HTLC-Timeout transactions.
+    /// Returns updates for HTLCs pending in the channel which were automatically failed by the
+    /// broadcast of the revoked remote commitment tx.
+    fn check_spend_remote_transaction<L: Deref>(&mut self, tx: &Transaction, height: u32, logger: &L) -> (Vec<ClaimRequest>, (Txid, Vec<TxOut>)) where L::Target: Logger {
+        // Most secp and related errors trying to create keys means we have no hope of constructing
+        // a spend transaction...so we return no transactions to broadcast
+        let mut claimable_outpoints = Vec::new();
+        let mut watch_outputs = Vec::new();
+
+        let commitment_txid = tx.txid(); //TODO: This is gonna be a performance bottleneck for watchtowers!
+        let per_commitment_option = self.remote_claimable_outpoints.get(&commitment_txid);
+
+        macro_rules! ignore_error {
ignore_error { + ( $thing : expr ) => { + match $thing { + Ok(a) => a, + Err(_) => return (claimable_outpoints, (commitment_txid, watch_outputs)) + } + }; + } + + let commitment_number = 0xffffffffffff - ((((tx.input[0].sequence as u64 & 0xffffff) << 3*8) | (tx.lock_time as u64 & 0xffffff)) ^ self.commitment_transaction_number_obscure_factor); + if commitment_number >= self.get_min_seen_secret() { + let secret = self.get_secret(commitment_number).unwrap(); + let per_commitment_key = ignore_error!(SecretKey::from_slice(&secret)); + let per_commitment_point = PublicKey::from_secret_key(&self.secp_ctx, &per_commitment_key); + let revocation_pubkey = ignore_error!(chan_utils::derive_public_revocation_key(&self.secp_ctx, &per_commitment_point, &self.keys.pubkeys().revocation_basepoint)); + let delayed_key = ignore_error!(chan_utils::derive_public_key(&self.secp_ctx, &PublicKey::from_secret_key(&self.secp_ctx, &per_commitment_key), &self.remote_tx_cache.remote_delayed_payment_base_key)); + + let revokeable_redeemscript = chan_utils::get_revokeable_redeemscript(&revocation_pubkey, self.remote_tx_cache.on_remote_tx_csv, &delayed_key); + let revokeable_p2wsh = revokeable_redeemscript.to_v0_p2wsh(); + + // First, process non-htlc outputs (to_local & to_remote) + for (idx, outp) in tx.output.iter().enumerate() { + if outp.script_pubkey == revokeable_p2wsh { + let witness_data = InputMaterial::Revoked { per_commitment_point, remote_delayed_payment_base_key: self.remote_tx_cache.remote_delayed_payment_base_key, remote_htlc_base_key: self.remote_tx_cache.remote_htlc_base_key, per_commitment_key, input_descriptor: InputDescriptors::RevokedOutput, amount: outp.value, htlc: None, on_remote_tx_csv: self.remote_tx_cache.on_remote_tx_csv}; + claimable_outpoints.push(ClaimRequest { absolute_timelock: height + self.remote_tx_cache.on_remote_tx_csv as u32, aggregable: true, outpoint: BitcoinOutPoint { txid: commitment_txid, vout: idx as u32 }, witness_data}); + } + } + + // Then, try to find revoked htlc outputs + if let Some(ref per_commitment_data) = per_commitment_option { + for (_, &(ref htlc, _)) in per_commitment_data.iter().enumerate() { + if let Some(transaction_output_index) = htlc.transaction_output_index { + if transaction_output_index as usize >= tx.output.len() || + tx.output[transaction_output_index as usize].value != htlc.amount_msat / 1000 { + return (claimable_outpoints, (commitment_txid, watch_outputs)); // Corrupted per_commitment_data, fuck this user + } + let witness_data = InputMaterial::Revoked { per_commitment_point, remote_delayed_payment_base_key: self.remote_tx_cache.remote_delayed_payment_base_key, remote_htlc_base_key: self.remote_tx_cache.remote_htlc_base_key, per_commitment_key, input_descriptor: if htlc.offered { InputDescriptors::RevokedOfferedHTLC } else { InputDescriptors::RevokedReceivedHTLC }, amount: tx.output[transaction_output_index as usize].value, htlc: Some(htlc.clone()), on_remote_tx_csv: self.remote_tx_cache.on_remote_tx_csv}; + claimable_outpoints.push(ClaimRequest { absolute_timelock: htlc.cltv_expiry, aggregable: true, outpoint: BitcoinOutPoint { txid: commitment_txid, vout: transaction_output_index }, witness_data }); + } + } + } + + // Last, track onchain revoked commitment transaction and fail backward outgoing HTLCs as payment path is broken + if !claimable_outpoints.is_empty() || per_commitment_option.is_some() { // ie we're confident this is actually ours + // We're definitely a remote commitment transaction! 
+ log_trace!(logger, "Got broadcast of revoked remote commitment transaction, going to generate general spend tx with {} inputs", claimable_outpoints.len()); + watch_outputs.append(&mut tx.output.clone()); + self.remote_commitment_txn_on_chain.insert(commitment_txid, (commitment_number, tx.output.iter().map(|output| { output.script_pubkey.clone() }).collect())); + + macro_rules! check_htlc_fails { + ($txid: expr, $commitment_tx: expr) => { + if let Some(ref outpoints) = self.remote_claimable_outpoints.get($txid) { + for &(ref htlc, ref source_option) in outpoints.iter() { + if let &Some(ref source) = source_option { + log_info!(logger, "Failing HTLC with payment_hash {} from {} remote commitment tx due to broadcast of revoked remote commitment transaction, waiting for confirmation (at height {})", log_bytes!(htlc.payment_hash.0), $commitment_tx, height + ANTI_REORG_DELAY - 1); + match self.onchain_events_waiting_threshold_conf.entry(height + ANTI_REORG_DELAY - 1) { + hash_map::Entry::Occupied(mut entry) => { + let e = entry.get_mut(); + e.retain(|ref event| { + match **event { + OnchainEvent::HTLCUpdate { ref htlc_update } => { + return htlc_update.0 != **source + }, + _ => true + } + }); + e.push(OnchainEvent::HTLCUpdate { htlc_update: ((**source).clone(), htlc.payment_hash.clone())}); + } + hash_map::Entry::Vacant(entry) => { + entry.insert(vec![OnchainEvent::HTLCUpdate { htlc_update: ((**source).clone(), htlc.payment_hash.clone())}]); + } + } + } + } + } + } + } + if let Some(ref txid) = self.current_remote_commitment_txid { + check_htlc_fails!(txid, "current"); + } + if let Some(ref txid) = self.prev_remote_commitment_txid { + check_htlc_fails!(txid, "remote"); + } + // No need to check local commitment txn, symmetric HTLCSource must be present as per-htlc data on remote commitment tx + } + } else if let Some(per_commitment_data) = per_commitment_option { + // While this isn't useful yet, there is a potential race where if a counterparty + // revokes a state at the same time as the commitment transaction for that state is + // confirmed, and the watchtower receives the block before the user, the user could + // upload a new ChannelMonitor with the revocation secret but the watchtower has + // already processed the block, resulting in the remote_commitment_txn_on_chain entry + // not being generated by the above conditional. Thus, to be safe, we go ahead and + // insert it here. + watch_outputs.append(&mut tx.output.clone()); + self.remote_commitment_txn_on_chain.insert(commitment_txid, (commitment_number, tx.output.iter().map(|output| { output.script_pubkey.clone() }).collect())); + + log_trace!(logger, "Got broadcast of non-revoked remote commitment transaction {}", commitment_txid); + + macro_rules! check_htlc_fails { + ($txid: expr, $commitment_tx: expr, $id: tt) => { + if let Some(ref latest_outpoints) = self.remote_claimable_outpoints.get($txid) { + $id: for &(ref htlc, ref source_option) in latest_outpoints.iter() { + if let &Some(ref source) = source_option { + // Check if the HTLC is present in the commitment transaction that was + // broadcast, but not if it was below the dust limit, which we should + // fail backwards immediately as there is no way for us to learn the + // payment_preimage. + // Note that if the dust limit were allowed to change between + // commitment transactions we'd want to be check whether *any* + // broadcastable commitment transaction has the HTLC in it, but it + // cannot currently change after channel initialization, so we don't + // need to here. 
+                                for &(ref broadcast_htlc, ref broadcast_source) in per_commitment_data.iter() {
+                                    if broadcast_htlc.transaction_output_index.is_some() && Some(source) == broadcast_source.as_ref() {
+                                        continue $id;
+                                    }
+                                }
+                                log_trace!(logger, "Failing HTLC with payment_hash {} from {} remote commitment tx due to broadcast of remote commitment transaction", log_bytes!(htlc.payment_hash.0), $commitment_tx);
+                                match self.onchain_events_waiting_threshold_conf.entry(height + ANTI_REORG_DELAY - 1) {
+                                    hash_map::Entry::Occupied(mut entry) => {
+                                        let e = entry.get_mut();
+                                        e.retain(|ref event| {
+                                            match **event {
+                                                OnchainEvent::HTLCUpdate { ref htlc_update } => {
+                                                    return htlc_update.0 != **source
+                                                },
+                                                _ => true
+                                            }
+                                        });
+                                        e.push(OnchainEvent::HTLCUpdate { htlc_update: ((**source).clone(), htlc.payment_hash.clone())});
+                                    }
+                                    hash_map::Entry::Vacant(entry) => {
+                                        entry.insert(vec![OnchainEvent::HTLCUpdate { htlc_update: ((**source).clone(), htlc.payment_hash.clone())}]);
+                                    }
+                                }
+                            }
+                        }
+                    }
+                }
+            }
+            if let Some(ref txid) = self.current_remote_commitment_txid {
+                check_htlc_fails!(txid, "current", 'current_loop);
+            }
+            if let Some(ref txid) = self.prev_remote_commitment_txid {
+                check_htlc_fails!(txid, "previous", 'prev_loop);
+            }
+
+            if let Some(revocation_points) = self.their_cur_revocation_points {
+                let revocation_point_option =
+                    if revocation_points.0 == commitment_number { Some(&revocation_points.1) }
+                    else if let Some(point) = revocation_points.2.as_ref() {
+                        if revocation_points.0 == commitment_number + 1 { Some(point) } else { None }
+                    } else { None };
+                if let Some(revocation_point) = revocation_point_option {
+                    self.remote_payment_script = {
+                        // Note that the Network here is ignored as we immediately drop the address for the
+                        // script_pubkey version
+                        let payment_hash160 = WPubkeyHash::hash(&self.keys.pubkeys().payment_point.serialize());
+                        Builder::new().push_opcode(opcodes::all::OP_PUSHBYTES_0).push_slice(&payment_hash160[..]).into_script()
+                    };
+
+                    // Then, try to find htlc outputs
+                    for (_, &(ref htlc, _)) in per_commitment_data.iter().enumerate() {
+                        if let Some(transaction_output_index) = htlc.transaction_output_index {
+                            if transaction_output_index as usize >= tx.output.len() ||
+                                tx.output[transaction_output_index as usize].value != htlc.amount_msat / 1000 {
+                                return (claimable_outpoints, (commitment_txid, watch_outputs)); // Corrupted per_commitment_data, fuck this user
+                            }
+                            let preimage = if htlc.offered { if let Some(p) = self.payment_preimages.get(&htlc.payment_hash) { Some(*p) } else { None } } else { None };
+                            let aggregable = if !htlc.offered { false } else { true };
+                            if preimage.is_some() || !htlc.offered {
+                                let witness_data = InputMaterial::RemoteHTLC { per_commitment_point: *revocation_point, remote_delayed_payment_base_key: self.remote_tx_cache.remote_delayed_payment_base_key, remote_htlc_base_key: self.remote_tx_cache.remote_htlc_base_key, preimage, htlc: htlc.clone() };
+                                claimable_outpoints.push(ClaimRequest { absolute_timelock: htlc.cltv_expiry, aggregable, outpoint: BitcoinOutPoint { txid: commitment_txid, vout: transaction_output_index }, witness_data });
+                            }
+                        }
+                    }
+                }
+            }
+        }
+        (claimable_outpoints, (commitment_txid, watch_outputs))
+    }
+
+    /// Attempts to claim a remote HTLC-Success/HTLC-Timeout's outputs using the revocation key
+    fn check_spend_remote_htlc<L: Deref>(&mut self, tx: &Transaction, commitment_number: u64, height: u32, logger: &L) -> (Vec<ClaimRequest>, Option<(Txid, Vec<TxOut>)>) where L::Target: Logger {
+        let htlc_txid = tx.txid();
+        if tx.input.len() != 1 || tx.output.len() != 1 || tx.input[0].witness.len() != 5 {
+            return (Vec::new(), None)
+        }
+
+        macro_rules! ignore_error {
+            ( $thing : expr ) => {
+                match $thing {
+                    Ok(a) => a,
+                    Err(_) => return (Vec::new(), None)
+                }
+            };
+        }
+
+        let secret = if let Some(secret) = self.get_secret(commitment_number) { secret } else { return (Vec::new(), None); };
+        let per_commitment_key = ignore_error!(SecretKey::from_slice(&secret));
+        let per_commitment_point = PublicKey::from_secret_key(&self.secp_ctx, &per_commitment_key);
+
+        log_trace!(logger, "Remote HTLC broadcast {}:{}", htlc_txid, 0);
+        let witness_data = InputMaterial::Revoked { per_commitment_point, remote_delayed_payment_base_key: self.remote_tx_cache.remote_delayed_payment_base_key, remote_htlc_base_key: self.remote_tx_cache.remote_htlc_base_key, per_commitment_key, input_descriptor: InputDescriptors::RevokedOutput, amount: tx.output[0].value, htlc: None, on_remote_tx_csv: self.remote_tx_cache.on_remote_tx_csv };
+        let claimable_outpoints = vec!(ClaimRequest { absolute_timelock: height + self.remote_tx_cache.on_remote_tx_csv as u32, aggregable: true, outpoint: BitcoinOutPoint { txid: htlc_txid, vout: 0}, witness_data });
+        (claimable_outpoints, Some((htlc_txid, tx.output.clone())))
+    }
+
+    fn broadcast_by_local_state(&self, commitment_tx: &Transaction, local_tx: &LocalSignedTx) -> (Vec<ClaimRequest>, Vec<TxOut>, Option<(Script, PublicKey, PublicKey)>) {
+        let mut claim_requests = Vec::with_capacity(local_tx.htlc_outputs.len());
+        let mut watch_outputs = Vec::with_capacity(local_tx.htlc_outputs.len());
+
+        let redeemscript = chan_utils::get_revokeable_redeemscript(&local_tx.revocation_key, self.on_local_tx_csv, &local_tx.delayed_payment_key);
+        let broadcasted_local_revokable_script = Some((redeemscript.to_v0_p2wsh(), local_tx.per_commitment_point.clone(), local_tx.revocation_key.clone()));
+
+        for &(ref htlc, _, _) in local_tx.htlc_outputs.iter() {
+            if let Some(transaction_output_index) = htlc.transaction_output_index {
+                claim_requests.push(ClaimRequest { absolute_timelock: ::std::u32::MAX, aggregable: false, outpoint: BitcoinOutPoint { txid: local_tx.txid, vout: transaction_output_index as u32 },
+                    witness_data: InputMaterial::LocalHTLC {
+                        preimage: if !htlc.offered {
+                            if let Some(preimage) = self.payment_preimages.get(&htlc.payment_hash) {
+                                Some(preimage.clone())
+                            } else {
+                                // We can't build an HTLC-Success transaction without the preimage
+                                continue;
+                            }
+                        } else { None },
+                        amount: htlc.amount_msat,
+                }});
+                watch_outputs.push(commitment_tx.output[transaction_output_index as usize].clone());
+            }
+        }
+
+        (claim_requests, watch_outputs, broadcasted_local_revokable_script)
+    }
+
+    /// Attempts to claim any claimable HTLCs in a commitment transaction which was not (yet)
+    /// revoked using data in local_claimable_outpoints.
+    /// Should not be used if check_spend_revoked_transaction succeeds.
+    fn check_spend_local_transaction<L: Deref>(&mut self, tx: &Transaction, height: u32, logger: &L) -> (Vec<ClaimRequest>, (Txid, Vec<TxOut>)) where L::Target: Logger {
+        let commitment_txid = tx.txid();
+        let mut claim_requests = Vec::new();
+        let mut watch_outputs = Vec::new();
+
+        macro_rules! wait_threshold_conf {
+            ($height: expr, $source: expr, $commitment_tx: expr, $payment_hash: expr) => {
+                log_trace!(logger, "Failing HTLC with payment_hash {} from {} local commitment tx due to broadcast of transaction, waiting confirmation (at height {})", log_bytes!($payment_hash.0), $commitment_tx, height + ANTI_REORG_DELAY - 1);
+                match self.onchain_events_waiting_threshold_conf.entry($height + ANTI_REORG_DELAY - 1) {
+                    hash_map::Entry::Occupied(mut entry) => {
+                        let e = entry.get_mut();
+                        e.retain(|ref event| {
+                            match **event {
+                                OnchainEvent::HTLCUpdate { ref htlc_update } => {
+                                    return htlc_update.0 != $source
+                                },
+                                _ => true
+                            }
+                        });
+                        e.push(OnchainEvent::HTLCUpdate { htlc_update: ($source, $payment_hash)});
+                    }
+                    hash_map::Entry::Vacant(entry) => {
+                        entry.insert(vec![OnchainEvent::HTLCUpdate { htlc_update: ($source, $payment_hash)}]);
+                    }
+                }
+            }
+        }
+
+        macro_rules! append_onchain_update {
+            ($updates: expr) => {
+                claim_requests = $updates.0;
+                watch_outputs.append(&mut $updates.1);
+                self.broadcasted_local_revokable_script = $updates.2;
+            }
+        }
+
+        // The HTLC sets may differ between the latest and the previous local commitment txn; in case
+        // one of them hits the chain, ensure we cancel all HTLCs backwards
+        let mut is_local_tx = false;
+
+        if self.current_local_commitment_tx.txid == commitment_txid {
+            is_local_tx = true;
+            log_trace!(logger, "Got latest local commitment tx broadcast, searching for available HTLCs to claim");
+            let mut res = self.broadcast_by_local_state(tx, &self.current_local_commitment_tx);
+            append_onchain_update!(res);
+        } else if let &Some(ref local_tx) = &self.prev_local_signed_commitment_tx {
+            if local_tx.txid == commitment_txid {
+                is_local_tx = true;
+                log_trace!(logger, "Got previous local commitment tx broadcast, searching for available HTLCs to claim");
+                let mut res = self.broadcast_by_local_state(tx, local_tx);
+                append_onchain_update!(res);
+            }
+        }
+
+        macro_rules! fail_dust_htlcs_after_threshold_conf {
+            ($local_tx: expr) => {
+                for &(ref htlc, _, ref source) in &$local_tx.htlc_outputs {
+                    if htlc.transaction_output_index.is_none() {
+                        if let &Some(ref source) = source {
+                            wait_threshold_conf!(height, source.clone(), "latest", htlc.payment_hash.clone());
+                        }
+                    }
+                }
+            }
+        }
+
+        if is_local_tx {
+            fail_dust_htlcs_after_threshold_conf!(self.current_local_commitment_tx);
+            if let &Some(ref local_tx) = &self.prev_local_signed_commitment_tx {
+                fail_dust_htlcs_after_threshold_conf!(local_tx);
+            }
+        }
+
+        (claim_requests, (commitment_txid, watch_outputs))
+    }
+
+    /// Used by ChannelManager deserialization to broadcast the latest local state if its copy of
+    /// the Channel was out-of-date. You may use it to get a broadcastable local toxic tx in case of
+    /// fallen-behind, i.e. when receiving a channel_reestablish with a proof that our remote side knows
+    /// a higher revocation secret than the local commitment number we are aware of. Broadcasting these
+    /// transactions is UNSAFE, as they allow the remote side to punish you. Nevertheless you may want to
+    /// broadcast them if the remote side doesn't close the channel with its higher commitment transaction
+    /// after a substantial amount of time (a month or even a year) in order to get your funds back.
+    /// It may be best to contact the other node operator out-of-band to coordinate, if that option is
+    /// available to you. In any case, the choice is up to the user.
+    pub fn get_latest_local_commitment_txn<L: Deref>(&mut self, logger: &L) -> Vec<Transaction> where L::Target: Logger {
+        log_trace!(logger, "Getting signed latest local commitment transaction!");
+        self.local_tx_signed = true;
+        if let Some(commitment_tx) = self.onchain_tx_handler.get_fully_signed_local_tx(&self.funding_redeemscript) {
+            let txid = commitment_tx.txid();
+            let mut res = vec![commitment_tx];
+            for htlc in self.current_local_commitment_tx.htlc_outputs.iter() {
+                if let Some(vout) = htlc.0.transaction_output_index {
+                    let preimage = if !htlc.0.offered {
+                        if let Some(preimage) = self.payment_preimages.get(&htlc.0.payment_hash) { Some(preimage.clone()) } else {
+                            // We can't build an HTLC-Success transaction without the preimage
+                            continue;
+                        }
+                    } else { None };
+                    if let Some(htlc_tx) = self.onchain_tx_handler.get_fully_signed_htlc_tx(
+                            &::bitcoin::OutPoint { txid, vout }, &preimage) {
+                        res.push(htlc_tx);
+                    }
+                }
+            }
+            // We throw away the generated waiting_first_conf data as we aren't (yet) confirmed and we don't actually know what the caller wants to do.
+            // The data will be re-generated and tracked in check_spend_local_transaction if we get a confirmation.
+            return res
+        }
+        Vec::new()
+    }
+
+    /// Unsafe test-only version of get_latest_local_commitment_txn used by our test framework
+    /// to bypass the LocalCommitmentTransaction state update lockdown after signature and generate
+    /// a revoked commitment transaction.
+    #[cfg(test)]
+    pub fn unsafe_get_latest_local_commitment_txn<L: Deref>(&mut self, logger: &L) -> Vec<Transaction> where L::Target: Logger {
+        log_trace!(logger, "Getting signed copy of latest local commitment transaction!");
+        if let Some(commitment_tx) = self.onchain_tx_handler.get_fully_signed_copy_local_tx(&self.funding_redeemscript) {
+            let txid = commitment_tx.txid();
+            let mut res = vec![commitment_tx];
+            for htlc in self.current_local_commitment_tx.htlc_outputs.iter() {
+                if let Some(vout) = htlc.0.transaction_output_index {
+                    let preimage = if !htlc.0.offered {
+                        if let Some(preimage) = self.payment_preimages.get(&htlc.0.payment_hash) { Some(preimage.clone()) } else {
+                            // We can't build an HTLC-Success transaction without the preimage
+                            continue;
+                        }
+                    } else { None };
+                    if let Some(htlc_tx) = self.onchain_tx_handler.unsafe_get_fully_signed_htlc_tx(
+                            &::bitcoin::OutPoint { txid, vout }, &preimage) {
+                        res.push(htlc_tx);
+                    }
+                }
+            }
+            return res
+        }
+        Vec::new()
+    }
+
+    /// Determines if any HTLCs have been resolved on chain in the connected block.
+    ///
+    /// TODO: Include how `broadcaster` and `fee_estimator` are used.
+    ///
+    /// Returns any transaction outputs from `txn_matched` whose spends should be watched for.
+    /// After this call, these are also available via [`get_outputs_to_watch`].
+    ///
+    /// [`get_outputs_to_watch`]: #method.get_outputs_to_watch
+    pub fn block_connected<B: Deref, F: Deref, L: Deref>(&mut self, header: &BlockHeader, txn_matched: &[(usize, &Transaction)], height: u32, broadcaster: B, fee_estimator: F, logger: L) -> Vec<(Txid, Vec<TxOut>)>
+        where B::Target: BroadcasterInterface,
+              F::Target: FeeEstimator,
+              L::Target: Logger,
+    {
+        for &(_, tx) in txn_matched {
+            let mut output_val = 0;
+            for out in tx.output.iter() {
+                if out.value > 21_000_000_0000_0000 { panic!("Value-overflowing transaction provided to block connected"); }
+                output_val += out.value;
+                if output_val > 21_000_000_0000_0000 { panic!("Value-overflowing transaction provided to block connected"); }
+            }
+        }
+
+        let block_hash = header.bitcoin_hash();
+        log_trace!(logger, "Block {} at height {} connected with {} txn matched", block_hash, height, txn_matched.len());
+
+        let mut watch_outputs = Vec::new();
+        let mut claimable_outpoints = Vec::new();
+        for &(_, tx) in txn_matched {
+            if tx.input.len() == 1 {
+                // Assuming our keys were not leaked (in which case we're screwed no matter what),
+                // commitment transactions and HTLC transactions will all only ever have one input,
+                // which is an easy way to filter out any potential non-matching txn for lazy
+                // filters.
+                let prevout = &tx.input[0].previous_output;
+                if prevout.txid == self.funding_info.0.txid && prevout.vout == self.funding_info.0.index as u32 {
+                    if (tx.input[0].sequence >> 8*3) as u8 == 0x80 && (tx.lock_time >> 8*3) as u8 == 0x20 {
+                        let (mut new_outpoints, new_outputs) = self.check_spend_remote_transaction(&tx, height, &logger);
+                        if !new_outputs.1.is_empty() {
+                            watch_outputs.push(new_outputs);
+                        }
+                        if new_outpoints.is_empty() {
+                            let (mut new_outpoints, new_outputs) = self.check_spend_local_transaction(&tx, height, &logger);
+                            if !new_outputs.1.is_empty() {
+                                watch_outputs.push(new_outputs);
+                            }
+                            claimable_outpoints.append(&mut new_outpoints);
+                        }
+                        claimable_outpoints.append(&mut new_outpoints);
+                    }
+                } else {
+                    if let Some(&(commitment_number, _)) = self.remote_commitment_txn_on_chain.get(&prevout.txid) {
+                        let (mut new_outpoints, new_outputs_option) = self.check_spend_remote_htlc(&tx, commitment_number, height, &logger);
+                        claimable_outpoints.append(&mut new_outpoints);
+                        if let Some(new_outputs) = new_outputs_option {
+                            watch_outputs.push(new_outputs);
+                        }
+                    }
+                }
+            }
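            // [Editorial sketch, not part of the original patch.] The byte tests above work
            // because BOLT 3 spreads the 48-bit obscured commitment number across
            // tx.input[0].sequence (upper 24 bits of the number, top byte forced to 0x80) and
            // tx.lock_time (lower 24 bits, top byte forced to 0x20). Hypothetical helpers making
            // the detection, and the recovery done in check_spend_remote_transaction, explicit:
            fn looks_like_commitment_tx_sketch(sequence: u32, lock_time: u32) -> bool {
                (sequence >> 8*3) as u8 == 0x80 && (lock_time >> 8*3) as u8 == 0x20
            }
            fn recover_commitment_number_sketch(sequence: u32, lock_time: u32, obscure_factor: u64) -> u64 {
                0xffff_ffff_ffff - ((((sequence as u64 & 0xffffff) << 3*8) | (lock_time as u64 & 0xffffff)) ^ obscure_factor)
            }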
+            // While all commitment/HTLC-Success/HTLC-Timeout transactions have one input, HTLCs
+            // can also be resolved in a few other ways which can have more than one output. Thus,
+            // we call is_resolving_htlc_output here outside of the tx.input.len() == 1 check.
+            self.is_resolving_htlc_output(&tx, height, &logger);
+
+            self.is_paying_spendable_output(&tx, height, &logger);
+        }
+        let should_broadcast = self.would_broadcast_at_height(height, &logger);
+        if should_broadcast {
+            claimable_outpoints.push(ClaimRequest { absolute_timelock: height, aggregable: false, outpoint: BitcoinOutPoint { txid: self.funding_info.0.txid.clone(), vout: self.funding_info.0.index as u32 }, witness_data: InputMaterial::Funding { funding_redeemscript: self.funding_redeemscript.clone() }});
+        }
+        if should_broadcast {
+            if let Some(commitment_tx) = self.onchain_tx_handler.get_fully_signed_local_tx(&self.funding_redeemscript) {
+                let (mut new_outpoints, new_outputs, _) = self.broadcast_by_local_state(&commitment_tx, &self.current_local_commitment_tx);
+                if !new_outputs.is_empty() {
+                    watch_outputs.push((self.current_local_commitment_tx.txid.clone(), new_outputs));
+                }
+                claimable_outpoints.append(&mut new_outpoints);
+            }
+        }
+        if let Some(events) = self.onchain_events_waiting_threshold_conf.remove(&height) {
+            for ev in events {
+                match ev {
+                    OnchainEvent::HTLCUpdate { htlc_update } => {
+                        log_trace!(logger, "HTLC {} failure update has got enough confirmations to be passed upstream", log_bytes!((htlc_update.1).0));
+                        self.pending_htlcs_updated.push(HTLCUpdate {
+                            payment_hash: htlc_update.1,
+                            payment_preimage: None,
+                            source: htlc_update.0,
+                        });
+                    },
+                    OnchainEvent::MaturingOutput { descriptor } => {
+                        log_trace!(logger, "Descriptor {} has got enough confirmations to be passed upstream", log_spendable!(descriptor));
+                        self.pending_events.push(events::Event::SpendableOutputs {
+                            outputs: vec![descriptor]
+                        });
+                    }
+                }
+            }
+        }
+        self.onchain_tx_handler.block_connected(txn_matched, claimable_outpoints, height, &*broadcaster, &*fee_estimator, &*logger);
+
+        self.last_block_hash = block_hash;
+        for &(ref txid, ref output_scripts) in watch_outputs.iter() {
+            self.outputs_to_watch.insert(txid.clone(), output_scripts.iter().map(|o| o.script_pubkey.clone()).collect());
+        }
+
+        watch_outputs
+    }
+
+    /// Determines if the disconnected block contained any transactions of interest and updates
+    /// appropriately.
+    ///
+    /// TODO: Include how `broadcaster` and `fee_estimator` are used.
+    pub fn block_disconnected<B: Deref, F: Deref, L: Deref>(&mut self, header: &BlockHeader, height: u32, broadcaster: B, fee_estimator: F, logger: L)
+        where B::Target: BroadcasterInterface,
+              F::Target: FeeEstimator,
+              L::Target: Logger,
+    {
+        let block_hash = header.bitcoin_hash();
+        log_trace!(logger, "Block {} at height {} disconnected", block_hash, height);
+
+        if let Some(_) = self.onchain_events_waiting_threshold_conf.remove(&(height + ANTI_REORG_DELAY - 1)) {
+            // We may discard:
+            // - any HTLC update whose failure-trigger tx (revoked commitment tx, non-revoked commitment tx, HTLC-timeout tx) has been disconnected
+            // - any maturing spendable output whose transaction paying us has been disconnected
+        }
+
+        self.onchain_tx_handler.block_disconnected(height, broadcaster, fee_estimator, logger);
+
+        self.last_block_hash = block_hash;
+    }
+
+    pub(crate) fn would_broadcast_at_height<L: Deref>(&self, height: u32, logger: &L) -> bool where L::Target: Logger {
+        // We need to consider all HTLCs which are:
+        //  * in any unrevoked remote commitment transaction, as they could broadcast said
+        //    transactions and we'd end up in a race, or
+        //  * in our latest local commitment transaction, as this is the thing we will
+        //    broadcast if we go on-chain.
+        // Note that we consider HTLCs which were below dust threshold here - while they don't
+        // strictly imply that we need to fail the channel, we need to go ahead and fail them back
+        // to the source, and if we don't fail the channel we will have to ensure that the next
+        // updates that peer sends us are update_fails, failing the channel if not. It's probably
+        // easier to just fail the channel as this case should be rare enough anyway.
+        macro_rules! scan_commitment {
+            ($htlcs: expr, $local_tx: expr) => {
+                for ref htlc in $htlcs {
+                    // For inbound HTLCs which we know the preimage for, we have to ensure we hit the
+                    // chain with enough room to claim the HTLC without our counterparty being able to
+                    // time out the HTLC first.
+                    // For outbound HTLCs which our counterparty hasn't failed/claimed, our primary
+                    // concern is being able to claim the corresponding inbound HTLC (on another
+                    // channel) before it expires. In fact, we don't even really care if our
+                    // counterparty here claims such an outbound HTLC after it expired as long as we
+                    // can still claim the corresponding HTLC. Thus, to avoid needlessly hitting the
+                    // chain when our counterparty is waiting for expiration to off-chain fail an HTLC
+                    // we give ourselves a few blocks of headroom after expiration before going
+                    // on-chain for an expired HTLC.
+                    // Note that, to avoid a potential attack whereby a node delays claiming an HTLC
+                    // from us until we've reached the point where we go on-chain with the
+                    // corresponding inbound HTLC, we must ensure that outbound HTLCs go on chain at
+                    // least CLTV_CLAIM_BUFFER blocks prior to the inbound HTLC.
+                    //  aka outbound_cltv + LATENCY_GRACE_PERIOD_BLOCKS == height - CLTV_CLAIM_BUFFER
+                    //      inbound_cltv == height + CLTV_CLAIM_BUFFER
+                    //      outbound_cltv + LATENCY_GRACE_PERIOD_BLOCKS + CLTV_CLAIM_BUFFER <= inbound_cltv - CLTV_CLAIM_BUFFER
+                    //      LATENCY_GRACE_PERIOD_BLOCKS + 2*CLTV_CLAIM_BUFFER <= inbound_cltv - outbound_cltv
+                    //      CLTV_EXPIRY_DELTA <= inbound_cltv - outbound_cltv (by check in ChannelManager::decode_update_add_htlc_onion)
+                    //      LATENCY_GRACE_PERIOD_BLOCKS + 2*CLTV_CLAIM_BUFFER <= CLTV_EXPIRY_DELTA
+                    // The final, above, condition is checked for statically in channelmanager
+                    // with CHECK_CLTV_EXPIRY_SANITY_2.
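                    // [Editorial sketch, not part of the original patch.] The force-close test
                    // that the reasoning above justifies, distilled into a hypothetical standalone
                    // predicate over the same constants used in this module:
                    fn needs_onchain_claim_sketch(htlc_outbound: bool, cltv_expiry: u32, height: u32, preimage_known: bool) -> bool {
                        (htlc_outbound && cltv_expiry + LATENCY_GRACE_PERIOD_BLOCKS <= height) ||
                            (!htlc_outbound && cltv_expiry <= height + CLTV_CLAIM_BUFFER && preimage_known)
                    }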
+                    let htlc_outbound = $local_tx == htlc.offered;
+                    if (htlc_outbound && htlc.cltv_expiry + LATENCY_GRACE_PERIOD_BLOCKS <= height) ||
+                        (!htlc_outbound && htlc.cltv_expiry <= height + CLTV_CLAIM_BUFFER && self.payment_preimages.contains_key(&htlc.payment_hash)) {
+                        log_info!(logger, "Force-closing channel due to {} HTLC timeout, HTLC expiry is {}", if htlc_outbound { "outbound" } else { "inbound" }, htlc.cltv_expiry);
+                        return true;
+                    }
+                }
+            }
+        }
+
+        scan_commitment!(self.current_local_commitment_tx.htlc_outputs.iter().map(|&(ref a, _, _)| a), true);
+
+        if let Some(ref txid) = self.current_remote_commitment_txid {
+            if let Some(ref htlc_outputs) = self.remote_claimable_outpoints.get(txid) {
+                scan_commitment!(htlc_outputs.iter().map(|&(ref a, _)| a), false);
+            }
+        }
+        if let Some(ref txid) = self.prev_remote_commitment_txid {
+            if let Some(ref htlc_outputs) = self.remote_claimable_outpoints.get(txid) {
+                scan_commitment!(htlc_outputs.iter().map(|&(ref a, _)| a), false);
+            }
+        }
+
+        false
+    }
+
+    /// Checks if any broadcast transaction resolves an HTLC output by a success or timeout on a local
+    /// or remote commitment tx; if so, sends back the source, the preimage if found, and the
+    /// payment_hash of the resolved HTLC
+    fn is_resolving_htlc_output<L: Deref>(&mut self, tx: &Transaction, height: u32, logger: &L) where L::Target: Logger {
+        'outer_loop: for input in &tx.input {
+            let mut payment_data = None;
+            let revocation_sig_claim = (input.witness.len() == 3 && HTLCType::scriptlen_to_htlctype(input.witness[2].len()) == Some(HTLCType::OfferedHTLC) && input.witness[1].len() == 33)
+                || (input.witness.len() == 3 && HTLCType::scriptlen_to_htlctype(input.witness[2].len()) == Some(HTLCType::AcceptedHTLC) && input.witness[1].len() == 33);
+            let accepted_preimage_claim = input.witness.len() == 5 && HTLCType::scriptlen_to_htlctype(input.witness[4].len()) == Some(HTLCType::AcceptedHTLC);
+            let offered_preimage_claim = input.witness.len() == 3 && HTLCType::scriptlen_to_htlctype(input.witness[2].len()) == Some(HTLCType::OfferedHTLC);
+
+            macro_rules! log_claim {
+                ($tx_info: expr, $local_tx: expr, $htlc: expr, $source_avail: expr) => {
+                    // We found the output in question, but aren't failing it backwards
+                    // as we have no corresponding source and no valid remote commitment txid
+                    // to try a weak source binding with same-hash, same-value still-valid offered HTLC.
+                    // This implies either it is an inbound HTLC or an outbound HTLC on a revoked transaction.
+                    let outbound_htlc = $local_tx == $htlc.offered;
+                    if ($local_tx && revocation_sig_claim) ||
+                        (outbound_htlc && !$source_avail && (accepted_preimage_claim || offered_preimage_claim)) {
+                        log_error!(logger, "Input spending {} ({}:{}) in {} resolves {} HTLC with payment hash {} with {}!",
+                            $tx_info, input.previous_output.txid, input.previous_output.vout, tx.txid(),
+                            if outbound_htlc { "outbound" } else { "inbound" }, log_bytes!($htlc.payment_hash.0),
+                            if revocation_sig_claim { "revocation sig" } else { "preimage claim after we'd passed the HTLC resolution back" });
+                    } else {
+                        log_info!(logger, "Input spending {} ({}:{}) in {} resolves {} HTLC with payment hash {} with {}",
+                            $tx_info, input.previous_output.txid, input.previous_output.vout, tx.txid(),
+                            if outbound_htlc { "outbound" } else { "inbound" }, log_bytes!($htlc.payment_hash.0),
+                            if revocation_sig_claim { "revocation sig" } else if accepted_preimage_claim || offered_preimage_claim { "preimage" } else { "timeout" });
+                    }
+                }
+            }
+
+            macro_rules! check_htlc_valid_remote {
+                ($remote_txid: expr, $htlc_output: expr) => {
+                    if let Some(txid) = $remote_txid {
+                        for &(ref pending_htlc, ref pending_source) in self.remote_claimable_outpoints.get(&txid).unwrap() {
+                            if pending_htlc.payment_hash == $htlc_output.payment_hash && pending_htlc.amount_msat == $htlc_output.amount_msat {
+                                if let &Some(ref source) = pending_source {
+                                    log_claim!("revoked remote commitment tx", false, pending_htlc, true);
+                                    payment_data = Some(((**source).clone(), $htlc_output.payment_hash));
+                                    break;
+                                }
+                            }
+                        }
+                    }
+                }
+            }
+
+            macro_rules! scan_commitment {
+                ($htlcs: expr, $tx_info: expr, $local_tx: expr) => {
+                    for (ref htlc_output, source_option) in $htlcs {
+                        if Some(input.previous_output.vout) == htlc_output.transaction_output_index {
+                            if let Some(ref source) = source_option {
+                                log_claim!($tx_info, $local_tx, htlc_output, true);
+                                // We have a resolution of an HTLC either from one of our latest
+                                // local commitment transactions or an unrevoked remote commitment
+                                // transaction. This implies we either learned a preimage, the HTLC
+                                // has timed out, or we screwed up. In any case, we should now
+                                // resolve the source HTLC with the original sender.
+                                payment_data = Some(((*source).clone(), htlc_output.payment_hash));
+                            } else if !$local_tx {
+                                check_htlc_valid_remote!(self.current_remote_commitment_txid, htlc_output);
+                                if payment_data.is_none() {
+                                    check_htlc_valid_remote!(self.prev_remote_commitment_txid, htlc_output);
+                                }
+                            }
+                            if payment_data.is_none() {
+                                log_claim!($tx_info, $local_tx, htlc_output, false);
+                                continue 'outer_loop;
+                            }
+                        }
+                    }
+                }
+            }
+
+            if input.previous_output.txid == self.current_local_commitment_tx.txid {
+                scan_commitment!(self.current_local_commitment_tx.htlc_outputs.iter().map(|&(ref a, _, ref b)| (a, b.as_ref())),
+                    "our latest local commitment tx", true);
+            }
+            if let Some(ref prev_local_signed_commitment_tx) = self.prev_local_signed_commitment_tx {
+                if input.previous_output.txid == prev_local_signed_commitment_tx.txid {
+                    scan_commitment!(prev_local_signed_commitment_tx.htlc_outputs.iter().map(|&(ref a, _, ref b)| (a, b.as_ref())),
+                        "our previous local commitment tx", true);
+                }
+            }
+            if let Some(ref htlc_outputs) = self.remote_claimable_outpoints.get(&input.previous_output.txid) {
+                scan_commitment!(htlc_outputs.iter().map(|&(ref a, ref b)| (a, (b.as_ref().clone()).map(|boxed| &**boxed))),
+                    "remote commitment tx", false);
+            }
+
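            // [Editorial sketch, not part of the original patch.] The witness indices used below
            // follow the BOLT 3 HTLC-spend layouts: an HTLC-Success spend of an accepted HTLC has
            // five witness elements with the preimage at index 3, while a preimage claim of an
            // offered HTLC has three elements with the preimage at index 1. A hypothetical helper:
            fn preimage_from_witness_sketch(witness: &[Vec<u8>]) -> Option<PaymentPreimage> {
                let idx = match witness.len() { 5 => 3, 3 => 1, _ => return None };
                // Revocation claims also carry 3 elements, but with a 33-byte pubkey at index 1,
                // and HTLC-Timeout spends carry an empty element there, so require exactly 32 bytes.
                if witness[idx].len() != 32 { return None }
                let mut preimage = PaymentPreimage([0; 32]);
                preimage.0.copy_from_slice(&witness[idx]);
                Some(preimage)
            }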
+            // Check that scan_commitment, above, decided there is some source worth relaying an
+            // HTLC resolution backwards to and figure out whether we learned a preimage from it.
+            if let Some((source, payment_hash)) = payment_data {
+                let mut payment_preimage = PaymentPreimage([0; 32]);
+                if accepted_preimage_claim {
+                    if !self.pending_htlcs_updated.iter().any(|update| update.source == source) {
+                        payment_preimage.0.copy_from_slice(&input.witness[3]);
+                        self.pending_htlcs_updated.push(HTLCUpdate {
+                            source,
+                            payment_preimage: Some(payment_preimage),
+                            payment_hash
+                        });
+                    }
+                } else if offered_preimage_claim {
+                    if !self.pending_htlcs_updated.iter().any(|update| update.source == source) {
+                        payment_preimage.0.copy_from_slice(&input.witness[1]);
+                        self.pending_htlcs_updated.push(HTLCUpdate {
+                            source,
+                            payment_preimage: Some(payment_preimage),
+                            payment_hash
+                        });
+                    }
+                } else {
+                    log_info!(logger, "Failing HTLC with payment_hash {} timed out by a spend tx, waiting for confirmation (at height {})", log_bytes!(payment_hash.0), height + ANTI_REORG_DELAY - 1);
+                    match self.onchain_events_waiting_threshold_conf.entry(height + ANTI_REORG_DELAY - 1) {
+                        hash_map::Entry::Occupied(mut entry) => {
+                            let e = entry.get_mut();
+                            e.retain(|ref event| {
+                                match **event {
+                                    OnchainEvent::HTLCUpdate { ref htlc_update } => {
+                                        return htlc_update.0 != source
+                                    },
+                                    _ => true
+                                }
+                            });
+                            e.push(OnchainEvent::HTLCUpdate { htlc_update: (source, payment_hash)});
+                        }
+                        hash_map::Entry::Vacant(entry) => {
+                            entry.insert(vec![OnchainEvent::HTLCUpdate { htlc_update: (source, payment_hash)}]);
+                        }
+                    }
+                }
+            }
+        }
+    }
+
+    /// Checks if any broadcast transaction is paying funds back to some address we can assume to own
+    fn is_paying_spendable_output<L: Deref>(&mut self, tx: &Transaction, height: u32, logger: &L) where L::Target: Logger {
+        let mut spendable_output = None;
+        for (i, outp) in tx.output.iter().enumerate() { // There is at most one spendable output for any channel tx, including ones generated by us
+            if outp.script_pubkey == self.destination_script {
+                spendable_output = Some(SpendableOutputDescriptor::StaticOutput {
+                    outpoint: BitcoinOutPoint { txid: tx.txid(), vout: i as u32 },
+                    output: outp.clone(),
+                });
+                break;
+            } else if let Some(ref broadcasted_local_revokable_script) = self.broadcasted_local_revokable_script {
+                if broadcasted_local_revokable_script.0 == outp.script_pubkey {
+                    spendable_output = Some(SpendableOutputDescriptor::DynamicOutputP2WSH {
+                        outpoint: BitcoinOutPoint { txid: tx.txid(), vout: i as u32 },
+                        per_commitment_point: broadcasted_local_revokable_script.1,
+                        to_self_delay: self.on_local_tx_csv,
+                        output: outp.clone(),
+                        key_derivation_params: self.keys.key_derivation_params(),
+                        remote_revocation_pubkey: broadcasted_local_revokable_script.2.clone(),
+                    });
+                    break;
+                }
+            } else if self.remote_payment_script == outp.script_pubkey {
+                spendable_output = Some(SpendableOutputDescriptor::StaticOutputRemotePayment {
+                    outpoint: BitcoinOutPoint { txid: tx.txid(), vout: i as u32 },
+                    output: outp.clone(),
+                    key_derivation_params: self.keys.key_derivation_params(),
+                });
+                break;
+            } else if outp.script_pubkey == self.shutdown_script {
+                spendable_output = Some(SpendableOutputDescriptor::StaticOutput {
+                    outpoint: BitcoinOutPoint { txid: tx.txid(), vout: i as u32 },
+                    output: outp.clone(),
+                });
+            }
+        }
+        if let Some(spendable_output) = spendable_output {
+            log_trace!(logger, "Maturing {} until {}", log_spendable!(spendable_output), height + ANTI_REORG_DELAY - 1);
+            match self.onchain_events_waiting_threshold_conf.entry(height + ANTI_REORG_DELAY - 1) {
+                hash_map::Entry::Occupied(mut entry) => {
+                    let e = entry.get_mut();
+                    e.push(OnchainEvent::MaturingOutput { descriptor: spendable_output });
+                }
+                hash_map::Entry::Vacant(entry) => {
+                    entry.insert(vec![OnchainEvent::MaturingOutput { descriptor: spendable_output }]);
+                }
+            }
+        }
+    }
+}
+
+const MAX_ALLOC_SIZE: usize = 64*1024;
+
+impl<ChanSigner: ChannelKeys + Readable> Readable for (BlockHash, ChannelMonitor<ChanSigner>) {
+    fn read<R: ::std::io::Read>(reader: &mut R) -> Result<Self, DecodeError> {
+        macro_rules! unwrap_obj {
+            ($key: expr) => {
+                match $key {
+                    Ok(res) => res,
+                    Err(_) => return Err(DecodeError::InvalidValue),
+                }
+            }
+        }
+
+        let _ver: u8 = Readable::read(reader)?;
+        let min_ver: u8 = Readable::read(reader)?;
+        if min_ver > SERIALIZATION_VERSION {
+            return Err(DecodeError::UnknownVersion);
+        }
+
+        let latest_update_id: u64 = Readable::read(reader)?;
+        let commitment_transaction_number_obscure_factor = <U48 as Readable>::read(reader)?.0;
+
+        let destination_script = Readable::read(reader)?;
+        let broadcasted_local_revokable_script = match <u8 as Readable>::read(reader)? {
+            0 => {
+                let revokable_address = Readable::read(reader)?;
+                let per_commitment_point = Readable::read(reader)?;
+                let revokable_script = Readable::read(reader)?;
+                Some((revokable_address, per_commitment_point, revokable_script))
+            },
+            1 => { None },
+            _ => return Err(DecodeError::InvalidValue),
+        };
+        let remote_payment_script = Readable::read(reader)?;
+        let shutdown_script = Readable::read(reader)?;
+
+        let keys = Readable::read(reader)?;
+        // Technically this can fail and make a serialization round-trip fail, but only for
+        // serialization of barely-init'd ChannelMonitors that we can't do anything with.
+        let outpoint = OutPoint {
+            txid: Readable::read(reader)?,
+            index: Readable::read(reader)?,
+        };
+        let funding_info = (outpoint, Readable::read(reader)?);
+        let current_remote_commitment_txid = Readable::read(reader)?;
+        let prev_remote_commitment_txid = Readable::read(reader)?;
+
+        let remote_tx_cache = Readable::read(reader)?;
+        let funding_redeemscript = Readable::read(reader)?;
+        let channel_value_satoshis = Readable::read(reader)?;
+
+        let their_cur_revocation_points = {
+            let first_idx = <U48 as Readable>::read(reader)?.0;
+            if first_idx == 0 {
+                None
+            } else {
+                let first_point = Readable::read(reader)?;
+                let second_point_slice: [u8; 33] = Readable::read(reader)?;
+                if second_point_slice[0..32] == [0; 32] && second_point_slice[32] == 0 {
+                    Some((first_idx, first_point, None))
+                } else {
+                    Some((first_idx, first_point, Some(unwrap_obj!(PublicKey::from_slice(&second_point_slice)))))
+                }
+            }
+        };
+
+        let on_local_tx_csv: u16 = Readable::read(reader)?;
+
+        let commitment_secrets = Readable::read(reader)?;
+
+        macro_rules! read_htlc_in_commitment {
+            () => {
+                {
+                    let offered: bool = Readable::read(reader)?;
+                    let amount_msat: u64 = Readable::read(reader)?;
+                    let cltv_expiry: u32 = Readable::read(reader)?;
+                    let payment_hash: PaymentHash = Readable::read(reader)?;
+                    let transaction_output_index: Option<u32> = Readable::read(reader)?;
+
+                    HTLCOutputInCommitment {
+                        offered, amount_msat, cltv_expiry, payment_hash, transaction_output_index
+                    }
+                }
+            }
+        }
+
+        let remote_claimable_outpoints_len: u64 = Readable::read(reader)?;
+        let mut remote_claimable_outpoints = HashMap::with_capacity(cmp::min(remote_claimable_outpoints_len as usize, MAX_ALLOC_SIZE / 64));
+        for _ in 0..remote_claimable_outpoints_len {
+            let txid: Txid = Readable::read(reader)?;
+            let htlcs_count: u64 = Readable::read(reader)?;
+            let mut htlcs = Vec::with_capacity(cmp::min(htlcs_count as usize, MAX_ALLOC_SIZE / 32));
+            for _ in 0..htlcs_count {
+                htlcs.push((read_htlc_in_commitment!(), <Option<HTLCSource> as Readable>::read(reader)?.map(|o: HTLCSource| Box::new(o))));
+            }
+            if let Some(_) = remote_claimable_outpoints.insert(txid, htlcs) {
+                return Err(DecodeError::InvalidValue);
+            }
+        }
+
+        let remote_commitment_txn_on_chain_len: u64 = Readable::read(reader)?;
+        let mut remote_commitment_txn_on_chain = HashMap::with_capacity(cmp::min(remote_commitment_txn_on_chain_len as usize, MAX_ALLOC_SIZE / 32));
+        for _ in 0..remote_commitment_txn_on_chain_len {
+            let txid: Txid = Readable::read(reader)?;
+            let commitment_number = <U48 as Readable>::read(reader)?.0;
+            let outputs_count = <u64 as Readable>::read(reader)?;
+            let mut outputs = Vec::with_capacity(cmp::min(outputs_count as usize, MAX_ALLOC_SIZE / 8));
+            for _ in 0..outputs_count {
+                outputs.push(Readable::read(reader)?);
+            }
+            if let Some(_) = remote_commitment_txn_on_chain.insert(txid, (commitment_number, outputs)) {
+                return Err(DecodeError::InvalidValue);
+            }
+        }
+
+        let remote_hash_commitment_number_len: u64 = Readable::read(reader)?;
+        let mut remote_hash_commitment_number = HashMap::with_capacity(cmp::min(remote_hash_commitment_number_len as usize, MAX_ALLOC_SIZE / 32));
+        for _ in 0..remote_hash_commitment_number_len {
+            let payment_hash: PaymentHash = Readable::read(reader)?;
+            let commitment_number = <U48 as Readable>::read(reader)?.0;
+            if let Some(_) = remote_hash_commitment_number.insert(payment_hash, commitment_number) {
+                return Err(DecodeError::InvalidValue);
+            }
+        }
+
+        macro_rules! read_local_tx {
+            () => {
+                {
+                    let txid = Readable::read(reader)?;
+                    let revocation_key = Readable::read(reader)?;
+                    let a_htlc_key = Readable::read(reader)?;
+                    let b_htlc_key = Readable::read(reader)?;
+                    let delayed_payment_key = Readable::read(reader)?;
+                    let per_commitment_point = Readable::read(reader)?;
+                    let feerate_per_kw: u32 = Readable::read(reader)?;
+
+                    let htlcs_len: u64 = Readable::read(reader)?;
+                    let mut htlcs = Vec::with_capacity(cmp::min(htlcs_len as usize, MAX_ALLOC_SIZE / 128));
+                    for _ in 0..htlcs_len {
+                        let htlc = read_htlc_in_commitment!();
+                        let sigs = match <u8 as Readable>::read(reader)? {
+                            0 => None,
+                            1 => Some(Readable::read(reader)?),
+                            _ => return Err(DecodeError::InvalidValue),
+                        };
+                        htlcs.push((htlc, sigs, Readable::read(reader)?));
+                    }
+
+                    LocalSignedTx {
+                        txid,
+                        revocation_key, a_htlc_key, b_htlc_key, delayed_payment_key, per_commitment_point, feerate_per_kw,
+                        htlc_outputs: htlcs
+                    }
+                }
+            }
+        }
+
+        let prev_local_signed_commitment_tx = match <u8 as Readable>::read(reader)? {
+            0 => None,
+            1 => {
+                Some(read_local_tx!())
+            },
+            _ => return Err(DecodeError::InvalidValue),
+        };
+        let current_local_commitment_tx = read_local_tx!();
+
+        let current_remote_commitment_number = <U48 as Readable>::read(reader)?.0;
+        let current_local_commitment_number = <U48 as Readable>::read(reader)?.0;
+
+        let payment_preimages_len: u64 = Readable::read(reader)?;
+        let mut payment_preimages = HashMap::with_capacity(cmp::min(payment_preimages_len as usize, MAX_ALLOC_SIZE / 32));
+        for _ in 0..payment_preimages_len {
+            let preimage: PaymentPreimage = Readable::read(reader)?;
+            let hash = PaymentHash(Sha256::hash(&preimage.0[..]).into_inner());
+            if let Some(_) = payment_preimages.insert(hash, preimage) {
+                return Err(DecodeError::InvalidValue);
+            }
+        }
+
+        let pending_htlcs_updated_len: u64 = Readable::read(reader)?;
+        let mut pending_htlcs_updated = Vec::with_capacity(cmp::min(pending_htlcs_updated_len as usize, MAX_ALLOC_SIZE / (32 + 8*3)));
+        for _ in 0..pending_htlcs_updated_len {
+            pending_htlcs_updated.push(Readable::read(reader)?);
+        }
+
+        let pending_events_len: u64 = Readable::read(reader)?;
+        let mut pending_events = Vec::with_capacity(cmp::min(pending_events_len as usize, MAX_ALLOC_SIZE / mem::size_of::<events::Event>()));
+        for _ in 0..pending_events_len {
+            if let Some(event) = MaybeReadable::read(reader)? {
+                pending_events.push(event);
+            }
+        }
+
+        let last_block_hash: BlockHash = Readable::read(reader)?;
+
+        let waiting_threshold_conf_len: u64 = Readable::read(reader)?;
+        let mut onchain_events_waiting_threshold_conf = HashMap::with_capacity(cmp::min(waiting_threshold_conf_len as usize, MAX_ALLOC_SIZE / 128));
+        for _ in 0..waiting_threshold_conf_len {
+            let height_target = Readable::read(reader)?;
+            let events_len: u64 = Readable::read(reader)?;
+            let mut events = Vec::with_capacity(cmp::min(events_len as usize, MAX_ALLOC_SIZE / 128));
+            for _ in 0..events_len {
+                let ev = match <u8 as Readable>::read(reader)? {
+                    0 => {
+                        let htlc_source = Readable::read(reader)?;
+                        let hash = Readable::read(reader)?;
+                        OnchainEvent::HTLCUpdate {
+                            htlc_update: (htlc_source, hash)
+                        }
+                    },
+                    1 => {
+                        let descriptor = Readable::read(reader)?;
+                        OnchainEvent::MaturingOutput {
+                            descriptor
+                        }
+                    },
+                    _ => return Err(DecodeError::InvalidValue),
+                };
+                events.push(ev);
+            }
+            onchain_events_waiting_threshold_conf.insert(height_target, events);
+        }
+
+        let outputs_to_watch_len: u64 = Readable::read(reader)?;
+        let mut outputs_to_watch = HashMap::with_capacity(cmp::min(outputs_to_watch_len as usize, MAX_ALLOC_SIZE / (mem::size_of::<Txid>() + mem::size_of::<Vec<Script>>())));
+        for _ in 0..outputs_to_watch_len {
+            let txid = Readable::read(reader)?;
+            let outputs_len: u64 = Readable::read(reader)?;
+            let mut outputs = Vec::with_capacity(cmp::min(outputs_len as usize, MAX_ALLOC_SIZE / mem::size_of::