X-Git-Url: http://git.bitcoin.ninja/index.cgi?a=blobdiff_plain;f=lightning%2Fsrc%2Fchain%2Fchannelmonitor.rs;h=939337d7b42d572d64c7ab5b1a6224479f6c1ce9;hb=e23c270720df2798c3e35e2ba804d98060d76d17;hp=0a9cf6997c99b9bda1e5e6217782f7813b79de63;hpb=8e9980032952e04e4951df027bf86a8a69733719;p=rust-lightning diff --git a/lightning/src/chain/channelmonitor.rs b/lightning/src/chain/channelmonitor.rs index 0a9cf699..939337d7 100644 --- a/lightning/src/chain/channelmonitor.rs +++ b/lightning/src/chain/channelmonitor.rs @@ -19,15 +19,12 @@ //! ChannelMonitors should do so). Thus, if you're building rust-lightning into an HSM or other //! security-domain-separated system design, you should consider having multiple paths for //! ChannelMonitors to get out of the HSM and onto monitoring devices. -//! -//! [`chain::Watch`]: ../trait.Watch.html -use bitcoin::blockdata::block::BlockHeader; +use bitcoin::blockdata::block::{Block, BlockHeader}; use bitcoin::blockdata::transaction::{TxOut,Transaction}; use bitcoin::blockdata::transaction::OutPoint as BitcoinOutPoint; use bitcoin::blockdata::script::{Script, Builder}; use bitcoin::blockdata::opcodes; -use bitcoin::consensus::encode; use bitcoin::hashes::Hash; use bitcoin::hashes::sha256::Hash as Sha256; @@ -39,39 +36,56 @@ use bitcoin::secp256k1; use ln::msgs::DecodeError; use ln::chan_utils; -use ln::chan_utils::{CounterpartyCommitmentSecrets, HTLCOutputInCommitment, HolderCommitmentTransaction, HTLCType}; +use ln::chan_utils::{CounterpartyCommitmentSecrets, HTLCOutputInCommitment, HTLCType, ChannelTransactionParameters, HolderCommitmentTransaction}; use ln::channelmanager::{HTLCSource, PaymentPreimage, PaymentHash}; use ln::onchaintx::{OnchainTxHandler, InputDescriptors}; +use chain; +use chain::WatchedOutput; use chain::chaininterface::{BroadcasterInterface, FeeEstimator}; use chain::transaction::{OutPoint, TransactionData}; -use chain::keysinterface::{SpendableOutputDescriptor, ChannelKeys}; +use chain::keysinterface::{SpendableOutputDescriptor, StaticPaymentOutputDescriptor, DelayedPaymentOutputDescriptor, Sign, KeysInterface}; +use chain::Filter; use util::logger::Logger; -use util::ser::{Readable, MaybeReadable, Writer, Writeable, U48}; +use util::ser::{Readable, ReadableArgs, MaybeReadable, Writer, Writeable, U48}; use util::byte_utils; use util::events::Event; use std::collections::{HashMap, HashSet, hash_map}; use std::{cmp, mem}; -use std::ops::Deref; use std::io::Error; +use std::ops::Deref; +use std::sync::Mutex; /// An update generated by the underlying Channel itself which contains some new information the /// ChannelMonitor should be made aware of. -#[cfg_attr(test, derive(PartialEq))] +#[cfg_attr(any(test, feature = "fuzztarget", feature = "_test_utils"), derive(PartialEq))] #[derive(Clone)] #[must_use] pub struct ChannelMonitorUpdate { pub(crate) updates: Vec, /// The sequence number of this update. Updates *must* be replayed in-order according to this /// sequence number (and updates may panic if they are not). The update_id values are strictly - /// increasing and increase by one for each new update. + /// increasing and increase by one for each new update, with one exception specified below. /// /// This sequence number is also used to track up to which points updates which returned /// ChannelMonitorUpdateErr::TemporaryFailure have been applied to all copies of a given /// ChannelMonitor when ChannelManager::channel_monitor_updated is called. 
+ /// + /// The only instance where update_id values are not strictly increasing is the case where we + /// allow post-force-close updates with a special update ID of [`CLOSED_CHANNEL_UPDATE_ID`]. See + /// its docs for more details. pub update_id: u64, } +/// If: +/// (1) a channel has been force closed and +/// (2) we receive a preimage from a forward link that allows us to spend an HTLC output on +/// this channel's (the backward link's) broadcasted commitment transaction +/// then we allow the `ChannelManager` to send a `ChannelMonitorUpdate` with this update ID, +/// with the update providing said payment preimage. No other update types are allowed after +/// force-close. +pub const CLOSED_CHANNEL_UPDATE_ID: u64 = std::u64::MAX; + impl Writeable for ChannelMonitorUpdate { fn write(&self, w: &mut W) -> Result<(), ::std::io::Error> { self.update_id.write(w)?; @@ -95,7 +109,7 @@ impl Readable for ChannelMonitorUpdate { } /// An error enum representing a failure to persist a channel monitor update. -#[derive(Clone)] +#[derive(Clone, Debug)] pub enum ChannelMonitorUpdateErr { /// Used to indicate a temporary failure (eg connection to a watchtower or remote backup of /// our state failed, but is expected to succeed at some point in the future). @@ -159,12 +173,12 @@ pub enum ChannelMonitorUpdateErr { /// inconsistent with the ChannelMonitor being called. eg for ChannelMonitor::update_monitor this /// means you tried to update a monitor for a different channel or the ChannelMonitorUpdate was /// corrupted. -/// Contains a human-readable error message. -#[derive(Debug)] +/// Contains a developer-readable error message. +#[derive(Clone, Debug)] pub struct MonitorUpdateError(pub &'static str); /// An event to be processed by the ChannelManager. -#[derive(PartialEq)] +#[derive(Clone, PartialEq)] pub enum MonitorEvent { /// A monitor event containing an HTLCUpdate. HTLCEvent(HTLCUpdate), @@ -176,8 +190,6 @@ pub enum MonitorEvent { /// Simple structure sent back by `chain::Watch` when an HTLC from a forward channel is detected on /// chain. Used to update the corresponding HTLC in the backward channel. Failing to pass the /// preimage claim backward will lead to loss of funds. -/// -/// [`chain::Watch`]: ../trait.Watch.html #[derive(Clone, PartialEq)] pub struct HTLCUpdate { pub(crate) payment_hash: PaymentHash, @@ -237,6 +249,7 @@ pub(crate) const ANTI_REORG_DELAY: u32 = 6; /// end up force-closing the channel on us to claim it. 
pub(crate) const HTLC_FAIL_BACK_BUFFER: u32 = CLTV_CLAIM_BUFFER + LATENCY_GRACE_PERIOD_BLOCKS; +// TODO(devrandom) replace this with HolderCommitmentTransaction #[derive(Clone, PartialEq)] struct HolderSignedTx { /// txid of the transaction in tx, just used to make comparison faster @@ -470,7 +483,7 @@ enum OnchainEvent { const SERIALIZATION_VERSION: u8 = 1; const MIN_SERIALIZATION_VERSION: u8 = 1; -#[cfg_attr(test, derive(PartialEq))] +#[cfg_attr(any(test, feature = "fuzztarget", feature = "_test_utils"), derive(PartialEq))] #[derive(Clone)] pub(crate) enum ChannelMonitorUpdateStep { LatestHolderCommitmentTXInfo { @@ -478,7 +491,7 @@ pub(crate) enum ChannelMonitorUpdateStep { htlc_outputs: Vec<(HTLCOutputInCommitment, Option, Option)>, }, LatestCounterpartyCommitmentTXInfo { - unsigned_commitment_tx: Transaction, // TODO: We should actually only need the txid here + commitment_txid: Txid, htlc_outputs: Vec<(HTLCOutputInCommitment, Option>)>, commitment_number: u64, their_revocation_point: PublicKey, @@ -512,9 +525,9 @@ impl Writeable for ChannelMonitorUpdateStep { source.write(w)?; } } - &ChannelMonitorUpdateStep::LatestCounterpartyCommitmentTXInfo { ref unsigned_commitment_tx, ref htlc_outputs, ref commitment_number, ref their_revocation_point } => { + &ChannelMonitorUpdateStep::LatestCounterpartyCommitmentTXInfo { commitment_txid, ref htlc_outputs, ref commitment_number, ref their_revocation_point } => { 1u8.write(w)?; - unsigned_commitment_tx.write(w)?; + commitment_txid.write(w)?; commitment_number.write(w)?; their_revocation_point.write(w)?; (htlc_outputs.len() as u64).write(w)?; @@ -558,7 +571,7 @@ impl Readable for ChannelMonitorUpdateStep { }, 1u8 => { Ok(ChannelMonitorUpdateStep::LatestCounterpartyCommitmentTXInfo { - unsigned_commitment_tx: Readable::read(r)?, + commitment_txid: Readable::read(r)?, commitment_number: Readable::read(r)?, their_revocation_point: Readable::read(r)?, htlc_outputs: { @@ -602,7 +615,20 @@ impl Readable for ChannelMonitorUpdateStep { /// get_and_clear_pending_monitor_events or get_and_clear_pending_events are serialized to disk and /// reloaded at deserialize-time. Thus, you must ensure that, when handling events, all events /// gotten are fully handled before re-serializing the new state. -pub struct ChannelMonitor { +/// +/// Note that the deserializer is only implemented for (BlockHash, ChannelMonitor), which +/// tells you the last block hash which was block_connect()ed. You MUST rescan any blocks along +/// the "reorg path" (ie disconnecting blocks until you find a common ancestor from both the +/// returned block hash and the the current chain and then reconnecting blocks to get to the +/// best chain) upon deserializing the object! +pub struct ChannelMonitor { + #[cfg(test)] + pub(crate) inner: Mutex>, + #[cfg(not(test))] + inner: Mutex>, +} + +pub(crate) struct ChannelMonitorImpl { latest_update_id: u64, commitment_transaction_number_obscure_factor: u64, @@ -611,7 +637,8 @@ pub struct ChannelMonitor { counterparty_payment_script: Script, shutdown_script: Script, - keys: ChanSigner, + channel_keys_id: [u8; 32], + holder_revocation_basepoint: PublicKey, funding_info: (OutPoint, Script), current_counterparty_commitment_txid: Option, prev_counterparty_commitment_txid: Option, @@ -666,12 +693,12 @@ pub struct ChannelMonitor { // interface knows about the TXOs that we want to be notified of spends of. 
We could probably // be smart and derive them from the above storage fields, but its much simpler and more // Obviously Correct (tm) if we just keep track of them explicitly. - outputs_to_watch: HashMap>, + outputs_to_watch: HashMap>, #[cfg(test)] - pub onchain_tx_handler: OnchainTxHandler, + pub onchain_tx_handler: OnchainTxHandler, #[cfg(not(test))] - onchain_tx_handler: OnchainTxHandler, + onchain_tx_handler: OnchainTxHandler, // This is set when the Channel[Manager] generated a ChannelMonitorUpdate which indicated the // channel has been force-closed. After this is set, no further holder commitment transaction @@ -696,17 +723,29 @@ pub struct ChannelMonitor { secp_ctx: Secp256k1, //TODO: dedup this a bit... } -#[cfg(any(test, feature = "fuzztarget"))] +#[cfg(any(test, feature = "fuzztarget", feature = "_test_utils"))] +/// Used only in testing and fuzztarget to check serialization roundtrips don't change the +/// underlying object +impl PartialEq for ChannelMonitor { + fn eq(&self, other: &Self) -> bool { + let inner = self.inner.lock().unwrap(); + let other = other.inner.lock().unwrap(); + inner.eq(&other) + } +} + +#[cfg(any(test, feature = "fuzztarget", feature = "_test_utils"))] /// Used only in testing and fuzztarget to check serialization roundtrips don't change the /// underlying object -impl PartialEq for ChannelMonitor { +impl PartialEq for ChannelMonitorImpl { fn eq(&self, other: &Self) -> bool { if self.latest_update_id != other.latest_update_id || self.commitment_transaction_number_obscure_factor != other.commitment_transaction_number_obscure_factor || self.destination_script != other.destination_script || self.broadcasted_holder_revokable_script != other.broadcasted_holder_revokable_script || self.counterparty_payment_script != other.counterparty_payment_script || - self.keys.pubkeys() != other.keys.pubkeys() || + self.channel_keys_id != other.channel_keys_id || + self.holder_revocation_basepoint != other.holder_revocation_basepoint || self.funding_info != other.funding_info || self.current_counterparty_commitment_txid != other.current_counterparty_commitment_txid || self.prev_counterparty_commitment_txid != other.prev_counterparty_commitment_txid || @@ -738,20 +777,19 @@ impl PartialEq for ChannelMonitor { } } -impl ChannelMonitor { - /// Writes this monitor into the given writer, suitable for writing to disk. - /// - /// Note that the deserializer is only implemented for (Sha256dHash, ChannelMonitor), which - /// tells you the last block hash which was block_connect()ed. You MUST rescan any blocks along - /// the "reorg path" (ie disconnecting blocks until you find a common ancestor from both the - /// returned block hash and the the current chain and then reconnecting blocks to get to the - /// best chain) upon deserializing the object! - pub fn write_for_disk(&self, writer: &mut W) -> Result<(), Error> { +impl Writeable for ChannelMonitor { + fn write(&self, writer: &mut W) -> Result<(), Error> { //TODO: We still write out all the serialization here manually instead of using the fancy //serialization framework we have, we should migrate things over to it. 
writer.write_all(&[SERIALIZATION_VERSION; 1])?; writer.write_all(&[MIN_SERIALIZATION_VERSION; 1])?; + self.inner.lock().unwrap().write(writer) + } +} + +impl Writeable for ChannelMonitorImpl { + fn write(&self, writer: &mut W) -> Result<(), Error> { self.latest_update_id.write(writer)?; // Set in initial Channel-object creation, so should always be set by now: @@ -770,7 +808,8 @@ impl ChannelMonitor { self.counterparty_payment_script.write(writer)?; self.shutdown_script.write(writer)?; - self.keys.write(writer)?; + self.channel_keys_id.write(writer)?; + self.holder_revocation_basepoint.write(writer)?; writer.write_all(&self.funding_info.0.txid[..])?; writer.write_all(&byte_utils::be16_to_array(self.funding_info.0.index))?; self.funding_info.1.write(writer)?; @@ -914,10 +953,11 @@ impl ChannelMonitor { } (self.outputs_to_watch.len() as u64).write(writer)?; - for (txid, output_scripts) in self.outputs_to_watch.iter() { + for (txid, idx_scripts) in self.outputs_to_watch.iter() { txid.write(writer)?; - (output_scripts.len() as u64).write(writer)?; - for script in output_scripts.iter() { + (idx_scripts.len() as u64).write(writer)?; + for (idx, script) in idx_scripts.iter() { + idx.write(writer)?; script.write(writer)?; } } @@ -930,13 +970,14 @@ impl ChannelMonitor { } } -impl ChannelMonitor { - pub(crate) fn new(keys: ChanSigner, shutdown_pubkey: &PublicKey, - on_counterparty_tx_csv: u16, destination_script: &Script, funding_info: (OutPoint, Script), - counterparty_htlc_base_key: &PublicKey, counterparty_delayed_payment_base_key: &PublicKey, - on_holder_tx_csv: u16, funding_redeemscript: Script, channel_value_satoshis: u64, - commitment_transaction_number_obscure_factor: u64, - initial_holder_commitment_tx: HolderCommitmentTransaction) -> ChannelMonitor { +impl ChannelMonitor { + pub(crate) fn new(secp_ctx: Secp256k1, keys: Signer, shutdown_pubkey: &PublicKey, + on_counterparty_tx_csv: u16, destination_script: &Script, funding_info: (OutPoint, Script), + channel_parameters: &ChannelTransactionParameters, + funding_redeemscript: Script, channel_value_satoshis: u64, + commitment_transaction_number_obscure_factor: u64, + initial_holder_commitment_tx: HolderCommitmentTransaction, + last_block_hash: BlockHash) -> ChannelMonitor { assert!(commitment_transaction_number_obscure_factor <= (1 << 48)); let our_channel_close_key_hash = WPubkeyHash::hash(&shutdown_pubkey.serialize()); @@ -944,75 +985,305 @@ impl ChannelMonitor { let payment_key_hash = WPubkeyHash::hash(&keys.pubkeys().payment_point.serialize()); let counterparty_payment_script = Builder::new().push_opcode(opcodes::all::OP_PUSHBYTES_0).push_slice(&payment_key_hash[..]).into_script(); - let counterparty_tx_cache = CounterpartyCommitmentTransaction { counterparty_delayed_payment_base_key: *counterparty_delayed_payment_base_key, counterparty_htlc_base_key: *counterparty_htlc_base_key, on_counterparty_tx_csv, per_htlc: HashMap::new() }; - - let mut onchain_tx_handler = OnchainTxHandler::new(destination_script.clone(), keys.clone(), on_holder_tx_csv); - - let holder_tx_sequence = initial_holder_commitment_tx.unsigned_tx.input[0].sequence as u64; - let holder_tx_locktime = initial_holder_commitment_tx.unsigned_tx.lock_time as u64; - let holder_commitment_tx = HolderSignedTx { - txid: initial_holder_commitment_tx.txid(), - revocation_key: initial_holder_commitment_tx.keys.revocation_key, - a_htlc_key: initial_holder_commitment_tx.keys.broadcaster_htlc_key, - b_htlc_key: initial_holder_commitment_tx.keys.countersignatory_htlc_key, - 
delayed_payment_key: initial_holder_commitment_tx.keys.broadcaster_delayed_payment_key, - per_commitment_point: initial_holder_commitment_tx.keys.per_commitment_point, - feerate_per_kw: initial_holder_commitment_tx.feerate_per_kw, - htlc_outputs: Vec::new(), // There are never any HTLCs in the initial commitment transactions + let counterparty_channel_parameters = channel_parameters.counterparty_parameters.as_ref().unwrap(); + let counterparty_delayed_payment_base_key = counterparty_channel_parameters.pubkeys.delayed_payment_basepoint; + let counterparty_htlc_base_key = counterparty_channel_parameters.pubkeys.htlc_basepoint; + let counterparty_tx_cache = CounterpartyCommitmentTransaction { counterparty_delayed_payment_base_key, counterparty_htlc_base_key, on_counterparty_tx_csv, per_htlc: HashMap::new() }; + + let channel_keys_id = keys.channel_keys_id(); + let holder_revocation_basepoint = keys.pubkeys().revocation_basepoint; + + // block for Rust 1.34 compat + let (holder_commitment_tx, current_holder_commitment_number) = { + let trusted_tx = initial_holder_commitment_tx.trust(); + let txid = trusted_tx.txid(); + + let tx_keys = trusted_tx.keys(); + let holder_commitment_tx = HolderSignedTx { + txid, + revocation_key: tx_keys.revocation_key, + a_htlc_key: tx_keys.broadcaster_htlc_key, + b_htlc_key: tx_keys.countersignatory_htlc_key, + delayed_payment_key: tx_keys.broadcaster_delayed_payment_key, + per_commitment_point: tx_keys.per_commitment_point, + feerate_per_kw: trusted_tx.feerate_per_kw(), + htlc_outputs: Vec::new(), // There are never any HTLCs in the initial commitment transactions + }; + (holder_commitment_tx, trusted_tx.commitment_number()) }; - onchain_tx_handler.provide_latest_holder_tx(initial_holder_commitment_tx); + + let onchain_tx_handler = + OnchainTxHandler::new(destination_script.clone(), keys, + channel_parameters.clone(), initial_holder_commitment_tx, secp_ctx.clone()); let mut outputs_to_watch = HashMap::new(); - outputs_to_watch.insert(funding_info.0.txid, vec![funding_info.1.clone()]); + outputs_to_watch.insert(funding_info.0.txid, vec![(funding_info.0.index as u32, funding_info.1.clone())]); ChannelMonitor { - latest_update_id: 0, - commitment_transaction_number_obscure_factor, + inner: Mutex::new(ChannelMonitorImpl { + latest_update_id: 0, + commitment_transaction_number_obscure_factor, + + destination_script: destination_script.clone(), + broadcasted_holder_revokable_script: None, + counterparty_payment_script, + shutdown_script, + + channel_keys_id, + holder_revocation_basepoint, + funding_info, + current_counterparty_commitment_txid: None, + prev_counterparty_commitment_txid: None, - destination_script: destination_script.clone(), - broadcasted_holder_revokable_script: None, - counterparty_payment_script, - shutdown_script, + counterparty_tx_cache, + funding_redeemscript, + channel_value_satoshis, + their_cur_revocation_points: None, - keys, - funding_info, - current_counterparty_commitment_txid: None, - prev_counterparty_commitment_txid: None, + on_holder_tx_csv: counterparty_channel_parameters.selected_contest_delay, - counterparty_tx_cache, - funding_redeemscript, - channel_value_satoshis: channel_value_satoshis, - their_cur_revocation_points: None, + commitment_secrets: CounterpartyCommitmentSecrets::new(), + counterparty_claimable_outpoints: HashMap::new(), + counterparty_commitment_txn_on_chain: HashMap::new(), + counterparty_hash_commitment_number: HashMap::new(), - on_holder_tx_csv, + prev_holder_signed_commitment_tx: None, + 
current_holder_commitment_tx: holder_commitment_tx, + current_counterparty_commitment_number: 1 << 48, + current_holder_commitment_number, - commitment_secrets: CounterpartyCommitmentSecrets::new(), - counterparty_claimable_outpoints: HashMap::new(), - counterparty_commitment_txn_on_chain: HashMap::new(), - counterparty_hash_commitment_number: HashMap::new(), + payment_preimages: HashMap::new(), + pending_monitor_events: Vec::new(), + pending_events: Vec::new(), - prev_holder_signed_commitment_tx: None, - current_holder_commitment_tx: holder_commitment_tx, - current_counterparty_commitment_number: 1 << 48, - current_holder_commitment_number: 0xffff_ffff_ffff - ((((holder_tx_sequence & 0xffffff) << 3*8) | (holder_tx_locktime as u64 & 0xffffff)) ^ commitment_transaction_number_obscure_factor), + onchain_events_waiting_threshold_conf: HashMap::new(), + outputs_to_watch, - payment_preimages: HashMap::new(), - pending_monitor_events: Vec::new(), - pending_events: Vec::new(), + onchain_tx_handler, + + lockdown_from_offchain: false, + holder_tx_signed: false, + + last_block_hash, + secp_ctx, + }), + } + } + + #[cfg(test)] + fn provide_secret(&self, idx: u64, secret: [u8; 32]) -> Result<(), MonitorUpdateError> { + self.inner.lock().unwrap().provide_secret(idx, secret) + } + + /// Informs this monitor of the latest counterparty (ie non-broadcastable) commitment transaction. + /// The monitor watches for it to be broadcasted and then uses the HTLC information (and + /// possibly future revocation/preimage information) to claim outputs where possible. + /// We cache also the mapping hash:commitment number to lighten pruning of old preimages by watchtowers. + pub(crate) fn provide_latest_counterparty_commitment_tx( + &self, + txid: Txid, + htlc_outputs: Vec<(HTLCOutputInCommitment, Option>)>, + commitment_number: u64, + their_revocation_point: PublicKey, + logger: &L, + ) where L::Target: Logger { + self.inner.lock().unwrap().provide_latest_counterparty_commitment_tx( + txid, htlc_outputs, commitment_number, their_revocation_point, logger) + } + + #[cfg(test)] + fn provide_latest_holder_commitment_tx( + &self, + holder_commitment_tx: HolderCommitmentTransaction, + htlc_outputs: Vec<(HTLCOutputInCommitment, Option, Option)>, + ) -> Result<(), MonitorUpdateError> { + self.inner.lock().unwrap().provide_latest_holder_commitment_tx( + holder_commitment_tx, htlc_outputs) + } + + #[cfg(test)] + pub(crate) fn provide_payment_preimage( + &self, + payment_hash: &PaymentHash, + payment_preimage: &PaymentPreimage, + broadcaster: &B, + fee_estimator: &F, + logger: &L, + ) where + B::Target: BroadcasterInterface, + F::Target: FeeEstimator, + L::Target: Logger, + { + self.inner.lock().unwrap().provide_payment_preimage( + payment_hash, payment_preimage, broadcaster, fee_estimator, logger) + } + + pub(crate) fn broadcast_latest_holder_commitment_txn( + &self, + broadcaster: &B, + logger: &L, + ) where + B::Target: BroadcasterInterface, + L::Target: Logger, + { + self.inner.lock().unwrap().broadcast_latest_holder_commitment_txn(broadcaster, logger) + } + + /// Updates a ChannelMonitor on the basis of some new information provided by the Channel + /// itself. + /// + /// panics if the given update is not the next update by update_id. 
+ pub fn update_monitor( + &self, + updates: &ChannelMonitorUpdate, + broadcaster: &B, + fee_estimator: &F, + logger: &L, + ) -> Result<(), MonitorUpdateError> + where + B::Target: BroadcasterInterface, + F::Target: FeeEstimator, + L::Target: Logger, + { + self.inner.lock().unwrap().update_monitor(updates, broadcaster, fee_estimator, logger) + } - onchain_events_waiting_threshold_conf: HashMap::new(), - outputs_to_watch, + /// Gets the update_id from the latest ChannelMonitorUpdate which was applied to this + /// ChannelMonitor. + pub fn get_latest_update_id(&self) -> u64 { + self.inner.lock().unwrap().get_latest_update_id() + } - onchain_tx_handler, + /// Gets the funding transaction outpoint of the channel this ChannelMonitor is monitoring for. + pub fn get_funding_txo(&self) -> (OutPoint, Script) { + self.inner.lock().unwrap().get_funding_txo().clone() + } - lockdown_from_offchain: false, - holder_tx_signed: false, + /// Gets a list of txids, with their output scripts (in the order they appear in the + /// transaction), which we must learn about spends of via block_connected(). + pub fn get_outputs_to_watch(&self) -> Vec<(Txid, Vec<(u32, Script)>)> { + self.inner.lock().unwrap().get_outputs_to_watch() + .iter().map(|(txid, outputs)| (*txid, outputs.clone())).collect() + } - last_block_hash: Default::default(), - secp_ctx: Secp256k1::new(), + /// Loads the funding txo and outputs to watch into the given `chain::Filter` by repeatedly + /// calling `chain::Filter::register_output` and `chain::Filter::register_tx` until all outputs + /// have been registered. + pub fn load_outputs_to_watch(&self, filter: &F) where F::Target: chain::Filter { + let lock = self.inner.lock().unwrap(); + filter.register_tx(&lock.get_funding_txo().0.txid, &lock.get_funding_txo().1); + for (txid, outputs) in lock.get_outputs_to_watch().iter() { + for (index, script_pubkey) in outputs.iter() { + assert!(*index <= u16::max_value() as u32); + filter.register_output(WatchedOutput { + block_hash: None, + outpoint: OutPoint { txid: *txid, index: *index as u16 }, + script_pubkey: script_pubkey.clone(), + }); + } } } + /// Get the list of HTLCs who's status has been updated on chain. This should be called by + /// ChannelManager via [`chain::Watch::release_pending_monitor_events`]. + pub fn get_and_clear_pending_monitor_events(&self) -> Vec { + self.inner.lock().unwrap().get_and_clear_pending_monitor_events() + } + + /// Gets the list of pending events which were generated by previous actions, clearing the list + /// in the process. + /// + /// This is called by ChainMonitor::get_and_clear_pending_events() and is equivalent to + /// EventsProvider::get_and_clear_pending_events() except that it requires &mut self as we do + /// no internal locking in ChannelMonitors. + pub fn get_and_clear_pending_events(&self) -> Vec { + self.inner.lock().unwrap().get_and_clear_pending_events() + } + + pub(crate) fn get_min_seen_secret(&self) -> u64 { + self.inner.lock().unwrap().get_min_seen_secret() + } + + pub(crate) fn get_cur_counterparty_commitment_number(&self) -> u64 { + self.inner.lock().unwrap().get_cur_counterparty_commitment_number() + } + + pub(crate) fn get_cur_holder_commitment_number(&self) -> u64 { + self.inner.lock().unwrap().get_cur_holder_commitment_number() + } + + /// Used by ChannelManager deserialization to broadcast the latest holder state if its copy of + /// the Channel was out-of-date. 
You may use it to get a broadcastable holder toxic tx in case of + /// fallen-behind, i.e when receiving a channel_reestablish with a proof that our counterparty side knows + /// a higher revocation secret than the holder commitment number we are aware of. Broadcasting these + /// transactions are UNSAFE, as they allow counterparty side to punish you. Nevertheless you may want to + /// broadcast them if counterparty don't close channel with his higher commitment transaction after a + /// substantial amount of time (a month or even a year) to get back funds. Best may be to contact + /// out-of-band the other node operator to coordinate with him if option is available to you. + /// In any-case, choice is up to the user. + pub fn get_latest_holder_commitment_txn(&self, logger: &L) -> Vec + where L::Target: Logger { + self.inner.lock().unwrap().get_latest_holder_commitment_txn(logger) + } + + /// Unsafe test-only version of get_latest_holder_commitment_txn used by our test framework + /// to bypass HolderCommitmentTransaction state update lockdown after signature and generate + /// revoked commitment transaction. + #[cfg(any(test, feature = "unsafe_revoked_tx_signing"))] + pub fn unsafe_get_latest_holder_commitment_txn(&self, logger: &L) -> Vec + where L::Target: Logger { + self.inner.lock().unwrap().unsafe_get_latest_holder_commitment_txn(logger) + } + + /// Processes transactions in a newly connected block, which may result in any of the following: + /// - update the monitor's state against resolved HTLCs + /// - punish the counterparty in the case of seeing a revoked commitment transaction + /// - force close the channel and claim/timeout incoming/outgoing HTLCs if near expiration + /// - detect settled outputs for later spending + /// - schedule and bump any in-flight claims + /// + /// Returns any new outputs to watch from `txdata`; after called, these are also included in + /// [`get_outputs_to_watch`]. + /// + /// [`get_outputs_to_watch`]: #method.get_outputs_to_watch + pub fn block_connected( + &self, + header: &BlockHeader, + txdata: &TransactionData, + height: u32, + broadcaster: B, + fee_estimator: F, + logger: L, + ) -> Vec<(Txid, Vec<(u32, TxOut)>)> + where + B::Target: BroadcasterInterface, + F::Target: FeeEstimator, + L::Target: Logger, + { + self.inner.lock().unwrap().block_connected( + header, txdata, height, broadcaster, fee_estimator, logger) + } + + /// Determines if the disconnected block contained any transactions of interest and updates + /// appropriately. + pub fn block_disconnected( + &self, + header: &BlockHeader, + height: u32, + broadcaster: B, + fee_estimator: F, + logger: L, + ) where + B::Target: BroadcasterInterface, + F::Target: FeeEstimator, + L::Target: Logger, + { + self.inner.lock().unwrap().block_disconnected( + header, height, broadcaster, fee_estimator, logger) + } +} + +impl ChannelMonitorImpl { /// Inserts a revocation secret into this channel monitor. Prunes old preimages if neither /// needed by holder commitment transactions HTCLs nor by counterparty ones. Unless we haven't already seen /// counterparty commitment transaction's secret, they are de facto pruned (we can use revocation key). @@ -1064,11 +1335,7 @@ impl ChannelMonitor { Ok(()) } - /// Informs this monitor of the latest counterparty (ie non-broadcastable) commitment transaction. - /// The monitor watches for it to be broadcasted and then uses the HTLC information (and - /// possibly future revocation/preimage information) to claim outputs where possible. 
- /// We cache also the mapping hash:commitment number to lighten pruning of old preimages by watchtowers. - pub(crate) fn provide_latest_counterparty_commitment_tx_info(&mut self, unsigned_commitment_tx: &Transaction, htlc_outputs: Vec<(HTLCOutputInCommitment, Option>)>, commitment_number: u64, their_revocation_point: PublicKey, logger: &L) where L::Target: Logger { + pub(crate) fn provide_latest_counterparty_commitment_tx(&mut self, txid: Txid, htlc_outputs: Vec<(HTLCOutputInCommitment, Option>)>, commitment_number: u64, their_revocation_point: PublicKey, logger: &L) where L::Target: Logger { // TODO: Encrypt the htlc_outputs data with the single-hash of the commitment transaction // so that a remote monitor doesn't learn anything unless there is a malicious close. // (only maybe, sadly we cant do the same for local info, as we need to be aware of @@ -1077,12 +1344,10 @@ impl ChannelMonitor { self.counterparty_hash_commitment_number.insert(htlc.payment_hash, commitment_number); } - let new_txid = unsigned_commitment_tx.txid(); - log_trace!(logger, "Tracking new counterparty commitment transaction with txid {} at commitment number {} with {} HTLC outputs", new_txid, commitment_number, htlc_outputs.len()); - log_trace!(logger, "New potential counterparty commitment transaction: {}", encode::serialize_hex(unsigned_commitment_tx)); + log_trace!(logger, "Tracking new counterparty commitment transaction with txid {} at commitment number {} with {} HTLC outputs", txid, commitment_number, htlc_outputs.len()); self.prev_counterparty_commitment_txid = self.current_counterparty_commitment_txid.take(); - self.current_counterparty_commitment_txid = Some(new_txid); - self.counterparty_claimable_outpoints.insert(new_txid, htlc_outputs.clone()); + self.current_counterparty_commitment_txid = Some(txid); + self.counterparty_claimable_outpoints.insert(txid, htlc_outputs.clone()); self.current_counterparty_commitment_number = commitment_number; //TODO: Merge this into the other per-counterparty-transaction output storage stuff match self.their_cur_revocation_points { @@ -1109,7 +1374,7 @@ impl ChannelMonitor { htlcs.push(htlc.0); } } - self.counterparty_tx_cache.per_htlc.insert(new_txid, htlcs); + self.counterparty_tx_cache.per_htlc.insert(txid, htlcs); } /// Informs this monitor of the latest holder (ie broadcastable) commitment transaction. The @@ -1117,22 +1382,25 @@ impl ChannelMonitor { /// is important that any clones of this channel monitor (including remote clones) by kept /// up-to-date as our holder commitment transaction is updated. /// Panics if set_on_holder_tx_csv has never been called. 
- fn provide_latest_holder_commitment_tx_info(&mut self, commitment_tx: HolderCommitmentTransaction, htlc_outputs: Vec<(HTLCOutputInCommitment, Option, Option)>) -> Result<(), MonitorUpdateError> { - let txid = commitment_tx.txid(); - let sequence = commitment_tx.unsigned_tx.input[0].sequence as u64; - let locktime = commitment_tx.unsigned_tx.lock_time as u64; - let mut new_holder_commitment_tx = HolderSignedTx { - txid, - revocation_key: commitment_tx.keys.revocation_key, - a_htlc_key: commitment_tx.keys.broadcaster_htlc_key, - b_htlc_key: commitment_tx.keys.countersignatory_htlc_key, - delayed_payment_key: commitment_tx.keys.broadcaster_delayed_payment_key, - per_commitment_point: commitment_tx.keys.per_commitment_point, - feerate_per_kw: commitment_tx.feerate_per_kw, - htlc_outputs: htlc_outputs, + fn provide_latest_holder_commitment_tx(&mut self, holder_commitment_tx: HolderCommitmentTransaction, htlc_outputs: Vec<(HTLCOutputInCommitment, Option, Option)>) -> Result<(), MonitorUpdateError> { + // block for Rust 1.34 compat + let mut new_holder_commitment_tx = { + let trusted_tx = holder_commitment_tx.trust(); + let txid = trusted_tx.txid(); + let tx_keys = trusted_tx.keys(); + self.current_holder_commitment_number = trusted_tx.commitment_number(); + HolderSignedTx { + txid, + revocation_key: tx_keys.revocation_key, + a_htlc_key: tx_keys.broadcaster_htlc_key, + b_htlc_key: tx_keys.countersignatory_htlc_key, + delayed_payment_key: tx_keys.broadcaster_delayed_payment_key, + per_commitment_point: tx_keys.per_commitment_point, + feerate_per_kw: trusted_tx.feerate_per_kw(), + htlc_outputs, + } }; - self.onchain_tx_handler.provide_latest_holder_tx(commitment_tx); - self.current_holder_commitment_number = 0xffff_ffff_ffff - ((((sequence & 0xffffff) << 3*8) | (locktime as u64 & 0xffffff)) ^ self.commitment_transaction_number_obscure_factor); + self.onchain_tx_handler.provide_latest_holder_tx(holder_commitment_tx); mem::swap(&mut new_holder_commitment_tx, &mut self.current_holder_commitment_tx); self.prev_holder_signed_commitment_tx = Some(new_holder_commitment_tx); if self.holder_tx_signed { @@ -1143,8 +1411,47 @@ impl ChannelMonitor { /// Provides a payment_hash->payment_preimage mapping. Will be automatically pruned when all /// commitment_tx_infos which contain the payment hash have been revoked. - pub(crate) fn provide_payment_preimage(&mut self, payment_hash: &PaymentHash, payment_preimage: &PaymentPreimage) { + fn provide_payment_preimage(&mut self, payment_hash: &PaymentHash, payment_preimage: &PaymentPreimage, broadcaster: &B, fee_estimator: &F, logger: &L) + where B::Target: BroadcasterInterface, + F::Target: FeeEstimator, + L::Target: Logger, + { self.payment_preimages.insert(payment_hash.clone(), payment_preimage.clone()); + + // If the channel is force closed, try to claim the output from this preimage. + // First check if a counterparty commitment transaction has been broadcasted: + macro_rules! 
claim_htlcs { + ($commitment_number: expr, $txid: expr) => { + let htlc_claim_reqs = self.get_counterparty_htlc_output_claim_reqs($commitment_number, $txid, None); + self.onchain_tx_handler.update_claims_view(&Vec::new(), htlc_claim_reqs, None, broadcaster, fee_estimator, logger); + } + } + if let Some(txid) = self.current_counterparty_commitment_txid { + if let Some(commitment_number) = self.counterparty_commitment_txn_on_chain.get(&txid) { + claim_htlcs!(*commitment_number, txid); + return; + } + } + if let Some(txid) = self.prev_counterparty_commitment_txid { + if let Some(commitment_number) = self.counterparty_commitment_txn_on_chain.get(&txid) { + claim_htlcs!(*commitment_number, txid); + return; + } + } + + // Then if a holder commitment transaction has been seen on-chain, broadcast transactions + // claiming the HTLC output from each of the holder commitment transactions. + // Note that we can't just use `self.holder_tx_signed`, because that only covers the case where + // *we* sign a holder commitment transaction, not when e.g. a watchtower broadcasts one of our + // holder commitment transactions. + if self.broadcasted_holder_revokable_script.is_some() { + let (claim_reqs, _) = self.get_broadcasted_holder_claims(&self.current_holder_commitment_tx); + self.onchain_tx_handler.update_claims_view(&Vec::new(), claim_reqs, None, broadcaster, fee_estimator, logger); + if let Some(ref tx) = self.prev_holder_signed_commitment_tx { + let (claim_reqs, _) = self.get_broadcasted_holder_claims(&tx); + self.onchain_tx_handler.update_claims_view(&Vec::new(), claim_reqs, None, broadcaster, fee_estimator, logger); + } + } } pub(crate) fn broadcast_latest_holder_commitment_txn(&mut self, broadcaster: &B, logger: &L) @@ -1157,35 +1464,55 @@ impl ChannelMonitor { self.pending_monitor_events.push(MonitorEvent::CommitmentTxBroadcasted(self.funding_info.0)); } - /// Updates a ChannelMonitor on the basis of some new information provided by the Channel - /// itself. - /// - /// panics if the given update is not the next update by update_id. - pub fn update_monitor(&mut self, mut updates: ChannelMonitorUpdate, broadcaster: &B, logger: &L) -> Result<(), MonitorUpdateError> - where B::Target: BroadcasterInterface, - L::Target: Logger, + pub fn update_monitor(&mut self, updates: &ChannelMonitorUpdate, broadcaster: &B, fee_estimator: &F, logger: &L) -> Result<(), MonitorUpdateError> + where B::Target: BroadcasterInterface, + F::Target: FeeEstimator, + L::Target: Logger, { - if self.latest_update_id + 1 != updates.update_id { + // ChannelMonitor updates may be applied after force close if we receive a + // preimage for a broadcasted commitment transaction HTLC output that we'd + // like to claim on-chain. If this is the case, we no longer have guaranteed + // access to the monitor's update ID, so we use a sentinel value instead. + if updates.update_id == CLOSED_CHANNEL_UPDATE_ID { + match updates.updates[0] { + ChannelMonitorUpdateStep::PaymentPreimage { .. } => {}, + _ => panic!("Attempted to apply post-force-close ChannelMonitorUpdate that wasn't providing a payment preimage"), + } + assert_eq!(updates.updates.len(), 1); + } else if self.latest_update_id + 1 != updates.update_id { panic!("Attempted to apply ChannelMonitorUpdates out of order, check the update_id before passing an update to update_monitor!"); } - for update in updates.updates.drain(..) 
{ + for update in updates.updates.iter() { match update { ChannelMonitorUpdateStep::LatestHolderCommitmentTXInfo { commitment_tx, htlc_outputs } => { + log_trace!(logger, "Updating ChannelMonitor with latest holder commitment transaction info"); if self.lockdown_from_offchain { panic!(); } - self.provide_latest_holder_commitment_tx_info(commitment_tx, htlc_outputs)? + self.provide_latest_holder_commitment_tx(commitment_tx.clone(), htlc_outputs.clone())? + } + ChannelMonitorUpdateStep::LatestCounterpartyCommitmentTXInfo { commitment_txid, htlc_outputs, commitment_number, their_revocation_point } => { + log_trace!(logger, "Updating ChannelMonitor with latest counterparty commitment transaction info"); + self.provide_latest_counterparty_commitment_tx(*commitment_txid, htlc_outputs.clone(), *commitment_number, *their_revocation_point, logger) + }, + ChannelMonitorUpdateStep::PaymentPreimage { payment_preimage } => { + log_trace!(logger, "Updating ChannelMonitor with payment preimage"); + self.provide_payment_preimage(&PaymentHash(Sha256::hash(&payment_preimage.0[..]).into_inner()), &payment_preimage, broadcaster, fee_estimator, logger) + }, + ChannelMonitorUpdateStep::CommitmentSecret { idx, secret } => { + log_trace!(logger, "Updating ChannelMonitor with commitment secret"); + self.provide_secret(*idx, *secret)? }, - ChannelMonitorUpdateStep::LatestCounterpartyCommitmentTXInfo { unsigned_commitment_tx, htlc_outputs, commitment_number, their_revocation_point } => - self.provide_latest_counterparty_commitment_tx_info(&unsigned_commitment_tx, htlc_outputs, commitment_number, their_revocation_point, logger), - ChannelMonitorUpdateStep::PaymentPreimage { payment_preimage } => - self.provide_payment_preimage(&PaymentHash(Sha256::hash(&payment_preimage.0[..]).into_inner()), &payment_preimage), - ChannelMonitorUpdateStep::CommitmentSecret { idx, secret } => - self.provide_secret(idx, secret)?, ChannelMonitorUpdateStep::ChannelForceClosed { should_broadcast } => { + log_trace!(logger, "Updating ChannelMonitor: channel force closed, should broadcast: {}", should_broadcast); self.lockdown_from_offchain = true; - if should_broadcast { + if *should_broadcast { self.broadcast_latest_holder_commitment_txn(broadcaster, logger); - } else { + } else if !self.holder_tx_signed { log_error!(logger, "You have a toxic holder commitment transaction avaible in channel monitor, read comment in ChannelMonitor::get_latest_holder_commitment_txn to be informed of manual action to take"); + } else { + // If we generated a MonitorEvent::CommitmentTxBroadcasted, the ChannelManager + // will still give us a ChannelForceClosed event with !should_broadcast, but we + // shouldn't print the scary warning above. + log_info!(logger, "Channel off-chain state closed after we broadcasted our latest commitment transaction."); } } } @@ -1194,22 +1521,15 @@ impl ChannelMonitor { Ok(()) } - /// Gets the update_id from the latest ChannelMonitorUpdate which was applied to this - /// ChannelMonitor. pub fn get_latest_update_id(&self) -> u64 { self.latest_update_id } - /// Gets the funding transaction outpoint of the channel this ChannelMonitor is monitoring for. pub fn get_funding_txo(&self) -> &(OutPoint, Script) { &self.funding_info } - /// Gets a list of txids, with their output scripts (in the order they appear in the - /// transaction), which we must learn about spends of via block_connected(). 
- /// - /// (C-not exported) because we have no HashMap bindings - pub fn get_outputs_to_watch(&self) -> &HashMap> { + pub fn get_outputs_to_watch(&self) -> &HashMap> { // If we've detected a counterparty commitment tx on chain, we must include it in the set // of outputs to watch for spends of, otherwise we're likely to lose user funds. Because // its trivial to do, double-check that here. @@ -1219,22 +1539,12 @@ impl ChannelMonitor { &self.outputs_to_watch } - /// Get the list of HTLCs who's status has been updated on chain. This should be called by - /// ChannelManager via [`chain::Watch::release_pending_monitor_events`]. - /// - /// [`chain::Watch::release_pending_monitor_events`]: ../trait.Watch.html#tymethod.release_pending_monitor_events pub fn get_and_clear_pending_monitor_events(&mut self) -> Vec { let mut ret = Vec::new(); mem::swap(&mut ret, &mut self.pending_monitor_events); ret } - /// Gets the list of pending events which were generated by previous actions, clearing the list - /// in the process. - /// - /// This is called by ChainMonitor::get_and_clear_pending_events() and is equivalent to - /// EventsProvider::get_and_clear_pending_events() except that it requires &mut self as we do - /// no internal locking in ChannelMonitors. pub fn get_and_clear_pending_events(&mut self) -> Vec { let mut ret = Vec::new(); mem::swap(&mut ret, &mut self.pending_events); @@ -1264,7 +1574,7 @@ impl ChannelMonitor { /// HTLC-Success/HTLC-Timeout transactions. /// Return updates for HTLC pending in the channel and failed automatically by the broadcast of /// revoked counterparty commitment tx - fn check_spend_counterparty_transaction(&mut self, tx: &Transaction, height: u32, logger: &L) -> (Vec, (Txid, Vec)) where L::Target: Logger { + fn check_spend_counterparty_transaction(&mut self, tx: &Transaction, height: u32, logger: &L) -> (Vec, (Txid, Vec<(u32, TxOut)>)) where L::Target: Logger { // Most secp and related errors trying to create keys means we have no hope of constructing // a spend transaction...so we return no transactions to broadcast let mut claimable_outpoints = Vec::new(); @@ -1287,7 +1597,7 @@ impl ChannelMonitor { let secret = self.get_secret(commitment_number).unwrap(); let per_commitment_key = ignore_error!(SecretKey::from_slice(&secret)); let per_commitment_point = PublicKey::from_secret_key(&self.secp_ctx, &per_commitment_key); - let revocation_pubkey = ignore_error!(chan_utils::derive_public_revocation_key(&self.secp_ctx, &per_commitment_point, &self.keys.pubkeys().revocation_basepoint)); + let revocation_pubkey = ignore_error!(chan_utils::derive_public_revocation_key(&self.secp_ctx, &per_commitment_point, &self.holder_revocation_basepoint)); let delayed_key = ignore_error!(chan_utils::derive_public_key(&self.secp_ctx, &PublicKey::from_secret_key(&self.secp_ctx, &per_commitment_key), &self.counterparty_tx_cache.counterparty_delayed_payment_base_key)); let revokeable_redeemscript = chan_utils::get_revokeable_redeemscript(&revocation_pubkey, self.counterparty_tx_cache.on_counterparty_tx_csv, &delayed_key); @@ -1319,7 +1629,9 @@ impl ChannelMonitor { if !claimable_outpoints.is_empty() || per_commitment_option.is_some() { // ie we're confident this is actually ours // We're definitely a counterparty commitment transaction! 
log_trace!(logger, "Got broadcast of revoked counterparty commitment transaction, going to generate general spend tx with {} inputs", claimable_outpoints.len()); - watch_outputs.append(&mut tx.output.clone()); + for (idx, outp) in tx.output.iter().enumerate() { + watch_outputs.push((idx as u32, outp.clone())); + } self.counterparty_commitment_txn_on_chain.insert(commitment_txid, commitment_number); macro_rules! check_htlc_fails { @@ -1366,7 +1678,9 @@ impl ChannelMonitor { // already processed the block, resulting in the counterparty_commitment_txn_on_chain entry // not being generated by the above conditional. Thus, to be safe, we go ahead and // insert it here. - watch_outputs.append(&mut tx.output.clone()); + for (idx, outp) in tx.output.iter().enumerate() { + watch_outputs.push((idx as u32, outp.clone())); + } self.counterparty_commitment_txn_on_chain.insert(commitment_txid, commitment_number); log_trace!(logger, "Got broadcast of non-revoked counterparty commitment transaction {}", commitment_txid); @@ -1420,43 +1734,59 @@ impl ChannelMonitor { check_htlc_fails!(txid, "previous", 'prev_loop); } + let htlc_claim_reqs = self.get_counterparty_htlc_output_claim_reqs(commitment_number, commitment_txid, Some(tx)); + for req in htlc_claim_reqs { + claimable_outpoints.push(req); + } + + } + (claimable_outpoints, (commitment_txid, watch_outputs)) + } + + fn get_counterparty_htlc_output_claim_reqs(&self, commitment_number: u64, commitment_txid: Txid, tx: Option<&Transaction>) -> Vec { + let mut claims = Vec::new(); + if let Some(htlc_outputs) = self.counterparty_claimable_outpoints.get(&commitment_txid) { if let Some(revocation_points) = self.their_cur_revocation_points { let revocation_point_option = + // If the counterparty commitment tx is the latest valid state, use their latest + // per-commitment point if revocation_points.0 == commitment_number { Some(&revocation_points.1) } else if let Some(point) = revocation_points.2.as_ref() { + // If counterparty commitment tx is the state previous to the latest valid state, use + // their previous per-commitment point (non-atomicity of revocation means it's valid for + // them to temporarily have two valid commitment txns from our viewpoint) if revocation_points.0 == commitment_number + 1 { Some(point) } else { None } } else { None }; if let Some(revocation_point) = revocation_point_option { - self.counterparty_payment_script = { - // Note that the Network here is ignored as we immediately drop the address for the - // script_pubkey version - let payment_hash160 = WPubkeyHash::hash(&self.keys.pubkeys().payment_point.serialize()); - Builder::new().push_opcode(opcodes::all::OP_PUSHBYTES_0).push_slice(&payment_hash160[..]).into_script() - }; - - // Then, try to find htlc outputs - for (_, &(ref htlc, _)) in per_commitment_data.iter().enumerate() { + for (_, &(ref htlc, _)) in htlc_outputs.iter().enumerate() { if let Some(transaction_output_index) = htlc.transaction_output_index { - if transaction_output_index as usize >= tx.output.len() || - tx.output[transaction_output_index as usize].value != htlc.amount_msat / 1000 { - return (claimable_outpoints, (commitment_txid, watch_outputs)); // Corrupted per_commitment_data, fuck this user + if let Some(transaction) = tx { + if transaction_output_index as usize >= transaction.output.len() || + transaction.output[transaction_output_index as usize].value != htlc.amount_msat / 1000 { + return claims; // Corrupted per_commitment_data, fuck this user + } } - let preimage = if htlc.offered { if let Some(p) = 
self.payment_preimages.get(&htlc.payment_hash) { Some(*p) } else { None } } else { None }; + let preimage = + if htlc.offered { + if let Some(p) = self.payment_preimages.get(&htlc.payment_hash) { + Some(*p) + } else { None } + } else { None }; let aggregable = if !htlc.offered { false } else { true }; if preimage.is_some() || !htlc.offered { let witness_data = InputMaterial::CounterpartyHTLC { per_commitment_point: *revocation_point, counterparty_delayed_payment_base_key: self.counterparty_tx_cache.counterparty_delayed_payment_base_key, counterparty_htlc_base_key: self.counterparty_tx_cache.counterparty_htlc_base_key, preimage, htlc: htlc.clone() }; - claimable_outpoints.push(ClaimRequest { absolute_timelock: htlc.cltv_expiry, aggregable, outpoint: BitcoinOutPoint { txid: commitment_txid, vout: transaction_output_index }, witness_data }); + claims.push(ClaimRequest { absolute_timelock: htlc.cltv_expiry, aggregable, outpoint: BitcoinOutPoint { txid: commitment_txid, vout: transaction_output_index }, witness_data }); } } } } } } - (claimable_outpoints, (commitment_txid, watch_outputs)) + claims } /// Attempts to claim a counterparty HTLC-Success/HTLC-Timeout's outputs using the revocation key - fn check_spend_counterparty_htlc(&mut self, tx: &Transaction, commitment_number: u64, height: u32, logger: &L) -> (Vec, Option<(Txid, Vec)>) where L::Target: Logger { + fn check_spend_counterparty_htlc(&mut self, tx: &Transaction, commitment_number: u64, height: u32, logger: &L) -> (Vec, Option<(Txid, Vec<(u32, TxOut)>)>) where L::Target: Logger { let htlc_txid = tx.txid(); if tx.input.len() != 1 || tx.output.len() != 1 || tx.input[0].witness.len() != 5 { return (Vec::new(), None) @@ -1478,12 +1808,15 @@ impl ChannelMonitor { log_trace!(logger, "Counterparty HTLC broadcast {}:{}", htlc_txid, 0); let witness_data = InputMaterial::Revoked { per_commitment_point, counterparty_delayed_payment_base_key: self.counterparty_tx_cache.counterparty_delayed_payment_base_key, counterparty_htlc_base_key: self.counterparty_tx_cache.counterparty_htlc_base_key, per_commitment_key, input_descriptor: InputDescriptors::RevokedOutput, amount: tx.output[0].value, htlc: None, on_counterparty_tx_csv: self.counterparty_tx_cache.on_counterparty_tx_csv }; let claimable_outpoints = vec!(ClaimRequest { absolute_timelock: height + self.counterparty_tx_cache.on_counterparty_tx_csv as u32, aggregable: true, outpoint: BitcoinOutPoint { txid: htlc_txid, vout: 0}, witness_data }); - (claimable_outpoints, Some((htlc_txid, tx.output.clone()))) + let outputs = vec![(0, tx.output[0].clone())]; + (claimable_outpoints, Some((htlc_txid, outputs))) } - fn broadcast_by_holder_state(&self, commitment_tx: &Transaction, holder_tx: &HolderSignedTx) -> (Vec, Vec, Option<(Script, PublicKey, PublicKey)>) { + // Returns (1) `ClaimRequest`s that can be given to the OnChainTxHandler, so that the handler can + // broadcast transactions claiming holder HTLC commitment outputs and (2) a holder revokable + // script so we can detect whether a holder transaction has been seen on-chain. 
+ fn get_broadcasted_holder_claims(&self, holder_tx: &HolderSignedTx) -> (Vec, Option<(Script, PublicKey, PublicKey)>) { let mut claim_requests = Vec::with_capacity(holder_tx.htlc_outputs.len()); - let mut watch_outputs = Vec::with_capacity(holder_tx.htlc_outputs.len()); let redeemscript = chan_utils::get_revokeable_redeemscript(&holder_tx.revocation_key, self.on_holder_tx_csv, &holder_tx.delayed_payment_key); let broadcasted_holder_revokable_script = Some((redeemscript.to_v0_p2wsh(), holder_tx.per_commitment_point.clone(), holder_tx.revocation_key.clone())); @@ -1502,17 +1835,27 @@ impl ChannelMonitor { } else { None }, amount: htlc.amount_msat, }}); - watch_outputs.push(commitment_tx.output[transaction_output_index as usize].clone()); } } - (claim_requests, watch_outputs, broadcasted_holder_revokable_script) + (claim_requests, broadcasted_holder_revokable_script) + } + + // Returns holder HTLC outputs to watch and react to in case of spending. + fn get_broadcasted_holder_watch_outputs(&self, holder_tx: &HolderSignedTx, commitment_tx: &Transaction) -> Vec<(u32, TxOut)> { + let mut watch_outputs = Vec::with_capacity(holder_tx.htlc_outputs.len()); + for &(ref htlc, _, _) in holder_tx.htlc_outputs.iter() { + if let Some(transaction_output_index) = htlc.transaction_output_index { + watch_outputs.push((transaction_output_index, commitment_tx.output[transaction_output_index as usize].clone())); + } + } + watch_outputs } /// Attempts to claim any claimable HTLCs in a commitment transaction which was not (yet) /// revoked using data in holder_claimable_outpoints. /// Should not be used if check_spend_revoked_transaction succeeds. - fn check_spend_holder_transaction(&mut self, tx: &Transaction, height: u32, logger: &L) -> (Vec, (Txid, Vec)) where L::Target: Logger { + fn check_spend_holder_transaction(&mut self, tx: &Transaction, height: u32, logger: &L) -> (Vec, (Txid, Vec<(u32, TxOut)>)) where L::Target: Logger { let commitment_txid = tx.txid(); let mut claim_requests = Vec::new(); let mut watch_outputs = Vec::new(); @@ -1541,10 +1884,10 @@ impl ChannelMonitor { } macro_rules! 
append_onchain_update { - ($updates: expr) => { + ($updates: expr, $to_watch: expr) => { claim_requests = $updates.0; - watch_outputs.append(&mut $updates.1); - self.broadcasted_holder_revokable_script = $updates.2; + self.broadcasted_holder_revokable_script = $updates.1; + watch_outputs.append(&mut $to_watch); } } @@ -1554,14 +1897,16 @@ impl ChannelMonitor { if self.current_holder_commitment_tx.txid == commitment_txid { is_holder_tx = true; log_trace!(logger, "Got latest holder commitment tx broadcast, searching for available HTLCs to claim"); - let mut res = self.broadcast_by_holder_state(tx, &self.current_holder_commitment_tx); - append_onchain_update!(res); + let res = self.get_broadcasted_holder_claims(&self.current_holder_commitment_tx); + let mut to_watch = self.get_broadcasted_holder_watch_outputs(&self.current_holder_commitment_tx, tx); + append_onchain_update!(res, to_watch); } else if let &Some(ref holder_tx) = &self.prev_holder_signed_commitment_tx { if holder_tx.txid == commitment_txid { is_holder_tx = true; log_trace!(logger, "Got previous holder commitment tx broadcast, searching for available HTLCs to claim"); - let mut res = self.broadcast_by_holder_state(tx, holder_tx); - append_onchain_update!(res); + let res = self.get_broadcasted_holder_claims(holder_tx); + let mut to_watch = self.get_broadcasted_holder_watch_outputs(holder_tx, tx); + append_onchain_update!(res, to_watch); } } @@ -1587,82 +1932,55 @@ impl ChannelMonitor { (claim_requests, (commitment_txid, watch_outputs)) } - /// Used by ChannelManager deserialization to broadcast the latest holder state if its copy of - /// the Channel was out-of-date. You may use it to get a broadcastable holder toxic tx in case of - /// fallen-behind, i.e when receiving a channel_reestablish with a proof that our counterparty side knows - /// a higher revocation secret than the holder commitment number we are aware of. Broadcasting these - /// transactions are UNSAFE, as they allow counterparty side to punish you. Nevertheless you may want to - /// broadcast them if counterparty don't close channel with his higher commitment transaction after a - /// substantial amount of time (a month or even a year) to get back funds. Best may be to contact - /// out-of-band the other node operator to coordinate with him if option is available to you. - /// In any-case, choice is up to the user. 
pub fn get_latest_holder_commitment_txn(&mut self, logger: &L) -> Vec where L::Target: Logger { log_trace!(logger, "Getting signed latest holder commitment transaction!"); self.holder_tx_signed = true; - if let Some(commitment_tx) = self.onchain_tx_handler.get_fully_signed_holder_tx(&self.funding_redeemscript) { - let txid = commitment_tx.txid(); - let mut res = vec![commitment_tx]; - for htlc in self.current_holder_commitment_tx.htlc_outputs.iter() { - if let Some(vout) = htlc.0.transaction_output_index { - let preimage = if !htlc.0.offered { - if let Some(preimage) = self.payment_preimages.get(&htlc.0.payment_hash) { Some(preimage.clone()) } else { - // We can't build an HTLC-Success transaction without the preimage - continue; - } - } else { None }; - if let Some(htlc_tx) = self.onchain_tx_handler.get_fully_signed_htlc_tx( - &::bitcoin::OutPoint { txid, vout }, &preimage) { - res.push(htlc_tx); + let commitment_tx = self.onchain_tx_handler.get_fully_signed_holder_tx(&self.funding_redeemscript); + let txid = commitment_tx.txid(); + let mut res = vec![commitment_tx]; + for htlc in self.current_holder_commitment_tx.htlc_outputs.iter() { + if let Some(vout) = htlc.0.transaction_output_index { + let preimage = if !htlc.0.offered { + if let Some(preimage) = self.payment_preimages.get(&htlc.0.payment_hash) { Some(preimage.clone()) } else { + // We can't build an HTLC-Success transaction without the preimage + continue; } + } else { None }; + if let Some(htlc_tx) = self.onchain_tx_handler.get_fully_signed_htlc_tx( + &::bitcoin::OutPoint { txid, vout }, &preimage) { + res.push(htlc_tx); } } - // We throw away the generated waiting_first_conf data as we aren't (yet) confirmed and we don't actually know what the caller wants to do. - // The data will be re-generated and tracked in check_spend_holder_transaction if we get a confirmation. - return res } - Vec::new() + // We throw away the generated waiting_first_conf data as we aren't (yet) confirmed and we don't actually know what the caller wants to do. + // The data will be re-generated and tracked in check_spend_holder_transaction if we get a confirmation. + return res; } - /// Unsafe test-only version of get_latest_holder_commitment_txn used by our test framework - /// to bypass HolderCommitmentTransaction state update lockdown after signature and generate - /// revoked commitment transaction. 
 	#[cfg(any(test,feature = "unsafe_revoked_tx_signing"))]
-	pub fn unsafe_get_latest_holder_commitment_txn<L: Deref>(&mut self, logger: &L) -> Vec<Transaction> where L::Target: Logger {
+	fn unsafe_get_latest_holder_commitment_txn<L: Deref>(&mut self, logger: &L) -> Vec<Transaction> where L::Target: Logger {
 		log_trace!(logger, "Getting signed copy of latest holder commitment transaction!");
-		if let Some(commitment_tx) = self.onchain_tx_handler.get_fully_signed_copy_holder_tx(&self.funding_redeemscript) {
-			let txid = commitment_tx.txid();
-			let mut res = vec![commitment_tx];
-			for htlc in self.current_holder_commitment_tx.htlc_outputs.iter() {
-				if let Some(vout) = htlc.0.transaction_output_index {
-					let preimage = if !htlc.0.offered {
-							if let Some(preimage) = self.payment_preimages.get(&htlc.0.payment_hash) { Some(preimage.clone()) } else {
-								// We can't build an HTLC-Success transaction without the preimage
-								continue;
-							}
-						} else { None };
-					if let Some(htlc_tx) = self.onchain_tx_handler.unsafe_get_fully_signed_htlc_tx(
-						&::bitcoin::OutPoint { txid, vout }, &preimage) {
-						res.push(htlc_tx);
+		let commitment_tx = self.onchain_tx_handler.get_fully_signed_copy_holder_tx(&self.funding_redeemscript);
+		let txid = commitment_tx.txid();
+		let mut res = vec![commitment_tx];
+		for htlc in self.current_holder_commitment_tx.htlc_outputs.iter() {
+			if let Some(vout) = htlc.0.transaction_output_index {
+				let preimage = if !htlc.0.offered {
+						if let Some(preimage) = self.payment_preimages.get(&htlc.0.payment_hash) { Some(preimage.clone()) } else {
+							// We can't build an HTLC-Success transaction without the preimage
+							continue;
+						}
+					} else { None };
+				if let Some(htlc_tx) = self.onchain_tx_handler.unsafe_get_fully_signed_htlc_tx(
+					&::bitcoin::OutPoint { txid, vout }, &preimage) {
+					res.push(htlc_tx);
 					}
 				}
-			return res
 			}
-		Vec::new()
+		return res
 	}
 
-	/// Processes transactions in a newly connected block, which may result in any of the following:
-	/// - update the monitor's state against resolved HTLCs
-	/// - punish the counterparty in the case of seeing a revoked commitment transaction
-	/// - force close the channel and claim/timeout incoming/outgoing HTLCs if near expiration
-	/// - detect settled outputs for later spending
-	/// - schedule and bump any in-flight claims
-	///
-	/// Returns any new outputs to watch from `txdata`; after called, these are also included in
-	/// [`get_outputs_to_watch`].
- /// - /// [`get_outputs_to_watch`]: #method.get_outputs_to_watch - pub fn block_connected(&mut self, header: &BlockHeader, txdata: &TransactionData, height: u32, broadcaster: B, fee_estimator: F, logger: L)-> Vec<(Txid, Vec)> + pub fn block_connected(&mut self, header: &BlockHeader, txdata: &TransactionData, height: u32, broadcaster: B, fee_estimator: F, logger: L)-> Vec<(Txid, Vec<(u32, TxOut)>)> where B::Target: BroadcasterInterface, F::Target: FeeEstimator, L::Target: Logger, @@ -1727,14 +2045,14 @@ impl ChannelMonitor { } if should_broadcast { self.pending_monitor_events.push(MonitorEvent::CommitmentTxBroadcasted(self.funding_info.0)); - if let Some(commitment_tx) = self.onchain_tx_handler.get_fully_signed_holder_tx(&self.funding_redeemscript) { - self.holder_tx_signed = true; - let (mut new_outpoints, new_outputs, _) = self.broadcast_by_holder_state(&commitment_tx, &self.current_holder_commitment_tx); - if !new_outputs.is_empty() { - watch_outputs.push((self.current_holder_commitment_tx.txid.clone(), new_outputs)); - } - claimable_outpoints.append(&mut new_outpoints); + let commitment_tx = self.onchain_tx_handler.get_fully_signed_holder_tx(&self.funding_redeemscript); + self.holder_tx_signed = true; + let (mut new_outpoints, _) = self.get_broadcasted_holder_claims(&self.current_holder_commitment_tx); + let new_outputs = self.get_broadcasted_holder_watch_outputs(&self.current_holder_commitment_tx, &commitment_tx); + if !new_outputs.is_empty() { + watch_outputs.push((self.current_holder_commitment_tx.txid.clone(), new_outputs)); } + claimable_outpoints.append(&mut new_outpoints); } if let Some(events) = self.onchain_events_waiting_threshold_conf.remove(&height) { for ev in events { @@ -1757,27 +2075,38 @@ impl ChannelMonitor { } } - self.onchain_tx_handler.block_connected(&txn_matched, claimable_outpoints, height, &*broadcaster, &*fee_estimator, &*logger); + self.onchain_tx_handler.update_claims_view(&txn_matched, claimable_outpoints, Some(height), &&*broadcaster, &&*fee_estimator, &&*logger); self.last_block_hash = block_hash; // Determine new outputs to watch by comparing against previously known outputs to watch, // updating the latter in the process. watch_outputs.retain(|&(ref txid, ref txouts)| { - let output_scripts = txouts.iter().map(|o| o.script_pubkey.clone()).collect(); - self.outputs_to_watch.insert(txid.clone(), output_scripts).is_none() + let idx_and_scripts = txouts.iter().map(|o| (o.0, o.1.script_pubkey.clone())).collect(); + self.outputs_to_watch.insert(txid.clone(), idx_and_scripts).is_none() }); + #[cfg(test)] + { + // If we see a transaction for which we registered outputs previously, + // make sure the registered scriptpubkey at the expected index match + // the actual transaction output one. We failed this case before #653. + for tx in &txn_matched { + if let Some(outputs) = self.get_outputs_to_watch().get(&tx.txid()) { + for idx_and_script in outputs.iter() { + assert!((idx_and_script.0 as usize) < tx.output.len()); + assert_eq!(tx.output[idx_and_script.0 as usize].script_pubkey, idx_and_script.1); + } + } + } + } watch_outputs } - /// Determines if the disconnected block contained any transactions of interest and updates - /// appropriately. 
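To illustrate the new bookkeeping above, `outputs_to_watch` is now keyed by txid with explicit (output index, script) pairs rather than bare scripts, so a spend can be matched against the recorded index directly, as `spends_watched_output` below does. A minimal standalone sketch of that matching, with the hypothetical name `spends_watched` and plain `bitcoin` crate types:

	use bitcoin::blockdata::script::Script;
	use bitcoin::blockdata::transaction::Transaction;
	use bitcoin::hash_types::Txid;
	use std::collections::HashMap;

	// Returns true if any input of `tx` spends a registered output, comparing the
	// input's previous_output (txid, vout) against the stored (index, script) pairs.
	fn spends_watched(outputs_to_watch: &HashMap<Txid, Vec<(u32, Script)>>, tx: &Transaction) -> bool {
		for input in tx.input.iter() {
			if let Some(outputs) = outputs_to_watch.get(&input.previous_output.txid) {
				for (idx, _script_pubkey) in outputs.iter() {
					if *idx == input.previous_output.vout {
						return true;
					}
				}
			}
		}
		false
	}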
pub fn block_disconnected(&mut self, header: &BlockHeader, height: u32, broadcaster: B, fee_estimator: F, logger: L) where B::Target: BroadcasterInterface, F::Target: FeeEstimator, L::Target: Logger, { - let block_hash = header.block_hash(); - log_trace!(logger, "Block {} at height {} disconnected", block_hash, height); + log_trace!(logger, "Block {} at height {} disconnected", header.block_hash(), height); if let Some(_) = self.onchain_events_waiting_threshold_conf.remove(&(height + ANTI_REORG_DELAY - 1)) { //We may discard: @@ -1787,7 +2116,7 @@ impl ChannelMonitor { self.onchain_tx_handler.block_disconnected(height, broadcaster, fee_estimator, logger); - self.last_block_hash = block_hash; + self.last_block_hash = header.prev_blockhash; } /// Filters a block's `txdata` for transactions spending watched outputs or for any child @@ -1813,8 +2142,19 @@ impl ChannelMonitor { fn spends_watched_output(&self, tx: &Transaction) -> bool { for input in tx.input.iter() { if let Some(outputs) = self.get_outputs_to_watch().get(&input.previous_output.txid) { - for (idx, _script_pubkey) in outputs.iter().enumerate() { - if idx == input.previous_output.vout as usize { + for (idx, _script_pubkey) in outputs.iter() { + if *idx == input.previous_output.vout { + #[cfg(test)] + { + // If the expected script is a known type, check that the witness + // appears to be spending the correct type (ie that the match would + // actually succeed in BIP 158/159-style filters). + if _script_pubkey.is_v0_p2wsh() { + assert_eq!(&bitcoin::Address::p2wsh(&Script::from(input.witness.last().unwrap().clone()), bitcoin::Network::Bitcoin).script_pubkey(), _script_pubkey); + } else if _script_pubkey.is_v0_p2wpkh() { + assert_eq!(&bitcoin::Address::p2wpkh(&bitcoin::PublicKey::from_slice(&input.witness.last().unwrap()).unwrap(), bitcoin::Network::Bitcoin).unwrap().script_pubkey(), _script_pubkey); + } else { panic!(); } + } return true; } } @@ -2053,22 +2393,24 @@ impl ChannelMonitor { break; } else if let Some(ref broadcasted_holder_revokable_script) = self.broadcasted_holder_revokable_script { if broadcasted_holder_revokable_script.0 == outp.script_pubkey { - spendable_output = Some(SpendableOutputDescriptor::DynamicOutputP2WSH { + spendable_output = Some(SpendableOutputDescriptor::DelayedPaymentOutput(DelayedPaymentOutputDescriptor { outpoint: OutPoint { txid: tx.txid(), index: i as u16 }, per_commitment_point: broadcasted_holder_revokable_script.1, to_self_delay: self.on_holder_tx_csv, output: outp.clone(), - key_derivation_params: self.keys.key_derivation_params(), revocation_pubkey: broadcasted_holder_revokable_script.2.clone(), - }); + channel_keys_id: self.channel_keys_id, + channel_value_satoshis: self.channel_value_satoshis, + })); break; } } else if self.counterparty_payment_script == outp.script_pubkey { - spendable_output = Some(SpendableOutputDescriptor::StaticOutputCounterpartyPayment { + spendable_output = Some(SpendableOutputDescriptor::StaticPaymentOutput(StaticPaymentOutputDescriptor { outpoint: OutPoint { txid: tx.txid(), index: i as u16 }, output: outp.clone(), - key_derivation_params: self.keys.key_derivation_params(), - }); + channel_keys_id: self.channel_keys_id, + channel_value_satoshis: self.channel_value_satoshis, + })); break; } else if outp.script_pubkey == self.shutdown_script { spendable_output = Some(SpendableOutputDescriptor::StaticOutput { @@ -2092,10 +2434,74 @@ impl ChannelMonitor { } } +/// `Persist` defines behavior for persisting channel monitors: this could mean +/// writing once to disk, 
and/or uploading to one or more backup services.
+///
+/// Note that for every new monitor, you **must** persist the new `ChannelMonitor`
+/// to disk/backups. And, on every update, you **must** persist either the
+/// `ChannelMonitorUpdate` or the updated monitor itself. Otherwise, there is risk
+/// of situations such as revoking a transaction, then crashing before this
+/// revocation can be persisted, then unintentionally broadcasting a revoked
+/// transaction and losing money. This is a risk because previous channel states
+/// are toxic, so it's important that whatever channel state is persisted is
+/// kept up-to-date.
+pub trait Persist<ChannelSigner: Sign>: Send + Sync {
+	/// Persist a new channel's data. The data can be stored any way you want, but
+	/// the identifier provided by Rust-Lightning is the channel's outpoint (and
+	/// it is up to you to maintain a correct mapping between the outpoint and the
+	/// stored channel data). Note that you **must** persist every new monitor to
+	/// disk. See the `Persist` trait documentation for more details.
+	///
+	/// See [`ChannelMonitor::write`] for writing out a `ChannelMonitor`,
+	/// and [`ChannelMonitorUpdateErr`] for requirements when returning errors.
+	fn persist_new_channel(&self, id: OutPoint, data: &ChannelMonitor<ChannelSigner>) -> Result<(), ChannelMonitorUpdateErr>;
+
+	/// Update one channel's data. The provided `ChannelMonitor` has already
+	/// applied the given update.
+	///
+	/// Note that on every update, you **must** persist either the
+	/// `ChannelMonitorUpdate` or the updated monitor itself to disk/backups. See
+	/// the `Persist` trait documentation for more details.
+	///
+	/// If an implementer chooses to persist the updates only, they need to make
+	/// sure that all the updates are applied to the `ChannelMonitors` *before*
+	/// the set of channel monitors is given to the `ChannelManager`
+	/// deserialization routine. See [`ChannelMonitor::update_monitor`] for
+	/// applying a monitor update to a monitor. If full `ChannelMonitors` are
+	/// persisted, then there is no need to persist individual updates.
+	///
+	/// Note that there could be a performance tradeoff between persisting complete
+	/// channel monitors on every update vs. persisting only updates and applying
+	/// them in batches. The size of each monitor grows `O(number of state updates)`
+	/// whereas updates are small and `O(1)`.
+	///
+	/// See [`ChannelMonitor::write`] for writing out a `ChannelMonitor`,
+	/// [`ChannelMonitorUpdate::write`] for writing out an update, and
+	/// [`ChannelMonitorUpdateErr`] for requirements when returning errors.
+ fn update_persisted_channel(&self, id: OutPoint, update: &ChannelMonitorUpdate, data: &ChannelMonitor) -> Result<(), ChannelMonitorUpdateErr>; +} + +impl chain::Listen for (ChannelMonitor, T, F, L) +where + T::Target: BroadcasterInterface, + F::Target: FeeEstimator, + L::Target: Logger, +{ + fn block_connected(&self, block: &Block, height: u32) { + let txdata: Vec<_> = block.txdata.iter().enumerate().collect(); + self.0.block_connected(&block.header, &txdata, height, &*self.1, &*self.2, &*self.3); + } + + fn block_disconnected(&self, header: &BlockHeader, height: u32) { + self.0.block_disconnected(header, height, &*self.1, &*self.2, &*self.3); + } +} + const MAX_ALLOC_SIZE: usize = 64*1024; -impl Readable for (BlockHash, ChannelMonitor) { - fn read(reader: &mut R) -> Result { +impl<'a, Signer: Sign, K: KeysInterface> ReadableArgs<&'a K> + for (BlockHash, ChannelMonitor) { + fn read(reader: &mut R, keys_manager: &'a K) -> Result { macro_rules! unwrap_obj { ($key: expr) => { match $key { @@ -2128,7 +2534,8 @@ impl Readable for (BlockHash, ChannelMonitor let counterparty_payment_script = Readable::read(reader)?; let shutdown_script = Readable::read(reader)?; - let keys = Readable::read(reader)?; + let channel_keys_id = Readable::read(reader)?; + let holder_revocation_basepoint = Readable::read(reader)?; // Technically this can fail and serialize fail a round-trip, but only for serialization of // barely-init'd ChannelMonitors that we can't do anything with. let outpoint = OutPoint { @@ -2316,74 +2723,81 @@ impl Readable for (BlockHash, ChannelMonitor } let outputs_to_watch_len: u64 = Readable::read(reader)?; - let mut outputs_to_watch = HashMap::with_capacity(cmp::min(outputs_to_watch_len as usize, MAX_ALLOC_SIZE / (mem::size_of::() + mem::size_of::>()))); + let mut outputs_to_watch = HashMap::with_capacity(cmp::min(outputs_to_watch_len as usize, MAX_ALLOC_SIZE / (mem::size_of::() + mem::size_of::() + mem::size_of::>()))); for _ in 0..outputs_to_watch_len { let txid = Readable::read(reader)?; let outputs_len: u64 = Readable::read(reader)?; - let mut outputs = Vec::with_capacity(cmp::min(outputs_len as usize, MAX_ALLOC_SIZE / mem::size_of::