X-Git-Url: http://git.bitcoin.ninja/index.cgi?a=blobdiff_plain;f=lightning%2Fsrc%2Fchain%2Fchannelmonitor.rs;h=bb8d8c32de483215aee393202b7f9a74e4e29cb6;hb=4e82003261e11ece5d5fb3b13f686c9f7a0d2aaf;hp=31e4565ecab766851cc47699e30f74a512018bf6;hpb=df778b605a28580905cb5ca63b3ec8bbe99afc2f;p=rust-lightning

diff --git a/lightning/src/chain/channelmonitor.rs b/lightning/src/chain/channelmonitor.rs
index 31e4565e..bb8d8c32 100644
--- a/lightning/src/chain/channelmonitor.rs
+++ b/lightning/src/chain/channelmonitor.rs
@@ -57,21 +57,36 @@ use std::io::Error;
 
 /// An update generated by the underlying Channel itself which contains some new information the
 /// ChannelMonitor should be made aware of.
-#[cfg_attr(test, derive(PartialEq))]
+#[cfg_attr(any(test, feature = "_test_utils"), derive(PartialEq))]
 #[derive(Clone)]
 #[must_use]
 pub struct ChannelMonitorUpdate {
 	pub(crate) updates: Vec<ChannelMonitorUpdateStep>,
 	/// The sequence number of this update. Updates *must* be replayed in-order according to this
 	/// sequence number (and updates may panic if they are not). The update_id values are strictly
-	/// increasing and increase by one for each new update.
+	/// increasing and increase by one for each new update, with one exception specified below.
 	///
 	/// This sequence number is also used to track up to which points updates which returned
 	/// ChannelMonitorUpdateErr::TemporaryFailure have been applied to all copies of a given
 	/// ChannelMonitor when ChannelManager::channel_monitor_updated is called.
+	///
+	/// The only instance where update_id values are not strictly increasing is the case where we
+	/// allow post-force-close updates with a special update ID of [`CLOSED_CHANNEL_UPDATE_ID`]. See
+	/// its docs for more details.
+	///
+	/// [`CLOSED_CHANNEL_UPDATE_ID`]: constant.CLOSED_CHANNEL_UPDATE_ID.html
 	pub update_id: u64,
 }
 
+/// If:
+/// (1) a channel has been force closed and
+/// (2) we receive a preimage from a forward link that allows us to spend an HTLC output on
+///     this channel's (the backward link's) broadcasted commitment transaction
+/// then we allow the `ChannelManager` to send a `ChannelMonitorUpdate` with this update ID,
+/// with the update providing said payment preimage. No other update types are allowed after
+/// force-close.
+pub const CLOSED_CHANNEL_UPDATE_ID: u64 = std::u64::MAX;
+
 impl Writeable for ChannelMonitorUpdate {
 	fn write<W: Writer>(&self, w: &mut W) -> Result<(), ::std::io::Error> {
 		self.update_id.write(w)?;
@@ -95,7 +110,7 @@ impl Readable for ChannelMonitorUpdate {
 }
 
 /// An error enum representing a failure to persist a channel monitor update.
-#[derive(Clone)]
+#[derive(Clone, Debug)]
 pub enum ChannelMonitorUpdateErr {
 	/// Used to indicate a temporary failure (eg connection to a watchtower or remote backup of
 	/// our state failed, but is expected to succeed at some point in the future).
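A minimal sketch (not part of this patch) of the ordering contract documented above: a consumer replaying persisted updates only accepts the next sequential update_id, with the new sentinel as the one value allowed to repeat. The helper name and `latest_applied` argument are hypothetical.

fn is_applicable_update(latest_applied: u64, update: &ChannelMonitorUpdate) -> bool {
	// Post-force-close preimage updates reuse CLOSED_CHANNEL_UPDATE_ID and may repeat;
	// every other update must advance the sequence by exactly one.
	update.update_id == CLOSED_CHANNEL_UPDATE_ID || update.update_id == latest_applied + 1
}
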
@@ -159,7 +174,7 @@ pub enum ChannelMonitorUpdateErr {
 /// inconsistent with the ChannelMonitor being called. eg for ChannelMonitor::update_monitor this
 /// means you tried to update a monitor for a different channel or the ChannelMonitorUpdate was
 /// corrupted.
-/// Contains a human-readable error message.
+/// Contains a developer-readable error message.
 #[derive(Debug)]
 pub struct MonitorUpdateError(pub &'static str);
 
@@ -470,7 +485,7 @@ enum OnchainEvent {
 const SERIALIZATION_VERSION: u8 = 1;
 const MIN_SERIALIZATION_VERSION: u8 = 1;
 
-#[cfg_attr(test, derive(PartialEq))]
+#[cfg_attr(any(test, feature = "_test_utils"), derive(PartialEq))]
 #[derive(Clone)]
 pub(crate) enum ChannelMonitorUpdateStep {
 	LatestHolderCommitmentTXInfo {
@@ -666,7 +681,7 @@ pub struct ChannelMonitor<ChanSigner: ChannelKeys> {
 	// interface knows about the TXOs that we want to be notified of spends of. We could probably
 	// be smart and derive them from the above storage fields, but its much simpler and more
 	// Obviously Correct (tm) if we just keep track of them explicitly.
-	outputs_to_watch: HashMap<Txid, Vec<Script>>,
+	outputs_to_watch: HashMap<Txid, Vec<(u32, Script)>>,
 
 	#[cfg(test)]
 	pub onchain_tx_handler: OnchainTxHandler<ChanSigner>,
@@ -696,7 +711,7 @@ pub struct ChannelMonitor<ChanSigner: ChannelKeys> {
 	secp_ctx: Secp256k1<secp256k1::All>, //TODO: dedup this a bit...
 }
 
-#[cfg(any(test, feature = "fuzztarget"))]
+#[cfg(any(test, feature = "fuzztarget", feature = "_test_utils"))]
 /// Used only in testing and fuzztarget to check serialization roundtrips don't change the
 /// underlying object
 impl<ChanSigner: ChannelKeys> PartialEq for ChannelMonitor<ChanSigner> {
@@ -746,7 +761,7 @@ impl<ChanSigner: ChannelKeys> ChannelMonitor<ChanSigner> {
 	/// the "reorg path" (ie disconnecting blocks until you find a common ancestor from both the
 	/// returned block hash and the the current chain and then reconnecting blocks to get to the
 	/// best chain) upon deserializing the object!
-	pub fn write_for_disk<W: Writer>(&self, writer: &mut W) -> Result<(), Error> {
+	pub fn serialize_for_disk<W: Writer>(&self, writer: &mut W) -> Result<(), Error> {
 		//TODO: We still write out all the serialization here manually instead of using the fancy
 		//serialization framework we have, we should migrate things over to it.
 		writer.write_all(&[SERIALIZATION_VERSION; 1])?;
@@ -914,10 +929,11 @@ impl<ChanSigner: ChannelKeys> ChannelMonitor<ChanSigner> {
 		}
 
 		(self.outputs_to_watch.len() as u64).write(writer)?;
-		for (txid, output_scripts) in self.outputs_to_watch.iter() {
+		for (txid, idx_scripts) in self.outputs_to_watch.iter() {
 			txid.write(writer)?;
-			(output_scripts.len() as u64).write(writer)?;
-			for script in output_scripts.iter() {
+			(idx_scripts.len() as u64).write(writer)?;
+			for (idx, script) in idx_scripts.iter() {
+				idx.write(writer)?;
 				script.write(writer)?;
 			}
 		}
@@ -963,7 +979,7 @@ impl<ChanSigner: ChannelKeys> ChannelMonitor<ChanSigner> {
 		onchain_tx_handler.provide_latest_holder_tx(initial_holder_commitment_tx);
 
 		let mut outputs_to_watch = HashMap::new();
-		outputs_to_watch.insert(funding_info.0.txid, vec![funding_info.1.clone()]);
+		outputs_to_watch.insert(funding_info.0.txid, vec![(funding_info.0.index as u32, funding_info.1.clone())]);
 
 		ChannelMonitor {
 			latest_update_id: 0,
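Illustrative note (not part of the patch): `outputs_to_watch` now pairs each watched script with its output index, and the funding output is registered at construction with its real index, so a caller can hand the chain-watching layer concrete outpoints rather than bare scripts. The sketch below assumes hypothetical `MyFilter`/`register_output` and `MyKeys` names; `get_outputs_to_watch` is the accessor changed later in this patch.

fn register_watched_outputs(filter: &mut MyFilter, monitor: &ChannelMonitor<MyKeys>) {
	for (txid, idx_scripts) in monitor.get_outputs_to_watch().iter() {
		for &(idx, ref script) in idx_scripts.iter() {
			// Each entry now identifies one concrete outpoint of the funding or a
			// commitment/HTLC transaction to monitor for spends.
			filter.register_output(BitcoinOutPoint { txid: *txid, vout: idx }, script.clone());
		}
	}
}
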
@@ -1143,8 +1159,47 @@ impl<ChanSigner: ChannelKeys> ChannelMonitor<ChanSigner> {
 
 	/// Provides a payment_hash->payment_preimage mapping. Will be automatically pruned when all
 	/// commitment_tx_infos which contain the payment hash have been revoked.
-	pub(crate) fn provide_payment_preimage(&mut self, payment_hash: &PaymentHash, payment_preimage: &PaymentPreimage) {
+	pub(crate) fn provide_payment_preimage<B: Deref, F: Deref, L: Deref>(&mut self, payment_hash: &PaymentHash, payment_preimage: &PaymentPreimage, broadcaster: &B, fee_estimator: &F, logger: &L)
+	where B::Target: BroadcasterInterface,
+		F::Target: FeeEstimator,
+		L::Target: Logger,
+	{
 		self.payment_preimages.insert(payment_hash.clone(), payment_preimage.clone());
+
+		// If the channel is force closed, try to claim the output from this preimage.
+		// First check if a counterparty commitment transaction has been broadcasted:
+		macro_rules! claim_htlcs {
+			($commitment_number: expr, $txid: expr) => {
+				let htlc_claim_reqs = self.get_counterparty_htlc_output_claim_reqs($commitment_number, $txid, None);
+				self.onchain_tx_handler.update_claims_view(&Vec::new(), htlc_claim_reqs, None, broadcaster, fee_estimator, logger);
+			}
+		}
+		if let Some(txid) = self.current_counterparty_commitment_txid {
+			if let Some(commitment_number) = self.counterparty_commitment_txn_on_chain.get(&txid) {
+				claim_htlcs!(*commitment_number, txid);
+				return;
+			}
+		}
+		if let Some(txid) = self.prev_counterparty_commitment_txid {
+			if let Some(commitment_number) = self.counterparty_commitment_txn_on_chain.get(&txid) {
+				claim_htlcs!(*commitment_number, txid);
+				return;
+			}
+		}
+
+		// Then if a holder commitment transaction has been seen on-chain, broadcast transactions
+		// claiming the HTLC output from each of the holder commitment transactions.
+		// Note that we can't just use `self.holder_tx_signed`, because that only covers the case where
+		// *we* sign a holder commitment transaction, not when e.g. a watchtower broadcasts one of our
+		// holder commitment transactions.
+		if self.broadcasted_holder_revokable_script.is_some() {
+			let (claim_reqs, _) = self.get_broadcasted_holder_claims(&self.current_holder_commitment_tx);
+			self.onchain_tx_handler.update_claims_view(&Vec::new(), claim_reqs, None, broadcaster, fee_estimator, logger);
+			if let Some(ref tx) = self.prev_holder_signed_commitment_tx {
+				let (claim_reqs, _) = self.get_broadcasted_holder_claims(&tx);
+				self.onchain_tx_handler.update_claims_view(&Vec::new(), claim_reqs, None, broadcaster, fee_estimator, logger);
+			}
+		}
 	}
 
 	pub(crate) fn broadcast_latest_holder_commitment_txn<B: Deref, L: Deref>(&mut self, broadcaster: &B, logger: &L)
@@ -1161,28 +1216,47 @@ impl<ChanSigner: ChannelKeys> ChannelMonitor<ChanSigner> {
 	/// itself.
 	///
 	/// panics if the given update is not the next update by update_id.
-	pub fn update_monitor<B: Deref, L: Deref>(&mut self, mut updates: ChannelMonitorUpdate, broadcaster: &B, logger: &L) -> Result<(), MonitorUpdateError>
-		where B::Target: BroadcasterInterface,
-			L::Target: Logger,
+	pub fn update_monitor<B: Deref, F: Deref, L: Deref>(&mut self, updates: &ChannelMonitorUpdate, broadcaster: &B, fee_estimator: &F, logger: &L) -> Result<(), MonitorUpdateError>
+	where B::Target: BroadcasterInterface,
+		F::Target: FeeEstimator,
+		L::Target: Logger,
 	{
-		if self.latest_update_id + 1 != updates.update_id {
+		// ChannelMonitor updates may be applied after force close if we receive a
+		// preimage for a broadcasted commitment transaction HTLC output that we'd
+		// like to claim on-chain. If this is the case, we no longer have guaranteed
+		// access to the monitor's update ID, so we use a sentinel value instead.
+		if updates.update_id == CLOSED_CHANNEL_UPDATE_ID {
+			match updates.updates[0] {
+				ChannelMonitorUpdateStep::PaymentPreimage { .. } => {},
+				_ => panic!("Attempted to apply post-force-close ChannelMonitorUpdate that wasn't providing a payment preimage"),
+			}
+			assert_eq!(updates.updates.len(), 1);
+		} else if self.latest_update_id + 1 != updates.update_id {
 			panic!("Attempted to apply ChannelMonitorUpdates out of order, check the update_id before passing an update to update_monitor!");
 		}
-		for update in updates.updates.drain(..) {
+		for update in updates.updates.iter() {
 			match update {
 				ChannelMonitorUpdateStep::LatestHolderCommitmentTXInfo { commitment_tx, htlc_outputs } => {
+					log_trace!(logger, "Updating ChannelMonitor with latest holder commitment transaction info");
 					if self.lockdown_from_offchain { panic!(); }
-					self.provide_latest_holder_commitment_tx_info(commitment_tx, htlc_outputs)?
+					self.provide_latest_holder_commitment_tx_info(commitment_tx.clone(), htlc_outputs.clone())?
+				},
+				ChannelMonitorUpdateStep::LatestCounterpartyCommitmentTXInfo { unsigned_commitment_tx, htlc_outputs, commitment_number, their_revocation_point } => {
+					log_trace!(logger, "Updating ChannelMonitor with latest counterparty commitment transaction info");
+					self.provide_latest_counterparty_commitment_tx_info(&unsigned_commitment_tx, htlc_outputs.clone(), *commitment_number, *their_revocation_point, logger)
+				},
+				ChannelMonitorUpdateStep::PaymentPreimage { payment_preimage } => {
+					log_trace!(logger, "Updating ChannelMonitor with payment preimage");
+					self.provide_payment_preimage(&PaymentHash(Sha256::hash(&payment_preimage.0[..]).into_inner()), &payment_preimage, broadcaster, fee_estimator, logger)
+				},
+				ChannelMonitorUpdateStep::CommitmentSecret { idx, secret } => {
+					log_trace!(logger, "Updating ChannelMonitor with commitment secret");
+					self.provide_secret(*idx, *secret)?
 				},
-				ChannelMonitorUpdateStep::LatestCounterpartyCommitmentTXInfo { unsigned_commitment_tx, htlc_outputs, commitment_number, their_revocation_point } =>
-					self.provide_latest_counterparty_commitment_tx_info(&unsigned_commitment_tx, htlc_outputs, commitment_number, their_revocation_point, logger),
-				ChannelMonitorUpdateStep::PaymentPreimage { payment_preimage } =>
-					self.provide_payment_preimage(&PaymentHash(Sha256::hash(&payment_preimage.0[..]).into_inner()), &payment_preimage),
-				ChannelMonitorUpdateStep::CommitmentSecret { idx, secret } =>
-					self.provide_secret(idx, secret)?,
 				ChannelMonitorUpdateStep::ChannelForceClosed { should_broadcast } => {
+					log_trace!(logger, "Updating ChannelMonitor: channel force closed, should broadcast: {}", should_broadcast);
 					self.lockdown_from_offchain = true;
-					if should_broadcast {
+					if *should_broadcast {
 						self.broadcast_latest_holder_commitment_txn(broadcaster, logger);
 					} else {
 						log_error!(logger, "You have a toxic holder commitment transaction avaible in channel monitor, read comment in ChannelMonitor::get_latest_holder_commitment_txn to be informed of manual action to take");
@@ -1209,7 +1283,7 @@ impl<ChanSigner: ChannelKeys> ChannelMonitor<ChanSigner> {
 	/// transaction), which we must learn about spends of via block_connected().
 	///
 	/// (C-not exported) because we have no HashMap bindings
-	pub fn get_outputs_to_watch(&self) -> &HashMap<Txid, Vec<Script>> {
+	pub fn get_outputs_to_watch(&self) -> &HashMap<Txid, Vec<(u32, Script)>> {
 		// If we've detected a counterparty commitment tx on chain, we must include it in the set
 		// of outputs to watch for spends of, otherwise we're likely to lose user funds. Because
 		// its trivial to do, double-check that here.
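Illustrative note (not part of the patch): the only update permitted after force-close is a single `PaymentPreimage` step carrying the sentinel ID, as enforced above. An in-crate caller holding a preimage learned from the forward link might build and apply it roughly as follows; `monitor`, `payment_preimage`, `broadcaster`, `fee_estimator`, and `logger` are assumed to be in scope.

let update = ChannelMonitorUpdate {
	update_id: CLOSED_CHANNEL_UPDATE_ID,
	updates: vec![ChannelMonitorUpdateStep::PaymentPreimage { payment_preimage }],
};
// Anything other than a single PaymentPreimage step with the sentinel ID would hit
// the panic/assert at the top of update_monitor above.
monitor.update_monitor(&update, &broadcaster, &fee_estimator, &logger)?;
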
@@ -1264,7 +1338,7 @@ impl<ChanSigner: ChannelKeys> ChannelMonitor<ChanSigner> {
 	/// HTLC-Success/HTLC-Timeout transactions.
 	/// Return updates for HTLC pending in the channel and failed automatically by the broadcast of
 	/// revoked counterparty commitment tx
-	fn check_spend_counterparty_transaction<L: Deref>(&mut self, tx: &Transaction, height: u32, logger: &L) -> (Vec<ClaimRequest>, (Txid, Vec<TxOut>)) where L::Target: Logger {
+	fn check_spend_counterparty_transaction<L: Deref>(&mut self, tx: &Transaction, height: u32, logger: &L) -> (Vec<ClaimRequest>, (Txid, Vec<(u32, TxOut)>)) where L::Target: Logger {
 		// Most secp and related errors trying to create keys means we have no hope of constructing
 		// a spend transaction...so we return no transactions to broadcast
 		let mut claimable_outpoints = Vec::new();
@@ -1319,7 +1393,9 @@ impl<ChanSigner: ChannelKeys> ChannelMonitor<ChanSigner> {
 		if !claimable_outpoints.is_empty() || per_commitment_option.is_some() { // ie we're confident this is actually ours
 			// We're definitely a counterparty commitment transaction!
 			log_trace!(logger, "Got broadcast of revoked counterparty commitment transaction, going to generate general spend tx with {} inputs", claimable_outpoints.len());
-			watch_outputs.append(&mut tx.output.clone());
+			for (idx, outp) in tx.output.iter().enumerate() {
+				watch_outputs.push((idx as u32, outp.clone()));
+			}
 			self.counterparty_commitment_txn_on_chain.insert(commitment_txid, commitment_number);
 
 			macro_rules! check_htlc_fails {
@@ -1366,7 +1442,9 @@ impl<ChanSigner: ChannelKeys> ChannelMonitor<ChanSigner> {
 			// already processed the block, resulting in the counterparty_commitment_txn_on_chain entry
 			// not being generated by the above conditional. Thus, to be safe, we go ahead and
 			// insert it here.
-			watch_outputs.append(&mut tx.output.clone());
+			for (idx, outp) in tx.output.iter().enumerate() {
+				watch_outputs.push((idx as u32, outp.clone()));
+			}
 			self.counterparty_commitment_txn_on_chain.insert(commitment_txid, commitment_number);
 
 			log_trace!(logger, "Got broadcast of non-revoked counterparty commitment transaction {}", commitment_txid);
@@ -1420,43 +1498,59 @@ impl<ChanSigner: ChannelKeys> ChannelMonitor<ChanSigner> {
 				check_htlc_fails!(txid, "previous", 'prev_loop);
 			}
+			let htlc_claim_reqs = self.get_counterparty_htlc_output_claim_reqs(commitment_number, commitment_txid, Some(tx));
+			for req in htlc_claim_reqs {
+				claimable_outpoints.push(req);
+			}
+
+		}
 		(claimable_outpoints, (commitment_txid, watch_outputs))
 	}
+
+	fn get_counterparty_htlc_output_claim_reqs(&self, commitment_number: u64, commitment_txid: Txid, tx: Option<&Transaction>) -> Vec<ClaimRequest> {
+		let mut claims = Vec::new();
+		if let Some(htlc_outputs) = self.counterparty_claimable_outpoints.get(&commitment_txid) {
 			if let Some(revocation_points) = self.their_cur_revocation_points {
 				let revocation_point_option =
+					// If the counterparty commitment tx is the latest valid state, use their latest
+					// per-commitment point
 					if revocation_points.0 == commitment_number { Some(&revocation_points.1) }
 					else if let Some(point) = revocation_points.2.as_ref() {
+						// If counterparty commitment tx is the state previous to the latest valid state, use
+						// their previous per-commitment point (non-atomicity of revocation means it's valid for
+						// them to temporarily have two valid commitment txns from our viewpoint)
 						if revocation_points.0 == commitment_number + 1 { Some(point) } else { None }
 					} else { None };
 				if let Some(revocation_point) = revocation_point_option {
-					self.counterparty_payment_script = {
-						// Note that the Network here is ignored as we immediately drop the address for the
-						// script_pubkey version
-						let payment_hash160 = WPubkeyHash::hash(&self.keys.pubkeys().payment_point.serialize());
-						Builder::new().push_opcode(opcodes::all::OP_PUSHBYTES_0).push_slice(&payment_hash160[..]).into_script()
-					};
-
-					// Then, try to find htlc outputs
-					for (_, &(ref htlc, _)) in per_commitment_data.iter().enumerate() {
+					for (_, &(ref htlc, _)) in htlc_outputs.iter().enumerate() {
 						if let Some(transaction_output_index) = htlc.transaction_output_index {
-							if transaction_output_index as usize >= tx.output.len() ||
-									tx.output[transaction_output_index as usize].value != htlc.amount_msat / 1000 {
-								return (claimable_outpoints, (commitment_txid, watch_outputs)); // Corrupted per_commitment_data, fuck this user
+							if let Some(transaction) = tx {
+								if transaction_output_index as usize >= transaction.output.len() ||
+										transaction.output[transaction_output_index as usize].value != htlc.amount_msat / 1000 {
+									return claims; // Corrupted per_commitment_data, fuck this user
+								}
 							}
-							let preimage = if htlc.offered { if let Some(p) = self.payment_preimages.get(&htlc.payment_hash) { Some(*p) } else { None } } else { None };
+							let preimage =
+								if htlc.offered {
+									if let Some(p) = self.payment_preimages.get(&htlc.payment_hash) {
+										Some(*p)
+									} else { None }
+								} else { None };
 							let aggregable = if !htlc.offered { false } else { true };
 							if preimage.is_some() || !htlc.offered {
 								let witness_data = InputMaterial::CounterpartyHTLC { per_commitment_point: *revocation_point, counterparty_delayed_payment_base_key: self.counterparty_tx_cache.counterparty_delayed_payment_base_key, counterparty_htlc_base_key: self.counterparty_tx_cache.counterparty_htlc_base_key, preimage, htlc: htlc.clone() };
-								claimable_outpoints.push(ClaimRequest { absolute_timelock: htlc.cltv_expiry, aggregable, outpoint: BitcoinOutPoint { txid: commitment_txid, vout: transaction_output_index }, witness_data });
+								claims.push(ClaimRequest { absolute_timelock: htlc.cltv_expiry, aggregable, outpoint: BitcoinOutPoint { txid: commitment_txid, vout: transaction_output_index }, witness_data });
 							}
 						}
 					}
 				}
 			}
 		}
-		(claimable_outpoints, (commitment_txid, watch_outputs))
+		claims
 	}
 
 	/// Attempts to claim a counterparty HTLC-Success/HTLC-Timeout's outputs using the revocation key
-	fn check_spend_counterparty_htlc<L: Deref>(&mut self, tx: &Transaction, commitment_number: u64, height: u32, logger: &L) -> (Vec<ClaimRequest>, Option<(Txid, Vec<TxOut>)>) where L::Target: Logger {
+	fn check_spend_counterparty_htlc<L: Deref>(&mut self, tx: &Transaction, commitment_number: u64, height: u32, logger: &L) -> (Vec<ClaimRequest>, Option<(Txid, Vec<(u32, TxOut)>)>) where L::Target: Logger {
 		let htlc_txid = tx.txid();
 		if tx.input.len() != 1 || tx.output.len() != 1 || tx.input[0].witness.len() != 5 {
 			return (Vec::new(), None)
@@ -1478,12 +1572,15 @@ impl<ChanSigner: ChannelKeys> ChannelMonitor<ChanSigner> {
 		log_trace!(logger, "Counterparty HTLC broadcast {}:{}", htlc_txid, 0);
 		let witness_data = InputMaterial::Revoked { per_commitment_point, counterparty_delayed_payment_base_key: self.counterparty_tx_cache.counterparty_delayed_payment_base_key, counterparty_htlc_base_key: self.counterparty_tx_cache.counterparty_htlc_base_key, per_commitment_key, input_descriptor: InputDescriptors::RevokedOutput, amount: tx.output[0].value, htlc: None, on_counterparty_tx_csv: self.counterparty_tx_cache.on_counterparty_tx_csv };
 		let claimable_outpoints = vec!(ClaimRequest { absolute_timelock: height + self.counterparty_tx_cache.on_counterparty_tx_csv as u32, aggregable: true, outpoint: BitcoinOutPoint { txid: htlc_txid, vout: 0}, witness_data });
-		(claimable_outpoints, Some((htlc_txid, tx.output.clone())))
+		let outputs = vec![(0, tx.output[0].clone())];
+		(claimable_outpoints, Some((htlc_txid, outputs)))
 	}
 
-	fn broadcast_by_holder_state(&self, commitment_tx: &Transaction, holder_tx: &HolderSignedTx) -> (Vec<ClaimRequest>, Vec<TxOut>, Option<(Script, PublicKey, PublicKey)>) {
+	// Returns (1) `ClaimRequest`s that can be given to the OnChainTxHandler, so that the handler can
+	// broadcast transactions claiming holder HTLC commitment outputs and (2) a holder revokable
+	// script so we can detect whether a holder transaction has been seen on-chain.
+	fn get_broadcasted_holder_claims(&self, holder_tx: &HolderSignedTx) -> (Vec<ClaimRequest>, Option<(Script, PublicKey, PublicKey)>) {
 		let mut claim_requests = Vec::with_capacity(holder_tx.htlc_outputs.len());
-		let mut watch_outputs = Vec::with_capacity(holder_tx.htlc_outputs.len());
 
 		let redeemscript = chan_utils::get_revokeable_redeemscript(&holder_tx.revocation_key, self.on_holder_tx_csv, &holder_tx.delayed_payment_key);
 		let broadcasted_holder_revokable_script = Some((redeemscript.to_v0_p2wsh(), holder_tx.per_commitment_point.clone(), holder_tx.revocation_key.clone()));
@@ -1502,17 +1599,27 @@ impl<ChanSigner: ChannelKeys> ChannelMonitor<ChanSigner> {
 					} else { None },
 					amount: htlc.amount_msat,
 				}});
-				watch_outputs.push(commitment_tx.output[transaction_output_index as usize].clone());
 			}
 		}
 
-		(claim_requests, watch_outputs, broadcasted_holder_revokable_script)
+		(claim_requests, broadcasted_holder_revokable_script)
+	}
+
+	// Returns holder HTLC outputs to watch and react to in case of spending.
+	fn get_broadcasted_holder_watch_outputs(&self, holder_tx: &HolderSignedTx, commitment_tx: &Transaction) -> Vec<(u32, TxOut)> {
+		let mut watch_outputs = Vec::with_capacity(holder_tx.htlc_outputs.len());
+		for &(ref htlc, _, _) in holder_tx.htlc_outputs.iter() {
+			if let Some(transaction_output_index) = htlc.transaction_output_index {
+				watch_outputs.push((transaction_output_index, commitment_tx.output[transaction_output_index as usize].clone()));
+			}
+		}
+		watch_outputs
 	}
 
 	/// Attempts to claim any claimable HTLCs in a commitment transaction which was not (yet)
 	/// revoked using data in holder_claimable_outpoints.
 	/// Should not be used if check_spend_revoked_transaction succeeds.
-	fn check_spend_holder_transaction<L: Deref>(&mut self, tx: &Transaction, height: u32, logger: &L) -> (Vec<ClaimRequest>, (Txid, Vec<TxOut>)) where L::Target: Logger {
+	fn check_spend_holder_transaction<L: Deref>(&mut self, tx: &Transaction, height: u32, logger: &L) -> (Vec<ClaimRequest>, (Txid, Vec<(u32, TxOut)>)) where L::Target: Logger {
 		let commitment_txid = tx.txid();
 		let mut claim_requests = Vec::new();
 		let mut watch_outputs = Vec::new();
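Illustrative note (not part of the patch): splitting the old `broadcast_by_holder_state` lets claim requests be produced from the cached `HolderSignedTx` alone, which is why `provide_payment_preimage` above can call `get_broadcasted_holder_claims` without having any broadcast transaction in hand. A monitor-internal caller that does have the transaction would combine the two helpers roughly like this (`commitment_tx` assumed to be the broadcast holder commitment transaction):

let (claim_reqs, revokable_script) = self.get_broadcasted_holder_claims(&self.current_holder_commitment_tx);
self.broadcasted_holder_revokable_script = revokable_script;
// Only once the actual transaction is known can the concrete (index, TxOut) pairs
// be produced for the chain watcher.
let to_watch = self.get_broadcasted_holder_watch_outputs(&self.current_holder_commitment_tx, &commitment_tx);
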
@@ -1541,10 +1648,10 @@ impl<ChanSigner: ChannelKeys> ChannelMonitor<ChanSigner> {
 		}
 
 		macro_rules! append_onchain_update {
-			($updates: expr) => {
+			($updates: expr, $to_watch: expr) => {
 				claim_requests = $updates.0;
-				watch_outputs.append(&mut $updates.1);
-				self.broadcasted_holder_revokable_script = $updates.2;
+				self.broadcasted_holder_revokable_script = $updates.1;
+				watch_outputs.append(&mut $to_watch);
 			}
 		}
 
@@ -1554,14 +1661,16 @@ impl<ChanSigner: ChannelKeys> ChannelMonitor<ChanSigner> {
 		if self.current_holder_commitment_tx.txid == commitment_txid {
 			is_holder_tx = true;
 			log_trace!(logger, "Got latest holder commitment tx broadcast, searching for available HTLCs to claim");
-			let mut res = self.broadcast_by_holder_state(tx, &self.current_holder_commitment_tx);
-			append_onchain_update!(res);
+			let res = self.get_broadcasted_holder_claims(&self.current_holder_commitment_tx);
+			let mut to_watch = self.get_broadcasted_holder_watch_outputs(&self.current_holder_commitment_tx, tx);
+			append_onchain_update!(res, to_watch);
 		} else if let &Some(ref holder_tx) = &self.prev_holder_signed_commitment_tx {
 			if holder_tx.txid == commitment_txid {
 				is_holder_tx = true;
 				log_trace!(logger, "Got previous holder commitment tx broadcast, searching for available HTLCs to claim");
-				let mut res = self.broadcast_by_holder_state(tx, holder_tx);
-				append_onchain_update!(res);
+				let res = self.get_broadcasted_holder_claims(holder_tx);
+				let mut to_watch = self.get_broadcasted_holder_watch_outputs(holder_tx, tx);
+				append_onchain_update!(res, to_watch);
 			}
 		}
 
@@ -1662,7 +1771,7 @@ impl<ChanSigner: ChannelKeys> ChannelMonitor<ChanSigner> {
 	/// [`get_outputs_to_watch`].
 	///
 	/// [`get_outputs_to_watch`]: #method.get_outputs_to_watch
-	pub fn block_connected<B: Deref, F: Deref, L: Deref>(&mut self, header: &BlockHeader, txdata: &TransactionData, height: u32, broadcaster: B, fee_estimator: F, logger: L)-> Vec<(Txid, Vec<TxOut>)>
+	pub fn block_connected<B: Deref, F: Deref, L: Deref>(&mut self, header: &BlockHeader, txdata: &TransactionData, height: u32, broadcaster: B, fee_estimator: F, logger: L)-> Vec<(Txid, Vec<(u32, TxOut)>)>
 		where B::Target: BroadcasterInterface,
 		      F::Target: FeeEstimator,
 		      L::Target: Logger,
@@ -1729,7 +1838,8 @@ impl<ChanSigner: ChannelKeys> ChannelMonitor<ChanSigner> {
 			self.pending_monitor_events.push(MonitorEvent::CommitmentTxBroadcasted(self.funding_info.0));
 			if let Some(commitment_tx) = self.onchain_tx_handler.get_fully_signed_holder_tx(&self.funding_redeemscript) {
 				self.holder_tx_signed = true;
-				let (mut new_outpoints, new_outputs, _) = self.broadcast_by_holder_state(&commitment_tx, &self.current_holder_commitment_tx);
+				let (mut new_outpoints, _) = self.get_broadcasted_holder_claims(&self.current_holder_commitment_tx);
+				let new_outputs = self.get_broadcasted_holder_watch_outputs(&self.current_holder_commitment_tx, &commitment_tx);
 				if !new_outputs.is_empty() {
 					watch_outputs.push((self.current_holder_commitment_tx.txid.clone(), new_outputs));
 				}
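Illustrative note (not part of the patch): with the return type change above, callers of `block_connected` receive new outputs to watch keyed by output index, so they can be forwarded to a filter without rescanning the transaction. A hypothetical caller-side sketch, with `chain_source.register_output` standing in for whatever filtering interface the application uses:

let new_outputs = monitor.block_connected(&header, &txdata, height, &broadcaster, &fee_estimator, &logger);
for (txid, outputs) in new_outputs {
	for (idx, output) in outputs {
		// Each entry pins down one outpoint of a commitment or HTLC transaction to watch.
		chain_source.register_output(BitcoinOutPoint { txid, vout: idx }, &output.script_pubkey);
	}
}
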
@@ -1757,15 +1867,29 @@ impl<ChanSigner: ChannelKeys> ChannelMonitor<ChanSigner> {
 			}
 		}
 
-		self.onchain_tx_handler.block_connected(&txn_matched, claimable_outpoints, height, &*broadcaster, &*fee_estimator, &*logger);
+		self.onchain_tx_handler.update_claims_view(&txn_matched, claimable_outpoints, Some(height), &&*broadcaster, &&*fee_estimator, &&*logger);
 		self.last_block_hash = block_hash;
 
 		// Determine new outputs to watch by comparing against previously known outputs to watch,
 		// updating the latter in the process.
 		watch_outputs.retain(|&(ref txid, ref txouts)| {
-			let output_scripts = txouts.iter().map(|o| o.script_pubkey.clone()).collect();
-			self.outputs_to_watch.insert(txid.clone(), output_scripts).is_none()
+			let idx_and_scripts = txouts.iter().map(|o| (o.0, o.1.script_pubkey.clone())).collect();
+			self.outputs_to_watch.insert(txid.clone(), idx_and_scripts).is_none()
 		});
+		#[cfg(test)]
+		{
+			// If we see a transaction for which we registered outputs previously,
+			// make sure the registered scriptpubkey at the expected index match
+			// the actual transaction output one. We failed this case before #653.
+			for tx in &txn_matched {
+				if let Some(outputs) = self.get_outputs_to_watch().get(&tx.txid()) {
+					for idx_and_script in outputs.iter() {
+						assert!((idx_and_script.0 as usize) < tx.output.len());
+						assert_eq!(tx.output[idx_and_script.0 as usize].script_pubkey, idx_and_script.1);
+					}
+				}
+			}
+		}
 		watch_outputs
 	}
 
@@ -1813,8 +1937,19 @@ impl<ChanSigner: ChannelKeys> ChannelMonitor<ChanSigner> {
 	fn spends_watched_output(&self, tx: &Transaction) -> bool {
 		for input in tx.input.iter() {
 			if let Some(outputs) = self.get_outputs_to_watch().get(&input.previous_output.txid) {
-				for (idx, _script_pubkey) in outputs.iter().enumerate() {
-					if idx == input.previous_output.vout as usize {
+				for (idx, _script_pubkey) in outputs.iter() {
+					if *idx == input.previous_output.vout {
+						#[cfg(test)]
+						{
+							// If the expected script is a known type, check that the witness
+							// appears to be spending the correct type (ie that the match would
+							// actually succeed in BIP 158/159-style filters).
+							if _script_pubkey.is_v0_p2wsh() {
+								assert_eq!(&bitcoin::Address::p2wsh(&Script::from(input.witness.last().unwrap().clone()), bitcoin::Network::Bitcoin).script_pubkey(), _script_pubkey);
+							} else if _script_pubkey.is_v0_p2wpkh() {
+								assert_eq!(&bitcoin::Address::p2wpkh(&bitcoin::PublicKey::from_slice(&input.witness.last().unwrap()).unwrap(), bitcoin::Network::Bitcoin).unwrap().script_pubkey(), _script_pubkey);
+							} else { panic!(); }
+						}
 						return true;
 					}
 				}
@@ -2092,6 +2227,61 @@ impl<ChanSigner: ChannelKeys> ChannelMonitor<ChanSigner> {
 	}
 }
 
+/// `Persist` defines behavior for persisting channel monitors: this could mean
+/// writing once to disk, and/or uploading to one or more backup services.
+///
+/// Note that for every new monitor, you **must** persist the new `ChannelMonitor`
+/// to disk/backups. And, on every update, you **must** persist either the
+/// `ChannelMonitorUpdate` or the updated monitor itself. Otherwise, there is risk
+/// of situations such as revoking a transaction, then crashing before this
+/// revocation can be persisted, then unintentionally broadcasting a revoked
+/// transaction and losing money. This is a risk because previous channel states
+/// are toxic, so it's important that whatever channel state is persisted is
+/// kept up-to-date.
+pub trait Persist<Keys: ChannelKeys>: Send + Sync {
+	/// Persist a new channel's data. The data can be stored any way you want, but
+	/// the identifier provided by Rust-Lightning is the channel's outpoint (and
+	/// it is up to you to maintain a correct mapping between the outpoint and the
+	/// stored channel data). Note that you **must** persist every new monitor to
+	/// disk. See the `Persist` trait documentation for more details.
+	///
+	/// See [`ChannelMonitor::serialize_for_disk`] for writing out a `ChannelMonitor`,
+	/// and [`ChannelMonitorUpdateErr`] for requirements when returning errors.
+	///
+	/// [`ChannelMonitor::serialize_for_disk`]: struct.ChannelMonitor.html#method.serialize_for_disk
+	/// [`ChannelMonitorUpdateErr`]: enum.ChannelMonitorUpdateErr.html
+	fn persist_new_channel(&self, id: OutPoint, data: &ChannelMonitor<Keys>) -> Result<(), ChannelMonitorUpdateErr>;
+
+	/// Update one channel's data. The provided `ChannelMonitor` has already
+	/// applied the given update.
+	///
+	/// Note that on every update, you **must** persist either the
+	/// `ChannelMonitorUpdate` or the updated monitor itself to disk/backups. See
+	/// the `Persist` trait documentation for more details.
+	///
+	/// If an implementer chooses to persist the updates only, they need to make
+	/// sure that all the updates are applied to the `ChannelMonitors` *before*
+	/// the set of channel monitors is given to the `ChannelManager`
+	/// deserialization routine. See [`ChannelMonitor::update_monitor`] for
+	/// applying a monitor update to a monitor. If full `ChannelMonitors` are
+	/// persisted, then there is no need to persist individual updates.
+	///
+	/// Note that there could be a performance tradeoff between persisting complete
+	/// channel monitors on every update vs. persisting only updates and applying
+	/// them in batches. The size of each monitor grows `O(number of state updates)`
+	/// whereas updates are small and `O(1)`.
+	///
+	/// See [`ChannelMonitor::serialize_for_disk`] for writing out a `ChannelMonitor`,
+	/// [`ChannelMonitorUpdate::write`] for writing out an update, and
+	/// [`ChannelMonitorUpdateErr`] for requirements when returning errors.
+	///
+	/// [`ChannelMonitor::update_monitor`]: struct.ChannelMonitor.html#impl-1
+	/// [`ChannelMonitor::serialize_for_disk`]: struct.ChannelMonitor.html#method.serialize_for_disk
+	/// [`ChannelMonitorUpdate::write`]: struct.ChannelMonitorUpdate.html#method.write
+	/// [`ChannelMonitorUpdateErr`]: enum.ChannelMonitorUpdateErr.html
+	fn update_persisted_channel(&self, id: OutPoint, update: &ChannelMonitorUpdate, data: &ChannelMonitor<Keys>) -> Result<(), ChannelMonitorUpdateErr>;
+}
+
 const MAX_ALLOC_SIZE: usize = 64*1024;
 
 impl<ChanSigner: ChannelKeys + Readable> Readable for (BlockHash, ChannelMonitor<ChanSigner>) {
@@ -2316,13 +2506,13 @@ impl<ChanSigner: ChannelKeys + Readable> Readable for (BlockHash, ChannelMonitor<ChanSigner>)
 		}
 
 		let outputs_to_watch_len: u64 = Readable::read(reader)?;
-		let mut outputs_to_watch = HashMap::with_capacity(cmp::min(outputs_to_watch_len as usize, MAX_ALLOC_SIZE / (mem::size_of::<Txid>() + mem::size_of::<Vec<Script>>())));
+		let mut outputs_to_watch = HashMap::with_capacity(cmp::min(outputs_to_watch_len as usize, MAX_ALLOC_SIZE / (mem::size_of::<Txid>() + mem::size_of::<u32>() + mem::size_of::<Vec<Script>>())));
 		for _ in 0..outputs_to_watch_len {
 			let txid = Readable::read(reader)?;
 			let outputs_len: u64 = Readable::read(reader)?;
-			let mut outputs = Vec::with_capacity(cmp::min(outputs_len as usize, MAX_ALLOC_SIZE / mem::size_of::