X-Git-Url: http://git.bitcoin.ninja/index.cgi?a=blobdiff_plain;f=lightning%2Fsrc%2Fchain%2Fchannelmonitor.rs;h=bee2bfd012e0ff3efc628b4954883804f5d6a0c7;hb=21804de70c3ff37970ead98e5e72c4fccd14b1fb;hp=79d589c6521b0d0268d8a897d6950cdd97db0a96;hpb=a257906743d528c32c862b053b652d4b728aa990;p=rust-lightning

diff --git a/lightning/src/chain/channelmonitor.rs b/lightning/src/chain/channelmonitor.rs
index 79d589c6..bee2bfd0 100644
--- a/lightning/src/chain/channelmonitor.rs
+++ b/lightning/src/chain/channelmonitor.rs
@@ -332,14 +332,15 @@ impl Readable for CounterpartyCommitmentParameters {
 	}
 }
 
-/// An entry for an [`OnchainEvent`], stating the block height when the event was observed and the
-/// transaction causing it.
+/// An entry for an [`OnchainEvent`], stating the block height and hash when the event was
+/// observed, as well as the transaction causing it.
 ///
 /// Used to determine when the on-chain event can be considered safe from a chain reorganization.
 #[derive(PartialEq, Eq)]
 struct OnchainEventEntry {
 	txid: Txid,
 	height: u32,
+	block_hash: Option<BlockHash>, // Added as optional, will be filled in for any entry generated on 0.0.113 or after
 	event: OnchainEvent,
 	transaction: Option<Transaction>, // Added as optional, but always filled in, in LDK 0.0.110
 }
@@ -440,6 +441,7 @@ impl Writeable for OnchainEventEntry {
 			(0, self.txid, required),
 			(1, self.transaction, option),
 			(2, self.height, required),
+			(3, self.block_hash, option),
 			(4, self.event, required),
 		});
 		Ok(())
@@ -450,16 +452,18 @@ impl MaybeReadable for OnchainEventEntry {
 	fn read<R: io::Read>(reader: &mut R) -> Result<Option<Self>, DecodeError> {
 		let mut txid = Txid::all_zeros();
 		let mut transaction = None;
+		let mut block_hash = None;
 		let mut height = 0;
 		let mut event = None;
 		read_tlv_fields!(reader, {
 			(0, txid, required),
 			(1, transaction, option),
 			(2, height, required),
+			(3, block_hash, option),
 			(4, event, ignorable),
 		});
 		if let Some(ev) = event {
-			Ok(Some(Self { txid, transaction, height, event: ev }))
+			Ok(Some(Self { txid, transaction, height, block_hash, event: ev }))
 		} else {
 			Ok(None)
 		}
@@ -822,6 +826,13 @@ pub(crate) struct ChannelMonitorImpl<Signer: Sign> {
 	/// spending CSV for revocable outputs).
 	htlcs_resolved_on_chain: Vec<IrrevocablyResolvedHTLC>,
 
+	/// The set of `SpendableOutput` events which we have already passed upstream to be claimed.
+	/// These are tracked explicitly to ensure that we don't generate the same events redundantly
+	/// if users duplicatively confirm old transactions. Specifically for transactions claiming a
+	/// revoked remote outpoint we otherwise have no tracking at all once they've reached
+	/// [`ANTI_REORG_DELAY`], so we have to track them here.
+	spendable_txids_confirmed: Vec<Txid>,
+
 	// We simply modify best_block in Channel's block_connected so that serialization is
 	// consistent but hopefully the users' copy handles block_connected in a consistent way.
 	// (we do *not*, however, update them in update_monitor to ensure any local user copies keep
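Note on the serialization changes above (and the `(13, ...)` entry in the next hunk): both new fields are written under odd TLV type numbers (3 for `block_hash`, 13 for `spendable_txids_confirmed`), so monitors written by 0.0.113+ remain readable by older versions, and data written by older versions simply deserializes with the field absent. The snippet below is a simplified, dependency-free illustration of that odd/even convention (unknown odd records are skipped, unknown even records are a hard error); the one-byte type/length encoding and all names are illustrative only, not LDK's actual serialization code.

#[derive(Debug)]
struct UnknownRequiredField(u64);

/// Parses (type, length, value) records. Unknown odd types are skipped, unknown even
/// types are an error -- which is why new optional fields get odd numbers like 3 and 13.
fn read_tlv_stream(mut data: &[u8], known_types: &[u64]) -> Result<Vec<(u64, Vec<u8>)>, UnknownRequiredField> {
    let mut records = Vec::new();
    while data.len() >= 2 {
        // Single-byte type and length for brevity; LDK uses variable-length integers here.
        let typ = data[0] as u64;
        let len = data[1] as usize;
        if data.len() < 2 + len {
            break; // truncated record; a real reader would error instead
        }
        let value = data[2..2 + len].to_vec();
        data = &data[2 + len..];
        if known_types.contains(&typ) {
            records.push((typ, value));
        } else if typ % 2 == 1 {
            continue; // unknown odd type: an older reader safely ignores it
        } else {
            return Err(UnknownRequiredField(typ)); // unknown even type: must be understood
        }
    }
    Ok(records)
}

fn main() {
    // A pre-0.0.113-style reader that only knows types 0, 1, 2 and 4 still parses a
    // stream containing a record of the new odd type 3 -- it simply skips it.
    let stream = [0u8, 1, 0xaa, 3, 2, 0xbb, 0xcc, 4, 1, 0xdd];
    let old_reader_types = [0u64, 1, 2, 4];
    assert_eq!(read_tlv_stream(&stream, &old_reader_types).unwrap().len(), 2);
}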
@@ -1067,6 +1078,7 @@ impl<Signer: Sign> Writeable for ChannelMonitorImpl<Signer> {
 			(7, self.funding_spend_seen, required),
 			(9, self.counterparty_node_id, option),
 			(11, self.confirmed_commitment_tx_counterparty_output, option),
+			(13, self.spendable_txids_confirmed, vec_type),
 		});
 
 		Ok(())
@@ -1175,6 +1187,7 @@ impl<Signer: Sign> ChannelMonitor<Signer> {
 			funding_spend_confirmed: None,
 			confirmed_commitment_tx_counterparty_output: None,
 			htlcs_resolved_on_chain: Vec::new(),
+			spendable_txids_confirmed: Vec::new(),
 
 			best_block,
 			counterparty_node_id: Some(counterparty_node_id),
@@ -1305,9 +1318,11 @@ impl<Signer: Sign> ChannelMonitor<Signer> {
 	/// Gets the list of pending events which were generated by previous actions, clearing the list
 	/// in the process.
 	///
-	/// This is called by ChainMonitor::get_and_clear_pending_events() and is equivalent to
-	/// EventsProvider::get_and_clear_pending_events() except that it requires &mut self as we do
-	/// no internal locking in ChannelMonitors.
+	/// This is called by the [`EventsProvider::process_pending_events`] implementation for
+	/// [`ChainMonitor`].
+	///
+	/// [`EventsProvider::process_pending_events`]: crate::util::events::EventsProvider::process_pending_events
+	/// [`ChainMonitor`]: crate::chain::chainmonitor::ChainMonitor
 	pub fn get_and_clear_pending_events(&self) -> Vec<Event> {
 		self.inner.lock().unwrap().get_and_clear_pending_events()
 	}
@@ -1482,11 +1497,11 @@ impl<Signer: Sign> ChannelMonitor<Signer> {
 	}
 
 	/// Returns the set of txids that should be monitored for re-organization out of the chain.
-	pub fn get_relevant_txids(&self) -> Vec<Txid> {
+	pub fn get_relevant_txids(&self) -> Vec<(Txid, Option<BlockHash>)> {
 		let inner = self.inner.lock().unwrap();
-		let mut txids: Vec<Txid> = inner.onchain_events_awaiting_threshold_conf
+		let mut txids: Vec<(Txid, Option<BlockHash>)> = inner.onchain_events_awaiting_threshold_conf
 			.iter()
-			.map(|entry| entry.txid)
+			.map(|entry| (entry.txid, entry.block_hash))
 			.chain(inner.onchain_tx_handler.get_relevant_txids().into_iter())
 			.collect();
 		txids.sort_unstable();
@@ -1939,7 +1954,7 @@ impl<Signer: Sign> ChannelMonitor<Signer> {
 /// been revoked yet, the previous one, we we will never "forget" to resolve an HTLC.
 macro_rules! fail_unbroadcast_htlcs {
 	($self: expr, $commitment_tx_type: expr, $commitment_txid_confirmed: expr, $commitment_tx_confirmed: expr,
-	 $commitment_tx_conf_height: expr, $confirmed_htlcs_list: expr, $logger: expr) => { {
+	 $commitment_tx_conf_height: expr, $commitment_tx_conf_hash: expr, $confirmed_htlcs_list: expr, $logger: expr) => { {
 		debug_assert_eq!($commitment_tx_confirmed.txid(), $commitment_txid_confirmed);
 
 		macro_rules! check_htlc_fails {
@@ -1983,6 +1998,7 @@ macro_rules! fail_unbroadcast_htlcs {
 							txid: $commitment_txid_confirmed,
 							transaction: Some($commitment_tx_confirmed.clone()),
 							height: $commitment_tx_conf_height,
+							block_hash: Some(*$commitment_tx_conf_hash),
 							event: OnchainEvent::HTLCUpdate {
 								source: (**source).clone(),
 								payment_hash: htlc.payment_hash.clone(),
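The `get_relevant_txids` change above pairs each txid with the hash of the block it confirmed in (when known), so callers can detect a reorg by checking whether that block is still in the best chain. A minimal sketch of the consuming side, assuming the 0.0.113-era `lightning::chain::Confirm` trait; `ChainLookup`/`is_in_best_chain` are hypothetical stand-ins for whatever block source the application already uses:

use bitcoin::BlockHash;
use lightning::chain::Confirm;

/// Hypothetical chain view -- not an LDK type.
trait ChainLookup {
    fn is_in_best_chain(&self, block_hash: &BlockHash) -> bool;
}

fn unconfirm_reorged_transactions<C: Confirm, L: ChainLookup>(confirm: &C, lookup: &L) {
    for (txid, maybe_conf_hash) in confirm.get_relevant_txids() {
        match maybe_conf_hash {
            // Entries generated on 0.0.113+ carry the confirmation block hash, so a
            // simple membership check detects that the transaction was reorged out.
            Some(conf_hash) if !lookup.is_in_best_chain(&conf_hash) => {
                confirm.transaction_unconfirmed(&txid);
            }
            // Still in the best chain: nothing to do.
            Some(_) => {}
            // Entries deserialized from pre-0.0.113 data carry no hash; the caller has
            // to fall back to looking up the transaction itself (omitted here).
            None => {}
        }
    }
}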
@@ -2170,7 +2186,7 @@ impl<Signer: Sign> ChannelMonitorImpl<Signer> {
 		macro_rules! claim_htlcs {
 			($commitment_number: expr, $txid: expr) => {
 				let (htlc_claim_reqs, _) = self.get_counterparty_output_claim_info($commitment_number, $txid, None);
-				self.onchain_tx_handler.update_claims_view(&Vec::new(), htlc_claim_reqs, self.best_block.height(), self.best_block.height(), broadcaster, fee_estimator, logger);
+				self.onchain_tx_handler.update_claims_view_from_requests(htlc_claim_reqs, self.best_block.height(), self.best_block.height(), broadcaster, fee_estimator, logger);
 			}
 		}
 		if let Some(txid) = self.current_counterparty_commitment_txid {
@@ -2196,10 +2212,10 @@ impl<Signer: Sign> ChannelMonitorImpl<Signer> {
 				// block. Even if not, its a reasonable metric for the bump criteria on the HTLC
 				// transactions.
 				let (claim_reqs, _) = self.get_broadcasted_holder_claims(&self.current_holder_commitment_tx, self.best_block.height());
-				self.onchain_tx_handler.update_claims_view(&Vec::new(), claim_reqs, self.best_block.height(), self.best_block.height(), broadcaster, fee_estimator, logger);
+				self.onchain_tx_handler.update_claims_view_from_requests(claim_reqs, self.best_block.height(), self.best_block.height(), broadcaster, fee_estimator, logger);
 				if let Some(ref tx) = self.prev_holder_signed_commitment_tx {
 					let (claim_reqs, _) = self.get_broadcasted_holder_claims(&tx, self.best_block.height());
-					self.onchain_tx_handler.update_claims_view(&Vec::new(), claim_reqs, self.best_block.height(), self.best_block.height(), broadcaster, fee_estimator, logger);
+					self.onchain_tx_handler.update_claims_view_from_requests(claim_reqs, self.best_block.height(), self.best_block.height(), broadcaster, fee_estimator, logger);
 				}
 			}
 		}
@@ -2286,8 +2302,8 @@ impl<Signer: Sign> ChannelMonitorImpl<Signer> {
 				PackageSolvingData::HolderFundingOutput(funding_output),
 				best_block_height, false, best_block_height,
 			);
-			self.onchain_tx_handler.update_claims_view(
-				&[], vec![commitment_package], best_block_height, best_block_height,
+			self.onchain_tx_handler.update_claims_view_from_requests(
+				vec![commitment_package], best_block_height, best_block_height,
 				broadcaster, &bounded_fee_estimator, logger,
 			);
 		}
@@ -2401,7 +2417,7 @@ impl<Signer: Sign> ChannelMonitorImpl<Signer> {
 	/// Returns packages to claim the revoked output(s), as well as additional outputs to watch and
 	/// general information about the output that is to the counterparty in the commitment
 	/// transaction.
-	fn check_spend_counterparty_transaction<L: Deref>(&mut self, tx: &Transaction, height: u32, logger: &L)
+	fn check_spend_counterparty_transaction<L: Deref>(&mut self, tx: &Transaction, height: u32, block_hash: &BlockHash, logger: &L)
 		-> (Vec<PackageTemplate>, TransactionOutputs, CommitmentTxCounterpartyOutputInfo)
 		where L::Target: Logger {
 		// Most secp and related errors trying to create keys means we have no hope of constructing
@@ -2472,13 +2488,13 @@ impl<Signer: Sign> ChannelMonitorImpl<Signer> {
 
 				if let Some(per_commitment_data) = per_commitment_option {
 					fail_unbroadcast_htlcs!(self, "revoked_counterparty", commitment_txid, tx, height,
-						per_commitment_data.iter().map(|(htlc, htlc_source)|
+						block_hash, per_commitment_data.iter().map(|(htlc, htlc_source)|
 							(htlc, htlc_source.as_ref().map(|htlc_source| htlc_source.as_ref()))
 						), logger);
 				} else {
 					debug_assert!(false, "We should have per-commitment option for any recognized old commitment txn");
 					fail_unbroadcast_htlcs!(self, "revoked counterparty", commitment_txid, tx, height,
-						[].iter().map(|reference| *reference), logger);
+						block_hash, [].iter().map(|reference| *reference), logger);
 				}
 			}
 		} else if let Some(per_commitment_data) = per_commitment_option {
@@ -2495,7 +2511,7 @@ impl<Signer: Sign> ChannelMonitorImpl<Signer> {
 			self.counterparty_commitment_txn_on_chain.insert(commitment_txid, commitment_number);
 
 			log_info!(logger, "Got broadcast of non-revoked counterparty commitment transaction {}", commitment_txid);
-			fail_unbroadcast_htlcs!(self, "counterparty", commitment_txid, tx, height,
+			fail_unbroadcast_htlcs!(self, "counterparty", commitment_txid, tx, height, block_hash,
 				per_commitment_data.iter().map(|(htlc, htlc_source)|
 					(htlc, htlc_source.as_ref().map(|htlc_source| htlc_source.as_ref()))
 				), logger);
@@ -2631,7 +2647,7 @@ impl<Signer: Sign> ChannelMonitorImpl<Signer> {
 		(claimable_outpoints, Some((htlc_txid, outputs)))
 	}
 
-	// Returns (1) `PackageTemplate`s that can be given to the OnChainTxHandler, so that the handler can
+	// Returns (1) `PackageTemplate`s that can be given to the OnchainTxHandler, so that the handler can
 	// broadcast transactions claiming holder HTLC commitment outputs and (2) a holder revokable
 	// script so we can detect whether a holder transaction has been seen on-chain.
 	fn get_broadcasted_holder_claims(&self, holder_tx: &HolderSignedTx, conf_height: u32) -> (Vec<PackageTemplate>, Option<(Script, PublicKey, PublicKey)>) {
@@ -2676,7 +2692,7 @@ impl<Signer: Sign> ChannelMonitorImpl<Signer> {
 	/// revoked using data in holder_claimable_outpoints.
 	/// Should not be used if check_spend_revoked_transaction succeeds.
 	/// Returns None unless the transaction is definitely one of our commitment transactions.
-	fn check_spend_holder_transaction<L: Deref>(&mut self, tx: &Transaction, height: u32, logger: &L) -> Option<(Vec<PackageTemplate>, TransactionOutputs)> where L::Target: Logger {
+	fn check_spend_holder_transaction<L: Deref>(&mut self, tx: &Transaction, height: u32, block_hash: &BlockHash, logger: &L) -> Option<(Vec<PackageTemplate>, TransactionOutputs)> where L::Target: Logger {
 		let commitment_txid = tx.txid();
 		let mut claim_requests = Vec::new();
 		let mut watch_outputs = Vec::new();
@@ -2699,7 +2715,7 @@ impl<Signer: Sign> ChannelMonitorImpl<Signer> {
 			let mut to_watch = self.get_broadcasted_holder_watch_outputs(&self.current_holder_commitment_tx, tx);
 			append_onchain_update!(res, to_watch);
 			fail_unbroadcast_htlcs!(self, "latest holder", commitment_txid, tx, height,
-				self.current_holder_commitment_tx.htlc_outputs.iter()
+				block_hash, self.current_holder_commitment_tx.htlc_outputs.iter()
 				.map(|(htlc, _, htlc_source)| (htlc, htlc_source.as_ref())), logger);
 		} else if let &Some(ref holder_tx) = &self.prev_holder_signed_commitment_tx {
 			if holder_tx.txid == commitment_txid {
@@ -2708,7 +2724,7 @@ impl<Signer: Sign> ChannelMonitorImpl<Signer> {
 				let res = self.get_broadcasted_holder_claims(holder_tx, height);
 				let mut to_watch = self.get_broadcasted_holder_watch_outputs(holder_tx, tx);
 				append_onchain_update!(res, to_watch);
-				fail_unbroadcast_htlcs!(self, "previous holder", commitment_txid, tx, height,
+				fail_unbroadcast_htlcs!(self, "previous holder", commitment_txid, tx, height, block_hash,
 					holder_tx.htlc_outputs.iter().map(|(htlc, _, htlc_source)| (htlc, htlc_source.as_ref())),
 					logger);
 			}
@@ -2816,7 +2832,7 @@ impl<Signer: Sign> ChannelMonitorImpl<Signer> {
 
 		if height > self.best_block.height() {
 			self.best_block = BestBlock::new(block_hash, height);
-			self.block_confirmed(height, vec![], vec![], vec![], &broadcaster, &fee_estimator, &logger)
+			self.block_confirmed(height, block_hash, vec![], vec![], vec![], &broadcaster, &fee_estimator, &logger)
 		} else if block_hash != self.best_block.block_hash() {
 			self.best_block = BestBlock::new(block_hash, height);
 			self.onchain_events_awaiting_threshold_conf.retain(|ref entry| entry.height <= height);
@@ -2853,7 +2869,37 @@ impl<Signer: Sign> ChannelMonitorImpl<Signer> {
 
 		let mut watch_outputs = Vec::new();
 		let mut claimable_outpoints = Vec::new();
-		for tx in &txn_matched {
+		'tx_iter: for tx in &txn_matched {
+			let txid = tx.txid();
+			// If a transaction has already been confirmed, ensure we don't bother processing it duplicatively.
+			if Some(txid) == self.funding_spend_confirmed {
+				log_debug!(logger, "Skipping redundant processing of funding-spend tx {} as it was previously confirmed", txid);
+				continue 'tx_iter;
+			}
+			for ev in self.onchain_events_awaiting_threshold_conf.iter() {
+				if ev.txid == txid {
+					if let Some(conf_hash) = ev.block_hash {
+						assert_eq!(header.block_hash(), conf_hash,
+							"Transaction {} was already confirmed and is being re-confirmed in a different block.\n\
+							This indicates a severe bug in the transaction connection logic - a reorg should have been processed first!", ev.txid);
+					}
+					log_debug!(logger, "Skipping redundant processing of confirming tx {} as it was previously confirmed", txid);
+					continue 'tx_iter;
+				}
+			}
+			for htlc in self.htlcs_resolved_on_chain.iter() {
+				if Some(txid) == htlc.resolving_txid {
+					log_debug!(logger, "Skipping redundant processing of HTLC resolution tx {} as it was previously confirmed", txid);
+					continue 'tx_iter;
+				}
+			}
+			for spendable_txid in self.spendable_txids_confirmed.iter() {
+				if txid == *spendable_txid {
+					log_debug!(logger, "Skipping redundant processing of spendable tx {} as it was previously confirmed", txid);
+					continue 'tx_iter;
+				}
+			}
+
 			if tx.input.len() == 1 {
 				// Assuming our keys were not leaked (in which case we're screwed no matter what),
 				// commitment transactions and HTLC transactions will all only ever have one input,
@@ -2863,19 +2909,19 @@ impl<Signer: Sign> ChannelMonitorImpl<Signer> {
 				if prevout.txid == self.funding_info.0.txid && prevout.vout == self.funding_info.0.index as u32 {
 					let mut balance_spendable_csv = None;
 					log_info!(logger, "Channel {} closed by funding output spend in txid {}.",
-						log_bytes!(self.funding_info.0.to_channel_id()), tx.txid());
+						log_bytes!(self.funding_info.0.to_channel_id()), txid);
 					self.funding_spend_seen = true;
 					let mut commitment_tx_to_counterparty_output = None;
 					if (tx.input[0].sequence.0 >> 8*3) as u8 == 0x80 && (tx.lock_time.0 >> 8*3) as u8 == 0x20 {
 						let (mut new_outpoints, new_outputs, counterparty_output_idx_sats) =
-							self.check_spend_counterparty_transaction(&tx, height, &logger);
+							self.check_spend_counterparty_transaction(&tx, height, &block_hash, &logger);
 						commitment_tx_to_counterparty_output = counterparty_output_idx_sats;
 						if !new_outputs.1.is_empty() {
 							watch_outputs.push(new_outputs);
 						}
 						claimable_outpoints.append(&mut new_outpoints);
 						if new_outpoints.is_empty() {
-							if let Some((mut new_outpoints, new_outputs)) = self.check_spend_holder_transaction(&tx, height, &logger) {
+							if let Some((mut new_outpoints, new_outputs)) = self.check_spend_holder_transaction(&tx, height, &block_hash, &logger) {
 								debug_assert!(commitment_tx_to_counterparty_output.is_none(),
 									"A commitment transaction matched as both a counterparty and local commitment tx?");
 								if !new_outputs.1.is_empty() {
@@ -2886,11 +2932,11 @@ impl<Signer: Sign> ChannelMonitorImpl<Signer> {
 							}
 						}
 					}
-					let txid = tx.txid();
 					self.onchain_events_awaiting_threshold_conf.push(OnchainEventEntry {
 						txid,
 						transaction: Some((*tx).clone()),
 						height,
+						block_hash: Some(block_hash),
 						event: OnchainEvent::FundingSpendConfirmation {
 							on_local_output_csv: balance_spendable_csv,
 							commitment_tx_to_counterparty_output,
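The checks added at the top of the transaction loop make duplicate confirmations idempotent: a transaction already recorded as confirmed is skipped, and the `assert_eq!` only fires if the same txid is reported as confirmed in a different block without a reorg (an unconfirm) having been processed first. A dependency-free sketch of that contract, with illustrative names only:

use std::collections::HashMap;

#[derive(Default)]
struct ConfirmationTracker {
    confirmed_in: HashMap<[u8; 32], [u8; 32]>, // txid -> block hash it confirmed in
}

impl ConfirmationTracker {
    /// Returns true if the transaction should be processed, false if this is a harmless
    /// duplicate notification. Panics on the caller bug the new assert guards against:
    /// re-confirmation in a different block without a prior unconfirm.
    fn note_confirmed(&mut self, txid: [u8; 32], block_hash: [u8; 32]) -> bool {
        if let Some(prev) = self.confirmed_in.get(&txid) {
            if *prev != block_hash {
                panic!("tx re-confirmed in a different block without a reorg being processed first");
            }
            return false; // same block again: skip redundant processing
        }
        self.confirmed_in.insert(txid, block_hash);
        true
    }

    /// Mirrors the reorg path: after this, the transaction may confirm in a new block.
    fn note_unconfirmed(&mut self, txid: &[u8; 32]) {
        self.confirmed_in.remove(txid);
    }
}

fn main() {
    let mut tracker = ConfirmationTracker::default();
    assert!(tracker.note_confirmed([1; 32], [0xaa; 32]));
    assert!(!tracker.note_confirmed([1; 32], [0xaa; 32])); // duplicate: skipped
    tracker.note_unconfirmed(&[1; 32]);
    assert!(tracker.note_confirmed([1; 32], [0xbb; 32])); // fine after a reorg
}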
@@ -2909,28 +2955,30 @@ impl<Signer: Sign> ChannelMonitorImpl<Signer> {
 
 			// While all commitment/HTLC-Success/HTLC-Timeout transactions have one input, HTLCs
 			// can also be resolved in a few other ways which can have more than one output. Thus,
 			// we call is_resolving_htlc_output here outside of the tx.input.len() == 1 check.
-			self.is_resolving_htlc_output(&tx, height, &logger);
+			self.is_resolving_htlc_output(&tx, height, &block_hash, &logger);
 
-			self.is_paying_spendable_output(&tx, height, &logger);
+			self.is_paying_spendable_output(&tx, height, &block_hash, &logger);
 		}
 
 		if height > self.best_block.height() {
 			self.best_block = BestBlock::new(block_hash, height);
 		}
 
-		self.block_confirmed(height, txn_matched, watch_outputs, claimable_outpoints, &broadcaster, &fee_estimator, &logger)
+		self.block_confirmed(height, block_hash, txn_matched, watch_outputs, claimable_outpoints, &broadcaster, &fee_estimator, &logger)
 	}
 
 	/// Update state for new block(s)/transaction(s) confirmed. Note that the caller must update
 	/// `self.best_block` before calling if a new best blockchain tip is available. More
 	/// concretely, `self.best_block` must never be at a lower height than `conf_height`, avoiding
-	/// complexity especially in `OnchainTx::update_claims_view`.
+	/// complexity especially in
+	/// `OnchainTx::update_claims_view_from_requests`/`OnchainTx::update_claims_view_from_matched_txn`.
 	///
 	/// `conf_height` should be set to the height at which any new transaction(s)/block(s) were
 	/// confirmed at, even if it is not the current best height.
 	fn block_confirmed<B: Deref, F: Deref, L: Deref>(
 		&mut self, conf_height: u32,
+		conf_hash: BlockHash,
 		txn_matched: Vec<&Transaction>,
 		mut watch_outputs: Vec<TransactionOutputs>,
 		mut claimable_outpoints: Vec<PackageTemplate>,
@@ -3032,6 +3080,7 @@ impl<Signer: Sign> ChannelMonitorImpl<Signer> {
 					self.pending_events.push(Event::SpendableOutputs {
 						outputs: vec![descriptor]
 					});
+					self.spendable_txids_confirmed.push(entry.txid);
 				},
 				OnchainEvent::HTLCSpendConfirmation { commitment_tx_output_idx, preimage, .. } => {
 					self.htlcs_resolved_on_chain.push(IrrevocablyResolvedHTLC {
@@ -3046,7 +3095,8 @@ impl<Signer: Sign> ChannelMonitorImpl<Signer> {
 			}
 		}
 
-		self.onchain_tx_handler.update_claims_view(&txn_matched, claimable_outpoints, conf_height, self.best_block.height(), broadcaster, fee_estimator, logger);
+		self.onchain_tx_handler.update_claims_view_from_requests(claimable_outpoints, conf_height, self.best_block.height(), broadcaster, fee_estimator, logger);
+		self.onchain_tx_handler.update_claims_view_from_matched_txn(&txn_matched, conf_height, conf_hash, self.best_block.height(), broadcaster, fee_estimator, logger);
 
 		// Determine new outputs to watch by comparing against previously known outputs to watch,
 		// updating the latter in the process.
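For context on the maturity handling in `block_confirmed`: entries on `onchain_events_awaiting_threshold_conf` are only acted on (including the `SpendableOutputs` event and the new `spendable_txids_confirmed.push` above) once they are `ANTI_REORG_DELAY` blocks deep. A rough, non-normative model of that threshold, assuming LDK's `ANTI_REORG_DELAY` of 6 and a `confirmation_threshold()` of `height + ANTI_REORG_DELAY - 1`:

const ANTI_REORG_DELAY: u32 = 6; // mirrors the LDK constant of the same name

/// Height at which an event confirmed at `conf_height` matures, i.e. the block that
/// gives the transaction ANTI_REORG_DELAY total confirmations.
fn confirmation_threshold(conf_height: u32) -> u32 {
    conf_height + ANTI_REORG_DELAY - 1
}

fn has_reached_confirmation_threshold(conf_height: u32, best_height: u32) -> bool {
    best_height >= confirmation_threshold(conf_height)
}

fn main() {
    // A spendable output seen in block 100 is only surfaced once the tip reaches
    // block 105 (blocks 100..=105 are 6 confirmations).
    assert!(!has_reached_confirmation_threshold(100, 104));
    assert!(has_reached_confirmation_threshold(100, 105));
}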
@@ -3235,7 +3285,7 @@ impl<Signer: Sign> ChannelMonitorImpl<Signer> {
 
 	/// Check if any transaction broadcasted is resolving HTLC output by a success or timeout on a holder
 	/// or counterparty commitment tx, if so send back the source, preimage if found and payment_hash of resolved HTLC
-	fn is_resolving_htlc_output<L: Deref>(&mut self, tx: &Transaction, height: u32, logger: &L) where L::Target: Logger {
+	fn is_resolving_htlc_output<L: Deref>(&mut self, tx: &Transaction, height: u32, block_hash: &BlockHash, logger: &L) where L::Target: Logger {
 		'outer_loop: for input in &tx.input {
 			let mut payment_data = None;
 			let htlc_claim = HTLCClaim::from_witness(&input.witness);
@@ -3320,7 +3370,7 @@ impl<Signer: Sign> ChannelMonitorImpl<Signer> {
 						log_claim!($tx_info, $holder_tx, htlc_output, false);
 						let outbound_htlc = $holder_tx == htlc_output.offered;
 						self.onchain_events_awaiting_threshold_conf.push(OnchainEventEntry {
-							txid: tx.txid(), height, transaction: Some(tx.clone()),
+							txid: tx.txid(), height, block_hash: Some(*block_hash), transaction: Some(tx.clone()),
 							event: OnchainEvent::HTLCSpendConfirmation {
 								commitment_tx_output_idx: input.previous_output.vout,
 								preimage: if accepted_preimage_claim || offered_preimage_claim {
@@ -3364,6 +3414,7 @@ impl<Signer: Sign> ChannelMonitorImpl<Signer> {
 					self.onchain_events_awaiting_threshold_conf.push(OnchainEventEntry {
 						txid: tx.txid(),
 						height,
+						block_hash: Some(*block_hash),
 						transaction: Some(tx.clone()),
 						event: OnchainEvent::HTLCSpendConfirmation {
 							commitment_tx_output_idx: input.previous_output.vout,
@@ -3387,6 +3438,7 @@ impl<Signer: Sign> ChannelMonitorImpl<Signer> {
 						txid: tx.txid(),
 						transaction: Some(tx.clone()),
 						height,
+						block_hash: Some(*block_hash),
 						event: OnchainEvent::HTLCSpendConfirmation {
 							commitment_tx_output_idx: input.previous_output.vout,
 							preimage: Some(payment_preimage),
@@ -3414,6 +3466,7 @@ impl<Signer: Sign> ChannelMonitorImpl<Signer> {
 					txid: tx.txid(),
 					transaction: Some(tx.clone()),
 					height,
+					block_hash: Some(*block_hash),
 					event: OnchainEvent::HTLCUpdate {
 						source, payment_hash,
 						htlc_value_satoshis: Some(amount_msat / 1000),
@@ -3428,7 +3481,7 @@ impl<Signer: Sign> ChannelMonitorImpl<Signer> {
 	}
 
 	/// Check if any transaction broadcasted is paying fund back to some address we can assume to own
-	fn is_paying_spendable_output<L: Deref>(&mut self, tx: &Transaction, height: u32, logger: &L) where L::Target: Logger {
+	fn is_paying_spendable_output<L: Deref>(&mut self, tx: &Transaction, height: u32, block_hash: &BlockHash, logger: &L) where L::Target: Logger {
 		let mut spendable_output = None;
 		for (i, outp) in tx.output.iter().enumerate() { // There is max one spendable output for any channel tx, including ones generated by us
 			if i > ::core::u16::MAX as usize {
@@ -3488,6 +3541,7 @@ impl<Signer: Sign> ChannelMonitorImpl<Signer> {
 				txid: tx.txid(),
 				transaction: Some(tx.clone()),
 				height,
+				block_hash: Some(*block_hash),
 				event: OnchainEvent::MaturingOutput { descriptor: spendable_output.clone() },
 			};
 			log_info!(logger, "Received spendable output {}, spendable at height {}", log_spendable!(spendable_output), entry.confirmation_threshold());
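The `is_paying_spendable_output` path above is what eventually surfaces `Event::SpendableOutputs`, and the new `spendable_txids_confirmed` tracking exists so that event is not regenerated when an old transaction is confirmed again. A minimal sketch of the consuming side, assuming the 0.0.113-era `lightning::util::events::Event` and `lightning::chain::keysinterface` paths; `store_descriptor` is a hypothetical persistence hook:

use lightning::chain::keysinterface::SpendableOutputDescriptor;
use lightning::util::events::Event;

/// Hypothetical persistence hook, e.g. writing to the same store as the monitors.
fn store_descriptor(_descriptor: &SpendableOutputDescriptor) {}

fn handle_event(event: Event) {
    match event {
        // Because the monitor now tracks spendable_txids_confirmed, this arm is not
        // hit twice for the same claim transaction even if it is re-confirmed.
        Event::SpendableOutputs { outputs } => {
            for descriptor in outputs.iter() {
                store_descriptor(descriptor);
            }
        }
        _ => {}
    }
}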
@@ -3529,15 +3583,15 @@ where
 		self.0.best_block_updated(header, height, &*self.1, &*self.2, &*self.3);
 	}
 
-	fn get_relevant_txids(&self) -> Vec<Txid> {
+	fn get_relevant_txids(&self) -> Vec<(Txid, Option<BlockHash>)> {
 		self.0.get_relevant_txids()
 	}
 }
 
 const MAX_ALLOC_SIZE: usize = 64*1024;
 
-impl<'a, Signer: Sign, K: KeysInterface<Signer = Signer>> ReadableArgs<&'a K>
-		for (BlockHash, ChannelMonitor<Signer>) {
+impl<'a, K: KeysInterface> ReadableArgs<&'a K>
+		for (BlockHash, ChannelMonitor<K::Signer>) {
 	fn read<R: io::Read>(reader: &mut R, keys_manager: &'a K) -> Result<Self, DecodeError> {
 		macro_rules! unwrap_obj {
 			($key: expr) => {
@@ -3721,7 +3775,7 @@ impl<'a, Signer: Sign, K: KeysInterface<Signer = Signer>> ReadableArgs<&'a K>
 				return Err(DecodeError::InvalidValue);
 			}
 		}
-		let onchain_tx_handler: OnchainTxHandler<Signer> = ReadableArgs::read(reader, keys_manager)?;
+		let onchain_tx_handler: OnchainTxHandler<K::Signer> = ReadableArgs::read(reader, keys_manager)?;
 
 		let lockdown_from_offchain = Readable::read(reader)?;
 		let holder_tx_signed = Readable::read(reader)?;
@@ -3748,6 +3802,7 @@ impl<'a, Signer: Sign, K: KeysInterface<Signer = Signer>> ReadableArgs<&'a K>
 		let mut funding_spend_seen = Some(false);
 		let mut counterparty_node_id = None;
 		let mut confirmed_commitment_tx_counterparty_output = None;
+		let mut spendable_txids_confirmed = Some(Vec::new());
 		read_tlv_fields!(reader, {
 			(1, funding_spend_confirmed, option),
 			(3, htlcs_resolved_on_chain, vec_type),
 			(7, funding_spend_seen, option),
 			(9, counterparty_node_id, option),
 			(11, confirmed_commitment_tx_counterparty_output, option),
+			(13, spendable_txids_confirmed, vec_type),
 		});
 
 		let mut secp_ctx = Secp256k1::new();
@@ -3807,6 +3863,7 @@ impl<'a, Signer: Sign, K: KeysInterface<Signer = Signer>> ReadableArgs<&'a K>
 			funding_spend_confirmed,
 			confirmed_commitment_tx_counterparty_output,
 			htlcs_resolved_on_chain: htlcs_resolved_on_chain.unwrap(),
+			spendable_txids_confirmed: spendable_txids_confirmed.unwrap(),
 
 			best_block,
 			counterparty_node_id,
@@ -3846,7 +3903,7 @@ mod tests {
 	use crate::ln::{PaymentPreimage, PaymentHash};
 	use crate::ln::chan_utils;
 	use crate::ln::chan_utils::{HTLCOutputInCommitment, ChannelPublicKeys, ChannelTransactionParameters, HolderCommitmentTransaction, CounterpartyChannelTransactionParameters};
-	use crate::ln::channelmanager::{self, PaymentSendFailure};
+	use crate::ln::channelmanager::{self, PaymentSendFailure, PaymentId};
 	use crate::ln::functional_test_utils::*;
 	use crate::ln::script::ShutdownScript;
 	use crate::util::errors::APIError;
@@ -3911,7 +3968,7 @@ mod tests {
 		// If the ChannelManager tries to update the channel, however, the ChainMonitor will pass
 		// the update through to the ChannelMonitor which will refuse it (as the channel is closed).
 		let (route, payment_hash, _, payment_secret) = get_route_and_payment_hash!(nodes[1], nodes[0], 100_000);
-		unwrap_send_err!(nodes[1].node.send_payment(&route, payment_hash, &Some(payment_secret)),
+		unwrap_send_err!(nodes[1].node.send_payment(&route, payment_hash, &Some(payment_secret), PaymentId(payment_hash.0)),
			true, APIError::ChannelUnavailable { ref err },
			assert!(err.contains("ChannelMonitor storage failure")));
 		check_added_monitors!(nodes[1], 2); // After the failure we generate a close-channel monitor update
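The two test hunks at the end reflect a user-facing change that lands alongside this diff: `ChannelManager::send_payment` now takes an explicit `PaymentId`, used to identify the payment (e.g. for retries and duplicate-send protection). Deriving it from the payment hash, as the test does with `PaymentId(payment_hash.0)`, is one simple choice; any locally-unique 32 bytes work:

use lightning::ln::channelmanager::PaymentId;
use lightning::ln::PaymentHash;

fn payment_id_for(payment_hash: &PaymentHash) -> PaymentId {
    PaymentId(payment_hash.0)
}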