X-Git-Url: http://git.bitcoin.ninja/index.cgi?a=blobdiff_plain;f=lightning%2Fsrc%2Fchain%2Fchannelmonitor.rs;h=a48f169a4d5e605b330f49896bc191779dd389d7;hb=a9d49aee5f1e24281cedd8ac8177d700359b86d0;hp=66ab16911146e1e2c7cd09a8c0b3f3b0b6ac4664;hpb=b72f6b1b84fee84f1c34a83fd9097fcc4418fa63;p=rust-lightning diff --git a/lightning/src/chain/channelmonitor.rs b/lightning/src/chain/channelmonitor.rs index 66ab1691..a48f169a 100644 --- a/lightning/src/chain/channelmonitor.rs +++ b/lightning/src/chain/channelmonitor.rs @@ -42,7 +42,7 @@ use crate::chain; use crate::chain::{BestBlock, WatchedOutput}; use crate::chain::chaininterface::{BroadcasterInterface, FeeEstimator, LowerBoundedFeeEstimator}; use crate::chain::transaction::{OutPoint, TransactionData}; -use crate::chain::keysinterface::{SpendableOutputDescriptor, StaticPaymentOutputDescriptor, DelayedPaymentOutputDescriptor, WriteableEcdsaChannelSigner, SignerProvider, EntropySource}; +use crate::sign::{SpendableOutputDescriptor, StaticPaymentOutputDescriptor, DelayedPaymentOutputDescriptor, WriteableEcdsaChannelSigner, SignerProvider, EntropySource}; #[cfg(anchors)] use crate::chain::onchaintx::ClaimEvent; use crate::chain::onchaintx::OnchainTxHandler; @@ -51,9 +51,9 @@ use crate::chain::Filter; use crate::util::logger::Logger; use crate::util::ser::{Readable, ReadableArgs, RequiredWrapper, MaybeReadable, UpgradableRequired, Writer, Writeable, U48}; use crate::util::byte_utils; -use crate::util::events::Event; +use crate::events::Event; #[cfg(anchors)] -use crate::util::events::{AnchorDescriptor, HTLCDescriptor, BumpTransactionEvent}; +use crate::events::bump_transaction::{AnchorDescriptor, HTLCDescriptor, BumpTransactionEvent}; use crate::prelude::*; use core::{cmp, mem}; @@ -69,34 +69,36 @@ use crate::sync::{Mutex, LockTestExt}; /// much smaller than a full [`ChannelMonitor`]. However, for large single commitment transaction /// updates (e.g. ones during which there are hundreds of HTLCs pending on the commitment /// transaction), a single update may reach upwards of 1 MiB in serialized size. -#[cfg_attr(any(test, fuzzing, feature = "_test_utils"), derive(PartialEq, Eq))] -#[derive(Clone)] +#[derive(Clone, PartialEq, Eq)] #[must_use] pub struct ChannelMonitorUpdate { pub(crate) updates: Vec, /// The sequence number of this update. Updates *must* be replayed in-order according to this /// sequence number (and updates may panic if they are not). The update_id values are strictly - /// increasing and increase by one for each new update, with one exception specified below. + /// increasing and increase by one for each new update, with two exceptions specified below. /// /// This sequence number is also used to track up to which points updates which returned /// [`ChannelMonitorUpdateStatus::InProgress`] have been applied to all copies of a given /// ChannelMonitor when ChannelManager::channel_monitor_updated is called. /// - /// The only instance where update_id values are not strictly increasing is the case where we - /// allow post-force-close updates with a special update ID of [`CLOSED_CHANNEL_UPDATE_ID`]. See - /// its docs for more details. + /// The only instances we allow where update_id values are not strictly increasing have a + /// special update ID of [`CLOSED_CHANNEL_UPDATE_ID`]. 
This update ID is used for updates that + /// will force close the channel by broadcasting the latest commitment transaction or + /// special post-force-close updates, like providing preimages necessary to claim outputs on the + /// broadcast commitment transaction. See its docs for more details. /// /// [`ChannelMonitorUpdateStatus::InProgress`]: super::ChannelMonitorUpdateStatus::InProgress pub update_id: u64, } -/// If: -/// (1) a channel has been force closed and -/// (2) we receive a preimage from a forward link that allows us to spend an HTLC output on -/// this channel's (the backward link's) broadcasted commitment transaction -/// then we allow the `ChannelManager` to send a `ChannelMonitorUpdate` with this update ID, -/// with the update providing said payment preimage. No other update types are allowed after -/// force-close. +/// The update ID used for a [`ChannelMonitorUpdate`] that is either: +/// +/// (1) attempting to force close the channel by broadcasting our latest commitment transaction or +/// (2) providing a preimage (after the channel has been force closed) from a forward link that +/// allows us to spend an HTLC output on this channel's (the backward link's) broadcasted +/// commitment transaction. +/// +/// No other [`ChannelMonitorUpdate`]s are allowed after force-close. pub const CLOSED_CHANNEL_UPDATE_ID: u64 = core::u64::MAX; impl Writeable for ChannelMonitorUpdate { @@ -488,8 +490,7 @@ impl_writeable_tlv_based_enum_upgradable!(OnchainEvent, ); -#[cfg_attr(any(test, fuzzing, feature = "_test_utils"), derive(PartialEq, Eq))] -#[derive(Clone)] +#[derive(Clone, PartialEq, Eq)] pub(crate) enum ChannelMonitorUpdateStep { LatestHolderCommitmentTXInfo { commitment_tx: HolderCommitmentTransaction, @@ -605,6 +606,10 @@ pub enum Balance { /// The height at which the counterparty may be able to claim the balance if we have not /// done so. timeout_height: u32, + /// The payment hash that locks this HTLC. + payment_hash: PaymentHash, + /// The preimage that can be used to claim this HTLC. + payment_preimage: PaymentPreimage, }, /// HTLCs which we sent to our counterparty which are claimable after a timeout (less on-chain /// fees) if the counterparty does not know the preimage for the HTLCs. These are somewhat @@ -616,6 +621,8 @@ pub enum Balance { /// The height at which we will be able to claim the balance if our counterparty has not /// done so. claimable_height: u32, + /// The payment hash whose preimage our counterparty needs to claim this HTLC. + payment_hash: PaymentHash, }, /// HTLCs which we received from our counterparty which are claimable with a preimage which we /// do not currently have. This will only be claimable if we receive the preimage from the node @@ -627,6 +634,8 @@ pub enum Balance { /// The height at which our counterparty will be able to claim the balance if we have not /// yet received the preimage and claimed it ourselves. expiry_height: u32, + /// The payment hash whose preimage we need to claim this HTLC. + payment_hash: PaymentHash, }, /// The channel has been closed, and our counterparty broadcasted a revoked commitment /// transaction. 
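// ---------------------------------------------------------------------------
// Illustrative sketch, not part of the upstream patch: one way a wallet might
// read the `payment_hash`/`payment_preimage` fields the hunk above adds to
// `Balance`. It assumes the post-rename `lightning::sign` paths introduced in
// this diff and a `ChannelMonitor` plus its `get_claimable_balances` accessor
// already in scope; the function name and logging are hypothetical, only the
// `Balance` variants and fields come from the code above.
use lightning::chain::channelmonitor::{Balance, ChannelMonitor};
use lightning::sign::WriteableEcdsaChannelSigner;

fn summarize_htlc_balances<S: WriteableEcdsaChannelSigner>(monitor: &ChannelMonitor<S>) {
	for balance in monitor.get_claimable_balances() {
		match balance {
			// Outbound HTLC we can reclaim once `claimable_height` is reached.
			Balance::MaybeTimeoutClaimableHTLC { claimable_amount_satoshis, claimable_height, payment_hash } =>
				println!("outbound HTLC {:?}: {} sats, timeout claim at height {}",
					payment_hash, claimable_amount_satoshis, claimable_height),
			// Inbound HTLC we cannot claim until we learn the preimage.
			Balance::MaybePreimageClaimableHTLC { claimable_amount_satoshis, expiry_height, payment_hash } =>
				println!("inbound HTLC {:?}: {} sats, needs preimage before height {}",
					payment_hash, claimable_amount_satoshis, expiry_height),
			// Inbound HTLC we know the preimage for but must claim on-chain in time.
			Balance::ContentiousClaimable { claimable_amount_satoshis, timeout_height, payment_hash, payment_preimage } =>
				println!("inbound HTLC {:?}: {} sats, claimable with preimage {:?} until height {}",
					payment_hash, claimable_amount_satoshis, payment_preimage, timeout_height),
			_ => {},
		}
	}
}
// ---------------------------------------------------------------------------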
@@ -1206,17 +1215,6 @@ impl ChannelMonitor { payment_hash, payment_preimage, broadcaster, fee_estimator, logger) } - pub(crate) fn broadcast_latest_holder_commitment_txn( - &self, - broadcaster: &B, - logger: &L, - ) where - B::Target: BroadcasterInterface, - L::Target: Logger, - { - self.inner.lock().unwrap().broadcast_latest_holder_commitment_txn(broadcaster, logger); - } - /// Updates a ChannelMonitor on the basis of some new information provided by the Channel /// itself. /// @@ -1284,7 +1282,7 @@ impl ChannelMonitor { /// This is called by the [`EventsProvider::process_pending_events`] implementation for /// [`ChainMonitor`]. /// - /// [`EventsProvider::process_pending_events`]: crate::util::events::EventsProvider::process_pending_events + /// [`EventsProvider::process_pending_events`]: crate::events::EventsProvider::process_pending_events /// [`ChainMonitor`]: crate::chain::chainmonitor::ChainMonitor pub fn get_and_clear_pending_events(&self) -> Vec { self.inner.lock().unwrap().get_and_clear_pending_events() @@ -1477,6 +1475,27 @@ impl ChannelMonitor { pub fn current_best_block(&self) -> BestBlock { self.inner.lock().unwrap().best_block.clone() } + + /// Triggers rebroadcasts/fee-bumps of pending claims from a force-closed channel. This is + /// crucial in preventing certain classes of pinning attacks, detecting substantial mempool + /// feerate changes between blocks, and ensuring reliability if broadcasting fails. We recommend + /// invoking this every 30 seconds, or lower if running in an environment with spotty + /// connections, like on mobile. + pub fn rebroadcast_pending_claims( + &self, broadcaster: B, fee_estimator: F, logger: L, + ) + where + B::Target: BroadcasterInterface, + F::Target: FeeEstimator, + L::Target: Logger, + { + let fee_estimator = LowerBoundedFeeEstimator::new(fee_estimator); + let mut inner = self.inner.lock().unwrap(); + let current_height = inner.best_block.height; + inner.onchain_tx_handler.rebroadcast_pending_claims( + current_height, &broadcaster, &fee_estimator, &logger, + ); + } } impl ChannelMonitorImpl { @@ -1612,9 +1631,10 @@ impl ChannelMonitorImpl { return Some(Balance::MaybeTimeoutClaimableHTLC { claimable_amount_satoshis: htlc.amount_msat / 1000, claimable_height: htlc.cltv_expiry, + payment_hash: htlc.payment_hash, }); } - } else if self.payment_preimages.get(&htlc.payment_hash).is_some() { + } else if let Some(payment_preimage) = self.payment_preimages.get(&htlc.payment_hash) { // Otherwise (the payment was inbound), only expose it as claimable if // we know the preimage. 
// Note that if there is a pending claim, but it did not use the @@ -1630,12 +1650,15 @@ impl ChannelMonitorImpl { return Some(Balance::ContentiousClaimable { claimable_amount_satoshis: htlc.amount_msat / 1000, timeout_height: htlc.cltv_expiry, + payment_hash: htlc.payment_hash, + payment_preimage: *payment_preimage, }); } } else if htlc_resolved.is_none() { return Some(Balance::MaybePreimageClaimableHTLC { claimable_amount_satoshis: htlc.amount_msat / 1000, expiry_height: htlc.cltv_expiry, + payment_hash: htlc.payment_hash, }); } None @@ -1797,6 +1820,7 @@ impl ChannelMonitor { res.push(Balance::MaybeTimeoutClaimableHTLC { claimable_amount_satoshis: htlc.amount_msat / 1000, claimable_height: htlc.cltv_expiry, + payment_hash: htlc.payment_hash, }); } else if us.payment_preimages.get(&htlc.payment_hash).is_some() { claimable_inbound_htlc_value_sat += htlc.amount_msat / 1000; @@ -1806,6 +1830,7 @@ impl ChannelMonitor { res.push(Balance::MaybePreimageClaimableHTLC { claimable_amount_satoshis: htlc.amount_msat / 1000, expiry_height: htlc.cltv_expiry, + payment_hash: htlc.payment_hash, }); } } @@ -2302,10 +2327,13 @@ impl ChannelMonitorImpl { where B::Target: BroadcasterInterface, L::Target: Logger, { - for tx in self.get_latest_holder_commitment_txn(logger).iter() { + let commit_txs = self.get_latest_holder_commitment_txn(logger); + let mut txs = vec![]; + for tx in commit_txs.iter() { log_info!(logger, "Broadcasting local {}", log_tx!(tx)); - broadcaster.broadcast_transaction(tx); + txs.push(tx); } + broadcaster.broadcast_transactions(&txs); self.pending_monitor_events.push(MonitorEvent::CommitmentTxConfirmed(self.funding_info.0)); } @@ -2314,16 +2342,32 @@ impl ChannelMonitorImpl { F::Target: FeeEstimator, L::Target: Logger, { - log_info!(logger, "Applying update to monitor {}, bringing update_id from {} to {} with {} changes.", - log_funding_info!(self), self.latest_update_id, updates.update_id, updates.updates.len()); - // ChannelMonitor updates may be applied after force close if we receive a - // preimage for a broadcasted commitment transaction HTLC output that we'd - // like to claim on-chain. If this is the case, we no longer have guaranteed - // access to the monitor's update ID, so we use a sentinel value instead. + if self.latest_update_id == CLOSED_CHANNEL_UPDATE_ID && updates.update_id == CLOSED_CHANNEL_UPDATE_ID { + log_info!(logger, "Applying post-force-closed update to monitor {} with {} change(s).", + log_funding_info!(self), updates.updates.len()); + } else if updates.update_id == CLOSED_CHANNEL_UPDATE_ID { + log_info!(logger, "Applying force close update to monitor {} with {} change(s).", + log_funding_info!(self), updates.updates.len()); + } else { + log_info!(logger, "Applying update to monitor {}, bringing update_id from {} to {} with {} change(s).", + log_funding_info!(self), self.latest_update_id, updates.update_id, updates.updates.len()); + } + // ChannelMonitor updates may be applied after force close if we receive a preimage for a + // broadcasted commitment transaction HTLC output that we'd like to claim on-chain. If this + // is the case, we no longer have guaranteed access to the monitor's update ID, so we use a + // sentinel value instead. + // + // The `ChannelManager` may also queue redundant `ChannelForceClosed` updates if it still + // thinks the channel needs to have its commitment transaction broadcast, so we'll allow + // them as well. 
if updates.update_id == CLOSED_CHANNEL_UPDATE_ID { assert_eq!(updates.updates.len(), 1); match updates.updates[0] { - ChannelMonitorUpdateStep::PaymentPreimage { .. } => {}, + ChannelMonitorUpdateStep::ChannelForceClosed { .. } => {}, + // We should have already seen a `ChannelForceClosed` update if we're trying to + // provide a preimage at this point. + ChannelMonitorUpdateStep::PaymentPreimage { .. } => + debug_assert_eq!(self.latest_update_id, CLOSED_CHANNEL_UPDATE_ID), _ => { log_error!(logger, "Attempted to apply post-force-close ChannelMonitorUpdate of type {}", updates.updates[0].variant_name()); panic!("Attempted to apply post-force-close ChannelMonitorUpdate that wasn't providing a payment preimage"); @@ -2374,6 +2418,7 @@ impl ChannelMonitorImpl { _ => false, }).is_some(); if detected_funding_spend { + log_trace!(logger, "Avoiding commitment broadcast, already detected confirmed spend onchain"); continue; } self.broadcast_latest_holder_commitment_txn(broadcaster, logger); @@ -2389,7 +2434,7 @@ impl ChannelMonitorImpl { let commitment_package = PackageTemplate::build_package( self.funding_info.0.txid.clone(), self.funding_info.0.index as u32, PackageSolvingData::HolderFundingOutput(funding_output), - best_block_height, false, best_block_height, + best_block_height, best_block_height ); self.onchain_tx_handler.update_claims_view_from_requests( vec![commitment_package], best_block_height, best_block_height, @@ -2415,9 +2460,18 @@ impl ChannelMonitorImpl { }, } } + + // If the updates succeeded and we were in an already closed channel state, then there's no + // need to refuse any updates we expect to receive after seeing a confirmed commitment. + if ret.is_ok() && updates.update_id == CLOSED_CHANNEL_UPDATE_ID && self.latest_update_id == updates.update_id { + return Ok(()); + } + self.latest_update_id = updates.update_id; - if ret.is_ok() && self.funding_spend_seen { + // Refuse updates after we've detected a spend onchain, but only if we haven't processed a + // force closed monitor update yet.
+ if ret.is_ok() && self.funding_spend_seen && self.latest_update_id != CLOSED_CHANNEL_UPDATE_ID { log_error!(logger, "Refusing Channel Monitor Update as counterparty attempted to update commitment after funding was spent"); Err(()) } else { ret } @@ -2477,7 +2531,7 @@ impl ChannelMonitorImpl { })); }, ClaimEvent::BumpHTLC { - target_feerate_sat_per_1000_weight, htlcs, + target_feerate_sat_per_1000_weight, htlcs, tx_lock_time, } => { let mut htlc_descriptors = Vec::with_capacity(htlcs.len()); for htlc in htlcs { @@ -2495,6 +2549,7 @@ impl ChannelMonitorImpl { ret.push(Event::BumpTransaction(BumpTransactionEvent::HTLCResolution { target_feerate_sat_per_1000_weight, htlc_descriptors, + tx_lock_time, })); } } @@ -2562,8 +2617,8 @@ impl ChannelMonitorImpl { // First, process non-htlc outputs (to_holder & to_counterparty) for (idx, outp) in tx.output.iter().enumerate() { if outp.script_pubkey == revokeable_p2wsh { - let revk_outp = RevokedOutput::build(per_commitment_point, self.counterparty_commitment_params.counterparty_delayed_payment_base_key, self.counterparty_commitment_params.counterparty_htlc_base_key, per_commitment_key, outp.value, self.counterparty_commitment_params.on_counterparty_tx_csv); - let justice_package = PackageTemplate::build_package(commitment_txid, idx as u32, PackageSolvingData::RevokedOutput(revk_outp), height + self.counterparty_commitment_params.on_counterparty_tx_csv as u32, true, height); + let revk_outp = RevokedOutput::build(per_commitment_point, self.counterparty_commitment_params.counterparty_delayed_payment_base_key, self.counterparty_commitment_params.counterparty_htlc_base_key, per_commitment_key, outp.value, self.counterparty_commitment_params.on_counterparty_tx_csv, self.onchain_tx_handler.opt_anchors()); + let justice_package = PackageTemplate::build_package(commitment_txid, idx as u32, PackageSolvingData::RevokedOutput(revk_outp), height + self.counterparty_commitment_params.on_counterparty_tx_csv as u32, height); claimable_outpoints.push(justice_package); to_counterparty_output_info = Some((idx.try_into().expect("Txn can't have more than 2^32 outputs"), outp.value)); @@ -2581,7 +2636,7 @@ impl ChannelMonitorImpl { to_counterparty_output_info); } let revk_htlc_outp = RevokedHTLCOutput::build(per_commitment_point, self.counterparty_commitment_params.counterparty_delayed_payment_base_key, self.counterparty_commitment_params.counterparty_htlc_base_key, per_commitment_key, htlc.amount_msat / 1000, htlc.clone(), self.onchain_tx_handler.channel_transaction_parameters.opt_anchors.is_some()); - let justice_package = PackageTemplate::build_package(commitment_txid, transaction_output_index, PackageSolvingData::RevokedHTLCOutput(revk_htlc_outp), htlc.cltv_expiry, true, height); + let justice_package = PackageTemplate::build_package(commitment_txid, transaction_output_index, PackageSolvingData::RevokedHTLCOutput(revk_htlc_outp), htlc.cltv_expiry, height); claimable_outpoints.push(justice_package); } } @@ -2706,8 +2761,7 @@ impl ChannelMonitorImpl { self.counterparty_commitment_params.counterparty_htlc_base_key, htlc.clone(), self.onchain_tx_handler.opt_anchors())) }; - let aggregation = if !htlc.offered { false } else { true }; - let counterparty_package = PackageTemplate::build_package(commitment_txid, transaction_output_index, counterparty_htlc_outp, htlc.cltv_expiry,aggregation, 0); + let counterparty_package = PackageTemplate::build_package(commitment_txid, transaction_output_index, counterparty_htlc_outp, htlc.cltv_expiry, 0); 
claimable_outpoints.push(counterparty_package); } } @@ -2746,11 +2800,12 @@ impl ChannelMonitorImpl { let revk_outp = RevokedOutput::build( per_commitment_point, self.counterparty_commitment_params.counterparty_delayed_payment_base_key, self.counterparty_commitment_params.counterparty_htlc_base_key, per_commitment_key, - tx.output[idx].value, self.counterparty_commitment_params.on_counterparty_tx_csv + tx.output[idx].value, self.counterparty_commitment_params.on_counterparty_tx_csv, + false ); let justice_package = PackageTemplate::build_package( htlc_txid, idx as u32, PackageSolvingData::RevokedOutput(revk_outp), - height + self.counterparty_commitment_params.on_counterparty_tx_csv as u32, true, height + height + self.counterparty_commitment_params.on_counterparty_tx_csv as u32, height ); claimable_outpoints.push(justice_package); if outputs_to_watch.is_none() { @@ -2773,11 +2828,11 @@ impl ChannelMonitorImpl { for &(ref htlc, _, _) in holder_tx.htlc_outputs.iter() { if let Some(transaction_output_index) = htlc.transaction_output_index { - let (htlc_output, aggregable) = if htlc.offered { + let htlc_output = if htlc.offered { let htlc_output = HolderHTLCOutput::build_offered( htlc.amount_msat, htlc.cltv_expiry, self.onchain_tx_handler.opt_anchors() ); - (htlc_output, false) + htlc_output } else { let payment_preimage = if let Some(preimage) = self.payment_preimages.get(&htlc.payment_hash) { preimage.clone() @@ -2788,12 +2843,12 @@ impl ChannelMonitorImpl { let htlc_output = HolderHTLCOutput::build_accepted( payment_preimage, htlc.amount_msat, self.onchain_tx_handler.opt_anchors() ); - (htlc_output, self.onchain_tx_handler.opt_anchors()) + htlc_output }; let htlc_package = PackageTemplate::build_package( holder_tx.txid, transaction_output_index, PackageSolvingData::HolderHTLCOutput(htlc_output), - htlc.cltv_expiry, aggregable, conf_height + htlc.cltv_expiry, conf_height ); claim_requests.push(htlc_package); } @@ -3133,7 +3188,7 @@ impl ChannelMonitorImpl { let should_broadcast = self.should_broadcast_holder_commitment_txn(logger); if should_broadcast { let funding_outp = HolderFundingOutput::build(self.funding_redeemscript.clone(), self.channel_value_satoshis, self.onchain_tx_handler.opt_anchors()); - let commitment_package = PackageTemplate::build_package(self.funding_info.0.txid.clone(), self.funding_info.0.index as u32, PackageSolvingData::HolderFundingOutput(funding_outp), self.best_block.height(), false, self.best_block.height()); + let commitment_package = PackageTemplate::build_package(self.funding_info.0.txid.clone(), self.funding_info.0.index as u32, PackageSolvingData::HolderFundingOutput(funding_outp), self.best_block.height(), self.best_block.height()); claimable_outpoints.push(commitment_package); self.pending_monitor_events.push(MonitorEvent::CommitmentTxConfirmed(self.funding_info.0)); let commitment_tx = self.onchain_tx_handler.get_fully_signed_holder_tx(&self.funding_redeemscript); @@ -3719,8 +3774,9 @@ where } } -impl chain::Confirm for (ChannelMonitor, T, F, L) +impl chain::Confirm for (M, T, F, L) where + M: Deref>, T::Target: BroadcasterInterface, F::Target: FeeEstimator, L::Target: Logger, @@ -4031,7 +4087,6 @@ impl<'a, 'b, ES: EntropySource, SP: SignerProvider> ReadableArgs<(&'a ES, &'b SP #[cfg(test)] mod tests { - use bitcoin::blockdata::block::BlockHeader; use bitcoin::blockdata::script::{Script, Builder}; use bitcoin::blockdata::opcodes; use bitcoin::blockdata::transaction::{Transaction, TxIn, TxOut, EcdsaSighashType}; @@ -4055,20 +4110,20 @@ mod tests { use 
crate::chain::channelmonitor::ChannelMonitor; use crate::chain::package::{weight_offered_htlc, weight_received_htlc, weight_revoked_offered_htlc, weight_revoked_received_htlc, WEIGHT_REVOKED_OUTPUT}; use crate::chain::transaction::OutPoint; - use crate::chain::keysinterface::InMemorySigner; + use crate::sign::InMemorySigner; + use crate::events::ClosureReason; use crate::ln::{PaymentPreimage, PaymentHash}; use crate::ln::chan_utils; use crate::ln::chan_utils::{HTLCOutputInCommitment, ChannelPublicKeys, ChannelTransactionParameters, HolderCommitmentTransaction, CounterpartyChannelTransactionParameters}; - use crate::ln::channelmanager::{PaymentSendFailure, PaymentId}; + use crate::ln::channelmanager::{PaymentSendFailure, PaymentId, RecipientOnionFields}; use crate::ln::functional_test_utils::*; use crate::ln::script::ShutdownScript; use crate::util::errors::APIError; - use crate::util::events::{ClosureReason, MessageSendEventsProvider}; use crate::util::test_utils::{TestLogger, TestBroadcaster, TestFeeEstimator}; use crate::util::ser::{ReadableArgs, Writeable}; use crate::sync::{Arc, Mutex}; use crate::io; - use bitcoin::{PackedLockTime, Sequence, TxMerkleNode, Witness}; + use bitcoin::{PackedLockTime, Sequence, Witness}; use crate::prelude::*; fn do_test_funding_spend_refuses_updates(use_local_txn: bool) { @@ -4107,10 +4162,7 @@ mod tests { // Connect a commitment transaction, but only to the ChainMonitor/ChannelMonitor. The // channel is now closed, but the ChannelManager doesn't know that yet. - let new_header = BlockHeader { - version: 2, time: 0, bits: 0, nonce: 0, - prev_blockhash: nodes[0].best_block_info().0, - merkle_root: TxMerkleNode::all_zeros() }; + let new_header = create_dummy_header(nodes[0].best_block_info().0, 0); let conf_height = nodes[0].best_block_info().1 + 1; nodes[1].chain_monitor.chain_monitor.transactions_confirmed(&new_header, &[(0, broadcast_tx)], conf_height); @@ -4122,8 +4174,9 @@ mod tests { // If the ChannelManager tries to update the channel, however, the ChainMonitor will pass // the update through to the ChannelMonitor which will refuse it (as the channel is closed). 
let (route, payment_hash, _, payment_secret) = get_route_and_payment_hash!(nodes[1], nodes[0], 100_000); - unwrap_send_err!(nodes[1].node.send_payment(&route, payment_hash, &Some(payment_secret), PaymentId(payment_hash.0)), - true, APIError::ChannelUnavailable { ref err }, + unwrap_send_err!(nodes[1].node.send_payment_with_route(&route, payment_hash, + RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0) + ), true, APIError::ChannelUnavailable { ref err }, assert!(err.contains("ChannelMonitor storage failure"))); check_added_monitors!(nodes[1], 2); // After the failure we generate a close-channel monitor update check_closed_broadcast!(nodes[1], true); @@ -4140,7 +4193,7 @@ mod tests { replay_update.updates.push(ChannelMonitorUpdateStep::PaymentPreimage { payment_preimage: payment_preimage_1 }); replay_update.updates.push(ChannelMonitorUpdateStep::PaymentPreimage { payment_preimage: payment_preimage_2 }); - let broadcaster = TestBroadcaster::new(Arc::clone(&nodes[1].blocks)); + let broadcaster = TestBroadcaster::with_blocks(Arc::clone(&nodes[1].blocks)); assert!( pre_update_monitor.update_monitor(&replay_update, &&broadcaster, &chanmon_cfgs[1].fee_estimator, &nodes[1].logger) .is_err()); @@ -4166,10 +4219,7 @@ mod tests { fn test_prune_preimages() { let secp_ctx = Secp256k1::new(); let logger = Arc::new(TestLogger::new()); - let broadcaster = Arc::new(TestBroadcaster { - txn_broadcasted: Mutex::new(Vec::new()), - blocks: Arc::new(Mutex::new(Vec::new())) - }); + let broadcaster = Arc::new(TestBroadcaster::new(Network::Testnet)); let fee_estimator = TestFeeEstimator { sat_per_kw: Mutex::new(253) }; let dummy_key = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap()); @@ -4227,6 +4277,7 @@ mod tests { [41; 32], 0, [0; 32], + [0; 32], ); let counterparty_pubkeys = ChannelPublicKeys {
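// ---------------------------------------------------------------------------
// Illustrative sketch, not part of the upstream patch: wiring the
// `rebroadcast_pending_claims` method added earlier in this diff into a
// periodic task. The helper name and surrounding wiring are hypothetical; per
// the new docs, calling it roughly every 30 seconds (or more often on spotty
// connections, e.g. mobile) is the recommended cadence.
use lightning::chain::chaininterface::{BroadcasterInterface, FeeEstimator};
use lightning::chain::channelmonitor::ChannelMonitor;
use lightning::sign::WriteableEcdsaChannelSigner;
use lightning::util::logger::Logger;

fn rebroadcast_tick<S: WriteableEcdsaChannelSigner, B: BroadcasterInterface, F: FeeEstimator, L: Logger>(
	monitor: &ChannelMonitor<S>, broadcaster: &B, fee_estimator: &F, logger: &L,
) {
	// Re-evaluates feerates and re-broadcasts any still-unconfirmed force-close claims.
	monitor.rebroadcast_pending_claims(broadcaster, fee_estimator, logger);
}
// ---------------------------------------------------------------------------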