use crate::ln::msgs::DecodeError;
use crate::ln::chan_utils;
use crate::ln::chan_utils::{CounterpartyCommitmentSecrets, HTLCOutputInCommitment, HTLCClaim, ChannelTransactionParameters, HolderCommitmentTransaction};
-use crate::ln::channelmanager::HTLCSource;
+use crate::ln::channelmanager::{HTLCSource, SentHTLCId};
use crate::chain;
use crate::chain::{BestBlock, WatchedOutput};
use crate::chain::chaininterface::{BroadcasterInterface, FeeEstimator, LowerBoundedFeeEstimator};
use crate::util::logger::Logger;
use crate::util::ser::{Readable, ReadableArgs, RequiredWrapper, MaybeReadable, UpgradableRequired, Writer, Writeable, U48};
use crate::util::byte_utils;
-use crate::util::events::Event;
+use crate::events::Event;
#[cfg(anchors)]
-use crate::util::events::{AnchorDescriptor, HTLCDescriptor, BumpTransactionEvent};
+use crate::events::bump_transaction::{AnchorDescriptor, HTLCDescriptor, BumpTransactionEvent};
use crate::prelude::*;
use core::{cmp, mem};
use crate::io::{self, Error};
use core::convert::TryInto;
use core::ops::Deref;
-use crate::sync::Mutex;
+use crate::sync::{Mutex, LockTestExt};
/// An update generated by the underlying channel itself which contains some new information the
/// [`ChannelMonitor`] should be made aware of.
///
/// Because this represents only a small delta of the channel state, it is generally
/// much smaller than a full [`ChannelMonitor`]. However, for large single commitment transaction
/// updates (e.g. ones during which there are hundreds of HTLCs pending on the commitment
/// transaction), a single update may reach upwards of 1 MiB in serialized size.
-#[cfg_attr(any(test, fuzzing, feature = "_test_utils"), derive(PartialEq, Eq))]
-#[derive(Clone)]
+#[derive(Clone, PartialEq, Eq)]
#[must_use]
pub struct ChannelMonitorUpdate {
pub(crate) updates: Vec<ChannelMonitorUpdateStep>,
/// The sequence number of this update. Updates *must* be replayed in-order according to this
/// sequence number (and updates may panic if they are not). The update_id values are strictly
- /// increasing and increase by one for each new update, with one exception specified below.
+ /// increasing and increase by one for each new update, with two exceptions specified below.
///
/// This sequence number is also used to track up to which points updates which returned
/// [`ChannelMonitorUpdateStatus::InProgress`] have been applied to all copies of a given
/// ChannelMonitor when ChannelManager::channel_monitor_updated is called.
///
- /// The only instance where update_id values are not strictly increasing is the case where we
- /// allow post-force-close updates with a special update ID of [`CLOSED_CHANNEL_UPDATE_ID`]. See
- /// its docs for more details.
+ /// The only instances where we allow update_id values to not be strictly increasing carry a
+ /// special update ID of [`CLOSED_CHANNEL_UPDATE_ID`]. This update ID is used for updates that
+ /// will force close the channel by broadcasting the latest commitment transaction, or for
+ /// special post-force-close updates, like providing preimages necessary to claim outputs on the
+ /// broadcast commitment transaction. See its docs for more details.
///
/// [`ChannelMonitorUpdateStatus::InProgress`]: super::ChannelMonitorUpdateStatus::InProgress
pub update_id: u64,
}
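// Editorial sketch (not part of this patch): how a replayer might enforce the update_id
// ordering rules documented above. `next_update_is_in_order` is a hypothetical helper;
// `update_id` and `CLOSED_CHANNEL_UPDATE_ID` are the real field and constant.
#[cfg(any())] // illustration only, never compiled
fn next_update_is_in_order(latest_update_id: u64, next: &ChannelMonitorUpdate) -> bool {
	// The sentinel ID is exempt from strict monotonicity: force-close updates (and
	// post-force-close preimage updates) may repeat it.
	if next.update_id == CLOSED_CHANNEL_UPDATE_ID { return true; }
	// Every other update must advance the counter by exactly one.
	next.update_id == latest_update_id + 1
}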
-/// If:
-/// (1) a channel has been force closed and
-/// (2) we receive a preimage from a forward link that allows us to spend an HTLC output on
-/// this channel's (the backward link's) broadcasted commitment transaction
-/// then we allow the `ChannelManager` to send a `ChannelMonitorUpdate` with this update ID,
-/// with the update providing said payment preimage. No other update types are allowed after
-/// force-close.
+/// The update ID used for a [`ChannelMonitorUpdate`] that is either:
+///
+/// (1) attempting to force close the channel by broadcasting our latest commitment transaction or
+/// (2) providing a preimage (after the channel has been force closed) from a forward link that
+/// allows us to spend an HTLC output on this channel's (the backward link's) broadcasted
+/// commitment transaction.
+///
+/// No other [`ChannelMonitorUpdate`]s are allowed after force-close.
pub const CLOSED_CHANNEL_UPDATE_ID: u64 = core::u64::MAX;
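// Editorial sketch (not part of this patch) of the rule stated above: under the sentinel
// update ID, only a force-close step, or a preimage step arriving after force-close, is
// acceptable. `post_close_step_allowed` is hypothetical; the real enforcement lives in
// `update_monitor` below.
#[cfg(any())] // illustration only, never compiled
fn post_close_step_allowed(step: &ChannelMonitorUpdateStep) -> bool {
	matches!(step,
		ChannelMonitorUpdateStep::ChannelForceClosed { .. } |
		ChannelMonitorUpdateStep::PaymentPreimage { .. })
}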
impl Writeable for ChannelMonitorUpdate {
);
-#[cfg_attr(any(test, fuzzing, feature = "_test_utils"), derive(PartialEq, Eq))]
-#[derive(Clone)]
+#[derive(Clone, PartialEq, Eq)]
pub(crate) enum ChannelMonitorUpdateStep {
LatestHolderCommitmentTXInfo {
commitment_tx: HolderCommitmentTransaction,
htlc_outputs: Vec<(HTLCOutputInCommitment, Option<Signature>, Option<HTLCSource>)>,
+ claimed_htlcs: Vec<(SentHTLCId, PaymentPreimage)>,
},
LatestCounterpartyCommitmentTXInfo {
commitment_txid: Txid,
impl_writeable_tlv_based_enum_upgradable!(ChannelMonitorUpdateStep,
(0, LatestHolderCommitmentTXInfo) => {
(0, commitment_tx, required),
+ (1, claimed_htlcs, vec_type),
(2, htlc_outputs, vec_type),
},
(1, LatestCounterpartyCommitmentTXInfo) => {
/// Serialized to disk but should generally not be sent to Watchtowers.
counterparty_hash_commitment_number: HashMap<PaymentHash, u64>,
+ counterparty_fulfilled_htlcs: HashMap<SentHTLCId, PaymentPreimage>,
+
// We store two holder commitment transactions to avoid any race conditions where we may update
// some monitors (potentially on watchtowers) but then fail to update others, resulting in the
// various monitors for one channel being out of sync, and us broadcasting a holder
impl<Signer: WriteableEcdsaChannelSigner> PartialEq for ChannelMonitor<Signer> where Signer: PartialEq {
fn eq(&self, other: &Self) -> bool {
- let inner = self.inner.lock().unwrap();
- let other = other.inner.lock().unwrap();
- inner.eq(&other)
+ // We need some kind of total lockorder. Absent a better idea, we sort by position in
+ // memory and take locks in that order (assuming that we can't move within memory while a
+ // lock is held).
+ let ord = ((self as *const _) as usize) < ((other as *const _) as usize);
+ let a = if ord { self.inner.unsafe_well_ordered_double_lock_self() } else { other.inner.unsafe_well_ordered_double_lock_self() };
+ let b = if ord { other.inner.unsafe_well_ordered_double_lock_self() } else { self.inner.unsafe_well_ordered_double_lock_self() };
+ a.eq(&b)
}
}
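// Editorial sketch (not part of this patch) of the address-ordering technique used in
// `eq` above, written with plain `std::sync` types rather than LDK's test-only
// `LockTestExt::unsafe_well_ordered_double_lock_self`. Always taking the lower-addressed
// mutex first yields a total lock order, so two threads comparing the same pair of
// distinct monitors in opposite argument orders cannot deadlock.
#[cfg(any())] // illustration only, never compiled
fn lock_both<'a, T>(a: &'a std::sync::Mutex<T>, b: &'a std::sync::Mutex<T>)
	-> (std::sync::MutexGuard<'a, T>, std::sync::MutexGuard<'a, T>)
{
	// Assumes `a` and `b` are distinct; locking the same mutex twice would deadlock.
	if (a as *const _ as usize) < (b as *const _ as usize) {
		let guard_a = a.lock().unwrap();
		let guard_b = b.lock().unwrap();
		(guard_a, guard_b)
	} else {
		let guard_b = b.lock().unwrap();
		let guard_a = a.lock().unwrap();
		(guard_a, guard_b)
	}
}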
(9, self.counterparty_node_id, option),
(11, self.confirmed_commitment_tx_counterparty_output, option),
(13, self.spendable_txids_confirmed, vec_type),
+ (15, self.counterparty_fulfilled_htlcs, required),
});
Ok(())
counterparty_claimable_outpoints: HashMap::new(),
counterparty_commitment_txn_on_chain: HashMap::new(),
counterparty_hash_commitment_number: HashMap::new(),
+ counterparty_fulfilled_htlcs: HashMap::new(),
prev_holder_signed_commitment_tx: None,
current_holder_commitment_tx: holder_commitment_tx,
&self, holder_commitment_tx: HolderCommitmentTransaction,
htlc_outputs: Vec<(HTLCOutputInCommitment, Option<Signature>, Option<HTLCSource>)>,
) -> Result<(), ()> {
- self.inner.lock().unwrap().provide_latest_holder_commitment_tx(holder_commitment_tx, htlc_outputs).map_err(|_| ())
+ self.inner.lock().unwrap().provide_latest_holder_commitment_tx(holder_commitment_tx, htlc_outputs, &Vec::new()).map_err(|_| ())
}
/// This is used to provide payment preimage(s) out-of-band during startup without updating the
payment_hash, payment_preimage, broadcaster, fee_estimator, logger)
}
- pub(crate) fn broadcast_latest_holder_commitment_txn<B: Deref, L: Deref>(
- &self,
- broadcaster: &B,
- logger: &L,
- ) where
- B::Target: BroadcasterInterface,
- L::Target: Logger,
- {
- self.inner.lock().unwrap().broadcast_latest_holder_commitment_txn(broadcaster, logger);
- }
-
/// Updates a ChannelMonitor on the basis of some new information provided by the Channel
/// itself.
///
/// This is called by the [`EventsProvider::process_pending_events`] implementation for
/// [`ChainMonitor`].
///
- /// [`EventsProvider::process_pending_events`]: crate::util::events::EventsProvider::process_pending_events
+ /// [`EventsProvider::process_pending_events`]: crate::events::EventsProvider::process_pending_events
/// [`ChainMonitor`]: crate::chain::chainmonitor::ChainMonitor
pub fn get_and_clear_pending_events(&self) -> Vec<Event> {
self.inner.lock().unwrap().get_and_clear_pending_events()
/// `ChannelMonitor`. This is used to determine if an HTLC was removed from the channel prior
/// to the `ChannelManager` having been persisted.
///
- /// This is similar to [`Self::get_pending_outbound_htlcs`] except it includes HTLCs which were
- /// resolved by this `ChannelMonitor`.
- pub(crate) fn get_all_current_outbound_htlcs(&self) -> HashMap<HTLCSource, HTLCOutputInCommitment> {
+ /// This is similar to [`Self::get_pending_or_resolved_outbound_htlcs`] except it includes
+ /// HTLCs which were resolved on-chain (i.e. where the final HTLC resolution was done by an
+ /// event from this `ChannelMonitor`).
+ pub(crate) fn get_all_current_outbound_htlcs(&self) -> HashMap<HTLCSource, (HTLCOutputInCommitment, Option<PaymentPreimage>)> {
let mut res = HashMap::new();
// Just examine the available counterparty commitment transactions. See docs on
// `fail_unbroadcast_htlcs`, below, for justification.
if let Some(ref latest_outpoints) = us.counterparty_claimable_outpoints.get($txid) {
for &(ref htlc, ref source_option) in latest_outpoints.iter() {
if let &Some(ref source) = source_option {
- res.insert((**source).clone(), htlc.clone());
+ res.insert((**source).clone(), (htlc.clone(),
+ us.counterparty_fulfilled_htlcs.get(&SentHTLCId::from_source(source)).cloned()));
}
}
}
res
}
- /// Gets the set of outbound HTLCs which are pending resolution in this channel.
+ /// Gets the set of outbound HTLCs which are pending resolution in this channel or which were
+ /// resolved with a preimage from our counterparty.
+ ///
/// This is used to reconstruct pending outbound payments on restart in the ChannelManager.
- pub(crate) fn get_pending_outbound_htlcs(&self) -> HashMap<HTLCSource, HTLCOutputInCommitment> {
+ ///
+ /// Currently the preimage is unused; however, if it is present in the relevant internal
+ /// state, an HTLC is always included even if it has been resolved.
+ pub(crate) fn get_pending_or_resolved_outbound_htlcs(&self) -> HashMap<HTLCSource, (HTLCOutputInCommitment, Option<PaymentPreimage>)> {
let us = self.inner.lock().unwrap();
// We're only concerned with the confirmation count of HTLC transactions, and don't
// actually care how many confirmations a commitment transaction may or may not have. Thus,
Some(commitment_tx_output_idx) == htlc.transaction_output_index
} else { false }
});
- if !htlc_update_confd {
- res.insert(source.clone(), htlc.clone());
+ let counterparty_resolved_preimage_opt =
+ us.counterparty_fulfilled_htlcs.get(&SentHTLCId::from_source(source)).cloned();
+ if !htlc_update_confd || counterparty_resolved_preimage_opt.is_some() {
+ res.insert(source.clone(), (htlc.clone(), counterparty_resolved_preimage_opt));
}
}
}
}
}
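// Note: on restart the `ChannelManager` can rebuild its pending-payments view from this
// map; an entry whose preimage is `Some` means the counterparty already fulfilled the
// HTLC, even if the `ChannelManager` being reloaded predates that fulfillment.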
if matched_htlc { continue; }
+ if $self.counterparty_fulfilled_htlcs.get(&SentHTLCId::from_source(source)).is_some() {
+ continue;
+ }
$self.onchain_events_awaiting_threshold_conf.retain(|ref entry| {
if entry.height != $commitment_tx_conf_height { return true; }
match entry.event {
// Prune HTLCs from the previous counterparty commitment tx so we don't generate failure/fulfill
// events for now-revoked/fulfilled HTLCs.
if let Some(txid) = self.prev_counterparty_commitment_txid.take() {
- for &mut (_, ref mut source) in self.counterparty_claimable_outpoints.get_mut(&txid).unwrap() {
- *source = None;
+ if self.current_counterparty_commitment_txid.unwrap() != txid {
+ let cur_claimables = self.counterparty_claimable_outpoints.get(
+ &self.current_counterparty_commitment_txid.unwrap()).unwrap();
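+ // Any HTLC source present in the previous counterparty commitment but absent
+ // from the current one is gone from the channel entirely, so we can stop
+ // tracking its fulfilled-HTLC entry here.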
+ for (_, ref source_opt) in self.counterparty_claimable_outpoints.get(&txid).unwrap() {
+ if let Some(source) = source_opt {
+ if !cur_claimables.iter()
+ .any(|(_, cur_source_opt)| cur_source_opt == source_opt)
+ {
+ self.counterparty_fulfilled_htlcs.remove(&SentHTLCId::from_source(source));
+ }
+ }
+ }
+ for &mut (_, ref mut source_opt) in self.counterparty_claimable_outpoints.get_mut(&txid).unwrap() {
+ *source_opt = None;
+ }
+ } else {
+ assert!(cfg!(fuzzing), "Commitment txids are unique outside of fuzzing, where hashes can collide");
}
}
/// is important that any clones of this channel monitor (including remote clones) be kept
/// up-to-date as our holder commitment transaction is updated.
/// Panics if set_on_holder_tx_csv has never been called.
- fn provide_latest_holder_commitment_tx(&mut self, holder_commitment_tx: HolderCommitmentTransaction, htlc_outputs: Vec<(HTLCOutputInCommitment, Option<Signature>, Option<HTLCSource>)>) -> Result<(), &'static str> {
- // block for Rust 1.34 compat
- let mut new_holder_commitment_tx = {
- let trusted_tx = holder_commitment_tx.trust();
- let txid = trusted_tx.txid();
- let tx_keys = trusted_tx.keys();
- self.current_holder_commitment_number = trusted_tx.commitment_number();
- HolderSignedTx {
- txid,
- revocation_key: tx_keys.revocation_key,
- a_htlc_key: tx_keys.broadcaster_htlc_key,
- b_htlc_key: tx_keys.countersignatory_htlc_key,
- delayed_payment_key: tx_keys.broadcaster_delayed_payment_key,
- per_commitment_point: tx_keys.per_commitment_point,
- htlc_outputs,
- to_self_value_sat: holder_commitment_tx.to_broadcaster_value_sat(),
- feerate_per_kw: trusted_tx.feerate_per_kw(),
- }
+ fn provide_latest_holder_commitment_tx(&mut self, holder_commitment_tx: HolderCommitmentTransaction, htlc_outputs: Vec<(HTLCOutputInCommitment, Option<Signature>, Option<HTLCSource>)>, claimed_htlcs: &[(SentHTLCId, PaymentPreimage)]) -> Result<(), &'static str> {
+ let trusted_tx = holder_commitment_tx.trust();
+ let txid = trusted_tx.txid();
+ let tx_keys = trusted_tx.keys();
+ self.current_holder_commitment_number = trusted_tx.commitment_number();
+ let mut new_holder_commitment_tx = HolderSignedTx {
+ txid,
+ revocation_key: tx_keys.revocation_key,
+ a_htlc_key: tx_keys.broadcaster_htlc_key,
+ b_htlc_key: tx_keys.countersignatory_htlc_key,
+ delayed_payment_key: tx_keys.broadcaster_delayed_payment_key,
+ per_commitment_point: tx_keys.per_commitment_point,
+ htlc_outputs,
+ to_self_value_sat: holder_commitment_tx.to_broadcaster_value_sat(),
+ feerate_per_kw: trusted_tx.feerate_per_kw(),
};
self.onchain_tx_handler.provide_latest_holder_tx(holder_commitment_tx);
mem::swap(&mut new_holder_commitment_tx, &mut self.current_holder_commitment_tx);
self.prev_holder_signed_commitment_tx = Some(new_holder_commitment_tx);
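+ // Record the preimages the counterparty has given us for our outbound HTLCs so that
+ // they survive a restart. In debug builds, sanity-check that each claimed HTLC is
+ // actually present in the current counterparty commitment transaction.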
+ for (claimed_htlc_id, claimed_preimage) in claimed_htlcs {
+ #[cfg(debug_assertions)] {
+ let cur_counterparty_htlcs = self.counterparty_claimable_outpoints.get(
+ &self.current_counterparty_commitment_txid.unwrap()).unwrap();
+ assert!(cur_counterparty_htlcs.iter().any(|(_, source_opt)| {
+ if let Some(source) = source_opt {
+ SentHTLCId::from_source(source) == *claimed_htlc_id
+ } else { false }
+ }));
+ }
+ self.counterparty_fulfilled_htlcs.insert(*claimed_htlc_id, *claimed_preimage);
+ }
if self.holder_tx_signed {
return Err("Latest holder commitment signed has already been signed, update is rejected");
}
{
log_info!(logger, "Applying update to monitor {}, bringing update_id from {} to {} with {} changes.",
log_funding_info!(self), self.latest_update_id, updates.update_id, updates.updates.len());
- // ChannelMonitor updates may be applied after force close if we receive a
- // preimage for a broadcasted commitment transaction HTLC output that we'd
- // like to claim on-chain. If this is the case, we no longer have guaranteed
- // access to the monitor's update ID, so we use a sentinel value instead.
+ // ChannelMonitor updates may be applied after force close if we receive a preimage for a
+ // broadcasted commitment transaction HTLC output that we'd like to claim on-chain. If this
+ // is the case, we no longer have guaranteed access to the monitor's update ID, so we use a
+ // sentinel value instead.
+ //
+ // The `ChannelManager` may also queue redundant `ChannelForceClosed` updates if it still
+ // thinks the channel needs to have its commitment transaction broadcast, so we'll allow
+ // them as well.
if updates.update_id == CLOSED_CHANNEL_UPDATE_ID {
assert_eq!(updates.updates.len(), 1);
match updates.updates[0] {
- ChannelMonitorUpdateStep::PaymentPreimage { .. } => {},
+ ChannelMonitorUpdateStep::ChannelForceClosed { .. } => {},
+ // We should have already seen a `ChannelForceClosed` update if we're trying to
+ // provide a preimage at this point.
+ ChannelMonitorUpdateStep::PaymentPreimage { .. } =>
+ debug_assert_eq!(self.latest_update_id, CLOSED_CHANNEL_UPDATE_ID),
_ => {
log_error!(logger, "Attempted to apply post-force-close ChannelMonitorUpdate of type {}", updates.updates[0].variant_name());
panic!("Attempted to apply post-force-close ChannelMonitorUpdate that wasn't providing a payment preimage");
let bounded_fee_estimator = LowerBoundedFeeEstimator::new(&*fee_estimator);
for update in updates.updates.iter() {
match update {
- ChannelMonitorUpdateStep::LatestHolderCommitmentTXInfo { commitment_tx, htlc_outputs } => {
+ ChannelMonitorUpdateStep::LatestHolderCommitmentTXInfo { commitment_tx, htlc_outputs, claimed_htlcs } => {
log_trace!(logger, "Updating ChannelMonitor with latest holder commitment transaction info");
if self.lockdown_from_offchain { panic!(); }
- if let Err(e) = self.provide_latest_holder_commitment_tx(commitment_tx.clone(), htlc_outputs.clone()) {
+ if let Err(e) = self.provide_latest_holder_commitment_tx(commitment_tx.clone(), htlc_outputs.clone(), &claimed_htlcs) {
log_error!(logger, "Providing latest holder commitment transaction failed/was refused:");
log_error!(logger, " {}", e);
ret = Err(());
},
}
}
+
+ // If the updates succeeded and we were in an already closed channel state, then there's no
+ // need to refuse any updates we expect to receive after seeing a confirmed commitment.
+ if ret.is_ok() && updates.update_id == CLOSED_CHANNEL_UPDATE_ID && self.latest_update_id == updates.update_id {
+ return Ok(());
+ }
+
self.latest_update_id = updates.update_id;
if ret.is_ok() && self.funding_spend_seen {
}));
},
ClaimEvent::BumpHTLC {
- target_feerate_sat_per_1000_weight, htlcs,
+ target_feerate_sat_per_1000_weight, htlcs, tx_lock_time,
} => {
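+ // `tx_lock_time` is surfaced with the event so the consumer can give the resulting
+ // HTLC transaction the locktime the protocol requires (HTLC-timeout spends must set
+ // nLockTime to the HTLC's CLTV expiry).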
let mut htlc_descriptors = Vec::with_capacity(htlcs.len());
for htlc in htlcs {
ret.push(Event::BumpTransaction(BumpTransactionEvent::HTLCResolution {
target_feerate_sat_per_1000_weight,
htlc_descriptors,
+ tx_lock_time,
}));
}
}
let mut counterparty_node_id = None;
let mut confirmed_commitment_tx_counterparty_output = None;
let mut spendable_txids_confirmed = Some(Vec::new());
+ let mut counterparty_fulfilled_htlcs = Some(HashMap::new());
read_tlv_fields!(reader, {
(1, funding_spend_confirmed, option),
(3, htlcs_resolved_on_chain, vec_type),
(9, counterparty_node_id, option),
(11, confirmed_commitment_tx_counterparty_output, option),
(13, spendable_txids_confirmed, vec_type),
+ (15, counterparty_fulfilled_htlcs, option),
});
Ok((best_block.block_hash(), ChannelMonitor::from_impl(ChannelMonitorImpl {
counterparty_claimable_outpoints,
counterparty_commitment_txn_on_chain,
counterparty_hash_commitment_number,
+ counterparty_fulfilled_htlcs: counterparty_fulfilled_htlcs.unwrap(),
prev_holder_signed_commitment_tx,
current_holder_commitment_tx,
use crate::chain::package::{weight_offered_htlc, weight_received_htlc, weight_revoked_offered_htlc, weight_revoked_received_htlc, WEIGHT_REVOKED_OUTPUT};
use crate::chain::transaction::OutPoint;
use crate::chain::keysinterface::InMemorySigner;
+ use crate::events::ClosureReason;
use crate::ln::{PaymentPreimage, PaymentHash};
use crate::ln::chan_utils;
use crate::ln::chan_utils::{HTLCOutputInCommitment, ChannelPublicKeys, ChannelTransactionParameters, HolderCommitmentTransaction, CounterpartyChannelTransactionParameters};
use crate::ln::functional_test_utils::*;
use crate::ln::script::ShutdownScript;
use crate::util::errors::APIError;
- use crate::util::events::{ClosureReason, MessageSendEventsProvider};
use crate::util::test_utils::{TestLogger, TestBroadcaster, TestFeeEstimator};
use crate::util::ser::{ReadableArgs, Writeable};
use crate::sync::{Arc, Mutex};
fn test_prune_preimages() {
let secp_ctx = Secp256k1::new();
let logger = Arc::new(TestLogger::new());
- let broadcaster = Arc::new(TestBroadcaster{txn_broadcasted: Mutex::new(Vec::new()), blocks: Arc::new(Mutex::new(Vec::new()))});
+ let broadcaster = Arc::new(TestBroadcaster {
+ txn_broadcasted: Mutex::new(Vec::new()),
+ blocks: Arc::new(Mutex::new(Vec::new()))
+ });
let fee_estimator = TestFeeEstimator { sat_per_kw: Mutex::new(253) };
let dummy_key = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
- let dummy_tx = Transaction { version: 0, lock_time: PackedLockTime::ZERO, input: Vec::new(), output: Vec::new() };
let mut preimages = Vec::new();
{
// Prune with one old state and a holder commitment tx holding a few overlaps with the
// old state.
let shutdown_pubkey = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
- let best_block = BestBlock::from_genesis(Network::Testnet);
+ let best_block = BestBlock::from_network(Network::Testnet);
let monitor = ChannelMonitor::new(Secp256k1::new(), keys,
Some(ShutdownScript::new_p2wpkh_from_pubkey(shutdown_pubkey).into_inner()), 0, &Script::new(),
(OutPoint { txid: Txid::from_slice(&[43; 32]).unwrap(), index: 0 }, Script::new()),
HolderCommitmentTransaction::dummy(), best_block, dummy_key);
monitor.provide_latest_holder_commitment_tx(HolderCommitmentTransaction::dummy(), preimages_to_holder_htlcs!(preimages[0..10])).unwrap();
- let dummy_txid = dummy_tx.txid();
- monitor.provide_latest_counterparty_commitment_tx(dummy_txid, preimages_slice_to_htlc_outputs!(preimages[5..15]), 281474976710655, dummy_key, &logger);
- monitor.provide_latest_counterparty_commitment_tx(dummy_txid, preimages_slice_to_htlc_outputs!(preimages[15..20]), 281474976710654, dummy_key, &logger);
- monitor.provide_latest_counterparty_commitment_tx(dummy_txid, preimages_slice_to_htlc_outputs!(preimages[17..20]), 281474976710653, dummy_key, &logger);
- monitor.provide_latest_counterparty_commitment_tx(dummy_txid, preimages_slice_to_htlc_outputs!(preimages[18..20]), 281474976710652, dummy_key, &logger);
+ monitor.provide_latest_counterparty_commitment_tx(Txid::from_inner(Sha256::hash(b"1").into_inner()),
+ preimages_slice_to_htlc_outputs!(preimages[5..15]), 281474976710655, dummy_key, &logger);
+ monitor.provide_latest_counterparty_commitment_tx(Txid::from_inner(Sha256::hash(b"2").into_inner()),
+ preimages_slice_to_htlc_outputs!(preimages[15..20]), 281474976710654, dummy_key, &logger);
for &(ref preimage, ref hash) in preimages.iter() {
let bounded_fee_estimator = LowerBoundedFeeEstimator::new(&fee_estimator);
monitor.provide_payment_preimage(hash, preimage, &broadcaster, &bounded_fee_estimator, &logger);
test_preimages_exist!(&preimages[0..10], monitor);
test_preimages_exist!(&preimages[15..20], monitor);
+ monitor.provide_latest_counterparty_commitment_tx(Txid::from_inner(Sha256::hash(b"3").into_inner()),
+ preimages_slice_to_htlc_outputs!(preimages[17..20]), 281474976710653, dummy_key, &logger);
+
// Now provide a further secret, pruning preimages 15-17
secret[0..32].clone_from_slice(&hex::decode("c7518c8ae4660ed02894df8976fa1a3659c1a8b4b5bec0c4b872abeba4cb8964").unwrap());
monitor.provide_secret(281474976710654, secret.clone()).unwrap();
test_preimages_exist!(&preimages[0..10], monitor);
test_preimages_exist!(&preimages[17..20], monitor);
+ monitor.provide_latest_counterparty_commitment_tx(Txid::from_inner(Sha256::hash(b"4").into_inner()),
+ preimages_slice_to_htlc_outputs!(preimages[18..20]), 281474976710652, dummy_key, &logger);
+
// Now update holder commitment tx info, pruning only element 18 as we still care about the
// previous commitment tx's preimages too
monitor.provide_latest_holder_commitment_tx(HolderCommitmentTransaction::dummy(), preimages_to_holder_htlcs!(preimages[0..5])).unwrap();