use bitcoin::sighash::EcdsaSighashType;
use crate::ln::channel::INITIAL_COMMITMENT_NUMBER;
-use crate::ln::{PaymentHash, PaymentPreimage};
+use crate::ln::{PaymentHash, PaymentPreimage, ChannelId};
use crate::ln::msgs::DecodeError;
use crate::ln::channel_keys::{DelayedPaymentKey, DelayedPaymentBasepoint, HtlcBasepoint, HtlcKey, RevocationKey, RevocationBasepoint};
use crate::ln::chan_utils::{self,CommitmentTransaction, CounterpartyCommitmentSecrets, HTLCOutputInCommitment, HTLCClaim, ChannelTransactionParameters, HolderCommitmentTransaction, TxCreationKeys};
use crate::chain::{BestBlock, WatchedOutput};
use crate::chain::chaininterface::{BroadcasterInterface, FeeEstimator, LowerBoundedFeeEstimator};
use crate::chain::transaction::{OutPoint, TransactionData};
-use crate::sign::{ChannelDerivationParameters, HTLCDescriptor, SpendableOutputDescriptor, StaticPaymentOutputDescriptor, DelayedPaymentOutputDescriptor, WriteableEcdsaChannelSigner, SignerProvider, EntropySource};
-use crate::chain::onchaintx::{ClaimEvent, OnchainTxHandler};
+use crate::sign::{ChannelDerivationParameters, HTLCDescriptor, SpendableOutputDescriptor, StaticPaymentOutputDescriptor, DelayedPaymentOutputDescriptor, ecdsa::WriteableEcdsaChannelSigner, SignerProvider, EntropySource};
+use crate::chain::onchaintx::{ClaimEvent, FeerateStrategy, OnchainTxHandler};
use crate::chain::package::{CounterpartyOfferedHTLCOutput, CounterpartyReceivedHTLCOutput, HolderFundingOutput, HolderHTLCOutput, PackageSolvingData, PackageTemplate, RevokedOutput, RevokedHTLCOutput};
use crate::chain::Filter;
-use crate::util::logger::Logger;
+use crate::util::logger::{Logger, Record};
use crate::util::ser::{Readable, ReadableArgs, RequiredWrapper, MaybeReadable, UpgradableRequired, Writer, Writeable, U48};
use crate::util::byte_utils;
-use crate::events::{Event, EventHandler};
+use crate::events::{ClosureReason, Event, EventHandler};
use crate::events::bump_transaction::{AnchorDescriptor, BumpTransactionEvent};
+#[allow(unused_imports)]
use crate::prelude::*;
+
use core::{cmp, mem};
use crate::io::{self, Error};
-use core::convert::TryInto;
use core::ops::Deref;
use crate::sync::{Mutex, LockTestExt};
#[must_use]
pub struct ChannelMonitorUpdate {
	/// The ordered list of state-machine steps to apply to the monitor. These *must* be applied
	/// in-order (see [`Self::update_id`]).
	pub(crate) updates: Vec<ChannelMonitorUpdateStep>,
	/// Historically, [`ChannelMonitor`]s didn't know their counterparty node id. However,
	/// `ChannelManager` really wants to know it so that it can easily look up the corresponding
	/// channel. For now, this results in a temporary map in `ChannelManager` to look up channels
	/// by only the funding outpoint.
	///
	/// To eventually remove that, we repeat the counterparty node id here so that we can upgrade
	/// `ChannelMonitor`s to become aware of the counterparty node id if they were generated prior
	/// to when it was stored directly in them.
	pub(crate) counterparty_node_id: Option<PublicKey>,
	/// The sequence number of this update. Updates *must* be replayed in-order according to this
	/// sequence number (and updates may panic if they are not). The update_id values are strictly
	/// increasing and increase by one for each new update, with two exceptions specified below.
	///
	/// [`ChannelMonitorUpdateStatus::InProgress`]: super::ChannelMonitorUpdateStatus::InProgress
	pub update_id: u64,
	/// The channel ID associated with these updates.
	///
	/// Will be `None` for `ChannelMonitorUpdate`s constructed on LDK versions prior to 0.0.121 and
	/// always `Some` otherwise.
	pub channel_id: Option<ChannelId>,
}
/// The update ID used for a [`ChannelMonitorUpdate`] that is either:
for update_step in self.updates.iter() {
update_step.write(w)?;
}
- write_tlv_fields!(w, {});
+ write_tlv_fields!(w, {
+ (1, self.counterparty_node_id, option),
+ (3, self.channel_id, option),
+ });
Ok(())
}
}
updates.push(upd);
}
}
- read_tlv_fields!(r, {});
- Ok(Self { update_id, updates })
+ let mut counterparty_node_id = None;
+ let mut channel_id = None;
+ read_tlv_fields!(r, {
+ (1, counterparty_node_id, option),
+ (3, channel_id, option),
+ });
+ Ok(Self { update_id, counterparty_node_id, updates, channel_id })
}
}
/// A monitor event containing an HTLCUpdate.
HTLCEvent(HTLCUpdate),
+ /// Indicates we broadcasted the channel's latest commitment transaction and thus closed the
+ /// channel. Holds information about the channel and why it was closed.
+ HolderForceClosedWithInfo {
+ /// The reason the channel was closed.
+ reason: ClosureReason,
+ /// The funding outpoint of the channel.
+ outpoint: OutPoint,
+ /// The channel ID of the channel.
+ channel_id: ChannelId,
+ },
+
/// Indicates we broadcasted the channel's latest commitment transaction and thus closed the
/// channel.
HolderForceClosed(OutPoint),
Completed {
/// The funding outpoint of the [`ChannelMonitor`] that was updated
funding_txo: OutPoint,
+ /// The channel ID of the channel associated with the [`ChannelMonitor`]
+ channel_id: ChannelId,
/// The Update ID from [`ChannelMonitorUpdate::update_id`] which was applied or
/// [`ChannelMonitor::get_latest_update_id`].
///
(0, Completed) => {
(0, funding_txo, required),
(2, monitor_update_id, required),
+ (4, channel_id, required),
+ },
+ (5, HolderForceClosedWithInfo) => {
+ (0, reason, upgradable_required),
+ (2, outpoint, required),
+ (4, channel_id, required),
},
;
(2, HTLCEvent),
}
/// Returns whether the tip in `best_block` is at or beyond the height at which this entry is
/// considered confirmed deeply enough to act on.
fn has_reached_confirmation_threshold(&self, best_block: &BestBlock) -> bool {
	let threshold = self.confirmation_threshold();
	best_block.height >= threshold
}
}
channel_keys_id: [u8; 32],
holder_revocation_basepoint: RevocationBasepoint,
+ channel_id: ChannelId,
funding_info: (OutPoint, ScriptBuf),
current_counterparty_commitment_txid: Option<Txid>,
prev_counterparty_commitment_txid: Option<Txid>,
writer.write_all(&(self.pending_monitor_events.iter().filter(|ev| match ev {
MonitorEvent::HTLCEvent(_) => true,
MonitorEvent::HolderForceClosed(_) => true,
+ MonitorEvent::HolderForceClosedWithInfo { .. } => true,
_ => false,
}).count() as u64).to_be_bytes())?;
for event in self.pending_monitor_events.iter() {
upd.write(writer)?;
},
MonitorEvent::HolderForceClosed(_) => 1u8.write(writer)?,
+ // `HolderForceClosedWithInfo` replaced `HolderForceClosed` in v0.0.122. To keep
+ // backwards compatibility, we write a `HolderForceClosed` event along with the
+ // `HolderForceClosedWithInfo` event. This is deduplicated in the reader.
+ MonitorEvent::HolderForceClosedWithInfo { .. } => 1u8.write(writer)?,
_ => {}, // Covered in the TLV writes below
}
}
event.write(writer)?;
}
- self.best_block.block_hash().write(writer)?;
- writer.write_all(&self.best_block.height().to_be_bytes())?;
+ self.best_block.block_hash.write(writer)?;
+ writer.write_all(&self.best_block.height.to_be_bytes())?;
writer.write_all(&(self.onchain_events_awaiting_threshold_conf.len() as u64).to_be_bytes())?;
for ref entry in self.onchain_events_awaiting_threshold_conf.iter() {
self.lockdown_from_offchain.write(writer)?;
self.holder_tx_signed.write(writer)?;
+ // If we have a `HolderForceClosedWithInfo` event, we need to write the `HolderForceClosed` for backwards compatibility.
+ let pending_monitor_events = match self.pending_monitor_events.iter().find(|ev| match ev {
+ MonitorEvent::HolderForceClosedWithInfo { .. } => true,
+ _ => false,
+ }) {
+ Some(MonitorEvent::HolderForceClosedWithInfo { outpoint, .. }) => {
+ let mut pending_monitor_events = self.pending_monitor_events.clone();
+ pending_monitor_events.push(MonitorEvent::HolderForceClosed(*outpoint));
+ pending_monitor_events
+ }
+ _ => self.pending_monitor_events.clone(),
+ };
+
write_tlv_fields!(writer, {
(1, self.funding_spend_confirmed, option),
(3, self.htlcs_resolved_on_chain, required_vec),
- (5, self.pending_monitor_events, required_vec),
+ (5, pending_monitor_events, required_vec),
(7, self.funding_spend_seen, required),
(9, self.counterparty_node_id, option),
(11, self.confirmed_commitment_tx_counterparty_output, option),
(13, self.spendable_txids_confirmed, required_vec),
(15, self.counterparty_fulfilled_htlcs, required),
(17, self.initial_counterparty_commitment_info, option),
+ (19, self.channel_id, required),
});
Ok(())
}
pub(super) use _process_events_body as process_events_body;
/// Adapter wrapping a [`Logger`] which stamps every emitted [`Record`] with the peer id and
/// channel id of the [`ChannelMonitor`] it was constructed from.
pub(crate) struct WithChannelMonitor<'a, L: Deref> where L::Target: Logger {
	/// The underlying logger that decorated records are forwarded to.
	logger: &'a L,
	/// The counterparty's node id, if the monitor knows it.
	peer_id: Option<PublicKey>,
	/// The id of the channel the monitor watches, if available.
	channel_id: Option<ChannelId>,
}
+
impl<'a, L: Deref> Logger for WithChannelMonitor<'a, L> where L::Target: Logger {
	fn log(&self, mut record: Record) {
		// Attach this monitor's context to the record before handing it to the inner logger.
		record.peer_id = self.peer_id;
		record.channel_id = self.channel_id;
		self.logger.log(record)
	}
}
+
+impl<'a, L: Deref> WithChannelMonitor<'a, L> where L::Target: Logger {
+ pub(crate) fn from<S: WriteableEcdsaChannelSigner>(logger: &'a L, monitor: &ChannelMonitor<S>) -> Self {
+ Self::from_impl(logger, &*monitor.inner.lock().unwrap())
+ }
+
+ pub(crate) fn from_impl<S: WriteableEcdsaChannelSigner>(logger: &'a L, monitor_impl: &ChannelMonitorImpl<S>) -> Self {
+ let peer_id = monitor_impl.counterparty_node_id;
+ let channel_id = Some(monitor_impl.channel_id());
+ WithChannelMonitor {
+ logger, peer_id, channel_id,
+ }
+ }
+}
+
impl<Signer: WriteableEcdsaChannelSigner> ChannelMonitor<Signer> {
/// For lockorder enforcement purposes, we need to have a single site which constructs the
/// `inner` mutex, otherwise cases where we lock two monitors at the same time (eg in our
funding_redeemscript: ScriptBuf, channel_value_satoshis: u64,
commitment_transaction_number_obscure_factor: u64,
initial_holder_commitment_tx: HolderCommitmentTransaction,
- best_block: BestBlock, counterparty_node_id: PublicKey) -> ChannelMonitor<Signer> {
+ best_block: BestBlock, counterparty_node_id: PublicKey, channel_id: ChannelId,
+ ) -> ChannelMonitor<Signer> {
assert!(commitment_transaction_number_obscure_factor <= (1 << 48));
let counterparty_payment_script = chan_utils::get_counterparty_payment_script(
channel_parameters.clone(), initial_holder_commitment_tx, secp_ctx
);
- let mut outputs_to_watch = HashMap::new();
+ let mut outputs_to_watch = new_hash_map();
outputs_to_watch.insert(funding_info.0.txid, vec![(funding_info.0.index as u32, funding_info.1.clone())]);
Self::from_impl(ChannelMonitorImpl {
channel_keys_id,
holder_revocation_basepoint,
+ channel_id,
funding_info,
current_counterparty_commitment_txid: None,
prev_counterparty_commitment_txid: None,
on_holder_tx_csv: counterparty_channel_parameters.selected_contest_delay,
commitment_secrets: CounterpartyCommitmentSecrets::new(),
- counterparty_claimable_outpoints: HashMap::new(),
- counterparty_commitment_txn_on_chain: HashMap::new(),
- counterparty_hash_commitment_number: HashMap::new(),
- counterparty_fulfilled_htlcs: HashMap::new(),
+ counterparty_claimable_outpoints: new_hash_map(),
+ counterparty_commitment_txn_on_chain: new_hash_map(),
+ counterparty_hash_commitment_number: new_hash_map(),
+ counterparty_fulfilled_htlcs: new_hash_map(),
prev_holder_signed_commitment_tx: None,
current_holder_commitment_tx: holder_commitment_tx,
current_counterparty_commitment_number: 1 << 48,
current_holder_commitment_number,
- payment_preimages: HashMap::new(),
+ payment_preimages: new_hash_map(),
pending_monitor_events: Vec::new(),
pending_events: Vec::new(),
is_processing_pending_events: false,
)
where L::Target: Logger
{
- self.inner.lock().unwrap().provide_initial_counterparty_commitment_tx(txid,
+ let mut inner = self.inner.lock().unwrap();
+ let logger = WithChannelMonitor::from_impl(logger, &*inner);
+ inner.provide_initial_counterparty_commitment_tx(txid,
htlc_outputs, commitment_number, their_cur_per_commitment_point, feerate_per_kw,
- to_broadcaster_value_sat, to_countersignatory_value_sat, logger);
+ to_broadcaster_value_sat, to_countersignatory_value_sat, &logger);
}
/// Informs this monitor of the latest counterparty (ie non-broadcastable) commitment transaction.
their_per_commitment_point: PublicKey,
logger: &L,
) where L::Target: Logger {
- self.inner.lock().unwrap().provide_latest_counterparty_commitment_tx(
- txid, htlc_outputs, commitment_number, their_per_commitment_point, logger)
+ let mut inner = self.inner.lock().unwrap();
+ let logger = WithChannelMonitor::from_impl(logger, &*inner);
+ inner.provide_latest_counterparty_commitment_tx(
+ txid, htlc_outputs, commitment_number, their_per_commitment_point, &logger)
}
#[cfg(test)]
F::Target: FeeEstimator,
L::Target: Logger,
{
- self.inner.lock().unwrap().provide_payment_preimage(
- payment_hash, payment_preimage, broadcaster, fee_estimator, logger)
+ let mut inner = self.inner.lock().unwrap();
+ let logger = WithChannelMonitor::from_impl(logger, &*inner);
+ inner.provide_payment_preimage(
+ payment_hash, payment_preimage, broadcaster, fee_estimator, &logger)
}
/// Updates a ChannelMonitor on the basis of some new information provided by the Channel
F::Target: FeeEstimator,
L::Target: Logger,
{
- self.inner.lock().unwrap().update_monitor(updates, broadcaster, fee_estimator, logger)
+ let mut inner = self.inner.lock().unwrap();
+ let logger = WithChannelMonitor::from_impl(logger, &*inner);
+ inner.update_monitor(updates, broadcaster, fee_estimator, &logger)
}
/// Gets the update_id from the latest ChannelMonitorUpdate which was applied to this
self.inner.lock().unwrap().get_funding_txo().clone()
}
+ /// Gets the channel_id of the channel this ChannelMonitor is monitoring for.
+ pub fn channel_id(&self) -> ChannelId {
+ self.inner.lock().unwrap().channel_id()
+ }
+
/// Gets a list of txids, with their output scripts (in the order they appear in the
/// transaction), which we must learn about spends of via block_connected().
pub fn get_outputs_to_watch(&self) -> Vec<(Txid, Vec<(u32, ScriptBuf)>)> {
/// Loads the funding txo and outputs to watch into the given `chain::Filter` by repeatedly
/// calling `chain::Filter::register_output` and `chain::Filter::register_tx` until all outputs
/// have been registered.
- pub fn load_outputs_to_watch<F: Deref>(&self, filter: &F) where F::Target: chain::Filter {
+ pub fn load_outputs_to_watch<F: Deref, L: Deref>(&self, filter: &F, logger: &L)
+ where
+ F::Target: chain::Filter, L::Target: Logger,
+ {
let lock = self.inner.lock().unwrap();
+ let logger = WithChannelMonitor::from_impl(logger, &*lock);
+ log_trace!(&logger, "Registering funding outpoint {}", &lock.get_funding_txo().0);
filter.register_tx(&lock.get_funding_txo().0.txid, &lock.get_funding_txo().1);
for (txid, outputs) in lock.get_outputs_to_watch().iter() {
for (index, script_pubkey) in outputs.iter() {
assert!(*index <= u16::max_value() as u32);
+ let outpoint = OutPoint { txid: *txid, index: *index as u16 };
+ log_trace!(logger, "Registering outpoint {} with the filter for monitoring spends", outpoint);
filter.register_output(WatchedOutput {
block_hash: None,
- outpoint: OutPoint { txid: *txid, index: *index as u16 },
+ outpoint,
script_pubkey: script_pubkey.clone(),
});
}
/// to the commitment transaction being revoked, this will return a signed transaction, but
/// the signature will not be valid.
///
- /// [`EcdsaChannelSigner::sign_justice_revoked_output`]: crate::sign::EcdsaChannelSigner::sign_justice_revoked_output
+ /// [`EcdsaChannelSigner::sign_justice_revoked_output`]: crate::sign::ecdsa::EcdsaChannelSigner::sign_justice_revoked_output
/// [`Persist`]: crate::chain::chainmonitor::Persist
pub fn sign_to_local_justice_tx(&self, justice_tx: Transaction, input_idx: usize, value: u64, commitment_number: u64) -> Result<Transaction, ()> {
self.inner.lock().unwrap().sign_to_local_justice_tx(justice_tx, input_idx, value, commitment_number)
self.inner.lock().unwrap().counterparty_node_id
}
/// You may use this to broadcast the latest local commitment transaction, either because
/// a monitor update failed or because we've fallen behind (i.e. we've received proof that our
/// counterparty side knows a revocation secret we gave them that they shouldn't know).
///
/// Broadcasting these transactions in this manner is UNSAFE, as they allow counterparty
/// side to punish you. Nevertheless you may want to broadcast them if counterparty doesn't
/// close channel with their commitment transaction after a substantial amount of time. Best
/// may be to contact the other node operator out-of-band to coordinate other options available
/// to you.
pub fn broadcast_latest_holder_commitment_txn<B: Deref, F: Deref, L: Deref>(
	&self, broadcaster: &B, fee_estimator: &F, logger: &L
)
where
	B::Target: BroadcasterInterface,
	F::Target: FeeEstimator,
	L::Target: Logger
{
	let mut inner = self.inner.lock().unwrap();
	// The claim logic expects a `LowerBoundedFeeEstimator`; wrap the caller's estimator.
	let fee_estimator = LowerBoundedFeeEstimator::new(&**fee_estimator);
	// Decorate log records with this monitor's peer/channel ids while we hold the lock.
	let logger = WithChannelMonitor::from_impl(logger, &*inner);
	inner.queue_latest_holder_commitment_txn_for_broadcast(broadcaster, &fee_estimator, &logger);
}
/// Unsafe test-only version of `broadcast_latest_holder_commitment_txn` used by our test framework
/// to bypass HolderCommitmentTransaction state update lockdown after signature and generate
/// revoked commitment transaction.
#[cfg(any(test, feature = "unsafe_revoked_tx_signing"))]
pub fn unsafe_get_latest_holder_commitment_txn<L: Deref>(&self, logger: &L) -> Vec<Transaction>
where L::Target: Logger {
	let mut inner = self.inner.lock().unwrap();
	// Attach peer/channel context to log output while holding the monitor lock.
	let logger = WithChannelMonitor::from_impl(logger, &*inner);
	inner.unsafe_get_latest_holder_commitment_txn(&logger)
}
/// Processes transactions in a newly connected block, which may result in any of the following:
height: u32,
broadcaster: B,
fee_estimator: F,
- logger: L,
+ logger: &L,
) -> Vec<TransactionOutputs>
where
B::Target: BroadcasterInterface,
F::Target: FeeEstimator,
L::Target: Logger,
{
- self.inner.lock().unwrap().block_connected(
- header, txdata, height, broadcaster, fee_estimator, logger)
+ let mut inner = self.inner.lock().unwrap();
+ let logger = WithChannelMonitor::from_impl(logger, &*inner);
+ inner.block_connected(
+ header, txdata, height, broadcaster, fee_estimator, &logger)
}
/// Determines if the disconnected block contained any transactions of interest and updates
height: u32,
broadcaster: B,
fee_estimator: F,
- logger: L,
+ logger: &L,
) where
B::Target: BroadcasterInterface,
F::Target: FeeEstimator,
L::Target: Logger,
{
- self.inner.lock().unwrap().block_disconnected(
- header, height, broadcaster, fee_estimator, logger)
+ let mut inner = self.inner.lock().unwrap();
+ let logger = WithChannelMonitor::from_impl(logger, &*inner);
+ inner.block_disconnected(
+ header, height, broadcaster, fee_estimator, &logger)
}
/// Processes transactions confirmed in a block with the given header and height, returning new
height: u32,
broadcaster: B,
fee_estimator: F,
- logger: L,
+ logger: &L,
) -> Vec<TransactionOutputs>
where
B::Target: BroadcasterInterface,
L::Target: Logger,
{
let bounded_fee_estimator = LowerBoundedFeeEstimator::new(fee_estimator);
- self.inner.lock().unwrap().transactions_confirmed(
- header, txdata, height, broadcaster, &bounded_fee_estimator, logger)
+ let mut inner = self.inner.lock().unwrap();
+ let logger = WithChannelMonitor::from_impl(logger, &*inner);
+ inner.transactions_confirmed(
+ header, txdata, height, broadcaster, &bounded_fee_estimator, &logger)
}
/// Processes a transaction that was reorganized out of the chain.
txid: &Txid,
broadcaster: B,
fee_estimator: F,
- logger: L,
+ logger: &L,
) where
B::Target: BroadcasterInterface,
F::Target: FeeEstimator,
L::Target: Logger,
{
let bounded_fee_estimator = LowerBoundedFeeEstimator::new(fee_estimator);
- self.inner.lock().unwrap().transaction_unconfirmed(
- txid, broadcaster, &bounded_fee_estimator, logger);
+ let mut inner = self.inner.lock().unwrap();
+ let logger = WithChannelMonitor::from_impl(logger, &*inner);
+ inner.transaction_unconfirmed(
+ txid, broadcaster, &bounded_fee_estimator, &logger
+ );
}
/// Updates the monitor with the current best chain tip, returning new outputs to watch. See
height: u32,
broadcaster: B,
fee_estimator: F,
- logger: L,
+ logger: &L,
) -> Vec<TransactionOutputs>
where
B::Target: BroadcasterInterface,
L::Target: Logger,
{
let bounded_fee_estimator = LowerBoundedFeeEstimator::new(fee_estimator);
- self.inner.lock().unwrap().best_block_updated(
- header, height, broadcaster, &bounded_fee_estimator, logger)
+ let mut inner = self.inner.lock().unwrap();
+ let logger = WithChannelMonitor::from_impl(logger, &*inner);
+ inner.best_block_updated(
+ header, height, broadcaster, &bounded_fee_estimator, &logger
+ )
}
/// Returns the set of txids that should be monitored for re-organization out of the chain.
- pub fn get_relevant_txids(&self) -> Vec<(Txid, Option<BlockHash>)> {
+ pub fn get_relevant_txids(&self) -> Vec<(Txid, u32, Option<BlockHash>)> {
let inner = self.inner.lock().unwrap();
- let mut txids: Vec<(Txid, Option<BlockHash>)> = inner.onchain_events_awaiting_threshold_conf
+ let mut txids: Vec<(Txid, u32, Option<BlockHash>)> = inner.onchain_events_awaiting_threshold_conf
.iter()
- .map(|entry| (entry.txid, entry.block_hash))
+ .map(|entry| (entry.txid, entry.height, entry.block_hash))
.chain(inner.onchain_tx_handler.get_relevant_txids().into_iter())
.collect();
- txids.sort_unstable();
- txids.dedup();
+ txids.sort_unstable_by(|a, b| a.0.cmp(&b.0).then(b.1.cmp(&a.1)));
+ txids.dedup_by_key(|(txid, _, _)| *txid);
txids
}
/// invoking this every 30 seconds, or lower if running in an environment with spotty
/// connections, like on mobile.
pub fn rebroadcast_pending_claims<B: Deref, F: Deref, L: Deref>(
	&self, broadcaster: B, fee_estimator: F, logger: &L,
)
where
	B::Target: BroadcasterInterface,
	F::Target: FeeEstimator,
	L::Target: Logger,
{
	let fee_estimator = LowerBoundedFeeEstimator::new(fee_estimator);
	let mut inner = self.inner.lock().unwrap();
	// Decorate log records with this monitor's peer/channel ids.
	let logger = WithChannelMonitor::from_impl(logger, &*inner);
	let current_height = inner.best_block.height;
	// Retry all pending claims as of the current tip, using the `HighestOfPreviousOrNew`
	// feerate strategy so rebroadcasts never use a lower feerate than previously attempted.
	inner.onchain_tx_handler.rebroadcast_pending_claims(
		current_height, FeerateStrategy::HighestOfPreviousOrNew, &broadcaster, &fee_estimator, &logger,
	);
}
+
+ /// Triggers rebroadcasts of pending claims from a force-closed channel after a transaction
+ /// signature generation failure.
+ pub fn signer_unblocked<B: Deref, F: Deref, L: Deref>(
+ &self, broadcaster: B, fee_estimator: F, logger: &L,
)
where
B::Target: BroadcasterInterface,
{
let fee_estimator = LowerBoundedFeeEstimator::new(fee_estimator);
let mut inner = self.inner.lock().unwrap();
+ let logger = WithChannelMonitor::from_impl(logger, &*inner);
let current_height = inner.best_block.height;
inner.onchain_tx_handler.rebroadcast_pending_claims(
- current_height, &broadcaster, &fee_estimator, &logger,
+ current_height, FeerateStrategy::RetryPrevious, &broadcaster, &fee_estimator, &logger,
);
}
/// Replaces the monitor's stored counterparty payment script with `script`.
pub fn set_counterparty_payment_script(&self, script: ScriptBuf) {
	let mut inner = self.inner.lock().unwrap();
	inner.counterparty_payment_script = script;
}
+
/// Test-only helper: invokes `f` with a reference to this monitor's channel signer, holding
/// the monitor's inner lock for the duration of the call.
#[cfg(test)]
pub fn do_signer_call<F: FnMut(&Signer)>(&self, mut f: F) {
	// Note: the `FnMut(&Signer) -> ()` spelling is non-idiomatic (clippy `unused_unit`);
	// `FnMut(&Signer)` is the identical bound.
	let inner = self.inner.lock().unwrap();
	f(&inner.onchain_tx_handler.signer);
}
}
impl<Signer: WriteableEcdsaChannelSigner> ChannelMonitorImpl<Signer> {
/// HTLCs which were resolved on-chain (i.e. where the final HTLC resolution was done by an
/// event from this `ChannelMonitor`).
pub(crate) fn get_all_current_outbound_htlcs(&self) -> HashMap<HTLCSource, (HTLCOutputInCommitment, Option<PaymentPreimage>)> {
- let mut res = HashMap::new();
+ let mut res = new_hash_map();
// Just examine the available counterparty commitment transactions. See docs on
// `fail_unbroadcast_htlcs`, below, for justification.
let us = self.inner.lock().unwrap();
return self.get_all_current_outbound_htlcs();
}
- let mut res = HashMap::new();
+ let mut res = new_hash_map();
macro_rules! walk_htlcs {
($holder_commitment: expr, $htlc_iter: expr) => {
for (htlc, source) in $htlc_iter {
// before considering it "no longer pending" - this matches when we
// provide the ChannelManager an HTLC failure event.
Some(commitment_tx_output_idx) == htlc.transaction_output_index &&
- us.best_block.height() >= event.height + ANTI_REORG_DELAY - 1
+ us.best_block.height >= event.height + ANTI_REORG_DELAY - 1
} else if let OnchainEvent::HTLCSpendConfirmation { commitment_tx_output_idx, .. } = event.event {
// If the HTLC was fulfilled with a preimage, we consider the HTLC
// immediately non-pending, matching when we provide ChannelManager
// broadcastable commitment transaction has the HTLC in it, but it
// cannot currently change after channel initialization, so we don't
// need to here.
- let confirmed_htlcs_iter: &mut Iterator<Item = (&HTLCOutputInCommitment, Option<&HTLCSource>)> = &mut $confirmed_htlcs_list;
+ let confirmed_htlcs_iter: &mut dyn Iterator<Item = (&HTLCOutputInCommitment, Option<&HTLCSource>)> = &mut $confirmed_htlcs_list;
let mut matched_htlc = false;
for (ref broadcast_htlc, ref broadcast_source) in confirmed_htlcs_iter {
Ok(())
}
- pub(crate) fn provide_initial_counterparty_commitment_tx<L: Deref>(
+ fn provide_initial_counterparty_commitment_tx<L: Deref>(
&mut self, txid: Txid, htlc_outputs: Vec<(HTLCOutputInCommitment, Option<Box<HTLCSource>>)>,
commitment_number: u64, their_per_commitment_point: PublicKey, feerate_per_kw: u32,
- to_broadcaster_value: u64, to_countersignatory_value: u64, logger: &L
- )
- where L::Target: Logger
- {
+ to_broadcaster_value: u64, to_countersignatory_value: u64, logger: &WithChannelMonitor<L>,
+ ) where L::Target: Logger {
self.initial_counterparty_commitment_info = Some((their_per_commitment_point.clone(),
feerate_per_kw, to_broadcaster_value, to_countersignatory_value));
their_per_commitment_point, logger);
}
- pub(crate) fn provide_latest_counterparty_commitment_tx<L: Deref>(&mut self, txid: Txid, htlc_outputs: Vec<(HTLCOutputInCommitment, Option<Box<HTLCSource>>)>, commitment_number: u64, their_per_commitment_point: PublicKey, logger: &L) where L::Target: Logger {
+ fn provide_latest_counterparty_commitment_tx<L: Deref>(
+ &mut self, txid: Txid,
+ htlc_outputs: Vec<(HTLCOutputInCommitment, Option<Box<HTLCSource>>)>,
+ commitment_number: u64, their_per_commitment_point: PublicKey, logger: &WithChannelMonitor<L>,
+ ) where L::Target: Logger {
// TODO: Encrypt the htlc_outputs data with the single-hash of the commitment transaction
// so that a remote monitor doesn't learn anything unless there is a malicious close.
// (only maybe, sadly we cant do the same for local info, as we need to be aware of
/// commitment_tx_infos which contain the payment hash have been revoked.
fn provide_payment_preimage<B: Deref, F: Deref, L: Deref>(
&mut self, payment_hash: &PaymentHash, payment_preimage: &PaymentPreimage, broadcaster: &B,
- fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L)
+ fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &WithChannelMonitor<L>)
where B::Target: BroadcasterInterface,
F::Target: FeeEstimator,
L::Target: Logger,
macro_rules! claim_htlcs {
($commitment_number: expr, $txid: expr) => {
let (htlc_claim_reqs, _) = self.get_counterparty_output_claim_info($commitment_number, $txid, None);
- self.onchain_tx_handler.update_claims_view_from_requests(htlc_claim_reqs, self.best_block.height(), self.best_block.height(), broadcaster, fee_estimator, logger);
+ self.onchain_tx_handler.update_claims_view_from_requests(htlc_claim_reqs, self.best_block.height, self.best_block.height, broadcaster, fee_estimator, logger);
}
}
if let Some(txid) = self.current_counterparty_commitment_txid {
// Assume that the broadcasted commitment transaction confirmed in the current best
// block. Even if not, its a reasonable metric for the bump criteria on the HTLC
// transactions.
- let (claim_reqs, _) = self.get_broadcasted_holder_claims(&holder_commitment_tx, self.best_block.height());
- self.onchain_tx_handler.update_claims_view_from_requests(claim_reqs, self.best_block.height(), self.best_block.height(), broadcaster, fee_estimator, logger);
+ let (claim_reqs, _) = self.get_broadcasted_holder_claims(&holder_commitment_tx, self.best_block.height);
+ self.onchain_tx_handler.update_claims_view_from_requests(claim_reqs, self.best_block.height, self.best_block.height, broadcaster, fee_estimator, logger);
}
}
}
- pub(crate) fn broadcast_latest_holder_commitment_txn<B: Deref, L: Deref>(&mut self, broadcaster: &B, logger: &L)
- where B::Target: BroadcasterInterface,
- L::Target: Logger,
- {
- let commit_txs = self.get_latest_holder_commitment_txn(logger);
- let mut txs = vec![];
- for tx in commit_txs.iter() {
- log_info!(logger, "Broadcasting local {}", log_tx!(tx));
- txs.push(tx);
/// Builds the claim package for our own funding output, queues a `HolderForceClosedWithInfo`
/// monitor event carrying `reason`, and, for non-anchor channels, also collects the holder
/// commitment's claims and watch outputs. Returns the claimable packages plus outputs to watch.
fn generate_claimable_outpoints_and_watch_outputs(&mut self, reason: ClosureReason) -> (Vec<PackageTemplate>, Vec<TransactionOutputs>) {
	let funding_outp = HolderFundingOutput::build(
		self.funding_redeemscript.clone(),
		self.channel_value_satoshis,
		self.onchain_tx_handler.channel_type_features().clone()
	);
	// The claim on our funding output is what carries our commitment transaction on-chain.
	let commitment_package = PackageTemplate::build_package(
		self.funding_info.0.txid.clone(), self.funding_info.0.index as u32,
		PackageSolvingData::HolderFundingOutput(funding_outp),
		self.best_block.height, self.best_block.height
	);
	let mut claimable_outpoints = vec![commitment_package];
	// Surface the force-close, and why it happened, as a monitor event.
	let event = MonitorEvent::HolderForceClosedWithInfo {
		reason,
		outpoint: self.funding_info.0,
		channel_id: self.channel_id,
	};
	self.pending_monitor_events.push(event);

	// Although we aren't signing the transaction directly here, the transaction will be signed
	// in the claim that is queued to OnchainTxHandler. We set holder_tx_signed here to reject
	// new channel updates.
	self.holder_tx_signed = true;
	let mut watch_outputs = Vec::new();
	// We can't broadcast our HTLC transactions while the commitment transaction is
	// unconfirmed. We'll delay doing so until we detect the confirmed commitment in
	// `transactions_confirmed`.
	if !self.onchain_tx_handler.channel_type_features().supports_anchors_zero_fee_htlc_tx() {
		// Because we're broadcasting a commitment transaction, we should construct the package
		// assuming it gets confirmed in the next block. Sadly, we have code which considers
		// "not yet confirmed" things as discardable, so we cannot do that here.
		let (mut new_outpoints, _) = self.get_broadcasted_holder_claims(
			&self.current_holder_commitment_tx, self.best_block.height
		);
		let unsigned_commitment_tx = self.onchain_tx_handler.get_unsigned_holder_commitment_tx();
		let new_outputs = self.get_broadcasted_holder_watch_outputs(
			&self.current_holder_commitment_tx, &unsigned_commitment_tx
		);
		if !new_outputs.is_empty() {
			watch_outputs.push((self.current_holder_commitment_tx.txid.clone(), new_outputs));
		}
		claimable_outpoints.append(&mut new_outpoints);
	}
	(claimable_outpoints, watch_outputs)
}
+
/// Queues a claim of our funding output (carrying our latest commitment transaction) with the
/// `OnchainTxHandler`, closing the channel with reason [`ClosureReason::HolderForceClosed`].
pub(crate) fn queue_latest_holder_commitment_txn_for_broadcast<B: Deref, F: Deref, L: Deref>(
	&mut self, broadcaster: &B, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &WithChannelMonitor<L>
)
where
	B::Target: BroadcasterInterface,
	F::Target: FeeEstimator,
	L::Target: Logger,
{
	let (claimable_outpoints, _) = self.generate_claimable_outpoints_and_watch_outputs(ClosureReason::HolderForceClosed);
	// Hand the claims to the on-chain handler, using the current best-block height for both
	// height arguments (i.e. treating the claims as relevant as of the current tip).
	self.onchain_tx_handler.update_claims_view_from_requests(
		claimable_outpoints, self.best_block.height, self.best_block.height, broadcaster,
		fee_estimator, logger
	);
}
- pub fn update_monitor<B: Deref, F: Deref, L: Deref>(&mut self, updates: &ChannelMonitorUpdate, broadcaster: &B, fee_estimator: &F, logger: &L) -> Result<(), ()>
+ fn update_monitor<B: Deref, F: Deref, L: Deref>(
+ &mut self, updates: &ChannelMonitorUpdate, broadcaster: &B, fee_estimator: &F, logger: &WithChannelMonitor<L>
+ ) -> Result<(), ()>
where B::Target: BroadcasterInterface,
F::Target: FeeEstimator,
L::Target: Logger,
log_info!(logger, "Applying update to monitor {}, bringing update_id from {} to {} with {} change(s).",
log_funding_info!(self), self.latest_update_id, updates.update_id, updates.updates.len());
}
+
+ if updates.counterparty_node_id.is_some() {
+ if self.counterparty_node_id.is_none() {
+ self.counterparty_node_id = updates.counterparty_node_id;
+ } else {
+ debug_assert_eq!(self.counterparty_node_id, updates.counterparty_node_id);
+ }
+ }
+
// ChannelMonitor updates may be applied after force close if we receive a preimage for a
// broadcasted commitment transaction HTLC output that we'd like to claim on-chain. If this
// is the case, we no longer have guaranteed access to the monitor's update ID, so we use a
log_trace!(logger, "Avoiding commitment broadcast, already detected confirmed spend onchain");
continue;
}
- self.broadcast_latest_holder_commitment_txn(broadcaster, logger);
- // If the channel supports anchor outputs, we'll need to emit an external
- // event to be consumed such that a child transaction is broadcast with a
- // high enough feerate for the parent commitment transaction to confirm.
- if self.onchain_tx_handler.channel_type_features().supports_anchors_zero_fee_htlc_tx() {
- let funding_output = HolderFundingOutput::build(
- self.funding_redeemscript.clone(), self.channel_value_satoshis,
- self.onchain_tx_handler.channel_type_features().clone(),
- );
- let best_block_height = self.best_block.height();
- let commitment_package = PackageTemplate::build_package(
- self.funding_info.0.txid.clone(), self.funding_info.0.index as u32,
- PackageSolvingData::HolderFundingOutput(funding_output),
- best_block_height, best_block_height
- );
- self.onchain_tx_handler.update_claims_view_from_requests(
- vec![commitment_package], best_block_height, best_block_height,
- broadcaster, &bounded_fee_estimator, logger,
- );
- }
+ self.queue_latest_holder_commitment_txn_for_broadcast(broadcaster, &bounded_fee_estimator, logger);
} else if !self.holder_tx_signed {
log_error!(logger, "WARNING: You have a potentially-unsafe holder commitment transaction available to broadcast");
- log_error!(logger, " in channel monitor for channel {}!", &self.funding_info.0.to_channel_id());
- log_error!(logger, " Read the docs for ChannelMonitor::get_latest_holder_commitment_txn and take manual action!");
+ log_error!(logger, " in channel monitor for channel {}!", &self.channel_id());
+ log_error!(logger, " Read the docs for ChannelMonitor::broadcast_latest_holder_commitment_txn to take manual action!");
} else {
// If we generated a MonitorEvent::HolderForceClosed, the ChannelManager
// will still give us a ChannelForceClosed event with !should_broadcast, but we
} else { ret }
}
+ /// Returns the update_id of the most recently applied `ChannelMonitorUpdate`.
- pub fn get_latest_update_id(&self) -> u64 {
+ fn get_latest_update_id(&self) -> u64 {
self.latest_update_id
}
+ /// Returns the channel's funding outpoint together with the funding output script,
+ /// as stored in `funding_info`.
- pub fn get_funding_txo(&self) -> &(OutPoint, ScriptBuf) {
+ fn get_funding_txo(&self) -> &(OutPoint, ScriptBuf) {
&self.funding_info
}
- pub fn get_outputs_to_watch(&self) -> &HashMap<Txid, Vec<(u32, ScriptBuf)>> {
+ /// Returns the `ChannelId` of the channel this monitor watches.
+ pub fn channel_id(&self) -> ChannelId {
+ self.channel_id
+ }
+
+ /// Returns the map of txids to the outputs which must be watched for on-chain spends.
+ fn get_outputs_to_watch(&self) -> &HashMap<Txid, Vec<(u32, ScriptBuf)>> {
// If we've detected a counterparty commitment tx on chain, we must include it in the set
// of outputs to watch for spends of, otherwise we're likely to lose user funds. Because
// it's trivial to do, double-check that here.
&self.outputs_to_watch
}
- pub fn get_and_clear_pending_monitor_events(&mut self) -> Vec<MonitorEvent> {
+ fn get_and_clear_pending_monitor_events(&mut self) -> Vec<MonitorEvent> {
let mut ret = Vec::new();
mem::swap(&mut ret, &mut self.pending_monitor_events);
ret
ClaimEvent::BumpCommitment {
package_target_feerate_sat_per_1000_weight, commitment_tx, anchor_output_idx,
} => {
+ let channel_id = self.channel_id;
+ // unwrap safety: `ClaimEvent`s are only available for Anchor channels,
+ // introduced with v0.0.116. counterparty_node_id is guaranteed to be `Some`
+ // since v0.0.110.
+ let counterparty_node_id = self.counterparty_node_id.unwrap();
let commitment_txid = commitment_tx.txid();
debug_assert_eq!(self.current_holder_commitment_tx.txid, commitment_txid);
let pending_htlcs = self.current_holder_commitment_tx.non_dust_htlcs();
let commitment_tx_fee_satoshis = self.channel_value_satoshis -
commitment_tx.output.iter().fold(0u64, |sum, output| sum + output.value);
ret.push(Event::BumpTransaction(BumpTransactionEvent::ChannelClose {
+ channel_id,
+ counterparty_node_id,
claim_id,
package_target_feerate_sat_per_1000_weight,
commitment_tx,
ClaimEvent::BumpHTLC {
target_feerate_sat_per_1000_weight, htlcs, tx_lock_time,
} => {
+ let channel_id = self.channel_id;
+ // unwrap safety: `ClaimEvent`s are only available for Anchor channels,
+ // introduced with v0.0.116. counterparty_node_id is guaranteed to be `Some`
+ // since v0.0.110.
+ let counterparty_node_id = self.counterparty_node_id.unwrap();
let mut htlc_descriptors = Vec::with_capacity(htlcs.len());
for htlc in htlcs {
htlc_descriptors.push(HTLCDescriptor {
});
}
ret.push(Event::BumpTransaction(BumpTransactionEvent::HTLCResolution {
+ channel_id,
+ counterparty_node_id,
claim_id,
target_feerate_sat_per_1000_weight,
htlc_descriptors,
ret
}
- pub(crate) fn initial_counterparty_commitment_tx(&mut self) -> Option<CommitmentTransaction> {
+ fn initial_counterparty_commitment_tx(&mut self) -> Option<CommitmentTransaction> {
let (their_per_commitment_point, feerate_per_kw, to_broadcaster_value,
to_countersignatory_value) = self.initial_counterparty_commitment_info?;
let htlc_outputs = vec![];
channel_parameters)
}
- pub(crate) fn counterparty_commitment_txs_from_update(&self, update: &ChannelMonitorUpdate) -> Vec<CommitmentTransaction> {
+ fn counterparty_commitment_txs_from_update(&self, update: &ChannelMonitorUpdate) -> Vec<CommitmentTransaction> {
update.updates.iter().filter_map(|update| {
match update {
&ChannelMonitorUpdateStep::LatestCounterpartyCommitmentTXInfo { commitment_txid,
}).collect()
}
- pub(crate) fn sign_to_local_justice_tx(
+ fn sign_to_local_justice_tx(
&self, mut justice_tx: Transaction, input_idx: usize, value: u64, commitment_number: u64
) -> Result<Transaction, ()> {
let secret = self.get_secret(commitment_number).ok_or(())?;
self.commitment_secrets.get_secret(idx)
}
+ /// Returns the minimum commitment-secret index we have seen, delegating to
+ /// `CounterpartyCommitmentSecrets::get_min_seen_secret`.
- pub(crate) fn get_min_seen_secret(&self) -> u64 {
+ fn get_min_seen_secret(&self) -> u64 {
self.commitment_secrets.get_min_seen_secret()
}
+ /// Returns the current counterparty commitment transaction number tracked by this monitor.
- pub(crate) fn get_cur_counterparty_commitment_number(&self) -> u64 {
+ fn get_cur_counterparty_commitment_number(&self) -> u64 {
self.current_counterparty_commitment_number
}
+ /// Returns the current holder commitment transaction number tracked by this monitor.
- pub(crate) fn get_cur_holder_commitment_number(&self) -> u64 {
+ fn get_cur_holder_commitment_number(&self) -> u64 {
self.current_holder_commitment_number
}
(htlc, htlc_source.as_ref().map(|htlc_source| htlc_source.as_ref()))
), logger);
} else {
- debug_assert!(false, "We should have per-commitment option for any recognized old commitment txn");
+ // Our fuzzers aren't constrained by pesky things like valid signatures, so can
+ // spend our funding output with a transaction which doesn't match our past
+ // commitment transactions. Thus, we can only debug-assert here when not
+ // fuzzing.
+ debug_assert!(cfg!(fuzzing), "We should have per-commitment option for any recognized old commitment txn");
fail_unbroadcast_htlcs!(self, "revoked counterparty", commitment_txid, tx, height,
block_hash, [].iter().map(|reference| *reference), logger);
}
}
}
- pub fn get_latest_holder_commitment_txn<L: Deref>(&mut self, logger: &L) -> Vec<Transaction> where L::Target: Logger {
- log_debug!(logger, "Getting signed latest holder commitment transaction!");
- self.holder_tx_signed = true;
- let commitment_tx = self.onchain_tx_handler.get_fully_signed_holder_tx(&self.funding_redeemscript);
- let txid = commitment_tx.txid();
- let mut holder_transactions = vec![commitment_tx];
- // When anchor outputs are present, the HTLC transactions are only valid once the commitment
- // transaction confirms.
- if self.onchain_tx_handler.channel_type_features().supports_anchors_zero_fee_htlc_tx() {
- return holder_transactions;
+ /// Cancels any existing pending claims for a commitment that previously confirmed and has now
+ /// been replaced by another.
+ pub fn cancel_prev_commitment_claims<L: Deref>(
+ &mut self, logger: &L, confirmed_commitment_txid: &Txid
+ ) where L::Target: Logger {
+ for (counterparty_commitment_txid, _) in &self.counterparty_commitment_txn_on_chain {
+ // Cancel any pending claims for counterparty commitments we've seen confirm.
+ if counterparty_commitment_txid == confirmed_commitment_txid {
+ continue;
+ }
+ // HTLCs with no `transaction_output_index` (presumably dust — TODO confirm) are
+ // skipped below, as they have no on-chain output to abandon a claim against.
+ for (htlc, _) in self.counterparty_claimable_outpoints.get(counterparty_commitment_txid).unwrap_or(&vec![]) {
+ log_trace!(logger, "Canceling claims for previously confirmed counterparty commitment {}",
+ counterparty_commitment_txid);
+ // `vout: 0` is a placeholder; it is overwritten per-HTLC before `abandon_claim`.
+ let mut outpoint = BitcoinOutPoint { txid: *counterparty_commitment_txid, vout: 0 };
+ if let Some(vout) = htlc.transaction_output_index {
+ outpoint.vout = vout;
+ self.onchain_tx_handler.abandon_claim(&outpoint);
+ }
+ }
+ }
- for htlc in self.current_holder_commitment_tx.htlc_outputs.iter() {
- if let Some(vout) = htlc.0.transaction_output_index {
- let preimage = if !htlc.0.offered {
- if let Some(preimage) = self.payment_preimages.get(&htlc.0.payment_hash) { Some(preimage.clone()) } else {
- // We can't build an HTLC-Success transaction without the preimage
- continue;
+ if self.holder_tx_signed {
+ // If we've signed, we may have broadcast either commitment (prev or current), and
+ // attempted to claim from it immediately without waiting for a confirmation.
+ if self.current_holder_commitment_tx.txid != *confirmed_commitment_txid {
+ log_trace!(logger, "Canceling claims for previously broadcast holder commitment {}",
+ self.current_holder_commitment_tx.txid);
+ let mut outpoint = BitcoinOutPoint { txid: self.current_holder_commitment_tx.txid, vout: 0 };
+ for (htlc, _, _) in &self.current_holder_commitment_tx.htlc_outputs {
+ if let Some(vout) = htlc.transaction_output_index {
+ outpoint.vout = vout;
+ self.onchain_tx_handler.abandon_claim(&outpoint);
+ }
+ }
+ }
+ // Also abandon claims against the previous holder commitment, if one exists and it
+ // is not the commitment that just confirmed.
+ if let Some(prev_holder_commitment_tx) = &self.prev_holder_signed_commitment_tx {
+ if prev_holder_commitment_tx.txid != *confirmed_commitment_txid {
+ log_trace!(logger, "Canceling claims for previously broadcast holder commitment {}",
+ prev_holder_commitment_tx.txid);
+ let mut outpoint = BitcoinOutPoint { txid: prev_holder_commitment_tx.txid, vout: 0 };
+ for (htlc, _, _) in &prev_holder_commitment_tx.htlc_outputs {
+ if let Some(vout) = htlc.transaction_output_index {
+ outpoint.vout = vout;
+ self.onchain_tx_handler.abandon_claim(&outpoint);
+ }
+ }
}
- } else if htlc.0.cltv_expiry > self.best_block.height() + 1 {
- // Don't broadcast HTLC-Timeout transactions immediately as they don't meet the
- // current locktime requirements on-chain. We will broadcast them in
- // `block_confirmed` when `should_broadcast_holder_commitment_txn` returns true.
- // Note that we add + 1 as transactions are broadcastable when they can be
- // confirmed in the next block.
- continue;
- } else { None };
- if let Some(htlc_tx) = self.onchain_tx_handler.get_fully_signed_htlc_tx(
- &::bitcoin::OutPoint { txid, vout }, &preimage) {
- holder_transactions.push(htlc_tx);
- }
}
+ } else {
+ // No previous claim.
+ }
- // We throw away the generated waiting_first_conf data as we aren't (yet) confirmed and we don't actually know what the caller wants to do.
- // The data will be re-generated and tracked in check_spend_holder_transaction if we get a confirmation.
- holder_transactions
}
#[cfg(any(test,feature = "unsafe_revoked_tx_signing"))]
/// Note that this includes possibly-locktimed-in-the-future transactions!
- fn unsafe_get_latest_holder_commitment_txn<L: Deref>(&mut self, logger: &L) -> Vec<Transaction> where L::Target: Logger {
+ fn unsafe_get_latest_holder_commitment_txn<L: Deref>(
+ &mut self, logger: &WithChannelMonitor<L>
+ ) -> Vec<Transaction> where L::Target: Logger {
log_debug!(logger, "Getting signed copy of latest holder commitment transaction!");
let commitment_tx = self.onchain_tx_handler.get_fully_signed_copy_holder_tx(&self.funding_redeemscript);
let txid = commitment_tx.txid();
continue;
}
} else { None };
- if let Some(htlc_tx) = self.onchain_tx_handler.get_fully_signed_htlc_tx(
- &::bitcoin::OutPoint { txid, vout }, &preimage) {
- holder_transactions.push(htlc_tx);
+ if let Some(htlc_tx) = self.onchain_tx_handler.get_maybe_signed_htlc_tx(
+ &::bitcoin::OutPoint { txid, vout }, &preimage
+ ) {
+ if htlc_tx.is_fully_signed() {
+ holder_transactions.push(htlc_tx.0);
+ }
}
}
}
holder_transactions
}
- pub fn block_connected<B: Deref, F: Deref, L: Deref>(&mut self, header: &Header, txdata: &TransactionData, height: u32, broadcaster: B, fee_estimator: F, logger: L) -> Vec<TransactionOutputs>
+ fn block_connected<B: Deref, F: Deref, L: Deref>(
+ &mut self, header: &Header, txdata: &TransactionData, height: u32, broadcaster: B,
+ fee_estimator: F, logger: &WithChannelMonitor<L>,
+ ) -> Vec<TransactionOutputs>
where B::Target: BroadcasterInterface,
- F::Target: FeeEstimator,
- L::Target: Logger,
+ F::Target: FeeEstimator,
+ L::Target: Logger,
{
let block_hash = header.block_hash();
self.best_block = BestBlock::new(block_hash, height);
height: u32,
broadcaster: B,
fee_estimator: &LowerBoundedFeeEstimator<F>,
- logger: L,
+ logger: &WithChannelMonitor<L>,
) -> Vec<TransactionOutputs>
where
B::Target: BroadcasterInterface,
{
let block_hash = header.block_hash();
- if height > self.best_block.height() {
+ if height > self.best_block.height {
self.best_block = BestBlock::new(block_hash, height);
- self.block_confirmed(height, block_hash, vec![], vec![], vec![], &broadcaster, &fee_estimator, &logger)
- } else if block_hash != self.best_block.block_hash() {
+ log_trace!(logger, "Connecting new block {} at height {}", block_hash, height);
+ self.block_confirmed(height, block_hash, vec![], vec![], vec![], &broadcaster, &fee_estimator, logger)
+ } else if block_hash != self.best_block.block_hash {
self.best_block = BestBlock::new(block_hash, height);
+ log_trace!(logger, "Best block re-orged, replaced with new block {} at height {}", block_hash, height);
self.onchain_events_awaiting_threshold_conf.retain(|ref entry| entry.height <= height);
self.onchain_tx_handler.block_disconnected(height + 1, broadcaster, fee_estimator, logger);
Vec::new()
height: u32,
broadcaster: B,
fee_estimator: &LowerBoundedFeeEstimator<F>,
- logger: L,
+ logger: &WithChannelMonitor<L>,
) -> Vec<TransactionOutputs>
where
B::Target: BroadcasterInterface,
let mut claimable_outpoints = Vec::new();
'tx_iter: for tx in &txn_matched {
let txid = tx.txid();
+ log_trace!(logger, "Transaction {} confirmed in block {}", txid , block_hash);
// If a transaction has already been confirmed, ensure we don't bother processing it duplicatively.
if Some(txid) == self.funding_spend_confirmed {
log_debug!(logger, "Skipping redundant processing of funding-spend tx {} as it was previously confirmed", txid);
if prevout.txid == self.funding_info.0.txid && prevout.vout == self.funding_info.0.index as u32 {
let mut balance_spendable_csv = None;
log_info!(logger, "Channel {} closed by funding output spend in txid {}.",
- &self.funding_info.0.to_channel_id(), txid);
+ &self.channel_id(), txid);
self.funding_spend_seen = true;
let mut commitment_tx_to_counterparty_output = None;
if (tx.input[0].sequence.0 >> 8*3) as u8 == 0x80 && (tx.lock_time.to_consensus_u32() >> 8*3) as u8 == 0x20 {
claimable_outpoints.append(&mut new_outpoints);
if new_outpoints.is_empty() {
if let Some((mut new_outpoints, new_outputs)) = self.check_spend_holder_transaction(&tx, height, &block_hash, &logger) {
+ #[cfg(not(fuzzing))]
debug_assert!(commitment_tx_to_counterparty_output.is_none(),
"A commitment transaction matched as both a counterparty and local commitment tx?");
if !new_outputs.1.is_empty() {
commitment_tx_to_counterparty_output,
},
});
+ // Now that we've detected a confirmed commitment transaction, attempt to cancel
+ // pending claims for any commitments that were previously confirmed such that
+ // we don't continue claiming inputs that no longer exist.
+ self.cancel_prev_commitment_claims(&logger, &txid);
}
}
if tx.input.len() >= 1 {
break;
}
}
- self.is_resolving_htlc_output(&tx, height, &block_hash, &logger);
+ self.is_resolving_htlc_output(&tx, height, &block_hash, logger);
- self.check_tx_and_push_spendable_outputs(&tx, height, &block_hash, &logger);
+ self.check_tx_and_push_spendable_outputs(&tx, height, &block_hash, logger);
}
}
- if height > self.best_block.height() {
+ if height > self.best_block.height {
self.best_block = BestBlock::new(block_hash, height);
}
- self.block_confirmed(height, block_hash, txn_matched, watch_outputs, claimable_outpoints, &broadcaster, &fee_estimator, &logger)
+ self.block_confirmed(height, block_hash, txn_matched, watch_outputs, claimable_outpoints, &broadcaster, &fee_estimator, logger)
}
/// Update state for new block(s)/transaction(s) confirmed. Note that the caller must update
mut claimable_outpoints: Vec<PackageTemplate>,
broadcaster: &B,
fee_estimator: &LowerBoundedFeeEstimator<F>,
- logger: &L,
+ logger: &WithChannelMonitor<L>,
) -> Vec<TransactionOutputs>
where
B::Target: BroadcasterInterface,
L::Target: Logger,
{
log_trace!(logger, "Processing {} matched transactions for block at height {}.", txn_matched.len(), conf_height);
- debug_assert!(self.best_block.height() >= conf_height);
+ debug_assert!(self.best_block.height >= conf_height);
let should_broadcast = self.should_broadcast_holder_commitment_txn(logger);
if should_broadcast {
- let funding_outp = HolderFundingOutput::build(self.funding_redeemscript.clone(), self.channel_value_satoshis, self.onchain_tx_handler.channel_type_features().clone());
- let commitment_package = PackageTemplate::build_package(self.funding_info.0.txid.clone(), self.funding_info.0.index as u32, PackageSolvingData::HolderFundingOutput(funding_outp), self.best_block.height(), self.best_block.height());
- claimable_outpoints.push(commitment_package);
- self.pending_monitor_events.push(MonitorEvent::HolderForceClosed(self.funding_info.0));
- // Although we aren't signing the transaction directly here, the transaction will be signed
- // in the claim that is queued to OnchainTxHandler. We set holder_tx_signed here to reject
- // new channel updates.
- self.holder_tx_signed = true;
- // We can't broadcast our HTLC transactions while the commitment transaction is
- // unconfirmed. We'll delay doing so until we detect the confirmed commitment in
- // `transactions_confirmed`.
- if !self.onchain_tx_handler.channel_type_features().supports_anchors_zero_fee_htlc_tx() {
- // Because we're broadcasting a commitment transaction, we should construct the package
- // assuming it gets confirmed in the next block. Sadly, we have code which considers
- // "not yet confirmed" things as discardable, so we cannot do that here.
- let (mut new_outpoints, _) = self.get_broadcasted_holder_claims(&self.current_holder_commitment_tx, self.best_block.height());
- let unsigned_commitment_tx = self.onchain_tx_handler.get_unsigned_holder_commitment_tx();
- let new_outputs = self.get_broadcasted_holder_watch_outputs(&self.current_holder_commitment_tx, &unsigned_commitment_tx);
- if !new_outputs.is_empty() {
- watch_outputs.push((self.current_holder_commitment_tx.txid.clone(), new_outputs));
- }
- claimable_outpoints.append(&mut new_outpoints);
- }
+ let (mut new_outpoints, mut new_outputs) = self.generate_claimable_outpoints_and_watch_outputs(ClosureReason::HTLCsTimedOut);
+ claimable_outpoints.append(&mut new_outpoints);
+ watch_outputs.append(&mut new_outputs);
}
// Find which on-chain events have reached their confirmation threshold.
log_debug!(logger, "Descriptor {} has got enough confirmations to be passed upstream", log_spendable!(descriptor));
self.pending_events.push(Event::SpendableOutputs {
outputs: vec![descriptor],
- channel_id: Some(self.funding_info.0.to_channel_id()),
+ channel_id: Some(self.channel_id()),
});
self.spendable_txids_confirmed.push(entry.txid);
},
}
}
- self.onchain_tx_handler.update_claims_view_from_requests(claimable_outpoints, conf_height, self.best_block.height(), broadcaster, fee_estimator, logger);
- self.onchain_tx_handler.update_claims_view_from_matched_txn(&txn_matched, conf_height, conf_hash, self.best_block.height(), broadcaster, fee_estimator, logger);
+ self.onchain_tx_handler.update_claims_view_from_requests(claimable_outpoints, conf_height, self.best_block.height, broadcaster, fee_estimator, logger);
+ self.onchain_tx_handler.update_claims_view_from_matched_txn(&txn_matched, conf_height, conf_hash, self.best_block.height, broadcaster, fee_estimator, logger);
// Determine new outputs to watch by comparing against previously known outputs to watch,
// updating the latter in the process.
watch_outputs
}
- pub fn block_disconnected<B: Deref, F: Deref, L: Deref>(&mut self, header: &Header, height: u32, broadcaster: B, fee_estimator: F, logger: L)
- where B::Target: BroadcasterInterface,
- F::Target: FeeEstimator,
- L::Target: Logger,
+ fn block_disconnected<B: Deref, F: Deref, L: Deref>(
+ &mut self, header: &Header, height: u32, broadcaster: B, fee_estimator: F, logger: &WithChannelMonitor<L>
+ ) where B::Target: BroadcasterInterface,
+ F::Target: FeeEstimator,
+ L::Target: Logger,
{
log_trace!(logger, "Block {} at height {} disconnected", header.block_hash(), height);
txid: &Txid,
broadcaster: B,
fee_estimator: &LowerBoundedFeeEstimator<F>,
- logger: L,
+ logger: &WithChannelMonitor<L>,
) where
B::Target: BroadcasterInterface,
F::Target: FeeEstimator,
/// Filters a block's `txdata` for transactions spending watched outputs or for any child
/// transactions thereof.
fn filter_block<'a>(&self, txdata: &TransactionData<'a>) -> Vec<&'a Transaction> {
- let mut matched_txn = HashSet::new();
+ let mut matched_txn = new_hash_set();
txdata.iter().filter(|&&(_, tx)| {
let mut matches = self.spends_watched_output(tx);
for input in tx.input.iter() {
false
}
- fn should_broadcast_holder_commitment_txn<L: Deref>(&self, logger: &L) -> bool where L::Target: Logger {
+ fn should_broadcast_holder_commitment_txn<L: Deref>(
+ &self, logger: &WithChannelMonitor<L>
+ ) -> bool where L::Target: Logger {
// There's no need to broadcast our commitment transaction if we've seen one confirmed (even
// with 1 confirmation) as it'll be rejected as duplicate/conflicting.
if self.funding_spend_confirmed.is_some() ||
// to the source, and if we don't fail the channel we will have to ensure that the next
// updates that peer sends us are update_fails, failing the channel if not. It's probably
// easier to just fail the channel as this case should be rare enough anyway.
- let height = self.best_block.height();
+ let height = self.best_block.height;
macro_rules! scan_commitment {
($htlcs: expr, $holder_tx: expr) => {
for ref htlc in $htlcs {
/// Check if any transaction broadcasted is resolving HTLC output by a success or timeout on a holder
/// or counterparty commitment tx, if so send back the source, preimage if found and payment_hash of resolved HTLC
- fn is_resolving_htlc_output<L: Deref>(&mut self, tx: &Transaction, height: u32, block_hash: &BlockHash, logger: &L) where L::Target: Logger {
+ fn is_resolving_htlc_output<L: Deref>(
+ &mut self, tx: &Transaction, height: u32, block_hash: &BlockHash, logger: &WithChannelMonitor<L>,
+ ) where L::Target: Logger {
'outer_loop: for input in &tx.input {
let mut payment_data = None;
let htlc_claim = HTLCClaim::from_witness(&input.witness);
spendable_outputs.push(SpendableOutputDescriptor::StaticOutput {
outpoint: OutPoint { txid: tx.txid(), index: i as u16 },
output: outp.clone(),
+ channel_keys_id: Some(self.channel_keys_id),
});
}
if let Some(ref broadcasted_holder_revokable_script) = self.broadcasted_holder_revokable_script {
spendable_outputs.push(SpendableOutputDescriptor::StaticOutput {
outpoint: OutPoint { txid: tx.txid(), index: i as u16 },
output: outp.clone(),
+ channel_keys_id: Some(self.channel_keys_id),
});
}
}
/// Checks if the confirmed transaction is paying funds back to some address we can assume to
/// own.
fn check_tx_and_push_spendable_outputs<L: Deref>(
- &mut self, tx: &Transaction, height: u32, block_hash: &BlockHash, logger: &L,
+ &mut self, tx: &Transaction, height: u32, block_hash: &BlockHash, logger: &WithChannelMonitor<L>,
) where L::Target: Logger {
for spendable_output in self.get_spendable_outputs(tx) {
let entry = OnchainEventEntry {
L::Target: Logger,
{
fn filtered_block_connected(&self, header: &Header, txdata: &TransactionData, height: u32) {
- self.0.block_connected(header, txdata, height, &*self.1, &*self.2, &*self.3);
+ self.0.block_connected(header, txdata, height, &*self.1, &*self.2, &self.3);
}
fn block_disconnected(&self, header: &Header, height: u32) {
- self.0.block_disconnected(header, height, &*self.1, &*self.2, &*self.3);
+ self.0.block_disconnected(header, height, &*self.1, &*self.2, &self.3);
}
}
L::Target: Logger,
{
fn transactions_confirmed(&self, header: &Header, txdata: &TransactionData, height: u32) {
- self.0.transactions_confirmed(header, txdata, height, &*self.1, &*self.2, &*self.3);
+ self.0.transactions_confirmed(header, txdata, height, &*self.1, &*self.2, &self.3);
}
fn transaction_unconfirmed(&self, txid: &Txid) {
- self.0.transaction_unconfirmed(txid, &*self.1, &*self.2, &*self.3);
+ self.0.transaction_unconfirmed(txid, &*self.1, &*self.2, &self.3);
}
fn best_block_updated(&self, header: &Header, height: u32) {
- self.0.best_block_updated(header, height, &*self.1, &*self.2, &*self.3);
+ self.0.best_block_updated(header, height, &*self.1, &*self.2, &self.3);
}
- fn get_relevant_txids(&self) -> Vec<(Txid, Option<BlockHash>)> {
+ fn get_relevant_txids(&self) -> Vec<(Txid, u32, Option<BlockHash>)> {
self.0.get_relevant_txids()
}
}
const MAX_ALLOC_SIZE: usize = 64*1024;
impl<'a, 'b, ES: EntropySource, SP: SignerProvider> ReadableArgs<(&'a ES, &'b SP)>
- for (BlockHash, ChannelMonitor<SP::Signer>) {
+ for (BlockHash, ChannelMonitor<SP::EcdsaSigner>) {
fn read<R: io::Read>(reader: &mut R, args: (&'a ES, &'b SP)) -> Result<Self, DecodeError> {
macro_rules! unwrap_obj {
($key: expr) => {
}
let counterparty_claimable_outpoints_len: u64 = Readable::read(reader)?;
- let mut counterparty_claimable_outpoints = HashMap::with_capacity(cmp::min(counterparty_claimable_outpoints_len as usize, MAX_ALLOC_SIZE / 64));
+ let mut counterparty_claimable_outpoints = hash_map_with_capacity(cmp::min(counterparty_claimable_outpoints_len as usize, MAX_ALLOC_SIZE / 64));
for _ in 0..counterparty_claimable_outpoints_len {
let txid: Txid = Readable::read(reader)?;
let htlcs_count: u64 = Readable::read(reader)?;
}
let counterparty_commitment_txn_on_chain_len: u64 = Readable::read(reader)?;
- let mut counterparty_commitment_txn_on_chain = HashMap::with_capacity(cmp::min(counterparty_commitment_txn_on_chain_len as usize, MAX_ALLOC_SIZE / 32));
+ let mut counterparty_commitment_txn_on_chain = hash_map_with_capacity(cmp::min(counterparty_commitment_txn_on_chain_len as usize, MAX_ALLOC_SIZE / 32));
for _ in 0..counterparty_commitment_txn_on_chain_len {
let txid: Txid = Readable::read(reader)?;
let commitment_number = <U48 as Readable>::read(reader)?.0;
}
let counterparty_hash_commitment_number_len: u64 = Readable::read(reader)?;
- let mut counterparty_hash_commitment_number = HashMap::with_capacity(cmp::min(counterparty_hash_commitment_number_len as usize, MAX_ALLOC_SIZE / 32));
+ let mut counterparty_hash_commitment_number = hash_map_with_capacity(cmp::min(counterparty_hash_commitment_number_len as usize, MAX_ALLOC_SIZE / 32));
for _ in 0..counterparty_hash_commitment_number_len {
let payment_hash: PaymentHash = Readable::read(reader)?;
let commitment_number = <U48 as Readable>::read(reader)?.0;
let current_holder_commitment_number = <U48 as Readable>::read(reader)?.0;
let payment_preimages_len: u64 = Readable::read(reader)?;
- let mut payment_preimages = HashMap::with_capacity(cmp::min(payment_preimages_len as usize, MAX_ALLOC_SIZE / 32));
+ let mut payment_preimages = hash_map_with_capacity(cmp::min(payment_preimages_len as usize, MAX_ALLOC_SIZE / 32));
for _ in 0..payment_preimages_len {
let preimage: PaymentPreimage = Readable::read(reader)?;
let hash = PaymentHash(Sha256::hash(&preimage.0[..]).to_byte_array());
}
let outputs_to_watch_len: u64 = Readable::read(reader)?;
- let mut outputs_to_watch = HashMap::with_capacity(cmp::min(outputs_to_watch_len as usize, MAX_ALLOC_SIZE / (mem::size_of::<Txid>() + mem::size_of::<u32>() + mem::size_of::<Vec<ScriptBuf>>())));
+ let mut outputs_to_watch = hash_map_with_capacity(cmp::min(outputs_to_watch_len as usize, MAX_ALLOC_SIZE / (mem::size_of::<Txid>() + mem::size_of::<u32>() + mem::size_of::<Vec<ScriptBuf>>())));
for _ in 0..outputs_to_watch_len {
let txid = Readable::read(reader)?;
let outputs_len: u64 = Readable::read(reader)?;
return Err(DecodeError::InvalidValue);
}
}
- let onchain_tx_handler: OnchainTxHandler<SP::Signer> = ReadableArgs::read(
+ let onchain_tx_handler: OnchainTxHandler<SP::EcdsaSigner> = ReadableArgs::read(
reader, (entropy_source, signer_provider, channel_value_satoshis, channel_keys_id)
)?;
let mut counterparty_node_id = None;
let mut confirmed_commitment_tx_counterparty_output = None;
let mut spendable_txids_confirmed = Some(Vec::new());
- let mut counterparty_fulfilled_htlcs = Some(HashMap::new());
+ let mut counterparty_fulfilled_htlcs = Some(new_hash_map());
let mut initial_counterparty_commitment_info = None;
+ let mut channel_id = None;
read_tlv_fields!(reader, {
(1, funding_spend_confirmed, option),
(3, htlcs_resolved_on_chain, optional_vec),
(13, spendable_txids_confirmed, optional_vec),
(15, counterparty_fulfilled_htlcs, option),
(17, initial_counterparty_commitment_info, option),
+ (19, channel_id, option),
});
+ // `HolderForceClosedWithInfo` replaced `HolderForceClosed` in v0.0.122. If we have both
+ // events, we can remove the `HolderForceClosed` event and just keep the `HolderForceClosedWithInfo`.
+ if let Some(ref mut pending_monitor_events) = pending_monitor_events {
+ if pending_monitor_events.iter().any(|e| matches!(e, MonitorEvent::HolderForceClosed(_))) &&
+ pending_monitor_events.iter().any(|e| matches!(e, MonitorEvent::HolderForceClosedWithInfo { .. }))
+ {
+ pending_monitor_events.retain(|e| !matches!(e, MonitorEvent::HolderForceClosed(_)));
+ }
+ }
+
// Monitors for anchor outputs channels opened in v0.0.116 suffered from a bug in which the
// wrong `counterparty_payment_script` was being tracked. Fix it now on deserialization to
// give them a chance to recognize the spendable output.
chan_utils::get_to_countersignatory_with_anchors_redeemscript(&payment_point).to_v0_p2wsh();
}
- Ok((best_block.block_hash(), ChannelMonitor::from_impl(ChannelMonitorImpl {
+ Ok((best_block.block_hash, ChannelMonitor::from_impl(ChannelMonitorImpl {
latest_update_id,
commitment_transaction_number_obscure_factor,
channel_keys_id,
holder_revocation_basepoint,
+ channel_id: channel_id.unwrap_or(ChannelId::v1_from_funding_outpoint(outpoint)),
funding_info,
current_counterparty_commitment_txid,
prev_counterparty_commitment_txid,
use super::ChannelMonitorUpdateStep;
use crate::{check_added_monitors, check_spends, get_local_commitment_txn, get_monitor, get_route_and_payment_hash, unwrap_send_err};
use crate::chain::{BestBlock, Confirm};
- use crate::chain::channelmonitor::ChannelMonitor;
+ use crate::chain::channelmonitor::{ChannelMonitor, WithChannelMonitor};
use crate::chain::package::{weight_offered_htlc, weight_received_htlc, weight_revoked_offered_htlc, weight_revoked_received_htlc, WEIGHT_REVOKED_OUTPUT};
use crate::chain::transaction::OutPoint;
use crate::sign::InMemorySigner;
- use crate::ln::{PaymentPreimage, PaymentHash};
+ use crate::ln::{PaymentPreimage, PaymentHash, ChannelId};
use crate::ln::channel_keys::{DelayedPaymentBasepoint, DelayedPaymentKey, HtlcBasepoint, RevocationBasepoint, RevocationKey};
use crate::ln::chan_utils::{self,HTLCOutputInCommitment, ChannelPublicKeys, ChannelTransactionParameters, HolderCommitmentTransaction, CounterpartyChannelTransactionParameters};
use crate::ln::channelmanager::{PaymentSendFailure, PaymentId, RecipientOnionFields};
use crate::util::errors::APIError;
use crate::util::test_utils::{TestLogger, TestBroadcaster, TestFeeEstimator};
use crate::util::ser::{ReadableArgs, Writeable};
+ use crate::util::logger::Logger;
use crate::sync::{Arc, Mutex};
use crate::io;
use crate::ln::features::ChannelTypeFeatures;
+
+ #[allow(unused_imports)]
use crate::prelude::*;
use std::str::FromStr;
preimages_slice_to_htlcs!($preimages_slice).into_iter().map(|(htlc, _)| (htlc, None)).collect()
}
}
- let dummy_sig = crate::util::crypto::sign(&secp_ctx,
+ let dummy_sig = crate::crypto::utils::sign(&secp_ctx,
&bitcoin::secp256k1::Message::from_slice(&[42; 32]).unwrap(),
&SecretKey::from_slice(&[42; 32]).unwrap());
htlc_basepoint: HtlcBasepoint::from(PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[48; 32]).unwrap()))
};
let funding_outpoint = OutPoint { txid: Txid::all_zeros(), index: u16::max_value() };
+ let channel_id = ChannelId::v1_from_funding_outpoint(funding_outpoint);
let channel_parameters = ChannelTransactionParameters {
holder_pubkeys: keys.holder_channel_pubkeys.clone(),
holder_selected_contest_delay: 66,
Some(ShutdownScript::new_p2wpkh_from_pubkey(shutdown_pubkey).into_inner()), 0, &ScriptBuf::new(),
(OutPoint { txid: Txid::from_slice(&[43; 32]).unwrap(), index: 0 }, ScriptBuf::new()),
&channel_parameters, ScriptBuf::new(), 46, 0, HolderCommitmentTransaction::dummy(&mut Vec::new()),
- best_block, dummy_key);
+ best_block, dummy_key, channel_id);
let mut htlcs = preimages_slice_to_htlcs!(preimages[0..10]);
let dummy_commitment_tx = HolderCommitmentTransaction::dummy(&mut htlcs);
+
monitor.provide_latest_holder_commitment_tx(dummy_commitment_tx.clone(),
htlcs.into_iter().map(|(htlc, _)| (htlc, Some(dummy_sig), None)).collect()).unwrap();
monitor.provide_latest_counterparty_commitment_tx(Txid::from_byte_array(Sha256::hash(b"1").to_byte_array()),
}
}
+ // Tests that wrapping a `Logger` in `WithChannelMonitor` tags every emitted
+ // record with the monitor's counterparty node id and channel id, at all log
+ // levels, so downstream consumers can attribute log lines to a channel.
+ #[test]
+ fn test_with_channel_monitor_impl_logger() {
+ let secp_ctx = Secp256k1::new();
+ let logger = Arc::new(TestLogger::new());
+
+ // Counterparty node id the monitor is constructed with; the assertion at the
+ // end checks this key shows up in each record's context.
+ let dummy_key = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
+
+ // Dummy in-memory signer with fixed secrets — only needed so a
+ // `ChannelMonitor` can be constructed; no signatures are validated here.
+ let keys = InMemorySigner::new(
+ &secp_ctx,
+ SecretKey::from_slice(&[41; 32]).unwrap(),
+ SecretKey::from_slice(&[41; 32]).unwrap(),
+ SecretKey::from_slice(&[41; 32]).unwrap(),
+ SecretKey::from_slice(&[41; 32]).unwrap(),
+ SecretKey::from_slice(&[41; 32]).unwrap(),
+ [41; 32],
+ 0,
+ [0; 32],
+ [0; 32],
+ );
+
+ // Arbitrary-but-valid counterparty pubkeys, each derived from a distinct
+ // fixed secret so they are all unique.
+ let counterparty_pubkeys = ChannelPublicKeys {
+ funding_pubkey: PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[44; 32]).unwrap()),
+ revocation_basepoint: RevocationBasepoint::from(PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[45; 32]).unwrap())),
+ payment_point: PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[46; 32]).unwrap()),
+ delayed_payment_basepoint: DelayedPaymentBasepoint::from(PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[47; 32]).unwrap())),
+ htlc_basepoint: HtlcBasepoint::from(PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[48; 32]).unwrap())),
+ };
+ // The channel id is derived v1-style from the funding outpoint, mirroring
+ // how `read` falls back when no explicit id was serialized.
+ let funding_outpoint = OutPoint { txid: Txid::all_zeros(), index: u16::max_value() };
+ let channel_id = ChannelId::v1_from_funding_outpoint(funding_outpoint);
+ let channel_parameters = ChannelTransactionParameters {
+ holder_pubkeys: keys.holder_channel_pubkeys.clone(),
+ holder_selected_contest_delay: 66,
+ is_outbound_from_holder: true,
+ counterparty_parameters: Some(CounterpartyChannelTransactionParameters {
+ pubkeys: counterparty_pubkeys,
+ selected_contest_delay: 67,
+ }),
+ funding_outpoint: Some(funding_outpoint),
+ channel_type_features: ChannelTypeFeatures::only_static_remote_key()
+ };
+ let shutdown_pubkey = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
+ let best_block = BestBlock::from_network(Network::Testnet);
+ // Build a minimal monitor; the scripts/amounts are placeholders since only
+ // the logging context (node id + channel id) is exercised below.
+ let monitor = ChannelMonitor::new(Secp256k1::new(), keys,
+ Some(ShutdownScript::new_p2wpkh_from_pubkey(shutdown_pubkey).into_inner()), 0, &ScriptBuf::new(),
+ (OutPoint { txid: Txid::from_slice(&[43; 32]).unwrap(), index: 0 }, ScriptBuf::new()),
+ &channel_parameters, ScriptBuf::new(), 46, 0, HolderCommitmentTransaction::dummy(&mut Vec::new()),
+ best_block, dummy_key, channel_id);
+
+ let chan_id = monitor.inner.lock().unwrap().channel_id();
+ let context_logger = WithChannelMonitor::from(&logger, &monitor);
+ // Emit one record per log level through the context-wrapping logger.
+ log_error!(context_logger, "This is an error");
+ log_warn!(context_logger, "This is an error");
+ log_debug!(context_logger, "This is an error");
+ log_trace!(context_logger, "This is an error");
+ log_gossip!(context_logger, "This is an error");
+ log_info!(context_logger, "This is an error");
+ // All 6 records must carry this module path plus the counterparty key and
+ // channel id in their context (checked via the TestLogger helper).
+ logger.assert_log_context_contains("lightning::chain::channelmonitor::tests", Some(dummy_key), Some(chan_id), 6);
+ }
// Further testing is done in the ChannelManager integration tests.
}