use bitcoin::sighash::EcdsaSighashType;
use crate::ln::channel::INITIAL_COMMITMENT_NUMBER;
-use crate::ln::{PaymentHash, PaymentPreimage, ChannelId};
+use crate::ln::types::{PaymentHash, PaymentPreimage, ChannelId};
use crate::ln::msgs::DecodeError;
use crate::ln::channel_keys::{DelayedPaymentKey, DelayedPaymentBasepoint, HtlcBasepoint, HtlcKey, RevocationKey, RevocationBasepoint};
use crate::ln::chan_utils::{self,CommitmentTransaction, CounterpartyCommitmentSecrets, HTLCOutputInCommitment, HTLCClaim, ChannelTransactionParameters, HolderCommitmentTransaction, TxCreationKeys};
use crate::chain::{BestBlock, WatchedOutput};
use crate::chain::chaininterface::{BroadcasterInterface, FeeEstimator, LowerBoundedFeeEstimator};
use crate::chain::transaction::{OutPoint, TransactionData};
-use crate::sign::{ChannelDerivationParameters, HTLCDescriptor, SpendableOutputDescriptor, StaticPaymentOutputDescriptor, DelayedPaymentOutputDescriptor, ecdsa::WriteableEcdsaChannelSigner, SignerProvider, EntropySource};
+use crate::sign::{ChannelDerivationParameters, HTLCDescriptor, SpendableOutputDescriptor, StaticPaymentOutputDescriptor, DelayedPaymentOutputDescriptor, ecdsa::EcdsaChannelSigner, SignerProvider, EntropySource};
use crate::chain::onchaintx::{ClaimEvent, FeerateStrategy, OnchainTxHandler};
use crate::chain::package::{CounterpartyOfferedHTLCOutput, CounterpartyReceivedHTLCOutput, HolderFundingOutput, HolderHTLCOutput, PackageSolvingData, PackageTemplate, RevokedOutput, RevokedHTLCOutput};
use crate::chain::Filter;
use crate::util::logger::{Logger, Record};
use crate::util::ser::{Readable, ReadableArgs, RequiredWrapper, MaybeReadable, UpgradableRequired, Writer, Writeable, U48};
use crate::util::byte_utils;
-use crate::events::{Event, EventHandler};
+use crate::events::{ClosureReason, Event, EventHandler};
use crate::events::bump_transaction::{AnchorDescriptor, BumpTransactionEvent};
+#[allow(unused_imports)]
use crate::prelude::*;
+
use core::{cmp, mem};
use crate::io::{self, Error};
-use core::convert::TryInto;
use core::ops::Deref;
use crate::sync::{Mutex, LockTestExt};
/// A monitor event containing an HTLCUpdate.
HTLCEvent(HTLCUpdate),
+ /// Indicates we broadcasted the channel's latest commitment transaction and thus closed the
+ /// channel. Holds information about the channel and why it was closed.
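+ ///
+ /// Replaces [`MonitorEvent::HolderForceClosed`] (see the serialization logic below for how
+ /// backwards compatibility with it is maintained).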
+ HolderForceClosedWithInfo {
+ /// The reason the channel was closed.
+ reason: ClosureReason,
+ /// The funding outpoint of the channel.
+ outpoint: OutPoint,
+ /// The channel ID of the channel.
+ channel_id: ChannelId,
+ },
+
/// Indicates we broadcasted the channel's latest commitment transaction and thus closed the
/// channel.
HolderForceClosed(OutPoint),
(2, monitor_update_id, required),
(4, channel_id, required),
},
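+ // Variant ID 5 is odd so that readers from versions prior to 0.0.122 treat
+ // `HolderForceClosedWithInfo` as an unknown, ignorable event rather than failing to
+ // deserialize; such readers rely on the duplicated `HolderForceClosed` written alongside it.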
+ (5, HolderForceClosedWithInfo) => {
+ (0, reason, upgradable_required),
+ (2, outpoint, required),
+ (4, channel_id, required),
+ },
;
(2, HTLCEvent),
(4, HolderForceClosed),
}
fn has_reached_confirmation_threshold(&self, best_block: &BestBlock) -> bool {
- best_block.height() >= self.confirmation_threshold()
+ best_block.height >= self.confirmation_threshold()
}
}
/// the "reorg path" (ie disconnecting blocks until you find a common ancestor from both the
/// returned block hash and the current chain and then reconnecting blocks to get to the
/// best chain) upon deserializing the object!
-pub struct ChannelMonitor<Signer: WriteableEcdsaChannelSigner> {
+pub struct ChannelMonitor<Signer: EcdsaChannelSigner> {
#[cfg(test)]
pub(crate) inner: Mutex<ChannelMonitorImpl<Signer>>,
#[cfg(not(test))]
pub(super) inner: Mutex<ChannelMonitorImpl<Signer>>,
}
-impl<Signer: WriteableEcdsaChannelSigner> Clone for ChannelMonitor<Signer> where Signer: Clone {
+impl<Signer: EcdsaChannelSigner> Clone for ChannelMonitor<Signer> where Signer: Clone {
fn clone(&self) -> Self {
let inner = self.inner.lock().unwrap().clone();
ChannelMonitor::from_impl(inner)
}
#[derive(Clone, PartialEq)]
-pub(crate) struct ChannelMonitorImpl<Signer: WriteableEcdsaChannelSigner> {
+pub(crate) struct ChannelMonitorImpl<Signer: EcdsaChannelSigner> {
latest_update_id: u64,
commitment_transaction_number_obscure_factor: u64,
/// Ordering of tuple data: (their_per_commitment_point, feerate_per_kw, to_broadcaster_sats,
/// to_countersignatory_sats)
initial_counterparty_commitment_info: Option<(PublicKey, u32, u64, u64)>,
+
+ /// The first block height at which we had no remaining claimable balances.
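+ ///
+ /// Used by [`ChannelMonitor::is_fully_resolved`] to gate monitor archival behind a
+ /// multi-week confirmation delay.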
+ balances_empty_height: Option<u32>,
}
/// Transaction outputs to watch for on-chain spends.
pub type TransactionOutputs = (Txid, Vec<(u32, TxOut)>);
-impl<Signer: WriteableEcdsaChannelSigner> PartialEq for ChannelMonitor<Signer> where Signer: PartialEq {
+impl<Signer: EcdsaChannelSigner> PartialEq for ChannelMonitor<Signer> where Signer: PartialEq {
fn eq(&self, other: &Self) -> bool {
// We need some kind of total lockorder. Absent a better idea, we sort by position in
// memory and take locks in that order (assuming that we can't move within memory while a
}
}
-impl<Signer: WriteableEcdsaChannelSigner> Writeable for ChannelMonitor<Signer> {
+impl<Signer: EcdsaChannelSigner> Writeable for ChannelMonitor<Signer> {
fn write<W: Writer>(&self, writer: &mut W) -> Result<(), Error> {
self.inner.lock().unwrap().write(writer)
}
const SERIALIZATION_VERSION: u8 = 1;
const MIN_SERIALIZATION_VERSION: u8 = 1;
-impl<Signer: WriteableEcdsaChannelSigner> Writeable for ChannelMonitorImpl<Signer> {
+impl<Signer: EcdsaChannelSigner> Writeable for ChannelMonitorImpl<Signer> {
fn write<W: Writer>(&self, writer: &mut W) -> Result<(), Error> {
write_ver_prefix!(writer, SERIALIZATION_VERSION, MIN_SERIALIZATION_VERSION);
writer.write_all(&(self.pending_monitor_events.iter().filter(|ev| match ev {
MonitorEvent::HTLCEvent(_) => true,
MonitorEvent::HolderForceClosed(_) => true,
+ MonitorEvent::HolderForceClosedWithInfo { .. } => true,
_ => false,
}).count() as u64).to_be_bytes())?;
for event in self.pending_monitor_events.iter() {
upd.write(writer)?;
},
MonitorEvent::HolderForceClosed(_) => 1u8.write(writer)?,
+ // `HolderForceClosedWithInfo` replaced `HolderForceClosed` in v0.0.122. To keep
+ // backwards compatibility, we write a `HolderForceClosed` event along with the
+ // `HolderForceClosedWithInfo` event. This is deduplicated in the reader.
+ MonitorEvent::HolderForceClosedWithInfo { .. } => 1u8.write(writer)?,
_ => {}, // Covered in the TLV writes below
}
}
event.write(writer)?;
}
- self.best_block.block_hash().write(writer)?;
- writer.write_all(&self.best_block.height().to_be_bytes())?;
+ self.best_block.block_hash.write(writer)?;
+ writer.write_all(&self.best_block.height.to_be_bytes())?;
writer.write_all(&(self.onchain_events_awaiting_threshold_conf.len() as u64).to_be_bytes())?;
for ref entry in self.onchain_events_awaiting_threshold_conf.iter() {
self.lockdown_from_offchain.write(writer)?;
self.holder_tx_signed.write(writer)?;
+ // If we have a `HolderForceClosedWithInfo` event, we also need to write a
+ // `HolderForceClosed` event for backwards compatibility.
+ let pending_monitor_events = match self.pending_monitor_events.iter().find(|ev| match ev {
+ MonitorEvent::HolderForceClosedWithInfo { .. } => true,
+ _ => false,
+ }) {
+ Some(MonitorEvent::HolderForceClosedWithInfo { outpoint, .. }) => {
+ let mut pending_monitor_events = self.pending_monitor_events.clone();
+ pending_monitor_events.push(MonitorEvent::HolderForceClosed(*outpoint));
+ pending_monitor_events
+ }
+ _ => self.pending_monitor_events.clone(),
+ };
+
write_tlv_fields!(writer, {
(1, self.funding_spend_confirmed, option),
(3, self.htlcs_resolved_on_chain, required_vec),
- (5, self.pending_monitor_events, required_vec),
+ (5, pending_monitor_events, required_vec),
(7, self.funding_spend_seen, required),
(9, self.counterparty_node_id, option),
(11, self.confirmed_commitment_tx_counterparty_output, option),
(15, self.counterparty_fulfilled_htlcs, required),
(17, self.initial_counterparty_commitment_info, option),
(19, self.channel_id, required),
+ (21, self.balances_empty_height, option),
});
Ok(())
logger: &'a L,
peer_id: Option<PublicKey>,
channel_id: Option<ChannelId>,
+ payment_hash: Option<PaymentHash>,
}
impl<'a, L: Deref> Logger for WithChannelMonitor<'a, L> where L::Target: Logger {
fn log(&self, mut record: Record) {
record.peer_id = self.peer_id;
record.channel_id = self.channel_id;
+ record.payment_hash = self.payment_hash;
self.logger.log(record)
}
}
impl<'a, L: Deref> WithChannelMonitor<'a, L> where L::Target: Logger {
- pub(crate) fn from<S: WriteableEcdsaChannelSigner>(logger: &'a L, monitor: &ChannelMonitor<S>) -> Self {
- Self::from_impl(logger, &*monitor.inner.lock().unwrap())
+ pub(crate) fn from<S: EcdsaChannelSigner>(logger: &'a L, monitor: &ChannelMonitor<S>, payment_hash: Option<PaymentHash>) -> Self {
+ Self::from_impl(logger, &*monitor.inner.lock().unwrap(), payment_hash)
}
- pub(crate) fn from_impl<S: WriteableEcdsaChannelSigner>(logger: &'a L, monitor_impl: &ChannelMonitorImpl<S>) -> Self {
+ pub(crate) fn from_impl<S: EcdsaChannelSigner>(logger: &'a L, monitor_impl: &ChannelMonitorImpl<S>, payment_hash: Option<PaymentHash>) -> Self {
let peer_id = monitor_impl.counterparty_node_id;
let channel_id = Some(monitor_impl.channel_id());
WithChannelMonitor {
- logger, peer_id, channel_id,
+ logger, peer_id, channel_id, payment_hash,
}
}
}
-impl<Signer: WriteableEcdsaChannelSigner> ChannelMonitor<Signer> {
+impl<Signer: EcdsaChannelSigner> ChannelMonitor<Signer> {
/// For lockorder enforcement purposes, we need to have a single site which constructs the
/// `inner` mutex; otherwise, in cases where we lock two monitors at the same time (eg in our
/// PartialEq implementation), we may spuriously decide a lockorder violation has occurred.
best_block,
counterparty_node_id: Some(counterparty_node_id),
initial_counterparty_commitment_info: None,
+ balances_empty_height: None,
})
}
where L::Target: Logger
{
let mut inner = self.inner.lock().unwrap();
- let logger = WithChannelMonitor::from_impl(logger, &*inner);
+ let logger = WithChannelMonitor::from_impl(logger, &*inner, None);
inner.provide_initial_counterparty_commitment_tx(txid,
htlc_outputs, commitment_number, their_cur_per_commitment_point, feerate_per_kw,
to_broadcaster_value_sat, to_countersignatory_value_sat, &logger);
logger: &L,
) where L::Target: Logger {
let mut inner = self.inner.lock().unwrap();
- let logger = WithChannelMonitor::from_impl(logger, &*inner);
+ let logger = WithChannelMonitor::from_impl(logger, &*inner, None);
inner.provide_latest_counterparty_commitment_tx(
txid, htlc_outputs, commitment_number, their_per_commitment_point, &logger)
}
L::Target: Logger,
{
let mut inner = self.inner.lock().unwrap();
- let logger = WithChannelMonitor::from_impl(logger, &*inner);
+ let logger = WithChannelMonitor::from_impl(logger, &*inner, Some(*payment_hash));
inner.provide_payment_preimage(
payment_hash, payment_preimage, broadcaster, fee_estimator, &logger)
}
L::Target: Logger,
{
let mut inner = self.inner.lock().unwrap();
- let logger = WithChannelMonitor::from_impl(logger, &*inner);
+ let logger = WithChannelMonitor::from_impl(logger, &*inner, None);
inner.update_monitor(updates, broadcaster, fee_estimator, &logger)
}
F::Target: chain::Filter, L::Target: Logger,
{
let lock = self.inner.lock().unwrap();
- let logger = WithChannelMonitor::from_impl(logger, &*lock);
+ let logger = WithChannelMonitor::from_impl(logger, &*lock, None);
log_trace!(&logger, "Registering funding outpoint {}", &lock.get_funding_txo().0);
filter.register_tx(&lock.get_funding_txo().0.txid, &lock.get_funding_txo().1);
for (txid, outputs) in lock.get_outputs_to_watch().iter() {
{
let mut inner = self.inner.lock().unwrap();
let fee_estimator = LowerBoundedFeeEstimator::new(&**fee_estimator);
- let logger = WithChannelMonitor::from_impl(logger, &*inner);
+ let logger = WithChannelMonitor::from_impl(logger, &*inner, None);
inner.queue_latest_holder_commitment_txn_for_broadcast(broadcaster, &fee_estimator, &logger);
}
pub fn unsafe_get_latest_holder_commitment_txn<L: Deref>(&self, logger: &L) -> Vec<Transaction>
where L::Target: Logger {
let mut inner = self.inner.lock().unwrap();
- let logger = WithChannelMonitor::from_impl(logger, &*inner);
+ let logger = WithChannelMonitor::from_impl(logger, &*inner, None);
inner.unsafe_get_latest_holder_commitment_txn(&logger)
}
L::Target: Logger,
{
let mut inner = self.inner.lock().unwrap();
- let logger = WithChannelMonitor::from_impl(logger, &*inner);
+ let logger = WithChannelMonitor::from_impl(logger, &*inner, None);
inner.block_connected(
header, txdata, height, broadcaster, fee_estimator, &logger)
}
L::Target: Logger,
{
let mut inner = self.inner.lock().unwrap();
- let logger = WithChannelMonitor::from_impl(logger, &*inner);
+ let logger = WithChannelMonitor::from_impl(logger, &*inner, None);
inner.block_disconnected(
header, height, broadcaster, fee_estimator, &logger)
}
{
let bounded_fee_estimator = LowerBoundedFeeEstimator::new(fee_estimator);
let mut inner = self.inner.lock().unwrap();
- let logger = WithChannelMonitor::from_impl(logger, &*inner);
+ let logger = WithChannelMonitor::from_impl(logger, &*inner, None);
inner.transactions_confirmed(
header, txdata, height, broadcaster, &bounded_fee_estimator, &logger)
}
{
let bounded_fee_estimator = LowerBoundedFeeEstimator::new(fee_estimator);
let mut inner = self.inner.lock().unwrap();
- let logger = WithChannelMonitor::from_impl(logger, &*inner);
+ let logger = WithChannelMonitor::from_impl(logger, &*inner, None);
inner.transaction_unconfirmed(
txid, broadcaster, &bounded_fee_estimator, &logger
);
{
let bounded_fee_estimator = LowerBoundedFeeEstimator::new(fee_estimator);
let mut inner = self.inner.lock().unwrap();
- let logger = WithChannelMonitor::from_impl(logger, &*inner);
+ let logger = WithChannelMonitor::from_impl(logger, &*inner, None);
inner.best_block_updated(
header, height, broadcaster, &bounded_fee_estimator, &logger
)
{
let fee_estimator = LowerBoundedFeeEstimator::new(fee_estimator);
let mut inner = self.inner.lock().unwrap();
- let logger = WithChannelMonitor::from_impl(logger, &*inner);
+ let logger = WithChannelMonitor::from_impl(logger, &*inner, None);
let current_height = inner.best_block.height;
inner.onchain_tx_handler.rebroadcast_pending_claims(
current_height, FeerateStrategy::HighestOfPreviousOrNew, &broadcaster, &fee_estimator, &logger,
{
let fee_estimator = LowerBoundedFeeEstimator::new(fee_estimator);
let mut inner = self.inner.lock().unwrap();
- let logger = WithChannelMonitor::from_impl(logger, &*inner);
+ let logger = WithChannelMonitor::from_impl(logger, &*inner, None);
let current_height = inner.best_block.height;
inner.onchain_tx_handler.rebroadcast_pending_claims(
current_height, FeerateStrategy::RetryPrevious, &broadcaster, &fee_estimator, &logger,
spendable_outputs
}
+ /// Checks whether the monitor is fully resolved. A resolved monitor is one that has claimed
+ /// all of its outputs and balances (i.e. [`Self::get_claimable_balances`] returns an empty
+ /// set).
+ ///
+ /// This function returns `true` only if [`Self::get_claimable_balances`] has been empty for
+ /// at least 4032 blocks, as an additional protection against any bugs resulting in
+ /// spuriously empty balance sets.
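+ ///
+ /// A minimal sketch of how a persister might use this to decide when a monitor can be
+ /// archived (`archive_monitor` is a hypothetical helper, not an LDK API):
+ ///
+ /// ```ignore
+ /// if monitor.is_fully_resolved(&logger) {
+ ///     archive_monitor(monitor.get_funding_txo().0);
+ /// }
+ /// ```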
+ pub fn is_fully_resolved<L: Logger>(&self, logger: &L) -> bool {
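+ // `get_claimable_balances` and `current_best_block` each take the `inner` lock
+ // internally, so we call them before acquiring the lock ourselves below.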
+ let mut is_all_funds_claimed = self.get_claimable_balances().is_empty();
+ let current_height = self.current_best_block().height;
+ let mut inner = self.inner.lock().unwrap();
+
+ if is_all_funds_claimed {
+ if !inner.funding_spend_seen {
+ debug_assert!(false, "We should see funding spend by the time a monitor clears out");
+ is_all_funds_claimed = false;
+ }
+ }
+
+ const BLOCKS_THRESHOLD: u32 = 4032; // 144 blocks/day * 28 days, i.e. ~four weeks
+ match (inner.balances_empty_height, is_all_funds_claimed) {
+ (Some(balances_empty_height), true) => {
+ // All funds have been claimed; check whether we've reached the blocks threshold.
+ return current_height >= balances_empty_height + BLOCKS_THRESHOLD;
+ },
+ (Some(_), false) => {
+ // We previously assumed all funds were claimed, but new claimable funds have
+ // appeared. This should not happen in practice.
+ debug_assert!(false, "Thought we were done claiming funds, but claimable_balances now has entries");
+ log_error!(logger,
+ "WARNING: LDK thought it was done claiming all the available funds in the ChannelMonitor for channel {}, but later decided it had more to claim. This is potentially an important bug in LDK, please report it at https://github.com/lightningdevkit/rust-lightning/issues/new",
+ inner.get_funding_txo().0);
+ inner.balances_empty_height = None;
+ false
+ },
+ (None, true) => {
+ // We've claimed all funds but `balances_empty_height` is unset; set it to the
+ // current block height.
+ log_debug!(logger,
+ "ChannelMonitor funded at {} is now fully resolved. It will become archivable in {} blocks",
+ inner.get_funding_txo().0, BLOCKS_THRESHOLD);
+ inner.balances_empty_height = Some(current_height);
+ false
+ },
+ (None, false) => {
+ // Have funds to claim.
+ false
+ },
+ }
+ }
+
#[cfg(test)]
pub fn get_counterparty_payment_script(&self) -> ScriptBuf {
self.inner.lock().unwrap().counterparty_payment_script.clone()
}
}
-impl<Signer: WriteableEcdsaChannelSigner> ChannelMonitorImpl<Signer> {
+impl<Signer: EcdsaChannelSigner> ChannelMonitorImpl<Signer> {
/// Helper for get_claimable_balances which does the work for an individual HTLC, generating up
/// to one `Balance` for the HTLC.
fn get_htlc_balance(&self, htlc: &HTLCOutputInCommitment, holder_commitment: bool,
}
}
-impl<Signer: WriteableEcdsaChannelSigner> ChannelMonitor<Signer> {
+impl<Signer: EcdsaChannelSigner> ChannelMonitor<Signer> {
/// Gets the balances in this channel which are either claimable by us if we were to
/// force-close the channel now or which are claimable on-chain (possibly awaiting
/// confirmation).
// before considering it "no longer pending" - this matches when we
// provide the ChannelManager an HTLC failure event.
Some(commitment_tx_output_idx) == htlc.transaction_output_index &&
- us.best_block.height() >= event.height + ANTI_REORG_DELAY - 1
+ us.best_block.height >= event.height + ANTI_REORG_DELAY - 1
} else if let OnchainEvent::HTLCSpendConfirmation { commitment_tx_output_idx, .. } = event.event {
// If the HTLC was fulfilled with a preimage, we consider the HTLC
// immediately non-pending, matching when we provide ChannelManager
vec![Vec::new(), Vec::new(), Vec::new(), Vec::new(), deliberately_bogus_accepted_htlc_witness_program().into()].into()
}
-impl<Signer: WriteableEcdsaChannelSigner> ChannelMonitorImpl<Signer> {
+impl<Signer: EcdsaChannelSigner> ChannelMonitorImpl<Signer> {
/// Inserts a revocation secret into this channel monitor. Prunes old preimages if they are
/// needed by neither holder commitment transaction HTLCs nor counterparty ones. Once we have
/// seen a counterparty commitment transaction's secret, its preimages are de facto pruned
/// (we can use the revocation key instead).
macro_rules! claim_htlcs {
($commitment_number: expr, $txid: expr) => {
let (htlc_claim_reqs, _) = self.get_counterparty_output_claim_info($commitment_number, $txid, None);
- self.onchain_tx_handler.update_claims_view_from_requests(htlc_claim_reqs, self.best_block.height(), self.best_block.height(), broadcaster, fee_estimator, logger);
+ self.onchain_tx_handler.update_claims_view_from_requests(htlc_claim_reqs, self.best_block.height, self.best_block.height, broadcaster, fee_estimator, logger);
}
}
if let Some(txid) = self.current_counterparty_commitment_txid {
// Assume that the broadcasted commitment transaction confirmed in the current best
// block. Even if not, it's a reasonable metric for the bump criteria on the HTLC
// transactions.
- let (claim_reqs, _) = self.get_broadcasted_holder_claims(&holder_commitment_tx, self.best_block.height());
- self.onchain_tx_handler.update_claims_view_from_requests(claim_reqs, self.best_block.height(), self.best_block.height(), broadcaster, fee_estimator, logger);
+ let (claim_reqs, _) = self.get_broadcasted_holder_claims(&holder_commitment_tx, self.best_block.height);
+ self.onchain_tx_handler.update_claims_view_from_requests(claim_reqs, self.best_block.height, self.best_block.height, broadcaster, fee_estimator, logger);
}
}
}
- fn generate_claimable_outpoints_and_watch_outputs(&mut self) -> (Vec<PackageTemplate>, Vec<TransactionOutputs>) {
+ fn generate_claimable_outpoints_and_watch_outputs(&mut self, reason: ClosureReason) -> (Vec<PackageTemplate>, Vec<TransactionOutputs>) {
let funding_outp = HolderFundingOutput::build(
self.funding_redeemscript.clone(),
self.channel_value_satoshis,
let commitment_package = PackageTemplate::build_package(
self.funding_info.0.txid.clone(), self.funding_info.0.index as u32,
PackageSolvingData::HolderFundingOutput(funding_outp),
- self.best_block.height(), self.best_block.height()
+ self.best_block.height, self.best_block.height
);
let mut claimable_outpoints = vec![commitment_package];
- self.pending_monitor_events.push(MonitorEvent::HolderForceClosed(self.funding_info.0));
+ let event = MonitorEvent::HolderForceClosedWithInfo {
+ reason,
+ outpoint: self.funding_info.0,
+ channel_id: self.channel_id,
+ };
+ self.pending_monitor_events.push(event);
+
// Although we aren't signing the transaction directly here, the transaction will be signed
// in the claim that is queued to OnchainTxHandler. We set holder_tx_signed here to reject
// new channel updates.
// assuming it gets confirmed in the next block. Sadly, we have code which considers
// "not yet confirmed" things as discardable, so we cannot do that here.
let (mut new_outpoints, _) = self.get_broadcasted_holder_claims(
- &self.current_holder_commitment_tx, self.best_block.height()
+ &self.current_holder_commitment_tx, self.best_block.height
);
let unsigned_commitment_tx = self.onchain_tx_handler.get_unsigned_holder_commitment_tx();
let new_outputs = self.get_broadcasted_holder_watch_outputs(
F::Target: FeeEstimator,
L::Target: Logger,
{
- let (claimable_outpoints, _) = self.generate_claimable_outpoints_and_watch_outputs();
+ let (claimable_outpoints, _) = self.generate_claimable_outpoints_and_watch_outputs(ClosureReason::HolderForceClosed);
self.onchain_tx_handler.update_claims_view_from_requests(
- claimable_outpoints, self.best_block.height(), self.best_block.height(), broadcaster,
+ claimable_outpoints, self.best_block.height, self.best_block.height, broadcaster,
fee_estimator, logger
);
}
{
let block_hash = header.block_hash();
- if height > self.best_block.height() {
+ if height > self.best_block.height {
self.best_block = BestBlock::new(block_hash, height);
log_trace!(logger, "Connecting new block {} at height {}", block_hash, height);
self.block_confirmed(height, block_hash, vec![], vec![], vec![], &broadcaster, &fee_estimator, logger)
- } else if block_hash != self.best_block.block_hash() {
+ } else if block_hash != self.best_block.block_hash {
self.best_block = BestBlock::new(block_hash, height);
log_trace!(logger, "Best block re-orged, replaced with new block {} at height {}", block_hash, height);
self.onchain_events_awaiting_threshold_conf.retain(|ref entry| entry.height <= height);
}
}
- if height > self.best_block.height() {
+ if height > self.best_block.height {
self.best_block = BestBlock::new(block_hash, height);
}
L::Target: Logger,
{
log_trace!(logger, "Processing {} matched transactions for block at height {}.", txn_matched.len(), conf_height);
- debug_assert!(self.best_block.height() >= conf_height);
+ debug_assert!(self.best_block.height >= conf_height);
let should_broadcast = self.should_broadcast_holder_commitment_txn(logger);
if should_broadcast {
- let (mut new_outpoints, mut new_outputs) = self.generate_claimable_outpoints_and_watch_outputs();
+ let (mut new_outpoints, mut new_outputs) = self.generate_claimable_outpoints_and_watch_outputs(ClosureReason::HTLCsTimedOut);
claimable_outpoints.append(&mut new_outpoints);
watch_outputs.append(&mut new_outputs);
}
}
}
- self.onchain_tx_handler.update_claims_view_from_requests(claimable_outpoints, conf_height, self.best_block.height(), broadcaster, fee_estimator, logger);
- self.onchain_tx_handler.update_claims_view_from_matched_txn(&txn_matched, conf_height, conf_hash, self.best_block.height(), broadcaster, fee_estimator, logger);
+ self.onchain_tx_handler.update_claims_view_from_requests(claimable_outpoints, conf_height, self.best_block.height, broadcaster, fee_estimator, logger);
+ self.onchain_tx_handler.update_claims_view_from_matched_txn(&txn_matched, conf_height, conf_hash, self.best_block.height, broadcaster, fee_estimator, logger);
// Determine new outputs to watch by comparing against previously known outputs to watch,
// updating the latter in the process.
// to the source, and if we don't fail the channel we will have to ensure that the next
// updates that peer sends us are update_fails, failing the channel if not. It's probably
// easier to just fail the channel as this case should be rare enough anyway.
- let height = self.best_block.height();
+ let height = self.best_block.height;
macro_rules! scan_commitment {
($htlcs: expr, $holder_tx: expr) => {
for ref htlc in $htlcs {
revocation_pubkey: broadcasted_holder_revokable_script.2,
channel_keys_id: self.channel_keys_id,
channel_value_satoshis: self.channel_value_satoshis,
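+ // Include the full channel parameters so downstream signers have the channel
+ // context needed when sweeping this output.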
+ channel_transaction_parameters: Some(self.onchain_tx_handler.channel_transaction_parameters.clone()),
}));
}
}
}
}
-impl<Signer: WriteableEcdsaChannelSigner, T: Deref, F: Deref, L: Deref> chain::Listen for (ChannelMonitor<Signer>, T, F, L)
+impl<Signer: EcdsaChannelSigner, T: Deref, F: Deref, L: Deref> chain::Listen for (ChannelMonitor<Signer>, T, F, L)
where
T::Target: BroadcasterInterface,
F::Target: FeeEstimator,
}
}
-impl<Signer: WriteableEcdsaChannelSigner, M, T: Deref, F: Deref, L: Deref> chain::Confirm for (M, T, F, L)
+impl<Signer: EcdsaChannelSigner, M, T: Deref, F: Deref, L: Deref> chain::Confirm for (M, T, F, L)
where
M: Deref<Target = ChannelMonitor<Signer>>,
T::Target: BroadcasterInterface,
let mut spendable_txids_confirmed = Some(Vec::new());
let mut counterparty_fulfilled_htlcs = Some(new_hash_map());
let mut initial_counterparty_commitment_info = None;
+ let mut balances_empty_height = None;
let mut channel_id = None;
read_tlv_fields!(reader, {
(1, funding_spend_confirmed, option),
(15, counterparty_fulfilled_htlcs, option),
(17, initial_counterparty_commitment_info, option),
(19, channel_id, option),
+ (21, balances_empty_height, option),
});
+ // `HolderForceClosedWithInfo` replaced `HolderForceClosed` in v0.0.122. If we have both
+ // events, we can remove the `HolderForceClosed` event and just keep the `HolderForceClosedWithInfo`.
+ if let Some(ref mut pending_monitor_events) = pending_monitor_events {
+ if pending_monitor_events.iter().any(|e| matches!(e, MonitorEvent::HolderForceClosed(_))) &&
+ pending_monitor_events.iter().any(|e| matches!(e, MonitorEvent::HolderForceClosedWithInfo { .. }))
+ {
+ pending_monitor_events.retain(|e| !matches!(e, MonitorEvent::HolderForceClosed(_)));
+ }
+ }
+
// Monitors for anchor outputs channels opened in v0.0.116 suffered from a bug in which the
// wrong `counterparty_payment_script` was being tracked. Fix it now on deserialization to
// give them a chance to recognize the spendable output.
chan_utils::get_to_countersignatory_with_anchors_redeemscript(&payment_point).to_v0_p2wsh();
}
- Ok((best_block.block_hash(), ChannelMonitor::from_impl(ChannelMonitorImpl {
+ Ok((best_block.block_hash, ChannelMonitor::from_impl(ChannelMonitorImpl {
latest_update_id,
commitment_transaction_number_obscure_factor,
best_block,
counterparty_node_id,
initial_counterparty_commitment_info,
+ balances_empty_height,
})))
}
}
use crate::chain::package::{weight_offered_htlc, weight_received_htlc, weight_revoked_offered_htlc, weight_revoked_received_htlc, WEIGHT_REVOKED_OUTPUT};
use crate::chain::transaction::OutPoint;
use crate::sign::InMemorySigner;
- use crate::ln::{PaymentPreimage, PaymentHash, ChannelId};
+ use crate::ln::types::{PaymentPreimage, PaymentHash, ChannelId};
use crate::ln::channel_keys::{DelayedPaymentBasepoint, DelayedPaymentKey, HtlcBasepoint, RevocationBasepoint, RevocationKey};
use crate::ln::chan_utils::{self,HTLCOutputInCommitment, ChannelPublicKeys, ChannelTransactionParameters, HolderCommitmentTransaction, CounterpartyChannelTransactionParameters};
use crate::ln::channelmanager::{PaymentSendFailure, PaymentId, RecipientOnionFields};
use crate::sync::{Arc, Mutex};
use crate::io;
use crate::ln::features::ChannelTypeFeatures;
+
+ #[allow(unused_imports)]
use crate::prelude::*;
use std::str::FromStr;
best_block, dummy_key, channel_id);
let chan_id = monitor.inner.lock().unwrap().channel_id();
- let context_logger = WithChannelMonitor::from(&logger, &monitor);
+ let payment_hash = PaymentHash([1; 32]);
+ let context_logger = WithChannelMonitor::from(&logger, &monitor, Some(payment_hash));
log_error!(context_logger, "This is an error");
log_warn!(context_logger, "This is a warning");
log_debug!(context_logger, "This is a debug message");