X-Git-Url: http://git.bitcoin.ninja/index.cgi?a=blobdiff_plain;f=lightning%2Fsrc%2Fln%2Fchannel.rs;h=68e70faf171e94504872ced0dc6b558b4d55bf6b;hb=d189cf0e82e3219882506e033aba1019646bfe15;hp=33df5303814da439f5f83fd33a2acf53963b115f;hpb=6d5c952556aefa8f61c5a2c74ff72f426f5406ac;p=rust-lightning diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index 33df5303..68e70faf 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -24,7 +24,7 @@ use bitcoin::secp256k1::{PublicKey,SecretKey}; use bitcoin::secp256k1::{Secp256k1,ecdsa::Signature}; use bitcoin::secp256k1; -use crate::ln::{ChannelId, PaymentPreimage, PaymentHash}; +use crate::ln::types::{ChannelId, PaymentPreimage, PaymentHash}; use crate::ln::features::{ChannelTypeFeatures, InitFeatures}; use crate::ln::msgs; use crate::ln::msgs::DecodeError; @@ -37,7 +37,7 @@ use crate::chain::BestBlock; use crate::chain::chaininterface::{FeeEstimator, ConfirmationTarget, LowerBoundedFeeEstimator}; use crate::chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, ChannelMonitorUpdateStep, LATENCY_GRACE_PERIOD_BLOCKS, CLOSED_CHANNEL_UPDATE_ID}; use crate::chain::transaction::{OutPoint, TransactionData}; -use crate::sign::ecdsa::{EcdsaChannelSigner, WriteableEcdsaChannelSigner}; +use crate::sign::ecdsa::EcdsaChannelSigner; use crate::sign::{EntropySource, ChannelSigner, SignerProvider, NodeSigner, Recipient}; use crate::events::ClosureReason; use crate::routing::gossip::NodeId; @@ -50,7 +50,6 @@ use crate::util::scid_utils::scid_from_parts; use crate::io; use crate::prelude::*; use core::{cmp,mem,fmt}; -use core::convert::TryInto; use core::ops::Deref; #[cfg(any(test, fuzzing, debug_assertions))] use crate::sync::Mutex; @@ -104,10 +103,38 @@ enum InboundHTLCRemovalReason { Fulfill(PaymentPreimage), } +/// Represents the resolution status of an inbound HTLC. +#[derive(Clone)] +enum InboundHTLCResolution { + /// Resolved implies the action we must take with the inbound HTLC has already been determined, + /// i.e., we already know whether it must be failed back or forwarded. + // + // TODO: Once this variant is removed, we should also clean up + // [`MonitorRestoreUpdates::accepted_htlcs`] as the path will be unreachable. + Resolved { + pending_htlc_status: PendingHTLCStatus, + }, + /// Pending implies we will attempt to resolve the inbound HTLC once it has been fully committed + /// to by both sides of the channel, i.e., once a `revoke_and_ack` has been processed by both + /// nodes for the state update in which it was proposed. + Pending { + update_add_htlc: msgs::UpdateAddHTLC, + }, +} + +impl_writeable_tlv_based_enum!(InboundHTLCResolution, + (0, Resolved) => { + (0, pending_htlc_status, required), + }, + (2, Pending) => { + (0, update_add_htlc, required), + }; +); + enum InboundHTLCState { /// Offered by remote, to be included in next local commitment tx. I.e., the remote sent an /// update_add_htlc message for this HTLC. - RemoteAnnounced(PendingHTLCStatus), + RemoteAnnounced(InboundHTLCResolution), /// Included in a received commitment_signed message (implying we've /// revoke_and_ack'd it), but the remote hasn't yet revoked their previous /// state (see the example below). We have not yet included this HTLC in a @@ -137,13 +164,13 @@ enum InboundHTLCState { /// Implies AwaitingRemoteRevoke. 
/// /// [BOLT #2]: https://github.com/lightning/bolts/blob/master/02-peer-protocol.md - AwaitingRemoteRevokeToAnnounce(PendingHTLCStatus), + AwaitingRemoteRevokeToAnnounce(InboundHTLCResolution), /// Included in a received commitment_signed message (implying we've revoke_and_ack'd it). /// We have also included this HTLC in our latest commitment_signed and are now just waiting /// on the remote's revoke_and_ack to make this HTLC an irrevocable part of the state of the /// channel (before it can then get forwarded and/or removed). /// Implies AwaitingRemoteRevoke. - AwaitingAnnouncedRemoteRevoke(PendingHTLCStatus), + AwaitingAnnouncedRemoteRevoke(InboundHTLCResolution), Committed, /// Removed by us and a new commitment_signed was sent (if we were AwaitingRemoteRevoke when we /// created it we would have put it in the holding cell instead). When they next revoke_and_ack @@ -896,25 +923,28 @@ pub(super) struct WithChannelContext<'a, L: Deref> where L::Target: Logger { pub logger: &'a L, pub peer_id: Option, pub channel_id: Option, + pub payment_hash: Option, } impl<'a, L: Deref> Logger for WithChannelContext<'a, L> where L::Target: Logger { fn log(&self, mut record: Record) { record.peer_id = self.peer_id; record.channel_id = self.channel_id; + record.payment_hash = self.payment_hash; self.logger.log(record) } } impl<'a, 'b, L: Deref> WithChannelContext<'a, L> where L::Target: Logger { - pub(super) fn from(logger: &'a L, context: &'b ChannelContext) -> Self + pub(super) fn from(logger: &'a L, context: &'b ChannelContext, payment_hash: Option) -> Self where S::Target: SignerProvider { WithChannelContext { logger, peer_id: Some(context.counterparty_node_id), channel_id: Some(context.channel_id), + payment_hash } } } @@ -971,14 +1001,16 @@ enum HTLCInitiator { RemoteOffered, } -/// An enum gathering stats on pending HTLCs, either inbound or outbound side. +/// Current counts of various HTLCs, useful for calculating current balances available exactly. struct HTLCStats { - pending_htlcs: u32, - pending_htlcs_value_msat: u64, + pending_inbound_htlcs: usize, + pending_outbound_htlcs: usize, + pending_inbound_htlcs_value_msat: u64, + pending_outbound_htlcs_value_msat: u64, on_counterparty_tx_dust_exposure_msat: u64, on_holder_tx_dust_exposure_msat: u64, - holding_cell_msat: u64, - on_holder_tx_holding_cell_htlcs_count: u32, // dust HTLCs *non*-included + outbound_holding_cell_msat: u64, + on_holder_tx_outbound_holding_cell_htlcs_count: u32, // dust HTLCs *non*-included } /// An enum gathering stats on commitment transaction, either local or remote. @@ -1044,6 +1076,7 @@ pub(super) struct MonitorRestoreUpdates { pub accepted_htlcs: Vec<(PendingHTLCInfo, u64)>, pub failed_htlcs: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>, pub finalized_claimed_htlcs: Vec, + pub pending_update_adds: Vec, pub funding_broadcastable: Option, pub channel_ready: Option, pub announcement_sigs: Option, @@ -1086,6 +1119,75 @@ pub(crate) struct ShutdownResult { pub(crate) channel_funding_txo: Option, } +/// Tracks the transaction number, along with current and next commitment points. +/// This consolidates the logic to advance our commitment number and request new +/// commitment points from our signer. +#[derive(Debug, Copy, Clone)] +enum HolderCommitmentPoint { + // TODO: add a variant for before our first commitment point is retrieved + /// We've advanced our commitment number and are waiting on the next commitment point. 
+ /// Until the `get_per_commitment_point` signer method becomes async, this variant + /// will not be used. + PendingNext { transaction_number: u64, current: PublicKey }, + /// Our current commitment point is ready, we've cached our next point, + /// and we are not pending a new one. + Available { transaction_number: u64, current: PublicKey, next: PublicKey }, +} + +impl HolderCommitmentPoint { + pub fn new(signer: &ChannelSignerType, secp_ctx: &Secp256k1) -> Self + where SP::Target: SignerProvider + { + HolderCommitmentPoint::Available { + transaction_number: INITIAL_COMMITMENT_NUMBER, + current: signer.as_ref().get_per_commitment_point(INITIAL_COMMITMENT_NUMBER, secp_ctx), + next: signer.as_ref().get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - 1, secp_ctx), + } + } + + pub fn is_available(&self) -> bool { + if let HolderCommitmentPoint::Available { .. } = self { true } else { false } + } + + pub fn transaction_number(&self) -> u64 { + match self { + HolderCommitmentPoint::PendingNext { transaction_number, .. } => *transaction_number, + HolderCommitmentPoint::Available { transaction_number, .. } => *transaction_number, + } + } + + pub fn current_point(&self) -> PublicKey { + match self { + HolderCommitmentPoint::PendingNext { current, .. } => *current, + HolderCommitmentPoint::Available { current, .. } => *current, + } + } + + pub fn next_point(&self) -> Option { + match self { + HolderCommitmentPoint::PendingNext { .. } => None, + HolderCommitmentPoint::Available { next, .. } => Some(*next), + } + } + + pub fn advance(&mut self, signer: &ChannelSignerType, secp_ctx: &Secp256k1, logger: &L) + where SP::Target: SignerProvider, L::Target: Logger + { + if let HolderCommitmentPoint::Available { transaction_number, next, .. } = self { + *self = HolderCommitmentPoint::PendingNext { + transaction_number: *transaction_number - 1, + current: *next, + }; + } + + if let HolderCommitmentPoint::PendingNext { transaction_number, current } = self { + let next = signer.as_ref().get_per_commitment_point(*transaction_number - 1, secp_ctx); + log_trace!(logger, "Retrieved next per-commitment point {}", *transaction_number - 1); + *self = HolderCommitmentPoint::Available { transaction_number: *transaction_number, current: *current, next }; + } + } +} + /// If the majority of the channels funds are to the fundee and the initiator holds only just /// enough funds to cover their reserve value, channels are at risk of getting "stuck". 
Because the /// initiator controls the feerate, if they then go to increase the channel fee, they may have no @@ -1161,9 +1263,9 @@ impl_writeable_tlv_based!(PendingChannelMonitorUpdate, { pub(super) enum ChannelPhase where SP::Target: SignerProvider { UnfundedOutboundV1(OutboundV1Channel), UnfundedInboundV1(InboundV1Channel), - #[cfg(dual_funding)] + #[cfg(any(dual_funding, splicing))] UnfundedOutboundV2(OutboundV2Channel), - #[cfg(dual_funding)] + #[cfg(any(dual_funding, splicing))] UnfundedInboundV2(InboundV2Channel), Funded(Channel), } @@ -1177,9 +1279,9 @@ impl<'a, SP: Deref> ChannelPhase where ChannelPhase::Funded(chan) => &chan.context, ChannelPhase::UnfundedOutboundV1(chan) => &chan.context, ChannelPhase::UnfundedInboundV1(chan) => &chan.context, - #[cfg(dual_funding)] + #[cfg(any(dual_funding, splicing))] ChannelPhase::UnfundedOutboundV2(chan) => &chan.context, - #[cfg(dual_funding)] + #[cfg(any(dual_funding, splicing))] ChannelPhase::UnfundedInboundV2(chan) => &chan.context, } } @@ -1189,9 +1291,9 @@ impl<'a, SP: Deref> ChannelPhase where ChannelPhase::Funded(ref mut chan) => &mut chan.context, ChannelPhase::UnfundedOutboundV1(ref mut chan) => &mut chan.context, ChannelPhase::UnfundedInboundV1(ref mut chan) => &mut chan.context, - #[cfg(dual_funding)] + #[cfg(any(dual_funding, splicing))] ChannelPhase::UnfundedOutboundV2(ref mut chan) => &mut chan.context, - #[cfg(dual_funding)] + #[cfg(any(dual_funding, splicing))] ChannelPhase::UnfundedInboundV2(ref mut chan) => &mut chan.context, } } @@ -1264,7 +1366,7 @@ pub(super) struct ChannelContext where SP::Target: SignerProvider { // generation start at 0 and count up...this simplifies some parts of implementation at the // cost of others, but should really just be changed. - cur_holder_commitment_transaction_number: u64, + holder_commitment_point: HolderCommitmentPoint, cur_counterparty_commitment_transaction_number: u64, value_to_self_msat: u64, // Excluding all pending_htlcs, fees, and anchor outputs pending_inbound_htlcs: Vec, @@ -1291,6 +1393,7 @@ pub(super) struct ChannelContext where SP::Target: SignerProvider { monitor_pending_forwards: Vec<(PendingHTLCInfo, u64)>, monitor_pending_failures: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>, monitor_pending_finalized_fulfills: Vec, + monitor_pending_update_adds: Vec, /// If we went to send a commitment update (ie some messages then [`msgs::CommitmentSigned`]) /// but our signer (initially) refused to give us a signature, we should retry at some point in @@ -1369,7 +1472,7 @@ pub(super) struct ChannelContext where SP::Target: SignerProvider { /// Either the height at which this channel was created or the height at which it was last /// serialized if it was serialized by versions prior to 0.0.103. /// We use this to close if funding is never broadcasted. 
- channel_creation_height: u32, + pub(super) channel_creation_height: u32, counterparty_dust_limit_satoshis: u64, @@ -1540,7 +1643,7 @@ impl ChannelContext where SP::Target: SignerProvider { L::Target: Logger, SP::Target: SignerProvider, { - let logger = WithContext::from(logger, Some(counterparty_node_id), Some(open_channel_fields.temporary_channel_id)); + let logger = WithContext::from(logger, Some(counterparty_node_id), Some(open_channel_fields.temporary_channel_id), None); let announced_channel = if (open_channel_fields.channel_flags & 1) == 1 { true } else { false }; let channel_value_satoshis = our_funding_satoshis.saturating_add(open_channel_fields.funding_satoshis); @@ -1705,6 +1808,9 @@ impl ChannelContext where SP::Target: SignerProvider { let value_to_self_msat = our_funding_satoshis * 1000 + msg_push_msat; + let holder_signer = ChannelSignerType::Ecdsa(holder_signer); + let holder_commitment_point = HolderCommitmentPoint::new(&holder_signer, &secp_ctx); + // TODO(dual_funding): Checks for `funding_feerate_sat_per_1000_weight`? let channel_context = ChannelContext { @@ -1730,11 +1836,11 @@ impl ChannelContext where SP::Target: SignerProvider { latest_monitor_update_id: 0, - holder_signer: ChannelSignerType::Ecdsa(holder_signer), + holder_signer, shutdown_scriptpubkey, destination_script, - cur_holder_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER, + holder_commitment_point, cur_counterparty_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER, value_to_self_msat, @@ -1755,6 +1861,7 @@ impl ChannelContext where SP::Target: SignerProvider { monitor_pending_forwards: Vec::new(), monitor_pending_failures: Vec::new(), monitor_pending_finalized_fulfills: Vec::new(), + monitor_pending_update_adds: Vec::new(), signer_pending_commitment_update: false, signer_pending_funding: false, @@ -1928,6 +2035,9 @@ impl ChannelContext where SP::Target: SignerProvider { let temporary_channel_id = temporary_channel_id.unwrap_or_else(|| ChannelId::temporary_from_entropy_source(entropy_source)); + let holder_signer = ChannelSignerType::Ecdsa(holder_signer); + let holder_commitment_point = HolderCommitmentPoint::new(&holder_signer, &secp_ctx); + Ok(Self { user_id, @@ -1951,11 +2061,11 @@ impl ChannelContext where SP::Target: SignerProvider { latest_monitor_update_id: 0, - holder_signer: ChannelSignerType::Ecdsa(holder_signer), + holder_signer, shutdown_scriptpubkey, destination_script, - cur_holder_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER, + holder_commitment_point, cur_counterparty_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER, value_to_self_msat, @@ -1976,6 +2086,7 @@ impl ChannelContext where SP::Target: SignerProvider { monitor_pending_forwards: Vec::new(), monitor_pending_failures: Vec::new(), monitor_pending_finalized_fulfills: Vec::new(), + monitor_pending_update_adds: Vec::new(), signer_pending_commitment_update: false, signer_pending_funding: false, @@ -2306,15 +2417,16 @@ impl ChannelContext where SP::Target: SignerProvider { cmp::max(self.config.options.cltv_expiry_delta, MIN_CLTV_EXPIRY_DELTA) } - pub fn get_max_dust_htlc_exposure_msat(&self, - fee_estimator: &LowerBoundedFeeEstimator) -> u64 - where F::Target: FeeEstimator - { + fn get_dust_exposure_limiting_feerate(&self, + fee_estimator: &LowerBoundedFeeEstimator, + ) -> u32 where F::Target: FeeEstimator { + fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::OnChainSweep) + } + + pub fn get_max_dust_htlc_exposure_msat(&self, limiting_feerate_sat_per_kw: u32) -> u64 { match 
self.config.options.max_dust_htlc_exposure { MaxDustHTLCExposure::FeeRateMultiplier(multiplier) => { - let feerate_per_kw = fee_estimator.bounded_sat_per_1000_weight( - ConfirmationTarget::OnChainSweep) as u64; - feerate_per_kw.saturating_mul(multiplier) + (limiting_feerate_sat_per_kw as u64).saturating_mul(multiplier) }, MaxDustHTLCExposure::FixedLimitMsat(limit) => limit, } @@ -2699,7 +2811,7 @@ impl ChannelContext where SP::Target: SignerProvider { feerate_per_kw = cmp::max(feerate_per_kw, feerate); } let feerate_plus_quarter = feerate_per_kw.checked_mul(1250).map(|v| v / 1000); - cmp::max(2530, feerate_plus_quarter.unwrap_or(u32::max_value())) + cmp::max(feerate_per_kw + 2530, feerate_plus_quarter.unwrap_or(u32::max_value())) } /// Get forwarding information for the counterparty. @@ -2707,86 +2819,111 @@ impl ChannelContext where SP::Target: SignerProvider { self.counterparty_forwarding_info.clone() } - /// Returns a HTLCStats about inbound pending htlcs - fn get_inbound_pending_htlc_stats(&self, outbound_feerate_update: Option) -> HTLCStats { + /// Returns a HTLCStats about pending htlcs + fn get_pending_htlc_stats(&self, outbound_feerate_update: Option, dust_exposure_limiting_feerate: u32) -> HTLCStats { let context = self; - let mut stats = HTLCStats { - pending_htlcs: context.pending_inbound_htlcs.len() as u32, - pending_htlcs_value_msat: 0, - on_counterparty_tx_dust_exposure_msat: 0, - on_holder_tx_dust_exposure_msat: 0, - holding_cell_msat: 0, - on_holder_tx_holding_cell_htlcs_count: 0, - }; + let uses_0_htlc_fee_anchors = self.get_channel_type().supports_anchors_zero_fee_htlc_tx(); - let (htlc_timeout_dust_limit, htlc_success_dust_limit) = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() { + let dust_buffer_feerate = context.get_dust_buffer_feerate(outbound_feerate_update); + let (htlc_timeout_dust_limit, htlc_success_dust_limit) = if uses_0_htlc_fee_anchors { (0, 0) } else { - let dust_buffer_feerate = context.get_dust_buffer_feerate(outbound_feerate_update) as u64; - (dust_buffer_feerate * htlc_timeout_tx_weight(context.get_channel_type()) / 1000, - dust_buffer_feerate * htlc_success_tx_weight(context.get_channel_type()) / 1000) + (dust_buffer_feerate as u64 * htlc_timeout_tx_weight(context.get_channel_type()) / 1000, + dust_buffer_feerate as u64 * htlc_success_tx_weight(context.get_channel_type()) / 1000) }; - let counterparty_dust_limit_timeout_sat = htlc_timeout_dust_limit + context.counterparty_dust_limit_satoshis; - let holder_dust_limit_success_sat = htlc_success_dust_limit + context.holder_dust_limit_satoshis; - for ref htlc in context.pending_inbound_htlcs.iter() { - stats.pending_htlcs_value_msat += htlc.amount_msat; - if htlc.amount_msat / 1000 < counterparty_dust_limit_timeout_sat { - stats.on_counterparty_tx_dust_exposure_msat += htlc.amount_msat; - } - if htlc.amount_msat / 1000 < holder_dust_limit_success_sat { - stats.on_holder_tx_dust_exposure_msat += htlc.amount_msat; - } - } - stats - } - /// Returns a HTLCStats about pending outbound htlcs, *including* pending adds in our holding cell. 
- fn get_outbound_pending_htlc_stats(&self, outbound_feerate_update: Option) -> HTLCStats { - let context = self; - let mut stats = HTLCStats { - pending_htlcs: context.pending_outbound_htlcs.len() as u32, - pending_htlcs_value_msat: 0, - on_counterparty_tx_dust_exposure_msat: 0, - on_holder_tx_dust_exposure_msat: 0, - holding_cell_msat: 0, - on_holder_tx_holding_cell_htlcs_count: 0, - }; + let mut on_holder_tx_dust_exposure_msat = 0; + let mut on_counterparty_tx_dust_exposure_msat = 0; - let (htlc_timeout_dust_limit, htlc_success_dust_limit) = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() { - (0, 0) - } else { - let dust_buffer_feerate = context.get_dust_buffer_feerate(outbound_feerate_update) as u64; - (dust_buffer_feerate * htlc_timeout_tx_weight(context.get_channel_type()) / 1000, - dust_buffer_feerate * htlc_success_tx_weight(context.get_channel_type()) / 1000) - }; - let counterparty_dust_limit_success_sat = htlc_success_dust_limit + context.counterparty_dust_limit_satoshis; - let holder_dust_limit_timeout_sat = htlc_timeout_dust_limit + context.holder_dust_limit_satoshis; - for ref htlc in context.pending_outbound_htlcs.iter() { - stats.pending_htlcs_value_msat += htlc.amount_msat; - if htlc.amount_msat / 1000 < counterparty_dust_limit_success_sat { - stats.on_counterparty_tx_dust_exposure_msat += htlc.amount_msat; - } - if htlc.amount_msat / 1000 < holder_dust_limit_timeout_sat { - stats.on_holder_tx_dust_exposure_msat += htlc.amount_msat; + let mut on_counterparty_tx_offered_nondust_htlcs = 0; + let mut on_counterparty_tx_accepted_nondust_htlcs = 0; + + let mut pending_inbound_htlcs_value_msat = 0; + + { + let counterparty_dust_limit_timeout_sat = htlc_timeout_dust_limit + context.counterparty_dust_limit_satoshis; + let holder_dust_limit_success_sat = htlc_success_dust_limit + context.holder_dust_limit_satoshis; + for ref htlc in context.pending_inbound_htlcs.iter() { + pending_inbound_htlcs_value_msat += htlc.amount_msat; + if htlc.amount_msat / 1000 < counterparty_dust_limit_timeout_sat { + on_counterparty_tx_dust_exposure_msat += htlc.amount_msat; + } else { + on_counterparty_tx_offered_nondust_htlcs += 1; + } + if htlc.amount_msat / 1000 < holder_dust_limit_success_sat { + on_holder_tx_dust_exposure_msat += htlc.amount_msat; + } } } - for update in context.holding_cell_htlc_updates.iter() { - if let &HTLCUpdateAwaitingACK::AddHTLC { ref amount_msat, .. 
} = update { - stats.pending_htlcs += 1; - stats.pending_htlcs_value_msat += amount_msat; - stats.holding_cell_msat += amount_msat; - if *amount_msat / 1000 < counterparty_dust_limit_success_sat { - stats.on_counterparty_tx_dust_exposure_msat += amount_msat; - } - if *amount_msat / 1000 < holder_dust_limit_timeout_sat { - stats.on_holder_tx_dust_exposure_msat += amount_msat; + let mut pending_outbound_htlcs_value_msat = 0; + let mut outbound_holding_cell_msat = 0; + let mut on_holder_tx_outbound_holding_cell_htlcs_count = 0; + let mut pending_outbound_htlcs = self.pending_outbound_htlcs.len(); + { + let counterparty_dust_limit_success_sat = htlc_success_dust_limit + context.counterparty_dust_limit_satoshis; + let holder_dust_limit_timeout_sat = htlc_timeout_dust_limit + context.holder_dust_limit_satoshis; + for ref htlc in context.pending_outbound_htlcs.iter() { + pending_outbound_htlcs_value_msat += htlc.amount_msat; + if htlc.amount_msat / 1000 < counterparty_dust_limit_success_sat { + on_counterparty_tx_dust_exposure_msat += htlc.amount_msat; } else { - stats.on_holder_tx_holding_cell_htlcs_count += 1; + on_counterparty_tx_accepted_nondust_htlcs += 1; + } + if htlc.amount_msat / 1000 < holder_dust_limit_timeout_sat { + on_holder_tx_dust_exposure_msat += htlc.amount_msat; } } + + for update in context.holding_cell_htlc_updates.iter() { + if let &HTLCUpdateAwaitingACK::AddHTLC { ref amount_msat, .. } = update { + pending_outbound_htlcs += 1; + pending_outbound_htlcs_value_msat += amount_msat; + outbound_holding_cell_msat += amount_msat; + if *amount_msat / 1000 < counterparty_dust_limit_success_sat { + on_counterparty_tx_dust_exposure_msat += amount_msat; + } else { + on_counterparty_tx_accepted_nondust_htlcs += 1; + } + if *amount_msat / 1000 < holder_dust_limit_timeout_sat { + on_holder_tx_dust_exposure_msat += amount_msat; + } else { + on_holder_tx_outbound_holding_cell_htlcs_count += 1; + } + } + } + } + + // Include any mining "excess" fees in the dust calculation + let excess_feerate_opt = outbound_feerate_update + .or(self.pending_update_fee.map(|(fee, _)| fee)) + .unwrap_or(self.feerate_per_kw) + .checked_sub(dust_exposure_limiting_feerate); + if let Some(excess_feerate) = excess_feerate_opt { + let on_counterparty_tx_nondust_htlcs = + on_counterparty_tx_accepted_nondust_htlcs + on_counterparty_tx_offered_nondust_htlcs; + on_counterparty_tx_dust_exposure_msat += + commit_tx_fee_msat(excess_feerate, on_counterparty_tx_nondust_htlcs, &self.channel_type); + if !self.channel_type.supports_anchors_zero_fee_htlc_tx() { + on_counterparty_tx_dust_exposure_msat += + on_counterparty_tx_accepted_nondust_htlcs as u64 * htlc_success_tx_weight(&self.channel_type) + * excess_feerate as u64 / 1000; + on_counterparty_tx_dust_exposure_msat += + on_counterparty_tx_offered_nondust_htlcs as u64 * htlc_timeout_tx_weight(&self.channel_type) + * excess_feerate as u64 / 1000; + } + } + + HTLCStats { + pending_inbound_htlcs: self.pending_inbound_htlcs.len(), + pending_outbound_htlcs, + pending_inbound_htlcs_value_msat, + pending_outbound_htlcs_value_msat, + on_counterparty_tx_dust_exposure_msat, + on_holder_tx_dust_exposure_msat, + outbound_holding_cell_msat, + on_holder_tx_outbound_holding_cell_htlcs_count, } - stats } /// Returns information on all pending inbound HTLCs. @@ -2891,9 +3028,11 @@ impl ChannelContext where SP::Target: SignerProvider { where F::Target: FeeEstimator { let context = &self; - // Note that we have to handle overflow due to the above case. 
- let inbound_stats = context.get_inbound_pending_htlc_stats(None); - let outbound_stats = context.get_outbound_pending_htlc_stats(None); + // Note that we have to handle overflow due to the case mentioned in the docs in general + // here. + + let dust_exposure_limiting_feerate = self.get_dust_exposure_limiting_feerate(&fee_estimator); + let htlc_stats = context.get_pending_htlc_stats(None, dust_exposure_limiting_feerate); let mut balance_msat = context.value_to_self_msat; for ref htlc in context.pending_inbound_htlcs.iter() { @@ -2901,10 +3040,10 @@ impl ChannelContext where SP::Target: SignerProvider { balance_msat += htlc.amount_msat; } } - balance_msat -= outbound_stats.pending_htlcs_value_msat; + balance_msat -= htlc_stats.pending_outbound_htlcs_value_msat; let outbound_capacity_msat = context.value_to_self_msat - .saturating_sub(outbound_stats.pending_htlcs_value_msat) + .saturating_sub(htlc_stats.pending_outbound_htlcs_value_msat) .saturating_sub( context.counterparty_selected_channel_reserve_satoshis.unwrap_or(0) * 1000); @@ -2964,7 +3103,7 @@ impl ChannelContext where SP::Target: SignerProvider { let holder_selected_chan_reserve_msat = context.holder_selected_channel_reserve_satoshis * 1000; let remote_balance_msat = (context.channel_value_satoshis * 1000 - context.value_to_self_msat) - .saturating_sub(inbound_stats.pending_htlcs_value_msat); + .saturating_sub(htlc_stats.pending_inbound_htlcs_value_msat); if remote_balance_msat < max_reserved_commit_tx_fee_msat + holder_selected_chan_reserve_msat + anchor_outputs_value_msat { // If another HTLC's fee would reduce the remote's balance below the reserve limit @@ -2981,7 +3120,7 @@ impl ChannelContext where SP::Target: SignerProvider { // send above the dust limit (as the router can always overpay to meet the dust limit). 
let mut remaining_msat_below_dust_exposure_limit = None; let mut dust_exposure_dust_limit_msat = 0; - let max_dust_htlc_exposure_msat = context.get_max_dust_htlc_exposure_msat(fee_estimator); + let max_dust_htlc_exposure_msat = context.get_max_dust_htlc_exposure_msat(dust_exposure_limiting_feerate); let (htlc_success_dust_limit, htlc_timeout_dust_limit) = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() { (context.counterparty_dust_limit_satoshis, context.holder_dust_limit_satoshis) @@ -2990,18 +3129,32 @@ impl ChannelContext where SP::Target: SignerProvider { (context.counterparty_dust_limit_satoshis + dust_buffer_feerate * htlc_success_tx_weight(context.get_channel_type()) / 1000, context.holder_dust_limit_satoshis + dust_buffer_feerate * htlc_timeout_tx_weight(context.get_channel_type()) / 1000) }; - let on_counterparty_dust_htlc_exposure_msat = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat; - if on_counterparty_dust_htlc_exposure_msat as i64 + htlc_success_dust_limit as i64 * 1000 - 1 > max_dust_htlc_exposure_msat.try_into().unwrap_or(i64::max_value()) { + + let excess_feerate_opt = self.feerate_per_kw.checked_sub(dust_exposure_limiting_feerate); + if let Some(excess_feerate) = excess_feerate_opt { + let htlc_dust_exposure_msat = + per_outbound_htlc_counterparty_commit_tx_fee_msat(excess_feerate, &context.channel_type); + let nondust_htlc_counterparty_tx_dust_exposure = + htlc_stats.on_counterparty_tx_dust_exposure_msat.saturating_add(htlc_dust_exposure_msat); + if nondust_htlc_counterparty_tx_dust_exposure > max_dust_htlc_exposure_msat { + // If adding an extra HTLC would put us over the dust limit in total fees, we cannot + // send any non-dust HTLCs. + available_capacity_msat = cmp::min(available_capacity_msat, htlc_success_dust_limit * 1000); + } + } + + if htlc_stats.on_counterparty_tx_dust_exposure_msat.saturating_add(htlc_success_dust_limit * 1000) > max_dust_htlc_exposure_msat.saturating_add(1) { + // Note that we don't use the `counterparty_tx_dust_exposure` (with + // `htlc_dust_exposure_msat`) here as it only applies to non-dust HTLCs. 
remaining_msat_below_dust_exposure_limit = - Some(max_dust_htlc_exposure_msat.saturating_sub(on_counterparty_dust_htlc_exposure_msat)); + Some(max_dust_htlc_exposure_msat.saturating_sub(htlc_stats.on_counterparty_tx_dust_exposure_msat)); dust_exposure_dust_limit_msat = cmp::max(dust_exposure_dust_limit_msat, htlc_success_dust_limit * 1000); } - let on_holder_dust_htlc_exposure_msat = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat; - if on_holder_dust_htlc_exposure_msat as i64 + htlc_timeout_dust_limit as i64 * 1000 - 1 > max_dust_htlc_exposure_msat.try_into().unwrap_or(i64::max_value()) { + if htlc_stats.on_holder_tx_dust_exposure_msat as i64 + htlc_timeout_dust_limit as i64 * 1000 - 1 > max_dust_htlc_exposure_msat.try_into().unwrap_or(i64::max_value()) { remaining_msat_below_dust_exposure_limit = Some(cmp::min( remaining_msat_below_dust_exposure_limit.unwrap_or(u64::max_value()), - max_dust_htlc_exposure_msat.saturating_sub(on_holder_dust_htlc_exposure_msat))); + max_dust_htlc_exposure_msat.saturating_sub(htlc_stats.on_holder_tx_dust_exposure_msat))); dust_exposure_dust_limit_msat = cmp::max(dust_exposure_dust_limit_msat, htlc_timeout_dust_limit * 1000); } @@ -3014,16 +3167,16 @@ impl ChannelContext where SP::Target: SignerProvider { } available_capacity_msat = cmp::min(available_capacity_msat, - context.counterparty_max_htlc_value_in_flight_msat - outbound_stats.pending_htlcs_value_msat); + context.counterparty_max_htlc_value_in_flight_msat - htlc_stats.pending_outbound_htlcs_value_msat); - if outbound_stats.pending_htlcs + 1 > context.counterparty_max_accepted_htlcs as u32 { + if htlc_stats.pending_outbound_htlcs + 1 > context.counterparty_max_accepted_htlcs as usize { available_capacity_msat = 0; } AvailableBalances { inbound_capacity_msat: cmp::max(context.channel_value_satoshis as i64 * 1000 - context.value_to_self_msat as i64 - - context.get_inbound_pending_htlc_stats(None).pending_htlcs_value_msat as i64 + - htlc_stats.pending_inbound_htlcs_value_msat as i64 - context.holder_selected_channel_reserve_satoshis as i64 * 1000, 0) as u64, outbound_capacity_msat, @@ -3470,7 +3623,7 @@ pub(crate) fn get_legacy_default_holder_selected_channel_reserve_satoshis(channe /// /// This is used both for outbound and inbound channels and has lower bound /// of `dust_limit_satoshis`. -#[cfg(dual_funding)] +#[cfg(any(dual_funding, splicing))] fn get_v2_channel_reserve_satoshis(channel_value_satoshis: u64, dust_limit_satoshis: u64) -> u64 { // Fixed at 1% of channel value by spec. let (q, _) = channel_value_satoshis.overflowing_div(100); @@ -3492,8 +3645,19 @@ pub(crate) fn commit_tx_fee_msat(feerate_per_kw: u32, num_htlcs: usize, channel_ (commitment_tx_base_weight(channel_type_features) + num_htlcs as u64 * COMMITMENT_TX_WEIGHT_PER_HTLC) * feerate_per_kw as u64 / 1000 * 1000 } +pub(crate) fn per_outbound_htlc_counterparty_commit_tx_fee_msat(feerate_per_kw: u32, channel_type_features: &ChannelTypeFeatures) -> u64 { + // Note that we need to divide before multiplying to round properly, + // since the lowest denomination of bitcoin on-chain is the satoshi. + let commitment_tx_fee = COMMITMENT_TX_WEIGHT_PER_HTLC * feerate_per_kw as u64 / 1000 * 1000; + if channel_type_features.supports_anchors_zero_fee_htlc_tx() { + commitment_tx_fee + htlc_success_tx_weight(channel_type_features) * feerate_per_kw as u64 / 1000 + } else { + commitment_tx_fee + } +} + /// Context for dual-funded channels. 
-#[cfg(dual_funding)] +#[cfg(any(dual_funding, splicing))] pub(super) struct DualFundingChannelContext { /// The amount in satoshis we will be contributing to the channel. pub our_funding_satoshis: u64, @@ -3510,7 +3674,7 @@ pub(super) struct DualFundingChannelContext { // Counterparty designates channel data owned by the another channel participant entity. pub(super) struct Channel where SP::Target: SignerProvider { pub context: ChannelContext, - #[cfg(dual_funding)] + #[cfg(any(dual_funding, splicing))] pub dual_funding_channel_context: Option, } @@ -3581,7 +3745,7 @@ impl FailHTLCMessageName for msgs::UpdateFailMalformedHTLC { impl Channel where SP::Target: SignerProvider, - ::EcdsaSigner: WriteableEcdsaChannelSigner + ::EcdsaSigner: EcdsaChannelSigner { fn check_remote_fee( channel_type: &ChannelTypeFeatures, fee_estimator: &LowerBoundedFeeEstimator, @@ -4089,20 +4253,13 @@ impl Channel where Ok(self.get_announcement_sigs(node_signer, chain_hash, user_config, best_block.height, logger)) } - pub fn update_add_htlc( - &mut self, msg: &msgs::UpdateAddHTLC, mut pending_forward_status: PendingHTLCStatus, - create_pending_htlc_status: F, fee_estimator: &LowerBoundedFeeEstimator, logger: &L - ) -> Result<(), ChannelError> - where F: for<'a> Fn(&'a Self, PendingHTLCStatus, u16) -> PendingHTLCStatus, - FE::Target: FeeEstimator, L::Target: Logger, - { + pub fn update_add_htlc( + &mut self, msg: &msgs::UpdateAddHTLC, pending_forward_status: PendingHTLCStatus, + fee_estimator: &LowerBoundedFeeEstimator, + ) -> Result<(), ChannelError> where F::Target: FeeEstimator { if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) { return Err(ChannelError::Close("Got add HTLC message when channel was not in an operational state".to_owned())); } - // We can't accept HTLCs sent after we've sent a shutdown. - if self.context.channel_state.is_local_shutdown_sent() { - pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x4000|8); - } // If the remote has sent a shutdown prior to adding this HTLC, then they are in violation of the spec. if self.context.channel_state.is_remote_shutdown_sent() { return Err(ChannelError::Close("Got add HTLC message when channel was not in an operational state".to_owned())); @@ -4120,12 +4277,12 @@ impl Channel where return Err(ChannelError::Close(format!("Remote side tried to send less than our minimum HTLC value. Lower limit: ({}). 
Actual: ({})", self.context.holder_htlc_minimum_msat, msg.amount_msat))); } - let inbound_stats = self.context.get_inbound_pending_htlc_stats(None); - let outbound_stats = self.context.get_outbound_pending_htlc_stats(None); - if inbound_stats.pending_htlcs + 1 > self.context.holder_max_accepted_htlcs as u32 { + let dust_exposure_limiting_feerate = self.context.get_dust_exposure_limiting_feerate(&fee_estimator); + let htlc_stats = self.context.get_pending_htlc_stats(None, dust_exposure_limiting_feerate); + if htlc_stats.pending_inbound_htlcs + 1 > self.context.holder_max_accepted_htlcs as usize { return Err(ChannelError::Close(format!("Remote tried to push more than our max accepted HTLCs ({})", self.context.holder_max_accepted_htlcs))); } - if inbound_stats.pending_htlcs_value_msat + msg.amount_msat > self.context.holder_max_htlc_value_in_flight_msat { + if htlc_stats.pending_inbound_htlcs_value_msat + msg.amount_msat > self.context.holder_max_htlc_value_in_flight_msat { return Err(ChannelError::Close(format!("Remote HTLC add would put them over our max HTLC value ({})", self.context.holder_max_htlc_value_in_flight_msat))); } @@ -4150,36 +4307,8 @@ impl Channel where } } - let max_dust_htlc_exposure_msat = self.context.get_max_dust_htlc_exposure_msat(fee_estimator); - let (htlc_timeout_dust_limit, htlc_success_dust_limit) = if self.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() { - (0, 0) - } else { - let dust_buffer_feerate = self.context.get_dust_buffer_feerate(None) as u64; - (dust_buffer_feerate * htlc_timeout_tx_weight(self.context.get_channel_type()) / 1000, - dust_buffer_feerate * htlc_success_tx_weight(self.context.get_channel_type()) / 1000) - }; - let exposure_dust_limit_timeout_sats = htlc_timeout_dust_limit + self.context.counterparty_dust_limit_satoshis; - if msg.amount_msat / 1000 < exposure_dust_limit_timeout_sats { - let on_counterparty_tx_dust_htlc_exposure_msat = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat + msg.amount_msat; - if on_counterparty_tx_dust_htlc_exposure_msat > max_dust_htlc_exposure_msat { - log_info!(logger, "Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on counterparty commitment tx", - on_counterparty_tx_dust_htlc_exposure_msat, max_dust_htlc_exposure_msat); - pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x1000|7); - } - } - - let exposure_dust_limit_success_sats = htlc_success_dust_limit + self.context.holder_dust_limit_satoshis; - if msg.amount_msat / 1000 < exposure_dust_limit_success_sats { - let on_holder_tx_dust_htlc_exposure_msat = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat + msg.amount_msat; - if on_holder_tx_dust_htlc_exposure_msat > max_dust_htlc_exposure_msat { - log_info!(logger, "Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on holder commitment tx", - on_holder_tx_dust_htlc_exposure_msat, max_dust_htlc_exposure_msat); - pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x1000|7); - } - } - let pending_value_to_self_msat = - self.context.value_to_self_msat + inbound_stats.pending_htlcs_value_msat - removed_outbound_total_msat; + self.context.value_to_self_msat + htlc_stats.pending_inbound_htlcs_value_msat - removed_outbound_total_msat; let pending_remote_value_msat = self.context.channel_value_satoshis * 1000 - pending_value_to_self_msat; if pending_remote_value_msat 
< msg.amount_msat { @@ -4211,23 +4340,7 @@ impl Channel where } else { 0 }; - if !self.context.is_outbound() { - // `Some(())` is for the fee spike buffer we keep for the remote. This deviates from - // the spec because the fee spike buffer requirement doesn't exist on the receiver's - // side, only on the sender's. Note that with anchor outputs we are no longer as - // sensitive to fee spikes, so we need to account for them. - let htlc_candidate = HTLCCandidate::new(msg.amount_msat, HTLCInitiator::RemoteOffered); - let mut remote_fee_cost_incl_stuck_buffer_msat = self.context.next_remote_commit_tx_fee_msat(htlc_candidate, Some(())); - if !self.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() { - remote_fee_cost_incl_stuck_buffer_msat *= FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE; - } - if pending_remote_value_msat.saturating_sub(msg.amount_msat).saturating_sub(self.context.holder_selected_channel_reserve_satoshis * 1000).saturating_sub(anchor_outputs_value_msat) < remote_fee_cost_incl_stuck_buffer_msat { - // Note that if the pending_forward_status is not updated here, then it's because we're already failing - // the HTLC, i.e. its status is already set to failing. - log_info!(logger, "Attempting to fail HTLC due to fee spike buffer violation in channel {}. Rebalancing is required.", &self.context.channel_id()); - pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x1000|7); - } - } else { + if self.context.is_outbound() { // Check that they won't violate our local required channel reserve by adding this HTLC. let htlc_candidate = HTLCCandidate::new(msg.amount_msat, HTLCInitiator::RemoteOffered); let local_commit_tx_fee_msat = self.context.next_local_commit_tx_fee_msat(htlc_candidate, None); @@ -4255,7 +4368,9 @@ impl Channel where amount_msat: msg.amount_msat, payment_hash: msg.payment_hash, cltv_expiry: msg.cltv_expiry, - state: InboundHTLCState::RemoteAnnounced(pending_forward_status), + state: InboundHTLCState::RemoteAnnounced(InboundHTLCResolution::Resolved { + pending_htlc_status: pending_forward_status + }), }); Ok(()) } @@ -4341,9 +4456,9 @@ impl Channel where let funding_script = self.context.get_funding_redeemscript(); - let keys = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number); + let keys = self.context.build_holder_transaction_keys(self.context.holder_commitment_point.transaction_number()); - let commitment_stats = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &keys, true, false, logger); + let commitment_stats = self.context.build_commitment_transaction(self.context.holder_commitment_point.transaction_number(), &keys, true, false, logger); let commitment_txid = { let trusted_tx = commitment_stats.tx.trust(); let bitcoin_tx = trusted_tx.built_transaction(); @@ -4461,13 +4576,13 @@ impl Channel where } for htlc in self.context.pending_inbound_htlcs.iter_mut() { - let new_forward = if let &InboundHTLCState::RemoteAnnounced(ref forward_info) = &htlc.state { - Some(forward_info.clone()) + let htlc_resolution = if let &InboundHTLCState::RemoteAnnounced(ref resolution) = &htlc.state { + Some(resolution.clone()) } else { None }; - if let Some(forward_info) = new_forward { + if let Some(htlc_resolution) = htlc_resolution { log_trace!(logger, "Updating HTLC {} to AwaitingRemoteRevokeToAnnounce due to commitment_signed in channel {}.", &htlc.payment_hash, &self.context.channel_id); - htlc.state = 
InboundHTLCState::AwaitingRemoteRevokeToAnnounce(forward_info); + htlc.state = InboundHTLCState::AwaitingRemoteRevokeToAnnounce(htlc_resolution); need_commitment = true; } } @@ -4506,7 +4621,7 @@ impl Channel where channel_id: Some(self.context.channel_id()), }; - self.context.cur_holder_commitment_transaction_number -= 1; + self.context.holder_commitment_point.advance(&self.context.holder_signer, &self.context.secp_ctx, logger); self.context.expecting_peer_commitment_signed = false; // Note that if we need_commitment & !AwaitingRemoteRevoke we'll call // build_commitment_no_status_check() next which will reset this to RAAFirst. @@ -4777,6 +4892,7 @@ impl Channel where log_trace!(logger, "Updating HTLCs on receipt of RAA in channel {}...", &self.context.channel_id()); let mut to_forward_infos = Vec::new(); + let mut pending_update_adds = Vec::new(); let mut revoked_htlcs = Vec::new(); let mut finalized_claimed_htlcs = Vec::new(); let mut update_fail_htlcs = Vec::new(); @@ -4824,29 +4940,37 @@ impl Channel where let mut state = InboundHTLCState::Committed; mem::swap(&mut state, &mut htlc.state); - if let InboundHTLCState::AwaitingRemoteRevokeToAnnounce(forward_info) = state { + if let InboundHTLCState::AwaitingRemoteRevokeToAnnounce(resolution) = state { log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce {} to AwaitingAnnouncedRemoteRevoke", &htlc.payment_hash); - htlc.state = InboundHTLCState::AwaitingAnnouncedRemoteRevoke(forward_info); + htlc.state = InboundHTLCState::AwaitingAnnouncedRemoteRevoke(resolution); require_commitment = true; - } else if let InboundHTLCState::AwaitingAnnouncedRemoteRevoke(forward_info) = state { - match forward_info { - PendingHTLCStatus::Fail(fail_msg) => { - log_trace!(logger, " ...promoting inbound AwaitingAnnouncedRemoteRevoke {} to LocalRemoved due to PendingHTLCStatus indicating failure", &htlc.payment_hash); - require_commitment = true; - match fail_msg { - HTLCFailureMsg::Relay(msg) => { - htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailRelay(msg.reason.clone())); - update_fail_htlcs.push(msg) - }, - HTLCFailureMsg::Malformed(msg) => { - htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailMalformed((msg.sha256_of_onion, msg.failure_code))); - update_fail_malformed_htlcs.push(msg) + } else if let InboundHTLCState::AwaitingAnnouncedRemoteRevoke(resolution) = state { + match resolution { + InboundHTLCResolution::Resolved { pending_htlc_status } => + match pending_htlc_status { + PendingHTLCStatus::Fail(fail_msg) => { + log_trace!(logger, " ...promoting inbound AwaitingAnnouncedRemoteRevoke {} to LocalRemoved due to PendingHTLCStatus indicating failure", &htlc.payment_hash); + require_commitment = true; + match fail_msg { + HTLCFailureMsg::Relay(msg) => { + htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailRelay(msg.reason.clone())); + update_fail_htlcs.push(msg) + }, + HTLCFailureMsg::Malformed(msg) => { + htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailMalformed((msg.sha256_of_onion, msg.failure_code))); + update_fail_malformed_htlcs.push(msg) + }, + } }, + PendingHTLCStatus::Forward(forward_info) => { + log_trace!(logger, " ...promoting inbound AwaitingAnnouncedRemoteRevoke {} to Committed, attempting to forward", &htlc.payment_hash); + to_forward_infos.push((forward_info, htlc.htlc_id)); + htlc.state = InboundHTLCState::Committed; + } } - }, - PendingHTLCStatus::Forward(forward_info) => { + InboundHTLCResolution::Pending { 
update_add_htlc } => { log_trace!(logger, " ...promoting inbound AwaitingAnnouncedRemoteRevoke {} to Committed", &htlc.payment_hash); - to_forward_infos.push((forward_info, htlc.htlc_id)); + pending_update_adds.push(update_add_htlc); htlc.state = InboundHTLCState::Committed; } } @@ -4907,6 +5031,8 @@ impl Channel where } } + self.context.monitor_pending_update_adds.append(&mut pending_update_adds); + if self.context.channel_state.is_monitor_update_in_progress() { // We can't actually generate a new commitment transaction (incl by freeing holding // cells) while we can't update the monitor, so we just return what we have. @@ -5004,12 +5130,12 @@ impl Channel where } // Before proposing a feerate update, check that we can actually afford the new fee. - let inbound_stats = self.context.get_inbound_pending_htlc_stats(Some(feerate_per_kw)); - let outbound_stats = self.context.get_outbound_pending_htlc_stats(Some(feerate_per_kw)); - let keys = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number); - let commitment_stats = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &keys, true, true, logger); - let buffer_fee_msat = commit_tx_fee_sat(feerate_per_kw, commitment_stats.num_nondust_htlcs + outbound_stats.on_holder_tx_holding_cell_htlcs_count as usize + CONCURRENT_INBOUND_HTLC_FEE_BUFFER as usize, self.context.get_channel_type()) * 1000; - let holder_balance_msat = commitment_stats.local_balance_msat - outbound_stats.holding_cell_msat; + let dust_exposure_limiting_feerate = self.context.get_dust_exposure_limiting_feerate(&fee_estimator); + let htlc_stats = self.context.get_pending_htlc_stats(Some(feerate_per_kw), dust_exposure_limiting_feerate); + let keys = self.context.build_holder_transaction_keys(self.context.holder_commitment_point.transaction_number()); + let commitment_stats = self.context.build_commitment_transaction(self.context.holder_commitment_point.transaction_number(), &keys, true, true, logger); + let buffer_fee_msat = commit_tx_fee_sat(feerate_per_kw, commitment_stats.num_nondust_htlcs + htlc_stats.on_holder_tx_outbound_holding_cell_htlcs_count as usize + CONCURRENT_INBOUND_HTLC_FEE_BUFFER as usize, self.context.get_channel_type()) * 1000; + let holder_balance_msat = commitment_stats.local_balance_msat - htlc_stats.outbound_holding_cell_msat; if holder_balance_msat < buffer_fee_msat + self.context.counterparty_selected_channel_reserve_satoshis.unwrap() * 1000 { //TODO: auto-close after a number of failures? log_debug!(logger, "Cannot afford to send new feerate at {}", feerate_per_kw); @@ -5017,14 +5143,12 @@ impl Channel where } // Note, we evaluate pending htlc "preemptive" trimmed-to-dust threshold at the proposed `feerate_per_kw`. 
- let holder_tx_dust_exposure = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat; - let counterparty_tx_dust_exposure = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat; - let max_dust_htlc_exposure_msat = self.context.get_max_dust_htlc_exposure_msat(fee_estimator); - if holder_tx_dust_exposure > max_dust_htlc_exposure_msat { + let max_dust_htlc_exposure_msat = self.context.get_max_dust_htlc_exposure_msat(dust_exposure_limiting_feerate); + if htlc_stats.on_holder_tx_dust_exposure_msat > max_dust_htlc_exposure_msat { log_debug!(logger, "Cannot afford to send new feerate at {} without infringing max dust htlc exposure", feerate_per_kw); return None; } - if counterparty_tx_dust_exposure > max_dust_htlc_exposure_msat { + if htlc_stats.on_counterparty_tx_dust_exposure_msat > max_dust_htlc_exposure_msat { log_debug!(logger, "Cannot afford to send new feerate at {} without infringing max dust htlc exposure", feerate_per_kw); return None; } @@ -5191,12 +5315,7 @@ impl Channel where assert!(!self.context.is_outbound() || self.context.minimum_depth == Some(0), "Funding transaction broadcast by the local client before it should have - LDK didn't do it!"); self.context.monitor_pending_channel_ready = false; - let next_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx); - Some(msgs::ChannelReady { - channel_id: self.context.channel_id(), - next_per_commitment_point, - short_channel_id_alias: Some(self.context.outbound_scid_alias), - }) + Some(self.get_channel_ready()) } else { None }; let announcement_sigs = self.get_announcement_sigs(node_signer, chain_hash, user_config, best_block_height, logger); @@ -5207,13 +5326,16 @@ impl Channel where mem::swap(&mut failed_htlcs, &mut self.context.monitor_pending_failures); let mut finalized_claimed_htlcs = Vec::new(); mem::swap(&mut finalized_claimed_htlcs, &mut self.context.monitor_pending_finalized_fulfills); + let mut pending_update_adds = Vec::new(); + mem::swap(&mut pending_update_adds, &mut self.context.monitor_pending_update_adds); if self.context.channel_state.is_peer_disconnected() { self.context.monitor_pending_revoke_and_ack = false; self.context.monitor_pending_commitment_signed = false; return MonitorRestoreUpdates { raa: None, commitment_update: None, order: RAACommitmentOrder::RevokeAndACKFirst, - accepted_htlcs, failed_htlcs, finalized_claimed_htlcs, funding_broadcastable, channel_ready, announcement_sigs + accepted_htlcs, failed_htlcs, finalized_claimed_htlcs, pending_update_adds, + funding_broadcastable, channel_ready, announcement_sigs }; } @@ -5235,7 +5357,8 @@ impl Channel where if commitment_update.is_some() { "a" } else { "no" }, if raa.is_some() { "an" } else { "no" }, match order { RAACommitmentOrder::CommitmentFirst => "commitment", RAACommitmentOrder::RevokeAndACKFirst => "RAA"}); MonitorRestoreUpdates { - raa, commitment_update, order, accepted_htlcs, failed_htlcs, finalized_claimed_htlcs, funding_broadcastable, channel_ready, announcement_sigs + raa, commitment_update, order, accepted_htlcs, failed_htlcs, finalized_claimed_htlcs, + pending_update_adds, funding_broadcastable, channel_ready, announcement_sigs } } @@ -5253,20 +5376,16 @@ impl Channel where self.context.pending_update_fee = Some((msg.feerate_per_kw, FeeUpdateState::RemoteAnnounced)); self.context.update_time_counter += 1; // Check that we won't be pushed 
over our dust exposure limit by the feerate increase. - if !self.context.channel_type.supports_anchors_zero_fee_htlc_tx() { - let inbound_stats = self.context.get_inbound_pending_htlc_stats(None); - let outbound_stats = self.context.get_outbound_pending_htlc_stats(None); - let holder_tx_dust_exposure = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat; - let counterparty_tx_dust_exposure = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat; - let max_dust_htlc_exposure_msat = self.context.get_max_dust_htlc_exposure_msat(fee_estimator); - if holder_tx_dust_exposure > max_dust_htlc_exposure_msat { - return Err(ChannelError::Close(format!("Peer sent update_fee with a feerate ({}) which may over-expose us to dust-in-flight on our own transactions (totaling {} msat)", - msg.feerate_per_kw, holder_tx_dust_exposure))); - } - if counterparty_tx_dust_exposure > max_dust_htlc_exposure_msat { - return Err(ChannelError::Close(format!("Peer sent update_fee with a feerate ({}) which may over-expose us to dust-in-flight on our counterparty's transactions (totaling {} msat)", - msg.feerate_per_kw, counterparty_tx_dust_exposure))); - } + let dust_exposure_limiting_feerate = self.context.get_dust_exposure_limiting_feerate(&fee_estimator); + let htlc_stats = self.context.get_pending_htlc_stats(None, dust_exposure_limiting_feerate); + let max_dust_htlc_exposure_msat = self.context.get_max_dust_htlc_exposure_msat(dust_exposure_limiting_feerate); + if htlc_stats.on_holder_tx_dust_exposure_msat > max_dust_htlc_exposure_msat { + return Err(ChannelError::Close(format!("Peer sent update_fee with a feerate ({}) which may over-expose us to dust-in-flight on our own transactions (totaling {} msat)", + msg.feerate_per_kw, htlc_stats.on_holder_tx_dust_exposure_msat))); + } + if htlc_stats.on_counterparty_tx_dust_exposure_msat > max_dust_htlc_exposure_msat { + return Err(ChannelError::Close(format!("Peer sent update_fee with a feerate ({}) which may over-expose us to dust-in-flight on our counterparty's transactions (totaling {} msat)", + msg.feerate_per_kw, htlc_stats.on_counterparty_tx_dust_exposure_msat))); } Ok(()) } @@ -5282,7 +5401,7 @@ impl Channel where self.context.get_funding_signed_msg(logger).1 } else { None }; let channel_ready = if funding_signed.is_some() { - self.check_get_channel_ready(0) + self.check_get_channel_ready(0, logger) } else { None }; log_trace!(logger, "Signer unblocked with {} commitment_update, {} funding_signed and {} channel_ready", @@ -5298,8 +5417,8 @@ impl Channel where } fn get_last_revoke_and_ack(&self) -> msgs::RevokeAndACK { - let next_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx); - let per_commitment_secret = self.context.holder_signer.as_ref().release_commitment_secret(self.context.cur_holder_commitment_transaction_number + 2); + let next_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.holder_commitment_point.transaction_number(), &self.context.secp_ctx); + let per_commitment_secret = self.context.holder_signer.as_ref().release_commitment_secret(self.context.holder_commitment_point.transaction_number() + 2); msgs::RevokeAndACK { channel_id: self.context.channel_id, per_commitment_secret, @@ -5432,7 +5551,7 @@ impl Channel where return Err(ChannelError::Close("Peer sent an invalid channel_reestablish to force close 
in a non-standard way".to_owned())); } - let our_commitment_transaction = INITIAL_COMMITMENT_NUMBER - self.context.cur_holder_commitment_transaction_number - 1; + let our_commitment_transaction = INITIAL_COMMITMENT_NUMBER - self.context.holder_commitment_point.transaction_number() - 1; if msg.next_remote_commitment_number > 0 { let expected_point = self.context.holder_signer.as_ref().get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - msg.next_remote_commitment_number + 1, &self.context.secp_ctx); let given_secret = SecretKey::from_slice(&msg.your_last_per_commitment_secret) @@ -5494,13 +5613,8 @@ impl Channel where } // We have OurChannelReady set! - let next_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx); return Ok(ReestablishResponses { - channel_ready: Some(msgs::ChannelReady { - channel_id: self.context.channel_id(), - next_per_commitment_point, - short_channel_id_alias: Some(self.context.outbound_scid_alias), - }), + channel_ready: Some(self.get_channel_ready()), raa: None, commitment_update: None, order: RAACommitmentOrder::CommitmentFirst, shutdown_msg, announcement_sigs, @@ -5537,14 +5651,9 @@ impl Channel where } let next_counterparty_commitment_number = INITIAL_COMMITMENT_NUMBER - self.context.cur_counterparty_commitment_transaction_number + if is_awaiting_remote_revoke { 1 } else { 0 }; - let channel_ready = if msg.next_local_commitment_number == 1 && INITIAL_COMMITMENT_NUMBER - self.context.cur_holder_commitment_transaction_number == 1 { + let channel_ready = if msg.next_local_commitment_number == 1 && INITIAL_COMMITMENT_NUMBER - self.context.holder_commitment_point.transaction_number() == 1 { // We should never have to worry about MonitorUpdateInProgress resending ChannelReady - let next_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx); - Some(msgs::ChannelReady { - channel_id: self.context.channel_id(), - next_per_commitment_point, - short_channel_id_alias: Some(self.context.outbound_scid_alias), - }) + Some(self.get_channel_ready()) } else { None }; if msg.next_local_commitment_number == next_counterparty_commitment_number { @@ -6099,8 +6208,98 @@ impl Channel where }) } + pub fn can_accept_incoming_htlc( + &self, msg: &msgs::UpdateAddHTLC, fee_estimator: &LowerBoundedFeeEstimator, logger: L + ) -> Result<(), (&'static str, u16)> + where + F::Target: FeeEstimator, + L::Target: Logger + { + if self.context.channel_state.is_local_shutdown_sent() { + return Err(("Shutdown was already sent", 0x4000|8)) + } + + let dust_exposure_limiting_feerate = self.context.get_dust_exposure_limiting_feerate(&fee_estimator); + let htlc_stats = self.context.get_pending_htlc_stats(None, dust_exposure_limiting_feerate); + let max_dust_htlc_exposure_msat = self.context.get_max_dust_htlc_exposure_msat(dust_exposure_limiting_feerate); + let (htlc_timeout_dust_limit, htlc_success_dust_limit) = if self.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() { + (0, 0) + } else { + let dust_buffer_feerate = self.context.get_dust_buffer_feerate(None) as u64; + (dust_buffer_feerate * htlc_timeout_tx_weight(self.context.get_channel_type()) / 1000, + dust_buffer_feerate * htlc_success_tx_weight(self.context.get_channel_type()) / 1000) + }; + let exposure_dust_limit_timeout_sats = htlc_timeout_dust_limit + self.context.counterparty_dust_limit_satoshis; + if msg.amount_msat 
/ 1000 < exposure_dust_limit_timeout_sats { + let on_counterparty_tx_dust_htlc_exposure_msat = htlc_stats.on_counterparty_tx_dust_exposure_msat + msg.amount_msat; + if on_counterparty_tx_dust_htlc_exposure_msat > max_dust_htlc_exposure_msat { + log_info!(logger, "Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on counterparty commitment tx", + on_counterparty_tx_dust_htlc_exposure_msat, max_dust_htlc_exposure_msat); + return Err(("Exceeded our dust exposure limit on counterparty commitment tx", 0x1000|7)) + } + } else { + let htlc_dust_exposure_msat = + per_outbound_htlc_counterparty_commit_tx_fee_msat(self.context.feerate_per_kw, &self.context.channel_type); + let counterparty_tx_dust_exposure = + htlc_stats.on_counterparty_tx_dust_exposure_msat.saturating_add(htlc_dust_exposure_msat); + if counterparty_tx_dust_exposure > max_dust_htlc_exposure_msat { + log_info!(logger, "Cannot accept value that would put our exposure to tx fee dust at {} over the limit {} on counterparty commitment tx", + counterparty_tx_dust_exposure, max_dust_htlc_exposure_msat); + return Err(("Exceeded our tx fee dust exposure limit on counterparty commitment tx", 0x1000|7)) + } + } + + let exposure_dust_limit_success_sats = htlc_success_dust_limit + self.context.holder_dust_limit_satoshis; + if msg.amount_msat / 1000 < exposure_dust_limit_success_sats { + let on_holder_tx_dust_htlc_exposure_msat = htlc_stats.on_holder_tx_dust_exposure_msat + msg.amount_msat; + if on_holder_tx_dust_htlc_exposure_msat > max_dust_htlc_exposure_msat { + log_info!(logger, "Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on holder commitment tx", + on_holder_tx_dust_htlc_exposure_msat, max_dust_htlc_exposure_msat); + return Err(("Exceeded our dust exposure limit on holder commitment tx", 0x1000|7)) + } + } + + let anchor_outputs_value_msat = if self.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() { + ANCHOR_OUTPUT_VALUE_SATOSHI * 2 * 1000 + } else { + 0 + }; + + let mut removed_outbound_total_msat = 0; + for ref htlc in self.context.pending_outbound_htlcs.iter() { + if let OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(_)) = htlc.state { + removed_outbound_total_msat += htlc.amount_msat; + } else if let OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(_)) = htlc.state { + removed_outbound_total_msat += htlc.amount_msat; + } + } + + let pending_value_to_self_msat = + self.context.value_to_self_msat + htlc_stats.pending_inbound_htlcs_value_msat - removed_outbound_total_msat; + let pending_remote_value_msat = + self.context.channel_value_satoshis * 1000 - pending_value_to_self_msat; + + if !self.context.is_outbound() { + // `Some(())` is for the fee spike buffer we keep for the remote. This deviates from + // the spec because the fee spike buffer requirement doesn't exist on the receiver's + // side, only on the sender's. Note that with anchor outputs we are no longer as + // sensitive to fee spikes, so we need to account for them. 
+ let htlc_candidate = HTLCCandidate::new(msg.amount_msat, HTLCInitiator::RemoteOffered); + let mut remote_fee_cost_incl_stuck_buffer_msat = self.context.next_remote_commit_tx_fee_msat(htlc_candidate, Some(())); + if !self.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() { + remote_fee_cost_incl_stuck_buffer_msat *= FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE; + } + if pending_remote_value_msat.saturating_sub(msg.amount_msat).saturating_sub(self.context.holder_selected_channel_reserve_satoshis * 1000).saturating_sub(anchor_outputs_value_msat) < remote_fee_cost_incl_stuck_buffer_msat { + log_info!(logger, "Attempting to fail HTLC due to fee spike buffer violation in channel {}. Rebalancing is required.", &self.context.channel_id()); + return Err(("Fee spike buffer violation", 0x1000|7)); + } + } + + Ok(()) + } + pub fn get_cur_holder_commitment_transaction_number(&self) -> u64 { - self.context.cur_holder_commitment_transaction_number + 1 + self.context.holder_commitment_point.transaction_number() + 1 } pub fn get_cur_counterparty_commitment_transaction_number(&self) -> u64 { @@ -6176,6 +6375,26 @@ impl Channel where } } + /// On startup, its possible we detect some monitor updates have actually completed (and the + /// ChannelManager was simply stale). In that case, we should simply drop them, which we do + /// here after logging them. + pub fn on_startup_drop_completed_blocked_mon_updates_through(&mut self, logger: &L, loaded_mon_update_id: u64) { + let channel_id = self.context.channel_id(); + self.context.blocked_monitor_updates.retain(|update| { + if update.update.update_id <= loaded_mon_update_id { + log_info!( + logger, + "Dropping completed ChannelMonitorUpdate id {} on channel {} due to a stale ChannelManager", + update.update.update_id, + channel_id, + ); + false + } else { + true + } + }); + } + pub fn blocked_monitor_updates_pending(&self) -> usize { self.context.blocked_monitor_updates.len() } @@ -6195,7 +6414,7 @@ impl Channel where debug_assert!(self.context.minimum_depth.unwrap_or(1) > 0); return true; } - if self.context.cur_holder_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 1 && + if self.context.holder_commitment_point.transaction_number() == INITIAL_COMMITMENT_NUMBER - 1 && self.context.cur_counterparty_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 1 { // If we're a 0-conf channel, we'll move beyond AwaitingChannelReady immediately even while // waiting for the initial monitor persistence. Thus, we check if our commitment @@ -6254,7 +6473,9 @@ impl Channel where self.context.channel_update_status = status; } - fn check_get_channel_ready(&mut self, height: u32) -> Option { + fn check_get_channel_ready(&mut self, height: u32, logger: &L) -> Option + where L::Target: Logger + { // Called: // * always when a new block/transactions are confirmed with the new height // * when funding is signed with a height of 0 @@ -6274,6 +6495,8 @@ impl Channel where // If we're still pending the signature on a funding transaction, then we're not ready to send a // channel_ready yet. 
if self.context.signer_pending_funding { + // TODO: set signer_pending_channel_ready + log_debug!(logger, "Can't produce channel_ready: the signer is pending funding."); return None; } @@ -6306,22 +6529,35 @@ impl Channel where false }; - if need_commitment_update { - if !self.context.channel_state.is_monitor_update_in_progress() { - if !self.context.channel_state.is_peer_disconnected() { - let next_per_commitment_point = - self.context.holder_signer.as_ref().get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - 1, &self.context.secp_ctx); - return Some(msgs::ChannelReady { - channel_id: self.context.channel_id, - next_per_commitment_point, - short_channel_id_alias: Some(self.context.outbound_scid_alias), - }); - } - } else { - self.context.monitor_pending_channel_ready = true; - } + if !need_commitment_update { + log_debug!(logger, "Not producing channel_ready: we do not need a commitment update"); + return None; + } + + if self.context.channel_state.is_monitor_update_in_progress() { + log_debug!(logger, "Not producing channel_ready: a monitor update is in progress. Setting monitor_pending_channel_ready."); + self.context.monitor_pending_channel_ready = true; + return None; + } + + if self.context.channel_state.is_peer_disconnected() { + log_debug!(logger, "Not producing channel_ready: the peer is disconnected."); + return None; + } + + // TODO: when get_per_commiment_point becomes async, check if the point is + // available, if not, set signer_pending_channel_ready and return None + + Some(self.get_channel_ready()) + } + + fn get_channel_ready(&self) -> msgs::ChannelReady { + debug_assert!(self.context.holder_commitment_point.is_available()); + msgs::ChannelReady { + channel_id: self.context.channel_id(), + next_per_commitment_point: self.context.holder_commitment_point.current_point(), + short_channel_id_alias: Some(self.context.outbound_scid_alias), } - None } /// When a transaction is confirmed, we check whether it is or spends the funding transaction @@ -6388,7 +6624,7 @@ impl Channel where // If we allow 1-conf funding, we may need to check for channel_ready here and // send it immediately instead of waiting for a best_block_updated call (which // may have already happened for this block). - if let Some(channel_ready) = self.check_get_channel_ready(height) { + if let Some(channel_ready) = self.check_get_channel_ready(height, logger) { log_info!(logger, "Sending a channel_ready to our peer for channel {}", &self.context.channel_id); let announcement_sigs = self.get_announcement_sigs(node_signer, chain_hash, user_config, height, logger); msgs = (Some(channel_ready), announcement_sigs); @@ -6454,7 +6690,7 @@ impl Channel where self.context.update_time_counter = cmp::max(self.context.update_time_counter, highest_header_time); - if let Some(channel_ready) = self.check_get_channel_ready(height) { + if let Some(channel_ready) = self.check_get_channel_ready(height, logger) { let announcement_sigs = if let Some((chain_hash, node_signer, user_config)) = chain_node_signer { self.get_announcement_sigs(node_signer, chain_hash, user_config, height, logger) } else { None }; @@ -6757,7 +6993,7 @@ impl Channel where // next_local_commitment_number is the next commitment_signed number we expect to // receive (indicating if they need to resend one that we missed). 
- next_local_commitment_number: INITIAL_COMMITMENT_NUMBER - self.context.cur_holder_commitment_transaction_number, + next_local_commitment_number: INITIAL_COMMITMENT_NUMBER - self.context.holder_commitment_point.transaction_number(), // We have to set next_remote_commitment_number to the next revoke_and_ack we expect to // receive, however we track it by the next commitment number for a remote transaction // (which is one further, as they always revoke previous commitment transaction, not @@ -7309,7 +7545,7 @@ impl OutboundV1Channel where SP::Target: SignerProvider { } if self.context.commitment_secrets.get_min_seen_secret() != (1 << 48) || self.context.cur_counterparty_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER || - self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER { + self.context.holder_commitment_point.transaction_number() != INITIAL_COMMITMENT_NUMBER { panic!("Should not have advanced channel commitment tx numbers prior to funding_created"); } @@ -7361,6 +7597,12 @@ impl OutboundV1Channel where SP::Target: SignerProvider { Ok(self.get_open_channel(chain_hash)) } + /// Returns true if we can resume the channel by sending the [`msgs::OpenChannel`] again. + pub fn is_resumable(&self) -> bool { + !self.context.have_received_message() && + self.context.holder_commitment_point.transaction_number() == INITIAL_COMMITMENT_NUMBER + } + pub fn get_open_channel(&self, chain_hash: ChainHash) -> msgs::OpenChannel { if !self.context.is_outbound() { panic!("Tried to open a channel for an inbound channel?"); @@ -7369,11 +7611,11 @@ impl OutboundV1Channel where SP::Target: SignerProvider { panic!("Cannot generate an open_channel after we've moved forward"); } - if self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER { + if self.context.holder_commitment_point.transaction_number() != INITIAL_COMMITMENT_NUMBER { panic!("Tried to send an open_channel for a channel that has already advanced"); } - let first_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx); + let first_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.holder_commitment_point.transaction_number(), &self.context.secp_ctx); let keys = self.context.get_holder_pubkeys(); msgs::OpenChannel { @@ -7554,7 +7796,7 @@ impl OutboundV1Channel where SP::Target: SignerProvider { } if self.context.commitment_secrets.get_min_seen_secret() != (1 << 48) || self.context.cur_counterparty_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER || - self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER { + self.context.holder_commitment_point.transaction_number() != INITIAL_COMMITMENT_NUMBER { panic!("Should not have advanced channel commitment tx numbers prior to funding_created"); } @@ -7568,8 +7810,8 @@ impl OutboundV1Channel where SP::Target: SignerProvider { log_trace!(logger, "Initial counterparty tx for channel {} is: txid {} tx {}", &self.context.channel_id(), counterparty_initial_bitcoin_tx.txid, encode::serialize_hex(&counterparty_initial_bitcoin_tx.transaction)); - let holder_signer = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number); - let initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &holder_signer, true, false, logger).tx; + let holder_signer = 
self.context.build_holder_transaction_keys(self.context.holder_commitment_point.transaction_number()); + let initial_commitment_tx = self.context.build_commitment_transaction(self.context.holder_commitment_point.transaction_number(), &holder_signer, true, false, logger).tx; { let trusted_tx = initial_commitment_tx.trust(); let initial_commitment_bitcoin_tx = trusted_tx.built_transaction(); @@ -7622,18 +7864,18 @@ impl OutboundV1Channel where SP::Target: SignerProvider { } else { self.context.channel_state = ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::new()); } - self.context.cur_holder_commitment_transaction_number -= 1; + self.context.holder_commitment_point.advance(&self.context.holder_signer, &self.context.secp_ctx, logger); self.context.cur_counterparty_commitment_transaction_number -= 1; log_info!(logger, "Received funding_signed from peer for channel {}", &self.context.channel_id()); let mut channel = Channel { context: self.context, - #[cfg(dual_funding)] + #[cfg(any(dual_funding, splicing))] dual_funding_channel_context: None, }; - let need_channel_ready = channel.check_get_channel_ready(0).is_some(); + let need_channel_ready = channel.check_get_channel_ready(0, logger).is_some(); channel.monitor_updating_paused(false, false, need_channel_ready, Vec::new(), Vec::new(), Vec::new()); Ok((channel, channel_monitor)) } @@ -7703,7 +7945,7 @@ impl InboundV1Channel where SP::Target: SignerProvider { F::Target: FeeEstimator, L::Target: Logger, { - let logger = WithContext::from(logger, Some(counterparty_node_id), Some(msg.common_fields.temporary_channel_id)); + let logger = WithContext::from(logger, Some(counterparty_node_id), Some(msg.common_fields.temporary_channel_id), None); // First check the channel type is known, failing before we do anything else if we don't // support this channel type. 
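The `can_accept_incoming_htlc` helper introduced in the hunks above decides whether an incoming `update_add_htlc` can be accepted: it classifies the HTLC as dust on either commitment transaction by comparing its value against a feerate-derived threshold, caps the resulting dust exposure, and (for inbound channels) checks a fee-spike buffer against the counterparty's remaining balance. Below is a minimal, self-contained sketch of the dust-exposure portion only; the struct, function name and weight constants (`DustParams`, `check_dust_exposure`, the 663/703 weights) are illustrative stand-ins and not the crate's API.

```rust
/// Illustrative stand-in for the dust-exposure portion of the check above; the
/// weight constants and parameter names are assumptions, not the crate's API.
const HTLC_TIMEOUT_TX_WEIGHT: u64 = 663;
const HTLC_SUCCESS_TX_WEIGHT: u64 = 703;

struct DustParams {
    feerate_per_kw: u64,               // sat per 1000 weight units used for dust accounting
    counterparty_dust_limit_sats: u64, // dust limit on the counterparty's commitment tx
    holder_dust_limit_sats: u64,       // dust limit on our own commitment tx
    max_dust_exposure_msat: u64,       // total dust value we are willing to burn to fees
}

/// Rejects an incoming HTLC if accepting it would push our dust exposure on
/// either commitment transaction past the configured cap.
fn check_dust_exposure(
    amount_msat: u64,
    on_counterparty_dust_msat: u64, // dust HTLC value already on their commitment tx
    on_holder_dust_msat: u64,       // dust HTLC value already on our commitment tx
    p: &DustParams,
) -> Result<(), &'static str> {
    // An HTLC is "dust" on a commitment tx if, after paying for its claim
    // transaction at the dust-buffer feerate, the remainder is below the dust limit.
    let timeout_fee_sats = p.feerate_per_kw * HTLC_TIMEOUT_TX_WEIGHT / 1000;
    let success_fee_sats = p.feerate_per_kw * HTLC_SUCCESS_TX_WEIGHT / 1000;

    // An HTLC offered to us is claimed by timeout on the counterparty's tx and by
    // success (preimage) on ours, hence the asymmetric thresholds.
    let counterparty_threshold_sats = timeout_fee_sats + p.counterparty_dust_limit_sats;
    let holder_threshold_sats = success_fee_sats + p.holder_dust_limit_sats;

    if amount_msat / 1000 < counterparty_threshold_sats
        && on_counterparty_dust_msat + amount_msat > p.max_dust_exposure_msat
    {
        return Err("Exceeded our dust exposure limit on counterparty commitment tx");
    }
    if amount_msat / 1000 < holder_threshold_sats
        && on_holder_dust_msat + amount_msat > p.max_dust_exposure_msat
    {
        return Err("Exceeded our dust exposure limit on holder commitment tx");
    }
    Ok(())
}
```

On anchor-output channels the claim fees drop out of the threshold (the diff passes `(0, 0)` there), and the full method additionally verifies that the counterparty can still cover the commitment fee plus a fee-spike buffer after subtracting the HTLC amount, the channel reserve and any anchor value from their balance.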
@@ -7758,7 +8000,7 @@ impl InboundV1Channel where SP::Target: SignerProvider { ) { panic!("Tried to send accept_channel after channel had moved forward"); } - if self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER { + if self.context.holder_commitment_point.transaction_number() != INITIAL_COMMITMENT_NUMBER { panic!("Tried to send an accept_channel for a channel that has already advanced"); } @@ -7771,7 +8013,7 @@ impl InboundV1Channel where SP::Target: SignerProvider { /// /// [`msgs::AcceptChannel`]: crate::ln::msgs::AcceptChannel fn generate_accept_channel_message(&self) -> msgs::AcceptChannel { - let first_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx); + let first_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.holder_commitment_point.transaction_number(), &self.context.secp_ctx); let keys = self.context.get_holder_pubkeys(); msgs::AcceptChannel { @@ -7813,8 +8055,8 @@ impl InboundV1Channel where SP::Target: SignerProvider { fn check_funding_created_signature(&mut self, sig: &Signature, logger: &L) -> Result where L::Target: Logger { let funding_script = self.context.get_funding_redeemscript(); - let keys = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number); - let initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &keys, true, false, logger).tx; + let keys = self.context.build_holder_transaction_keys(self.context.holder_commitment_point.transaction_number()); + let initial_commitment_tx = self.context.build_commitment_transaction(self.context.holder_commitment_point.transaction_number(), &keys, true, false, logger).tx; let trusted_tx = initial_commitment_tx.trust(); let initial_commitment_bitcoin_tx = trusted_tx.built_transaction(); let sighash = initial_commitment_bitcoin_tx.get_sighash_all(&funding_script, self.context.channel_value_satoshis); @@ -7848,7 +8090,7 @@ impl InboundV1Channel where SP::Target: SignerProvider { } if self.context.commitment_secrets.get_min_seen_secret() != (1 << 48) || self.context.cur_counterparty_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER || - self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER { + self.context.holder_commitment_point.transaction_number() != INITIAL_COMMITMENT_NUMBER { panic!("Should not have advanced channel commitment tx numbers prior to funding_created"); } @@ -7888,7 +8130,7 @@ impl InboundV1Channel where SP::Target: SignerProvider { self.context.channel_state = ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::new()); self.context.channel_id = ChannelId::v1_from_funding_outpoint(funding_txo); self.context.cur_counterparty_commitment_transaction_number -= 1; - self.context.cur_holder_commitment_transaction_number -= 1; + self.context.holder_commitment_point.advance(&self.context.holder_signer, &self.context.secp_ctx, logger); let (counterparty_initial_commitment_tx, funding_signed) = self.context.get_funding_signed_msg(logger); @@ -7919,10 +8161,10 @@ impl InboundV1Channel where SP::Target: SignerProvider { // `ChannelMonitor`. 
let mut channel = Channel { context: self.context, - #[cfg(dual_funding)] + #[cfg(any(dual_funding, splicing))] dual_funding_channel_context: None, }; - let need_channel_ready = channel.check_get_channel_ready(0).is_some(); + let need_channel_ready = channel.check_get_channel_ready(0, logger).is_some(); channel.monitor_updating_paused(false, false, need_channel_ready, Vec::new(), Vec::new(), Vec::new()); Ok((channel, funding_signed, channel_monitor)) @@ -7930,15 +8172,15 @@ impl InboundV1Channel where SP::Target: SignerProvider { } // A not-yet-funded outbound (from holder) channel using V2 channel establishment. -#[cfg(dual_funding)] +#[cfg(any(dual_funding, splicing))] pub(super) struct OutboundV2Channel where SP::Target: SignerProvider { pub context: ChannelContext, pub unfunded_context: UnfundedChannelContext, - #[cfg(dual_funding)] + #[cfg(any(dual_funding, splicing))] pub dual_funding_context: DualFundingChannelContext, } -#[cfg(dual_funding)] +#[cfg(any(dual_funding, splicing))] impl OutboundV2Channel where SP::Target: SignerProvider { pub fn new( fee_estimator: &LowerBoundedFeeEstimator, entropy_source: &ES, signer_provider: &SP, @@ -8009,15 +8251,15 @@ impl OutboundV2Channel where SP::Target: SignerProvider { debug_assert!(false, "Cannot generate an open_channel2 after we've moved forward"); } - if self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER { + if self.context.holder_commitment_point.transaction_number() != INITIAL_COMMITMENT_NUMBER { debug_assert!(false, "Tried to send an open_channel2 for a channel that has already advanced"); } let first_per_commitment_point = self.context.holder_signer.as_ref() - .get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, + .get_per_commitment_point(self.context.holder_commitment_point.transaction_number(), &self.context.secp_ctx); let second_per_commitment_point = self.context.holder_signer.as_ref() - .get_per_commitment_point(self.context.cur_holder_commitment_transaction_number - 1, + .get_per_commitment_point(self.context.holder_commitment_point.transaction_number() - 1, &self.context.secp_ctx); let keys = self.context.get_holder_pubkeys(); @@ -8054,14 +8296,14 @@ impl OutboundV2Channel where SP::Target: SignerProvider { } // A not-yet-funded inbound (from counterparty) channel using V2 channel establishment. -#[cfg(dual_funding)] +#[cfg(any(dual_funding, splicing))] pub(super) struct InboundV2Channel where SP::Target: SignerProvider { pub context: ChannelContext, pub unfunded_context: UnfundedChannelContext, pub dual_funding_context: DualFundingChannelContext, } -#[cfg(dual_funding)] +#[cfg(any(dual_funding, splicing))] impl InboundV2Channel where SP::Target: SignerProvider { /// Creates a new dual-funded channel from a remote side's request for one. /// Assumes chain_hash has already been checked and corresponds with what we expect! 
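Much of the mechanical churn in this patch comes from replacing direct reads of `cur_holder_commitment_transaction_number` with the new `HolderCommitmentPoint`, which pairs the commitment number with cached current and next per-commitment points so paths like `get_channel_ready()` can hand out a point without calling into the signer inline. The following is a rough sketch of how such a wrapper can advance; the `PerCommitmentSource` trait, the 33-byte point representation and the type name are simplifications, not the real `EcdsaChannelSigner` interface.

```rust
/// Simplified sketch of the holder-commitment-point bookkeeping; the signer
/// trait and point type here are stand-ins, not the crate's real interfaces.
trait PerCommitmentSource {
    fn per_commitment_point(&self, commitment_number: u64) -> [u8; 33];
}

const INITIAL_COMMITMENT_NUMBER: u64 = (1 << 48) - 1;

#[derive(Clone, Copy)]
enum HolderCommitmentPointSketch {
    /// The current commitment number plus the cached current and next points.
    Available { transaction_number: u64, current: [u8; 33], next: [u8; 33] },
}

impl HolderCommitmentPointSketch {
    fn new<S: PerCommitmentSource>(signer: &S) -> Self {
        let n = INITIAL_COMMITMENT_NUMBER;
        HolderCommitmentPointSketch::Available {
            transaction_number: n,
            current: signer.per_commitment_point(n),
            next: signer.per_commitment_point(n - 1),
        }
    }

    fn transaction_number(&self) -> u64 {
        match self {
            HolderCommitmentPointSketch::Available { transaction_number, .. } => *transaction_number,
        }
    }

    fn current_point(&self) -> [u8; 33] {
        match self {
            HolderCommitmentPointSketch::Available { current, .. } => *current,
        }
    }

    /// Advance one commitment: numbers count down, the cached `next` becomes
    /// `current`, and a fresh `next` is fetched from the signer.
    fn advance<S: PerCommitmentSource>(&mut self, signer: &S) {
        match *self {
            HolderCommitmentPointSketch::Available { transaction_number, next, .. } => {
                *self = HolderCommitmentPointSketch::Available {
                    transaction_number: transaction_number - 1,
                    current: next,
                    next: signer.per_commitment_point(transaction_number - 2),
                };
            }
        }
    }
}
```

In the diff itself `advance` also takes the secp context and a logger, and the funding paths call it exactly where the old code decremented `cur_holder_commitment_transaction_number` by hand.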
@@ -8151,7 +8393,7 @@ impl InboundV2Channel where SP::Target: SignerProvider { ) { debug_assert!(false, "Tried to send accept_channel2 after channel had moved forward"); } - if self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER { + if self.context.holder_commitment_point.transaction_number() != INITIAL_COMMITMENT_NUMBER { debug_assert!(false, "Tried to send an accept_channel2 for a channel that has already advanced"); } @@ -8165,9 +8407,9 @@ impl InboundV2Channel where SP::Target: SignerProvider { /// [`msgs::AcceptChannelV2`]: crate::ln::msgs::AcceptChannelV2 fn generate_accept_channel_v2_message(&self) -> msgs::AcceptChannelV2 { let first_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point( - self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx); + self.context.holder_commitment_point.transaction_number(), &self.context.secp_ctx); let second_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point( - self.context.cur_holder_commitment_transaction_number - 1, &self.context.secp_ctx); + self.context.holder_commitment_point.transaction_number() - 1, &self.context.secp_ctx); let keys = self.context.get_holder_pubkeys(); msgs::AcceptChannelV2 { @@ -8232,7 +8474,7 @@ fn get_initial_channel_type(config: &UserConfig, their_features: &InitFeatures) ret } -const SERIALIZATION_VERSION: u8 = 3; +const SERIALIZATION_VERSION: u8 = 4; const MIN_SERIALIZATION_VERSION: u8 = 3; impl_writeable_tlv_based_enum!(InboundHTLCRemovalReason,; @@ -8294,7 +8536,18 @@ impl Writeable for Channel where SP::Target: SignerProvider { // Note that we write out as if remove_uncommitted_htlcs_and_mark_paused had just been // called. - write_ver_prefix!(writer, MIN_SERIALIZATION_VERSION, MIN_SERIALIZATION_VERSION); + let version_to_write = if self.context.pending_inbound_htlcs.iter().any(|htlc| match htlc.state { + InboundHTLCState::AwaitingRemoteRevokeToAnnounce(ref htlc_resolution)| + InboundHTLCState::AwaitingAnnouncedRemoteRevoke(ref htlc_resolution) => { + matches!(htlc_resolution, InboundHTLCResolution::Pending { .. }) + }, + _ => false, + }) { + SERIALIZATION_VERSION + } else { + MIN_SERIALIZATION_VERSION + }; + write_ver_prefix!(writer, version_to_write, MIN_SERIALIZATION_VERSION); // `user_id` used to be a single u64 value. In order to remain backwards compatible with // versions prior to 0.0.113, the u128 is serialized as two separate u64 values. 
We write @@ -8329,7 +8582,7 @@ impl Writeable for Channel where SP::Target: SignerProvider { } self.context.destination_script.write(writer)?; - self.context.cur_holder_commitment_transaction_number.write(writer)?; + self.context.holder_commitment_point.transaction_number().write(writer)?; self.context.cur_counterparty_commitment_transaction_number.write(writer)?; self.context.value_to_self_msat.write(writer)?; @@ -8350,13 +8603,29 @@ impl Writeable for Channel where SP::Target: SignerProvider { htlc.payment_hash.write(writer)?; match &htlc.state { &InboundHTLCState::RemoteAnnounced(_) => unreachable!(), - &InboundHTLCState::AwaitingRemoteRevokeToAnnounce(ref htlc_state) => { + &InboundHTLCState::AwaitingRemoteRevokeToAnnounce(ref htlc_resolution) => { 1u8.write(writer)?; - htlc_state.write(writer)?; + if version_to_write <= 3 { + if let InboundHTLCResolution::Resolved { pending_htlc_status } = htlc_resolution { + pending_htlc_status.write(writer)?; + } else { + panic!(); + } + } else { + htlc_resolution.write(writer)?; + } }, - &InboundHTLCState::AwaitingAnnouncedRemoteRevoke(ref htlc_state) => { + &InboundHTLCState::AwaitingAnnouncedRemoteRevoke(ref htlc_resolution) => { 2u8.write(writer)?; - htlc_state.write(writer)?; + if version_to_write <= 3 { + if let InboundHTLCResolution::Resolved { pending_htlc_status } = htlc_resolution { + pending_htlc_status.write(writer)?; + } else { + panic!(); + } + } else { + htlc_resolution.write(writer)?; + } }, &InboundHTLCState::Committed => { 3u8.write(writer)?; @@ -8582,6 +8851,15 @@ impl Writeable for Channel where SP::Target: SignerProvider { let holder_max_accepted_htlcs = if self.context.holder_max_accepted_htlcs == DEFAULT_MAX_HTLCS { None } else { Some(self.context.holder_max_accepted_htlcs) }; + let mut monitor_pending_update_adds = None; + if !self.context.monitor_pending_update_adds.is_empty() { + monitor_pending_update_adds = Some(&self.context.monitor_pending_update_adds); + } + + // `current_point` will become optional when async signing is implemented. 
+ let cur_holder_commitment_point = Some(self.context.holder_commitment_point.current_point()); + let next_holder_commitment_point = self.context.holder_commitment_point.next_point(); + write_tlv_fields!(writer, { (0, self.context.announcement_sigs, option), // minimum_depth and counterparty_selected_channel_reserve_satoshis used to have a @@ -8599,6 +8877,7 @@ impl Writeable for Channel where SP::Target: SignerProvider { (7, self.context.shutdown_scriptpubkey, option), (8, self.context.blocked_monitor_updates, optional_vec), (9, self.context.target_closing_feerate_sats_per_kw, option), + (10, monitor_pending_update_adds, option), // Added in 0.0.122 (11, self.context.monitor_pending_finalized_fulfills, required_vec), (13, self.context.channel_creation_height, required), (15, preimages, required_vec), @@ -8617,7 +8896,9 @@ impl Writeable for Channel where SP::Target: SignerProvider { (39, pending_outbound_blinding_points, optional_vec), (41, holding_cell_blinding_points, optional_vec), (43, malformed_htlcs, optional_vec), // Added in 0.0.119 - (45, self.context.local_initiated_shutdown, option), // Added in 0.0.122 + (45, cur_holder_commitment_point, option), + (47, next_holder_commitment_point, option), + (49, self.context.local_initiated_shutdown, option), // Added in 0.0.122 }); Ok(()) @@ -8693,8 +8974,22 @@ impl<'a, 'b, 'c, ES: Deref, SP: Deref> ReadableArgs<(&'a ES, &'b SP, u32, &'c Ch cltv_expiry: Readable::read(reader)?, payment_hash: Readable::read(reader)?, state: match ::read(reader)? { - 1 => InboundHTLCState::AwaitingRemoteRevokeToAnnounce(Readable::read(reader)?), - 2 => InboundHTLCState::AwaitingAnnouncedRemoteRevoke(Readable::read(reader)?), + 1 => { + let resolution = if ver <= 3 { + InboundHTLCResolution::Resolved { pending_htlc_status: Readable::read(reader)? } + } else { + Readable::read(reader)? + }; + InboundHTLCState::AwaitingRemoteRevokeToAnnounce(resolution) + }, + 2 => { + let resolution = if ver <= 3 { + InboundHTLCResolution::Resolved { pending_htlc_status: Readable::read(reader)? } + } else { + Readable::read(reader)? 
+ }; + InboundHTLCState::AwaitingAnnouncedRemoteRevoke(resolution) + }, 3 => InboundHTLCState::Committed, 4 => InboundHTLCState::LocalRemoved(Readable::read(reader)?), _ => return Err(DecodeError::InvalidValue), @@ -8911,6 +9206,10 @@ impl<'a, 'b, 'c, ES: Deref, SP: Deref> ReadableArgs<(&'a ES, &'b SP, u32, &'c Ch let mut holding_cell_blinding_points_opt: Option>> = None; let mut malformed_htlcs: Option> = None; + let mut monitor_pending_update_adds: Option> = None; + + let mut cur_holder_commitment_point_opt: Option = None; + let mut next_holder_commitment_point_opt: Option = None; read_tlv_fields!(reader, { (0, announcement_sigs, option), @@ -8923,6 +9222,7 @@ impl<'a, 'b, 'c, ES: Deref, SP: Deref> ReadableArgs<(&'a ES, &'b SP, u32, &'c Ch (7, shutdown_scriptpubkey, option), (8, blocked_monitor_updates, optional_vec), (9, target_closing_feerate_sats_per_kw, option), + (10, monitor_pending_update_adds, option), // Added in 0.0.122 (11, monitor_pending_finalized_fulfills, optional_vec), (13, channel_creation_height, option), (15, preimages_opt, optional_vec), @@ -8941,7 +9241,9 @@ impl<'a, 'b, 'c, ES: Deref, SP: Deref> ReadableArgs<(&'a ES, &'b SP, u32, &'c Ch (39, pending_outbound_blinding_points_opt, optional_vec), (41, holding_cell_blinding_points_opt, optional_vec), (43, malformed_htlcs, optional_vec), // Added in 0.0.119 - (45, local_initiated_shutdown, option), + (45, cur_holder_commitment_point_opt, option), + (47, next_holder_commitment_point_opt, option), + (49, local_initiated_shutdown, option), }); let (channel_keys_id, holder_signer) = if let Some(channel_keys_id) = channel_keys_id { @@ -9052,6 +9354,24 @@ impl<'a, 'b, 'c, ES: Deref, SP: Deref> ReadableArgs<(&'a ES, &'b SP, u32, &'c Ch } } + // If we're restoring this channel for the first time after an upgrade, then we require that the + // signer be available so that we can immediately populate the current commitment point. Channel + // restoration will fail if this is not possible. 
+ let holder_commitment_point = match (cur_holder_commitment_point_opt, next_holder_commitment_point_opt) { + (Some(current), Some(next)) => HolderCommitmentPoint::Available { + transaction_number: cur_holder_commitment_transaction_number, current, next + }, + (Some(current), _) => HolderCommitmentPoint::Available { + transaction_number: cur_holder_commitment_transaction_number, current, + next: holder_signer.get_per_commitment_point(cur_holder_commitment_transaction_number - 1, &secp_ctx), + }, + (_, _) => HolderCommitmentPoint::Available { + transaction_number: cur_holder_commitment_transaction_number, + current: holder_signer.get_per_commitment_point(cur_holder_commitment_transaction_number, &secp_ctx), + next: holder_signer.get_per_commitment_point(cur_holder_commitment_transaction_number - 1, &secp_ctx), + }, + }; + Ok(Channel { context: ChannelContext { user_id, @@ -9077,7 +9397,7 @@ impl<'a, 'b, 'c, ES: Deref, SP: Deref> ReadableArgs<(&'a ES, &'b SP, u32, &'c Ch shutdown_scriptpubkey, destination_script, - cur_holder_commitment_transaction_number, + holder_commitment_point, cur_counterparty_commitment_transaction_number, value_to_self_msat, @@ -9094,6 +9414,7 @@ impl<'a, 'b, 'c, ES: Deref, SP: Deref> ReadableArgs<(&'a ES, &'b SP, u32, &'c Ch monitor_pending_forwards, monitor_pending_failures, monitor_pending_finalized_fulfills: monitor_pending_finalized_fulfills.unwrap(), + monitor_pending_update_adds: monitor_pending_update_adds.unwrap_or(Vec::new()), signer_pending_commitment_update: false, signer_pending_funding: false, @@ -9176,7 +9497,7 @@ impl<'a, 'b, 'c, ES: Deref, SP: Deref> ReadableArgs<(&'a ES, &'b SP, u32, &'c Ch blocked_monitor_updates: blocked_monitor_updates.unwrap(), }, - #[cfg(dual_funding)] + #[cfg(any(dual_funding, splicing))] dual_funding_channel_context: None, }) } @@ -9191,7 +9512,7 @@ mod tests { use bitcoin::blockdata::opcodes; use bitcoin::network::constants::Network; use crate::ln::onion_utils::INVALID_ONION_BLINDING; - use crate::ln::{PaymentHash, PaymentPreimage}; + use crate::ln::types::{PaymentHash, PaymentPreimage}; use crate::ln::channel_keys::{RevocationKey, RevocationBasepoint}; use crate::ln::channelmanager::{self, HTLCSource, PaymentId}; use crate::ln::channel::InitFeatures; @@ -10950,6 +11271,6 @@ mod tests { // Clear the ChannelState::WaitingForBatch only when called by ChannelManager. node_a_chan.set_batch_ready(); assert_eq!(node_a_chan.context.channel_state, ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::THEIR_CHANNEL_READY)); - assert!(node_a_chan.check_get_channel_ready(0).is_some()); + assert!(node_a_chan.check_get_channel_ready(0, &&logger).is_some()); } }
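The serialization changes above bump `SERIALIZATION_VERSION` to 4 while keeping `MIN_SERIALIZATION_VERSION` at 3: version 4 is only written when some inbound HTLC holds an `InboundHTLCResolution::Pending`, and readers treat anything at version 3 or below as the legacy bare `PendingHTLCStatus`, wrapping it back into `Resolved`. Here is a minimal sketch of that write-side version selection and read-side fallback, using hypothetical types and a toy byte encoding in place of the crate's ser macros.

```rust
use std::io::{self, Read, Write};

/// Hypothetical stand-in for the enum serialized above.
#[derive(Debug, PartialEq)]
enum Resolution {
    Resolved { status: u8 },
    Pending { htlc_id: u64 },
}

const SERIALIZATION_VERSION: u8 = 4;
const MIN_SERIALIZATION_VERSION: u8 = 3;

fn write_resolutions<W: Write>(w: &mut W, htlcs: &[Resolution]) -> io::Result<()> {
    // Stay on the minimum version unless some entry actually needs the new
    // encoding; older readers can then still deserialize what we wrote.
    let version = if htlcs.iter().any(|r| matches!(r, Resolution::Pending { .. })) {
        SERIALIZATION_VERSION
    } else {
        MIN_SERIALIZATION_VERSION
    };
    w.write_all(&[version, htlcs.len() as u8])?;
    for r in htlcs {
        match r {
            // Legacy encoding: a bare status byte, exactly what version-3 readers expect.
            Resolution::Resolved { status } if version <= 3 => w.write_all(&[*status])?,
            // Version-4 encoding: a variant tag followed by the payload.
            Resolution::Resolved { status } => w.write_all(&[0, *status])?,
            Resolution::Pending { htlc_id } => {
                w.write_all(&[1])?;
                w.write_all(&htlc_id.to_be_bytes())?;
            }
        }
    }
    Ok(())
}

fn read_resolutions<R: Read>(r: &mut R) -> io::Result<Vec<Resolution>> {
    let mut header = [0u8; 2];
    r.read_exact(&mut header)?;
    let (version, len) = (header[0], header[1] as usize);
    let mut out = Vec::with_capacity(len);
    for _ in 0..len {
        if version <= 3 {
            // Legacy stream: every entry is a bare `Resolved` status.
            let mut status = [0u8; 1];
            r.read_exact(&mut status)?;
            out.push(Resolution::Resolved { status: status[0] });
            continue;
        }
        let mut tag = [0u8; 1];
        r.read_exact(&mut tag)?;
        match tag[0] {
            0 => {
                let mut status = [0u8; 1];
                r.read_exact(&mut status)?;
                out.push(Resolution::Resolved { status: status[0] });
            }
            1 => {
                let mut id = [0u8; 8];
                r.read_exact(&mut id)?;
                out.push(Resolution::Pending { htlc_id: u64::from_be_bytes(id) });
            }
            _ => return Err(io::Error::new(io::ErrorKind::InvalidData, "unknown variant")),
        }
    }
    Ok(out)
}
```

A round trip through `write_resolutions` and `read_resolutions` returns the original entries whichever version was chosen. In the actual diff the version is picked once for the whole `Channel`, and the per-HTLC write branches panic if a `Pending` resolution would somehow need to be written at version 3, since that state is unrepresentable there.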
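On the read side, the two new odd TLVs (types 45 and 47) optionally carry the current and next holder commitment points; when either is absent, for example for a channel serialized by an older release, the match in the deserialization path falls back to deriving the missing point(s) from the signer, which is why the comment there requires the signer to be available during the first post-upgrade restore. A small sketch of that fallback follows, with a placeholder in place of `get_per_commitment_point` and the same simplified 33-byte point representation as the earlier sketch.

```rust
/// Placeholder for holder_signer.get_per_commitment_point(..): derives a point
/// for the given commitment number. Purely illustrative.
fn point_from_signer(commitment_number: u64) -> [u8; 33] {
    let mut p = [0u8; 33];
    p[0] = 0x02;
    p[1..9].copy_from_slice(&commitment_number.to_be_bytes());
    p
}

struct RestoredPoint {
    transaction_number: u64,
    current: [u8; 33],
    next: [u8; 33],
}

/// Rebuild the cached points from whatever was serialized, asking the signer
/// for anything that is missing (older on-disk formats wrote neither field).
fn restore_holder_point(
    transaction_number: u64,
    cur: Option<[u8; 33]>,
    next: Option<[u8; 33]>,
) -> RestoredPoint {
    match (cur, next) {
        // New format: both points were written, use them directly.
        (Some(current), Some(next)) => RestoredPoint { transaction_number, current, next },
        // Only the current point was written: fetch the next one from the signer.
        (Some(current), None) => RestoredPoint {
            transaction_number,
            current,
            next: point_from_signer(transaction_number - 1),
        },
        // Legacy channel: derive both points from the signer.
        _ => RestoredPoint {
            transaction_number,
            current: point_from_signer(transaction_number),
            next: point_from_signer(transaction_number - 1),
        },
    }
}
```

Since commitment numbers count down, the "next" point always sits at `transaction_number - 1`, matching the arithmetic used in the diff's restoration match.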