X-Git-Url: http://git.bitcoin.ninja/index.cgi?a=blobdiff_plain;f=lightning%2Fsrc%2Fln%2Fchannel.rs;h=716eba7cf509e2ab79618a0aad0fb2ede36a458b;hb=64e5dc4933a59d63243df81541c5abf063248361;hp=31b2dab38d1299191305380ab25d6b3c8fc21e55;hpb=0b38b3981d4f89fe3dc302c3156e79897da50081;p=rust-lightning diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index 31b2dab3..716eba7c 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -7,6 +7,7 @@ // You may not use this file except in accordance with one or both of these // licenses. +use bitcoin::amount::Amount; use bitcoin::blockdata::constants::ChainHash; use bitcoin::blockdata::script::{Script, ScriptBuf, Builder}; use bitcoin::blockdata::transaction::Transaction; @@ -24,12 +25,13 @@ use bitcoin::secp256k1::{PublicKey,SecretKey}; use bitcoin::secp256k1::{Secp256k1,ecdsa::Signature}; use bitcoin::secp256k1; -use crate::ln::{ChannelId, PaymentPreimage, PaymentHash}; +use crate::ln::types::{ChannelId, PaymentPreimage, PaymentHash}; use crate::ln::features::{ChannelTypeFeatures, InitFeatures}; use crate::ln::msgs; use crate::ln::msgs::DecodeError; use crate::ln::script::{self, ShutdownScript}; -use crate::ln::channelmanager::{self, CounterpartyForwardingInfo, PendingHTLCStatus, HTLCSource, SentHTLCId, HTLCFailureMsg, PendingHTLCInfo, RAACommitmentOrder, BREAKDOWN_TIMEOUT, MIN_CLTV_EXPIRY_DELTA, MAX_LOCAL_BREAKDOWN_TIMEOUT, ChannelShutdownState}; +use crate::ln::channel_state::{ChannelShutdownState, CounterpartyForwardingInfo, InboundHTLCDetails, InboundHTLCStateDetails, OutboundHTLCDetails, OutboundHTLCStateDetails}; +use crate::ln::channelmanager::{self, PendingHTLCStatus, HTLCSource, SentHTLCId, HTLCFailureMsg, PendingHTLCInfo, RAACommitmentOrder, BREAKDOWN_TIMEOUT, MIN_CLTV_EXPIRY_DELTA, MAX_LOCAL_BREAKDOWN_TIMEOUT}; use crate::ln::chan_utils::{CounterpartyCommitmentSecrets, TxCreationKeys, HTLCOutputInCommitment, htlc_success_tx_weight, htlc_timeout_tx_weight, make_funding_redeemscript, ChannelPublicKeys, CommitmentTransaction, HolderCommitmentTransaction, ChannelTransactionParameters, CounterpartyChannelTransactionParameters, MAX_HTLCS, get_commitment_transaction_number_obscure_factor, ClosingTransaction}; use crate::ln::chan_utils; use crate::ln::onion_utils::HTLCFailReason; @@ -37,7 +39,7 @@ use crate::chain::BestBlock; use crate::chain::chaininterface::{FeeEstimator, ConfirmationTarget, LowerBoundedFeeEstimator}; use crate::chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, ChannelMonitorUpdateStep, LATENCY_GRACE_PERIOD_BLOCKS, CLOSED_CHANNEL_UPDATE_ID}; use crate::chain::transaction::{OutPoint, TransactionData}; -use crate::sign::ecdsa::{EcdsaChannelSigner, WriteableEcdsaChannelSigner}; +use crate::sign::ecdsa::EcdsaChannelSigner; use crate::sign::{EntropySource, ChannelSigner, SignerProvider, NodeSigner, Recipient}; use crate::events::ClosureReason; use crate::routing::gossip::NodeId; @@ -50,7 +52,6 @@ use crate::util::scid_utils::scid_from_parts; use crate::io; use crate::prelude::*; use core::{cmp,mem,fmt}; -use core::convert::TryInto; use core::ops::Deref; #[cfg(any(test, fuzzing, debug_assertions))] use crate::sync::Mutex; @@ -186,45 +187,6 @@ enum InboundHTLCState { LocalRemoved(InboundHTLCRemovalReason), } -/// Exposes the state of pending inbound HTLCs. 
-/// -/// At a high level, an HTLC being forwarded from one Lightning node to another Lightning node goes -/// through the following states in the state machine: -/// - Announced for addition by the originating node through the update_add_htlc message. -/// - Added to the commitment transaction of the receiving node and originating node in turn -/// through the exchange of commitment_signed and revoke_and_ack messages. -/// - Announced for resolution (fulfillment or failure) by the receiving node through either one of -/// the update_fulfill_htlc, update_fail_htlc, and update_fail_malformed_htlc messages. -/// - Removed from the commitment transaction of the originating node and receiving node in turn -/// through the exchange of commitment_signed and revoke_and_ack messages. -/// -/// This can be used to inspect what next message an HTLC is waiting for to advance its state. -#[derive(Clone, Debug, PartialEq)] -pub enum InboundHTLCStateDetails { - /// We have added this HTLC in our commitment transaction by receiving commitment_signed and - /// returning revoke_and_ack. We are awaiting the appropriate revoke_and_ack's from the remote - /// before this HTLC is included on the remote commitment transaction. - AwaitingRemoteRevokeToAdd, - /// This HTLC has been included in the commitment_signed and revoke_and_ack messages on both sides - /// and is included in both commitment transactions. - /// - /// This HTLC is now safe to either forward or be claimed as a payment by us. The HTLC will - /// remain in this state until the forwarded upstream HTLC has been resolved and we resolve this - /// HTLC correspondingly, or until we claim it as a payment. If it is part of a multipart - /// payment, it will only be claimed together with other required parts. - Committed, - /// We have received the preimage for this HTLC and it is being removed by fulfilling it with - /// update_fulfill_htlc. This HTLC is still on both commitment transactions, but we are awaiting - /// the appropriate revoke_and_ack's from the remote before this HTLC is removed from the remote - /// commitment transaction after update_fulfill_htlc. - AwaitingRemoteRevokeToRemoveFulfill, - /// The HTLC is being removed by failing it with update_fail_htlc or update_fail_malformed_htlc. - /// This HTLC is still on both commitment transactions, but we are awaiting the appropriate - /// revoke_and_ack's from the remote before this HTLC is removed from the remote commitment - /// transaction. - AwaitingRemoteRevokeToRemoveFail, -} - impl From<&InboundHTLCState> for Option { fn from(state: &InboundHTLCState) -> Option { match state { @@ -245,13 +207,6 @@ impl From<&InboundHTLCState> for Option { } } -impl_writeable_tlv_based_enum_upgradable!(InboundHTLCStateDetails, - (0, AwaitingRemoteRevokeToAdd) => {}, - (2, Committed) => {}, - (4, AwaitingRemoteRevokeToRemoveFulfill) => {}, - (6, AwaitingRemoteRevokeToRemoveFail) => {}; -); - struct InboundHTLCOutput { htlc_id: u64, amount_msat: u64, @@ -260,53 +215,6 @@ struct InboundHTLCOutput { state: InboundHTLCState, } -/// Exposes details around pending inbound HTLCs. -#[derive(Clone, Debug, PartialEq)] -pub struct InboundHTLCDetails { - /// The HTLC ID. - /// The IDs are incremented by 1 starting from 0 for each offered HTLC. - /// They are unique per channel and inbound/outbound direction, unless an HTLC was only announced - /// and not part of any commitment transaction. - pub htlc_id: u64, - /// The amount in msat. - pub amount_msat: u64, - /// The block height at which this HTLC expires. 
- pub cltv_expiry: u32, - /// The payment hash. - pub payment_hash: PaymentHash, - /// The state of the HTLC in the state machine. - /// - /// Determines on which commitment transactions the HTLC is included and what message the HTLC is - /// waiting for to advance to the next state. - /// - /// See [`InboundHTLCStateDetails`] for information on the specific states. - /// - /// LDK will always fill this field in, but when downgrading to prior versions of LDK, new - /// states may result in `None` here. - pub state: Option, - /// Whether the HTLC has an output below the local dust limit. If so, the output will be trimmed - /// from the local commitment transaction and added to the commitment transaction fee. - /// For non-anchor channels, this takes into account the cost of the second-stage HTLC - /// transactions as well. - /// - /// When the local commitment transaction is broadcasted as part of a unilateral closure, - /// the value of this HTLC will therefore not be claimable but instead burned as a transaction - /// fee. - /// - /// Note that dust limits are specific to each party. An HTLC can be dust for the local - /// commitment transaction but not for the counterparty's commitment transaction and vice versa. - pub is_dust: bool, -} - -impl_writeable_tlv_based!(InboundHTLCDetails, { - (0, htlc_id, required), - (2, amount_msat, required), - (4, cltv_expiry, required), - (6, payment_hash, required), - (7, state, upgradable_option), - (8, is_dust, required), -}); - #[cfg_attr(test, derive(Clone, Debug, PartialEq))] enum OutboundHTLCState { /// Added by us and included in a commitment_signed (if we were AwaitingRemoteRevoke when we @@ -340,42 +248,6 @@ enum OutboundHTLCState { AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome), } -/// Exposes the state of pending outbound HTLCs. -/// -/// At a high level, an HTLC being forwarded from one Lightning node to another Lightning node goes -/// through the following states in the state machine: -/// - Announced for addition by the originating node through the update_add_htlc message. -/// - Added to the commitment transaction of the receiving node and originating node in turn -/// through the exchange of commitment_signed and revoke_and_ack messages. -/// - Announced for resolution (fulfillment or failure) by the receiving node through either one of -/// the update_fulfill_htlc, update_fail_htlc, and update_fail_malformed_htlc messages. -/// - Removed from the commitment transaction of the originating node and receiving node in turn -/// through the exchange of commitment_signed and revoke_and_ack messages. -/// -/// This can be used to inspect what next message an HTLC is waiting for to advance its state. -#[derive(Clone, Debug, PartialEq)] -pub enum OutboundHTLCStateDetails { - /// We are awaiting the appropriate revoke_and_ack's from the remote before the HTLC is added - /// on the remote's commitment transaction after update_add_htlc. - AwaitingRemoteRevokeToAdd, - /// The HTLC has been added to the remote's commitment transaction by sending commitment_signed - /// and receiving revoke_and_ack in return. - /// - /// The HTLC will remain in this state until the remote node resolves the HTLC, or until we - /// unilaterally close the channel due to a timeout with an uncooperative remote node. - Committed, - /// The HTLC has been fulfilled successfully by the remote with a preimage in update_fulfill_htlc, - /// and we removed the HTLC from our commitment transaction by receiving commitment_signed and - /// returning revoke_and_ack. 
We are awaiting the appropriate revoke_and_ack's from the remote - /// for the removal from its commitment transaction. - AwaitingRemoteRevokeToRemoveSuccess, - /// The HTLC has been failed by the remote with update_fail_htlc or update_fail_malformed_htlc, - /// and we removed the HTLC from our commitment transaction by receiving commitment_signed and - /// returning revoke_and_ack. We are awaiting the appropriate revoke_and_ack's from the remote - /// for the removal from its commitment transaction. - AwaitingRemoteRevokeToRemoveFailure, -} - impl From<&OutboundHTLCState> for OutboundHTLCStateDetails { fn from(state: &OutboundHTLCState) -> OutboundHTLCStateDetails { match state { @@ -399,13 +271,6 @@ impl From<&OutboundHTLCState> for OutboundHTLCStateDetails { } } -impl_writeable_tlv_based_enum_upgradable!(OutboundHTLCStateDetails, - (0, AwaitingRemoteRevokeToAdd) => {}, - (2, Committed) => {}, - (4, AwaitingRemoteRevokeToRemoveSuccess) => {}, - (6, AwaitingRemoteRevokeToRemoveFailure) => {}; -); - #[derive(Clone)] #[cfg_attr(test, derive(Debug, PartialEq))] enum OutboundHTLCOutcome { @@ -444,58 +309,6 @@ struct OutboundHTLCOutput { skimmed_fee_msat: Option, } -/// Exposes details around pending outbound HTLCs. -#[derive(Clone, Debug, PartialEq)] -pub struct OutboundHTLCDetails { - /// The HTLC ID. - /// The IDs are incremented by 1 starting from 0 for each offered HTLC. - /// They are unique per channel and inbound/outbound direction, unless an HTLC was only announced - /// and not part of any commitment transaction. - /// - /// Not present when we are awaiting a remote revocation and the HTLC is not added yet. - pub htlc_id: Option, - /// The amount in msat. - pub amount_msat: u64, - /// The block height at which this HTLC expires. - pub cltv_expiry: u32, - /// The payment hash. - pub payment_hash: PaymentHash, - /// The state of the HTLC in the state machine. - /// - /// Determines on which commitment transactions the HTLC is included and what message the HTLC is - /// waiting for to advance to the next state. - /// - /// See [`OutboundHTLCStateDetails`] for information on the specific states. - /// - /// LDK will always fill this field in, but when downgrading to prior versions of LDK, new - /// states may result in `None` here. - pub state: Option, - /// The extra fee being skimmed off the top of this HTLC. - pub skimmed_fee_msat: Option, - /// Whether the HTLC has an output below the local dust limit. If so, the output will be trimmed - /// from the local commitment transaction and added to the commitment transaction fee. - /// For non-anchor channels, this takes into account the cost of the second-stage HTLC - /// transactions as well. - /// - /// When the local commitment transaction is broadcasted as part of a unilateral closure, - /// the value of this HTLC will therefore not be claimable but instead burned as a transaction - /// fee. - /// - /// Note that dust limits are specific to each party. An HTLC can be dust for the local - /// commitment transaction but not for the counterparty's commitment transaction and vice versa. 
- pub is_dust: bool, -} - -impl_writeable_tlv_based!(OutboundHTLCDetails, { - (0, htlc_id, required), - (2, amount_msat, required), - (4, cltv_expiry, required), - (6, payment_hash, required), - (7, state, upgradable_option), - (8, skimmed_fee_msat, required), - (10, is_dust, required), -}); - /// See AwaitingRemoteRevoke ChannelState for more info #[cfg_attr(test, derive(Clone, Debug, PartialEq))] enum HTLCUpdateAwaitingACK { @@ -924,25 +737,28 @@ pub(super) struct WithChannelContext<'a, L: Deref> where L::Target: Logger { pub logger: &'a L, pub peer_id: Option, pub channel_id: Option, + pub payment_hash: Option, } impl<'a, L: Deref> Logger for WithChannelContext<'a, L> where L::Target: Logger { fn log(&self, mut record: Record) { record.peer_id = self.peer_id; record.channel_id = self.channel_id; + record.payment_hash = self.payment_hash; self.logger.log(record) } } impl<'a, 'b, L: Deref> WithChannelContext<'a, L> where L::Target: Logger { - pub(super) fn from(logger: &'a L, context: &'b ChannelContext) -> Self + pub(super) fn from(logger: &'a L, context: &'b ChannelContext, payment_hash: Option) -> Self where S::Target: SignerProvider { WithChannelContext { logger, peer_id: Some(context.counterparty_node_id), channel_id: Some(context.channel_id), + payment_hash } } } @@ -999,14 +815,16 @@ enum HTLCInitiator { RemoteOffered, } -/// An enum gathering stats on pending HTLCs, either inbound or outbound side. +/// Current counts of various HTLCs, useful for calculating current balances available exactly. struct HTLCStats { - pending_htlcs: u32, - pending_htlcs_value_msat: u64, + pending_inbound_htlcs: usize, + pending_outbound_htlcs: usize, + pending_inbound_htlcs_value_msat: u64, + pending_outbound_htlcs_value_msat: u64, on_counterparty_tx_dust_exposure_msat: u64, on_holder_tx_dust_exposure_msat: u64, - holding_cell_msat: u64, - on_holder_tx_holding_cell_htlcs_count: u32, // dust HTLCs *non*-included + outbound_holding_cell_msat: u64, + on_holder_tx_outbound_holding_cell_htlcs_count: u32, // dust HTLCs *non*-included } /// An enum gathering stats on commitment transaction, either local or remote. 
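[Note on the hunk above: it collapses the old per-direction stats into a single HTLCStats covering inbound HTLCs, outbound HTLCs, and holding-cell adds at once; the struct is populated by the get_pending_htlc_stats() rewrite further down in this diff. The standalone sketch below is only an illustration of the accounting rule that rewrite applies — the struct, helper, and dust limits here are simplified stand-ins, not LDK's actual types: an HTLC whose value in satoshis is below the effective dust limit for a given commitment transaction counts toward dust exposure there; otherwise it counts as a non-dust HTLC.]

// Simplified stand-ins for the fields tracked in the hunk above (not LDK's API).
struct SimpleHtlcStats {
    on_counterparty_tx_dust_exposure_msat: u64,
    on_holder_tx_dust_exposure_msat: u64,
    nondust_htlcs_on_counterparty_tx: usize,
}

// Accumulate dust exposure for a list of HTLC amounts (in msat): below the
// relevant dust limit, the HTLC's full value is dust exposure on that
// commitment transaction; otherwise it is counted as a non-dust HTLC.
fn accumulate_stats(
    htlc_amounts_msat: &[u64],
    counterparty_dust_limit_sat: u64,
    holder_dust_limit_sat: u64,
) -> SimpleHtlcStats {
    let mut stats = SimpleHtlcStats {
        on_counterparty_tx_dust_exposure_msat: 0,
        on_holder_tx_dust_exposure_msat: 0,
        nondust_htlcs_on_counterparty_tx: 0,
    };
    for &amount_msat in htlc_amounts_msat {
        if amount_msat / 1000 < counterparty_dust_limit_sat {
            stats.on_counterparty_tx_dust_exposure_msat += amount_msat;
        } else {
            stats.nondust_htlcs_on_counterparty_tx += 1;
        }
        if amount_msat / 1000 < holder_dust_limit_sat {
            stats.on_holder_tx_dust_exposure_msat += amount_msat;
        }
    }
    stats
}

[In the real code the effective dust limits additionally include the second-stage HTLC transaction fee for non-anchor channels, and non-dust HTLCs contribute extra counterparty-transaction dust exposure whenever the commitment feerate exceeds the dust-exposure-limiting feerate, as shown later in this diff.]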
@@ -1190,9 +1008,9 @@ impl_writeable_tlv_based!(PendingChannelMonitorUpdate, { pub(super) enum ChannelPhase where SP::Target: SignerProvider { UnfundedOutboundV1(OutboundV1Channel), UnfundedInboundV1(InboundV1Channel), - #[cfg(dual_funding)] + #[cfg(any(dual_funding, splicing))] UnfundedOutboundV2(OutboundV2Channel), - #[cfg(dual_funding)] + #[cfg(any(dual_funding, splicing))] UnfundedInboundV2(InboundV2Channel), Funded(Channel), } @@ -1206,9 +1024,9 @@ impl<'a, SP: Deref> ChannelPhase where ChannelPhase::Funded(chan) => &chan.context, ChannelPhase::UnfundedOutboundV1(chan) => &chan.context, ChannelPhase::UnfundedInboundV1(chan) => &chan.context, - #[cfg(dual_funding)] + #[cfg(any(dual_funding, splicing))] ChannelPhase::UnfundedOutboundV2(chan) => &chan.context, - #[cfg(dual_funding)] + #[cfg(any(dual_funding, splicing))] ChannelPhase::UnfundedInboundV2(chan) => &chan.context, } } @@ -1218,9 +1036,9 @@ impl<'a, SP: Deref> ChannelPhase where ChannelPhase::Funded(ref mut chan) => &mut chan.context, ChannelPhase::UnfundedOutboundV1(ref mut chan) => &mut chan.context, ChannelPhase::UnfundedInboundV1(ref mut chan) => &mut chan.context, - #[cfg(dual_funding)] + #[cfg(any(dual_funding, splicing))] ChannelPhase::UnfundedOutboundV2(ref mut chan) => &mut chan.context, - #[cfg(dual_funding)] + #[cfg(any(dual_funding, splicing))] ChannelPhase::UnfundedInboundV2(ref mut chan) => &mut chan.context, } } @@ -1399,7 +1217,7 @@ pub(super) struct ChannelContext where SP::Target: SignerProvider { /// Either the height at which this channel was created or the height at which it was last /// serialized if it was serialized by versions prior to 0.0.103. /// We use this to close if funding is never broadcasted. - channel_creation_height: u32, + pub(super) channel_creation_height: u32, counterparty_dust_limit_satoshis: u64, @@ -1570,7 +1388,7 @@ impl ChannelContext where SP::Target: SignerProvider { L::Target: Logger, SP::Target: SignerProvider, { - let logger = WithContext::from(logger, Some(counterparty_node_id), Some(open_channel_fields.temporary_channel_id)); + let logger = WithContext::from(logger, Some(counterparty_node_id), Some(open_channel_fields.temporary_channel_id), None); let announced_channel = if (open_channel_fields.channel_flags & 1) == 1 { true } else { false }; let channel_value_satoshis = our_funding_satoshis.saturating_add(open_channel_fields.funding_satoshis); @@ -2338,15 +2156,16 @@ impl ChannelContext where SP::Target: SignerProvider { cmp::max(self.config.options.cltv_expiry_delta, MIN_CLTV_EXPIRY_DELTA) } - pub fn get_max_dust_htlc_exposure_msat(&self, - fee_estimator: &LowerBoundedFeeEstimator) -> u64 - where F::Target: FeeEstimator - { + fn get_dust_exposure_limiting_feerate(&self, + fee_estimator: &LowerBoundedFeeEstimator, + ) -> u32 where F::Target: FeeEstimator { + fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::OnChainSweep) + } + + pub fn get_max_dust_htlc_exposure_msat(&self, limiting_feerate_sat_per_kw: u32) -> u64 { match self.config.options.max_dust_htlc_exposure { MaxDustHTLCExposure::FeeRateMultiplier(multiplier) => { - let feerate_per_kw = fee_estimator.bounded_sat_per_1000_weight( - ConfirmationTarget::OnChainSweep) as u64; - feerate_per_kw.saturating_mul(multiplier) + (limiting_feerate_sat_per_kw as u64).saturating_mul(multiplier) }, MaxDustHTLCExposure::FixedLimitMsat(limit) => limit, } @@ -2731,7 +2550,7 @@ impl ChannelContext where SP::Target: SignerProvider { feerate_per_kw = cmp::max(feerate_per_kw, feerate); } let feerate_plus_quarter = 
feerate_per_kw.checked_mul(1250).map(|v| v / 1000); - cmp::max(2530, feerate_plus_quarter.unwrap_or(u32::max_value())) + cmp::max(feerate_per_kw + 2530, feerate_plus_quarter.unwrap_or(u32::max_value())) } /// Get forwarding information for the counterparty. @@ -2739,86 +2558,111 @@ impl ChannelContext where SP::Target: SignerProvider { self.counterparty_forwarding_info.clone() } - /// Returns a HTLCStats about inbound pending htlcs - fn get_inbound_pending_htlc_stats(&self, outbound_feerate_update: Option) -> HTLCStats { + /// Returns a HTLCStats about pending htlcs + fn get_pending_htlc_stats(&self, outbound_feerate_update: Option, dust_exposure_limiting_feerate: u32) -> HTLCStats { let context = self; - let mut stats = HTLCStats { - pending_htlcs: context.pending_inbound_htlcs.len() as u32, - pending_htlcs_value_msat: 0, - on_counterparty_tx_dust_exposure_msat: 0, - on_holder_tx_dust_exposure_msat: 0, - holding_cell_msat: 0, - on_holder_tx_holding_cell_htlcs_count: 0, - }; + let uses_0_htlc_fee_anchors = self.get_channel_type().supports_anchors_zero_fee_htlc_tx(); - let (htlc_timeout_dust_limit, htlc_success_dust_limit) = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() { + let dust_buffer_feerate = context.get_dust_buffer_feerate(outbound_feerate_update); + let (htlc_timeout_dust_limit, htlc_success_dust_limit) = if uses_0_htlc_fee_anchors { (0, 0) } else { - let dust_buffer_feerate = context.get_dust_buffer_feerate(outbound_feerate_update) as u64; - (dust_buffer_feerate * htlc_timeout_tx_weight(context.get_channel_type()) / 1000, - dust_buffer_feerate * htlc_success_tx_weight(context.get_channel_type()) / 1000) + (dust_buffer_feerate as u64 * htlc_timeout_tx_weight(context.get_channel_type()) / 1000, + dust_buffer_feerate as u64 * htlc_success_tx_weight(context.get_channel_type()) / 1000) }; - let counterparty_dust_limit_timeout_sat = htlc_timeout_dust_limit + context.counterparty_dust_limit_satoshis; - let holder_dust_limit_success_sat = htlc_success_dust_limit + context.holder_dust_limit_satoshis; - for ref htlc in context.pending_inbound_htlcs.iter() { - stats.pending_htlcs_value_msat += htlc.amount_msat; - if htlc.amount_msat / 1000 < counterparty_dust_limit_timeout_sat { - stats.on_counterparty_tx_dust_exposure_msat += htlc.amount_msat; - } - if htlc.amount_msat / 1000 < holder_dust_limit_success_sat { - stats.on_holder_tx_dust_exposure_msat += htlc.amount_msat; - } - } - stats - } - /// Returns a HTLCStats about pending outbound htlcs, *including* pending adds in our holding cell. 
- fn get_outbound_pending_htlc_stats(&self, outbound_feerate_update: Option) -> HTLCStats { - let context = self; - let mut stats = HTLCStats { - pending_htlcs: context.pending_outbound_htlcs.len() as u32, - pending_htlcs_value_msat: 0, - on_counterparty_tx_dust_exposure_msat: 0, - on_holder_tx_dust_exposure_msat: 0, - holding_cell_msat: 0, - on_holder_tx_holding_cell_htlcs_count: 0, - }; + let mut on_holder_tx_dust_exposure_msat = 0; + let mut on_counterparty_tx_dust_exposure_msat = 0; - let (htlc_timeout_dust_limit, htlc_success_dust_limit) = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() { - (0, 0) - } else { - let dust_buffer_feerate = context.get_dust_buffer_feerate(outbound_feerate_update) as u64; - (dust_buffer_feerate * htlc_timeout_tx_weight(context.get_channel_type()) / 1000, - dust_buffer_feerate * htlc_success_tx_weight(context.get_channel_type()) / 1000) - }; - let counterparty_dust_limit_success_sat = htlc_success_dust_limit + context.counterparty_dust_limit_satoshis; - let holder_dust_limit_timeout_sat = htlc_timeout_dust_limit + context.holder_dust_limit_satoshis; - for ref htlc in context.pending_outbound_htlcs.iter() { - stats.pending_htlcs_value_msat += htlc.amount_msat; - if htlc.amount_msat / 1000 < counterparty_dust_limit_success_sat { - stats.on_counterparty_tx_dust_exposure_msat += htlc.amount_msat; - } - if htlc.amount_msat / 1000 < holder_dust_limit_timeout_sat { - stats.on_holder_tx_dust_exposure_msat += htlc.amount_msat; + let mut on_counterparty_tx_offered_nondust_htlcs = 0; + let mut on_counterparty_tx_accepted_nondust_htlcs = 0; + + let mut pending_inbound_htlcs_value_msat = 0; + + { + let counterparty_dust_limit_timeout_sat = htlc_timeout_dust_limit + context.counterparty_dust_limit_satoshis; + let holder_dust_limit_success_sat = htlc_success_dust_limit + context.holder_dust_limit_satoshis; + for ref htlc in context.pending_inbound_htlcs.iter() { + pending_inbound_htlcs_value_msat += htlc.amount_msat; + if htlc.amount_msat / 1000 < counterparty_dust_limit_timeout_sat { + on_counterparty_tx_dust_exposure_msat += htlc.amount_msat; + } else { + on_counterparty_tx_offered_nondust_htlcs += 1; + } + if htlc.amount_msat / 1000 < holder_dust_limit_success_sat { + on_holder_tx_dust_exposure_msat += htlc.amount_msat; + } } } - for update in context.holding_cell_htlc_updates.iter() { - if let &HTLCUpdateAwaitingACK::AddHTLC { ref amount_msat, .. 
} = update { - stats.pending_htlcs += 1; - stats.pending_htlcs_value_msat += amount_msat; - stats.holding_cell_msat += amount_msat; - if *amount_msat / 1000 < counterparty_dust_limit_success_sat { - stats.on_counterparty_tx_dust_exposure_msat += amount_msat; - } - if *amount_msat / 1000 < holder_dust_limit_timeout_sat { - stats.on_holder_tx_dust_exposure_msat += amount_msat; + let mut pending_outbound_htlcs_value_msat = 0; + let mut outbound_holding_cell_msat = 0; + let mut on_holder_tx_outbound_holding_cell_htlcs_count = 0; + let mut pending_outbound_htlcs = self.pending_outbound_htlcs.len(); + { + let counterparty_dust_limit_success_sat = htlc_success_dust_limit + context.counterparty_dust_limit_satoshis; + let holder_dust_limit_timeout_sat = htlc_timeout_dust_limit + context.holder_dust_limit_satoshis; + for ref htlc in context.pending_outbound_htlcs.iter() { + pending_outbound_htlcs_value_msat += htlc.amount_msat; + if htlc.amount_msat / 1000 < counterparty_dust_limit_success_sat { + on_counterparty_tx_dust_exposure_msat += htlc.amount_msat; } else { - stats.on_holder_tx_holding_cell_htlcs_count += 1; + on_counterparty_tx_accepted_nondust_htlcs += 1; + } + if htlc.amount_msat / 1000 < holder_dust_limit_timeout_sat { + on_holder_tx_dust_exposure_msat += htlc.amount_msat; + } + } + + for update in context.holding_cell_htlc_updates.iter() { + if let &HTLCUpdateAwaitingACK::AddHTLC { ref amount_msat, .. } = update { + pending_outbound_htlcs += 1; + pending_outbound_htlcs_value_msat += amount_msat; + outbound_holding_cell_msat += amount_msat; + if *amount_msat / 1000 < counterparty_dust_limit_success_sat { + on_counterparty_tx_dust_exposure_msat += amount_msat; + } else { + on_counterparty_tx_accepted_nondust_htlcs += 1; + } + if *amount_msat / 1000 < holder_dust_limit_timeout_sat { + on_holder_tx_dust_exposure_msat += amount_msat; + } else { + on_holder_tx_outbound_holding_cell_htlcs_count += 1; + } } } } - stats + + // Include any mining "excess" fees in the dust calculation + let excess_feerate_opt = outbound_feerate_update + .or(self.pending_update_fee.map(|(fee, _)| fee)) + .unwrap_or(self.feerate_per_kw) + .checked_sub(dust_exposure_limiting_feerate); + if let Some(excess_feerate) = excess_feerate_opt { + let on_counterparty_tx_nondust_htlcs = + on_counterparty_tx_accepted_nondust_htlcs + on_counterparty_tx_offered_nondust_htlcs; + on_counterparty_tx_dust_exposure_msat += + commit_tx_fee_msat(excess_feerate, on_counterparty_tx_nondust_htlcs, &self.channel_type); + if !self.channel_type.supports_anchors_zero_fee_htlc_tx() { + on_counterparty_tx_dust_exposure_msat += + on_counterparty_tx_accepted_nondust_htlcs as u64 * htlc_success_tx_weight(&self.channel_type) + * excess_feerate as u64 / 1000; + on_counterparty_tx_dust_exposure_msat += + on_counterparty_tx_offered_nondust_htlcs as u64 * htlc_timeout_tx_weight(&self.channel_type) + * excess_feerate as u64 / 1000; + } + } + + HTLCStats { + pending_inbound_htlcs: self.pending_inbound_htlcs.len(), + pending_outbound_htlcs, + pending_inbound_htlcs_value_msat, + pending_outbound_htlcs_value_msat, + on_counterparty_tx_dust_exposure_msat, + on_holder_tx_dust_exposure_msat, + outbound_holding_cell_msat, + on_holder_tx_outbound_holding_cell_htlcs_count, + } } /// Returns information on all pending inbound HTLCs. @@ -2923,9 +2767,11 @@ impl ChannelContext where SP::Target: SignerProvider { where F::Target: FeeEstimator { let context = &self; - // Note that we have to handle overflow due to the above case. 
- let inbound_stats = context.get_inbound_pending_htlc_stats(None); - let outbound_stats = context.get_outbound_pending_htlc_stats(None); + // Note that we have to handle overflow due to the case mentioned in the docs in general + // here. + + let dust_exposure_limiting_feerate = self.get_dust_exposure_limiting_feerate(&fee_estimator); + let htlc_stats = context.get_pending_htlc_stats(None, dust_exposure_limiting_feerate); let mut balance_msat = context.value_to_self_msat; for ref htlc in context.pending_inbound_htlcs.iter() { @@ -2933,10 +2779,10 @@ impl ChannelContext where SP::Target: SignerProvider { balance_msat += htlc.amount_msat; } } - balance_msat -= outbound_stats.pending_htlcs_value_msat; + balance_msat -= htlc_stats.pending_outbound_htlcs_value_msat; let outbound_capacity_msat = context.value_to_self_msat - .saturating_sub(outbound_stats.pending_htlcs_value_msat) + .saturating_sub(htlc_stats.pending_outbound_htlcs_value_msat) .saturating_sub( context.counterparty_selected_channel_reserve_satoshis.unwrap_or(0) * 1000); @@ -2996,7 +2842,7 @@ impl ChannelContext where SP::Target: SignerProvider { let holder_selected_chan_reserve_msat = context.holder_selected_channel_reserve_satoshis * 1000; let remote_balance_msat = (context.channel_value_satoshis * 1000 - context.value_to_self_msat) - .saturating_sub(inbound_stats.pending_htlcs_value_msat); + .saturating_sub(htlc_stats.pending_inbound_htlcs_value_msat); if remote_balance_msat < max_reserved_commit_tx_fee_msat + holder_selected_chan_reserve_msat + anchor_outputs_value_msat { // If another HTLC's fee would reduce the remote's balance below the reserve limit @@ -3013,7 +2859,7 @@ impl ChannelContext where SP::Target: SignerProvider { // send above the dust limit (as the router can always overpay to meet the dust limit). 
let mut remaining_msat_below_dust_exposure_limit = None; let mut dust_exposure_dust_limit_msat = 0; - let max_dust_htlc_exposure_msat = context.get_max_dust_htlc_exposure_msat(fee_estimator); + let max_dust_htlc_exposure_msat = context.get_max_dust_htlc_exposure_msat(dust_exposure_limiting_feerate); let (htlc_success_dust_limit, htlc_timeout_dust_limit) = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() { (context.counterparty_dust_limit_satoshis, context.holder_dust_limit_satoshis) @@ -3022,18 +2868,32 @@ impl ChannelContext where SP::Target: SignerProvider { (context.counterparty_dust_limit_satoshis + dust_buffer_feerate * htlc_success_tx_weight(context.get_channel_type()) / 1000, context.holder_dust_limit_satoshis + dust_buffer_feerate * htlc_timeout_tx_weight(context.get_channel_type()) / 1000) }; - let on_counterparty_dust_htlc_exposure_msat = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat; - if on_counterparty_dust_htlc_exposure_msat as i64 + htlc_success_dust_limit as i64 * 1000 - 1 > max_dust_htlc_exposure_msat.try_into().unwrap_or(i64::max_value()) { + + let excess_feerate_opt = self.feerate_per_kw.checked_sub(dust_exposure_limiting_feerate); + if let Some(excess_feerate) = excess_feerate_opt { + let htlc_dust_exposure_msat = + per_outbound_htlc_counterparty_commit_tx_fee_msat(excess_feerate, &context.channel_type); + let nondust_htlc_counterparty_tx_dust_exposure = + htlc_stats.on_counterparty_tx_dust_exposure_msat.saturating_add(htlc_dust_exposure_msat); + if nondust_htlc_counterparty_tx_dust_exposure > max_dust_htlc_exposure_msat { + // If adding an extra HTLC would put us over the dust limit in total fees, we cannot + // send any non-dust HTLCs. + available_capacity_msat = cmp::min(available_capacity_msat, htlc_success_dust_limit * 1000); + } + } + + if htlc_stats.on_counterparty_tx_dust_exposure_msat.saturating_add(htlc_success_dust_limit * 1000) > max_dust_htlc_exposure_msat.saturating_add(1) { + // Note that we don't use the `counterparty_tx_dust_exposure` (with + // `htlc_dust_exposure_msat`) here as it only applies to non-dust HTLCs. 
remaining_msat_below_dust_exposure_limit = - Some(max_dust_htlc_exposure_msat.saturating_sub(on_counterparty_dust_htlc_exposure_msat)); + Some(max_dust_htlc_exposure_msat.saturating_sub(htlc_stats.on_counterparty_tx_dust_exposure_msat)); dust_exposure_dust_limit_msat = cmp::max(dust_exposure_dust_limit_msat, htlc_success_dust_limit * 1000); } - let on_holder_dust_htlc_exposure_msat = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat; - if on_holder_dust_htlc_exposure_msat as i64 + htlc_timeout_dust_limit as i64 * 1000 - 1 > max_dust_htlc_exposure_msat.try_into().unwrap_or(i64::max_value()) { + if htlc_stats.on_holder_tx_dust_exposure_msat as i64 + htlc_timeout_dust_limit as i64 * 1000 - 1 > max_dust_htlc_exposure_msat.try_into().unwrap_or(i64::max_value()) { remaining_msat_below_dust_exposure_limit = Some(cmp::min( remaining_msat_below_dust_exposure_limit.unwrap_or(u64::max_value()), - max_dust_htlc_exposure_msat.saturating_sub(on_holder_dust_htlc_exposure_msat))); + max_dust_htlc_exposure_msat.saturating_sub(htlc_stats.on_holder_tx_dust_exposure_msat))); dust_exposure_dust_limit_msat = cmp::max(dust_exposure_dust_limit_msat, htlc_timeout_dust_limit * 1000); } @@ -3046,16 +2906,16 @@ impl ChannelContext where SP::Target: SignerProvider { } available_capacity_msat = cmp::min(available_capacity_msat, - context.counterparty_max_htlc_value_in_flight_msat - outbound_stats.pending_htlcs_value_msat); + context.counterparty_max_htlc_value_in_flight_msat - htlc_stats.pending_outbound_htlcs_value_msat); - if outbound_stats.pending_htlcs + 1 > context.counterparty_max_accepted_htlcs as u32 { + if htlc_stats.pending_outbound_htlcs + 1 > context.counterparty_max_accepted_htlcs as usize { available_capacity_msat = 0; } AvailableBalances { inbound_capacity_msat: cmp::max(context.channel_value_satoshis as i64 * 1000 - context.value_to_self_msat as i64 - - context.get_inbound_pending_htlc_stats(None).pending_htlcs_value_msat as i64 + - htlc_stats.pending_inbound_htlcs_value_msat as i64 - context.holder_selected_channel_reserve_satoshis as i64 * 1000, 0) as u64, outbound_capacity_msat, @@ -3502,7 +3362,7 @@ pub(crate) fn get_legacy_default_holder_selected_channel_reserve_satoshis(channe /// /// This is used both for outbound and inbound channels and has lower bound /// of `dust_limit_satoshis`. -#[cfg(dual_funding)] +#[cfg(any(dual_funding, splicing))] fn get_v2_channel_reserve_satoshis(channel_value_satoshis: u64, dust_limit_satoshis: u64) -> u64 { // Fixed at 1% of channel value by spec. let (q, _) = channel_value_satoshis.overflowing_div(100); @@ -3524,8 +3384,19 @@ pub(crate) fn commit_tx_fee_msat(feerate_per_kw: u32, num_htlcs: usize, channel_ (commitment_tx_base_weight(channel_type_features) + num_htlcs as u64 * COMMITMENT_TX_WEIGHT_PER_HTLC) * feerate_per_kw as u64 / 1000 * 1000 } +pub(crate) fn per_outbound_htlc_counterparty_commit_tx_fee_msat(feerate_per_kw: u32, channel_type_features: &ChannelTypeFeatures) -> u64 { + // Note that we need to divide before multiplying to round properly, + // since the lowest denomination of bitcoin on-chain is the satoshi. + let commitment_tx_fee = COMMITMENT_TX_WEIGHT_PER_HTLC * feerate_per_kw as u64 / 1000 * 1000; + if channel_type_features.supports_anchors_zero_fee_htlc_tx() { + commitment_tx_fee + htlc_success_tx_weight(channel_type_features) * feerate_per_kw as u64 / 1000 + } else { + commitment_tx_fee + } +} + /// Context for dual-funded channels. 
-#[cfg(dual_funding)] +#[cfg(any(dual_funding, splicing))] pub(super) struct DualFundingChannelContext { /// The amount in satoshis we will be contributing to the channel. pub our_funding_satoshis: u64, @@ -3542,7 +3413,7 @@ pub(super) struct DualFundingChannelContext { // Counterparty designates channel data owned by the another channel participant entity. pub(super) struct Channel where SP::Target: SignerProvider { pub context: ChannelContext, - #[cfg(dual_funding)] + #[cfg(any(dual_funding, splicing))] pub dual_funding_channel_context: Option, } @@ -3613,7 +3484,7 @@ impl FailHTLCMessageName for msgs::UpdateFailMalformedHTLC { impl Channel where SP::Target: SignerProvider, - ::EcdsaSigner: WriteableEcdsaChannelSigner + ::EcdsaSigner: EcdsaChannelSigner { fn check_remote_fee( channel_type: &ChannelTypeFeatures, fee_estimator: &LowerBoundedFeeEstimator, @@ -4121,20 +3992,13 @@ impl Channel where Ok(self.get_announcement_sigs(node_signer, chain_hash, user_config, best_block.height, logger)) } - pub fn update_add_htlc( - &mut self, msg: &msgs::UpdateAddHTLC, mut pending_forward_status: PendingHTLCStatus, - create_pending_htlc_status: F, fee_estimator: &LowerBoundedFeeEstimator, logger: &L - ) -> Result<(), ChannelError> - where F: for<'a> Fn(&'a Self, PendingHTLCStatus, u16) -> PendingHTLCStatus, - FE::Target: FeeEstimator, L::Target: Logger, - { + pub fn update_add_htlc( + &mut self, msg: &msgs::UpdateAddHTLC, pending_forward_status: PendingHTLCStatus, + fee_estimator: &LowerBoundedFeeEstimator, + ) -> Result<(), ChannelError> where F::Target: FeeEstimator { if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) { return Err(ChannelError::Close("Got add HTLC message when channel was not in an operational state".to_owned())); } - // We can't accept HTLCs sent after we've sent a shutdown. - if self.context.channel_state.is_local_shutdown_sent() { - pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x4000|8); - } // If the remote has sent a shutdown prior to adding this HTLC, then they are in violation of the spec. if self.context.channel_state.is_remote_shutdown_sent() { return Err(ChannelError::Close("Got add HTLC message when channel was not in an operational state".to_owned())); @@ -4152,12 +4016,12 @@ impl Channel where return Err(ChannelError::Close(format!("Remote side tried to send less than our minimum HTLC value. Lower limit: ({}). 
Actual: ({})", self.context.holder_htlc_minimum_msat, msg.amount_msat))); } - let inbound_stats = self.context.get_inbound_pending_htlc_stats(None); - let outbound_stats = self.context.get_outbound_pending_htlc_stats(None); - if inbound_stats.pending_htlcs + 1 > self.context.holder_max_accepted_htlcs as u32 { + let dust_exposure_limiting_feerate = self.context.get_dust_exposure_limiting_feerate(&fee_estimator); + let htlc_stats = self.context.get_pending_htlc_stats(None, dust_exposure_limiting_feerate); + if htlc_stats.pending_inbound_htlcs + 1 > self.context.holder_max_accepted_htlcs as usize { return Err(ChannelError::Close(format!("Remote tried to push more than our max accepted HTLCs ({})", self.context.holder_max_accepted_htlcs))); } - if inbound_stats.pending_htlcs_value_msat + msg.amount_msat > self.context.holder_max_htlc_value_in_flight_msat { + if htlc_stats.pending_inbound_htlcs_value_msat + msg.amount_msat > self.context.holder_max_htlc_value_in_flight_msat { return Err(ChannelError::Close(format!("Remote HTLC add would put them over our max HTLC value ({})", self.context.holder_max_htlc_value_in_flight_msat))); } @@ -4182,36 +4046,8 @@ impl Channel where } } - let max_dust_htlc_exposure_msat = self.context.get_max_dust_htlc_exposure_msat(fee_estimator); - let (htlc_timeout_dust_limit, htlc_success_dust_limit) = if self.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() { - (0, 0) - } else { - let dust_buffer_feerate = self.context.get_dust_buffer_feerate(None) as u64; - (dust_buffer_feerate * htlc_timeout_tx_weight(self.context.get_channel_type()) / 1000, - dust_buffer_feerate * htlc_success_tx_weight(self.context.get_channel_type()) / 1000) - }; - let exposure_dust_limit_timeout_sats = htlc_timeout_dust_limit + self.context.counterparty_dust_limit_satoshis; - if msg.amount_msat / 1000 < exposure_dust_limit_timeout_sats { - let on_counterparty_tx_dust_htlc_exposure_msat = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat + msg.amount_msat; - if on_counterparty_tx_dust_htlc_exposure_msat > max_dust_htlc_exposure_msat { - log_info!(logger, "Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on counterparty commitment tx", - on_counterparty_tx_dust_htlc_exposure_msat, max_dust_htlc_exposure_msat); - pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x1000|7); - } - } - - let exposure_dust_limit_success_sats = htlc_success_dust_limit + self.context.holder_dust_limit_satoshis; - if msg.amount_msat / 1000 < exposure_dust_limit_success_sats { - let on_holder_tx_dust_htlc_exposure_msat = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat + msg.amount_msat; - if on_holder_tx_dust_htlc_exposure_msat > max_dust_htlc_exposure_msat { - log_info!(logger, "Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on holder commitment tx", - on_holder_tx_dust_htlc_exposure_msat, max_dust_htlc_exposure_msat); - pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x1000|7); - } - } - let pending_value_to_self_msat = - self.context.value_to_self_msat + inbound_stats.pending_htlcs_value_msat - removed_outbound_total_msat; + self.context.value_to_self_msat + htlc_stats.pending_inbound_htlcs_value_msat - removed_outbound_total_msat; let pending_remote_value_msat = self.context.channel_value_satoshis * 1000 - pending_value_to_self_msat; if pending_remote_value_msat 
< msg.amount_msat { @@ -4243,23 +4079,7 @@ impl Channel where } else { 0 }; - if !self.context.is_outbound() { - // `Some(())` is for the fee spike buffer we keep for the remote. This deviates from - // the spec because the fee spike buffer requirement doesn't exist on the receiver's - // side, only on the sender's. Note that with anchor outputs we are no longer as - // sensitive to fee spikes, so we need to account for them. - let htlc_candidate = HTLCCandidate::new(msg.amount_msat, HTLCInitiator::RemoteOffered); - let mut remote_fee_cost_incl_stuck_buffer_msat = self.context.next_remote_commit_tx_fee_msat(htlc_candidate, Some(())); - if !self.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() { - remote_fee_cost_incl_stuck_buffer_msat *= FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE; - } - if pending_remote_value_msat.saturating_sub(msg.amount_msat).saturating_sub(self.context.holder_selected_channel_reserve_satoshis * 1000).saturating_sub(anchor_outputs_value_msat) < remote_fee_cost_incl_stuck_buffer_msat { - // Note that if the pending_forward_status is not updated here, then it's because we're already failing - // the HTLC, i.e. its status is already set to failing. - log_info!(logger, "Attempting to fail HTLC due to fee spike buffer violation in channel {}. Rebalancing is required.", &self.context.channel_id()); - pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x1000|7); - } - } else { + if self.context.is_outbound() { // Check that they won't violate our local required channel reserve by adding this HTLC. let htlc_candidate = HTLCCandidate::new(msg.amount_msat, HTLCInitiator::RemoteOffered); let local_commit_tx_fee_msat = self.context.next_local_commit_tx_fee_msat(htlc_candidate, None); @@ -4453,7 +4273,7 @@ impl Channel where let htlc_redeemscript = chan_utils::get_htlc_redeemscript(&htlc, &self.context.channel_type, &keys); let htlc_sighashtype = if self.context.channel_type.supports_anchors_zero_fee_htlc_tx() { EcdsaSighashType::SinglePlusAnyoneCanPay } else { EcdsaSighashType::All }; - let htlc_sighash = hash_to_message!(&sighash::SighashCache::new(&htlc_tx).segwit_signature_hash(0, &htlc_redeemscript, htlc.amount_msat / 1000, htlc_sighashtype).unwrap()[..]); + let htlc_sighash = hash_to_message!(&sighash::SighashCache::new(&htlc_tx).p2wsh_signature_hash(0, &htlc_redeemscript, htlc.to_bitcoin_amount(), htlc_sighashtype).unwrap()[..]); log_trace!(logger, "Checking HTLC tx signature {} by key {} against tx {} (sighash {}) with redeemscript {} in channel {}.", log_bytes!(msg.htlc_signatures[idx].serialize_compact()[..]), log_bytes!(keys.countersignatory_htlc_key.to_public_key().serialize()), encode::serialize_hex(&htlc_tx), log_bytes!(htlc_sighash[..]), encode::serialize_hex(&htlc_redeemscript), &self.context.channel_id()); @@ -5049,12 +4869,12 @@ impl Channel where } // Before proposing a feerate update, check that we can actually afford the new fee. 
- let inbound_stats = self.context.get_inbound_pending_htlc_stats(Some(feerate_per_kw)); - let outbound_stats = self.context.get_outbound_pending_htlc_stats(Some(feerate_per_kw)); + let dust_exposure_limiting_feerate = self.context.get_dust_exposure_limiting_feerate(&fee_estimator); + let htlc_stats = self.context.get_pending_htlc_stats(Some(feerate_per_kw), dust_exposure_limiting_feerate); let keys = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number); let commitment_stats = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &keys, true, true, logger); - let buffer_fee_msat = commit_tx_fee_sat(feerate_per_kw, commitment_stats.num_nondust_htlcs + outbound_stats.on_holder_tx_holding_cell_htlcs_count as usize + CONCURRENT_INBOUND_HTLC_FEE_BUFFER as usize, self.context.get_channel_type()) * 1000; - let holder_balance_msat = commitment_stats.local_balance_msat - outbound_stats.holding_cell_msat; + let buffer_fee_msat = commit_tx_fee_sat(feerate_per_kw, commitment_stats.num_nondust_htlcs + htlc_stats.on_holder_tx_outbound_holding_cell_htlcs_count as usize + CONCURRENT_INBOUND_HTLC_FEE_BUFFER as usize, self.context.get_channel_type()) * 1000; + let holder_balance_msat = commitment_stats.local_balance_msat - htlc_stats.outbound_holding_cell_msat; if holder_balance_msat < buffer_fee_msat + self.context.counterparty_selected_channel_reserve_satoshis.unwrap() * 1000 { //TODO: auto-close after a number of failures? log_debug!(logger, "Cannot afford to send new feerate at {}", feerate_per_kw); @@ -5062,14 +4882,12 @@ impl Channel where } // Note, we evaluate pending htlc "preemptive" trimmed-to-dust threshold at the proposed `feerate_per_kw`. - let holder_tx_dust_exposure = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat; - let counterparty_tx_dust_exposure = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat; - let max_dust_htlc_exposure_msat = self.context.get_max_dust_htlc_exposure_msat(fee_estimator); - if holder_tx_dust_exposure > max_dust_htlc_exposure_msat { + let max_dust_htlc_exposure_msat = self.context.get_max_dust_htlc_exposure_msat(dust_exposure_limiting_feerate); + if htlc_stats.on_holder_tx_dust_exposure_msat > max_dust_htlc_exposure_msat { log_debug!(logger, "Cannot afford to send new feerate at {} without infringing max dust htlc exposure", feerate_per_kw); return None; } - if counterparty_tx_dust_exposure > max_dust_htlc_exposure_msat { + if htlc_stats.on_counterparty_tx_dust_exposure_msat > max_dust_htlc_exposure_msat { log_debug!(logger, "Cannot afford to send new feerate at {} without infringing max dust htlc exposure", feerate_per_kw); return None; } @@ -5302,20 +5120,16 @@ impl Channel where self.context.pending_update_fee = Some((msg.feerate_per_kw, FeeUpdateState::RemoteAnnounced)); self.context.update_time_counter += 1; // Check that we won't be pushed over our dust exposure limit by the feerate increase. 
- if !self.context.channel_type.supports_anchors_zero_fee_htlc_tx() { - let inbound_stats = self.context.get_inbound_pending_htlc_stats(None); - let outbound_stats = self.context.get_outbound_pending_htlc_stats(None); - let holder_tx_dust_exposure = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat; - let counterparty_tx_dust_exposure = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat; - let max_dust_htlc_exposure_msat = self.context.get_max_dust_htlc_exposure_msat(fee_estimator); - if holder_tx_dust_exposure > max_dust_htlc_exposure_msat { - return Err(ChannelError::Close(format!("Peer sent update_fee with a feerate ({}) which may over-expose us to dust-in-flight on our own transactions (totaling {} msat)", - msg.feerate_per_kw, holder_tx_dust_exposure))); - } - if counterparty_tx_dust_exposure > max_dust_htlc_exposure_msat { - return Err(ChannelError::Close(format!("Peer sent update_fee with a feerate ({}) which may over-expose us to dust-in-flight on our counterparty's transactions (totaling {} msat)", - msg.feerate_per_kw, counterparty_tx_dust_exposure))); - } + let dust_exposure_limiting_feerate = self.context.get_dust_exposure_limiting_feerate(&fee_estimator); + let htlc_stats = self.context.get_pending_htlc_stats(None, dust_exposure_limiting_feerate); + let max_dust_htlc_exposure_msat = self.context.get_max_dust_htlc_exposure_msat(dust_exposure_limiting_feerate); + if htlc_stats.on_holder_tx_dust_exposure_msat > max_dust_htlc_exposure_msat { + return Err(ChannelError::Close(format!("Peer sent update_fee with a feerate ({}) which may over-expose us to dust-in-flight on our own transactions (totaling {} msat)", + msg.feerate_per_kw, htlc_stats.on_holder_tx_dust_exposure_msat))); + } + if htlc_stats.on_counterparty_tx_dust_exposure_msat > max_dust_htlc_exposure_msat { + return Err(ChannelError::Close(format!("Peer sent update_fee with a feerate ({}) which may over-expose us to dust-in-flight on our counterparty's transactions (totaling {} msat)", + msg.feerate_per_kw, htlc_stats.on_counterparty_tx_dust_exposure_msat))); } Ok(()) } @@ -5967,7 +5781,7 @@ impl Channel where }; for outp in closing_tx.trust().built_transaction().output.iter() { - if !outp.script_pubkey.is_witness_program() && outp.value < MAX_STD_OUTPUT_DUST_LIMIT_SATOSHIS { + if !outp.script_pubkey.is_witness_program() && outp.value < Amount::from_sat(MAX_STD_OUTPUT_DUST_LIMIT_SATOSHIS) { return Err(ChannelError::Close("Remote sent us a closing_signed with a dust output. 
Always use segwit closing scripts!".to_owned())); } } @@ -6148,6 +5962,96 @@ impl Channel where }) } + pub fn can_accept_incoming_htlc( + &self, msg: &msgs::UpdateAddHTLC, fee_estimator: &LowerBoundedFeeEstimator, logger: L + ) -> Result<(), (&'static str, u16)> + where + F::Target: FeeEstimator, + L::Target: Logger + { + if self.context.channel_state.is_local_shutdown_sent() { + return Err(("Shutdown was already sent", 0x4000|8)) + } + + let dust_exposure_limiting_feerate = self.context.get_dust_exposure_limiting_feerate(&fee_estimator); + let htlc_stats = self.context.get_pending_htlc_stats(None, dust_exposure_limiting_feerate); + let max_dust_htlc_exposure_msat = self.context.get_max_dust_htlc_exposure_msat(dust_exposure_limiting_feerate); + let (htlc_timeout_dust_limit, htlc_success_dust_limit) = if self.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() { + (0, 0) + } else { + let dust_buffer_feerate = self.context.get_dust_buffer_feerate(None) as u64; + (dust_buffer_feerate * htlc_timeout_tx_weight(self.context.get_channel_type()) / 1000, + dust_buffer_feerate * htlc_success_tx_weight(self.context.get_channel_type()) / 1000) + }; + let exposure_dust_limit_timeout_sats = htlc_timeout_dust_limit + self.context.counterparty_dust_limit_satoshis; + if msg.amount_msat / 1000 < exposure_dust_limit_timeout_sats { + let on_counterparty_tx_dust_htlc_exposure_msat = htlc_stats.on_counterparty_tx_dust_exposure_msat + msg.amount_msat; + if on_counterparty_tx_dust_htlc_exposure_msat > max_dust_htlc_exposure_msat { + log_info!(logger, "Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on counterparty commitment tx", + on_counterparty_tx_dust_htlc_exposure_msat, max_dust_htlc_exposure_msat); + return Err(("Exceeded our dust exposure limit on counterparty commitment tx", 0x1000|7)) + } + } else { + let htlc_dust_exposure_msat = + per_outbound_htlc_counterparty_commit_tx_fee_msat(self.context.feerate_per_kw, &self.context.channel_type); + let counterparty_tx_dust_exposure = + htlc_stats.on_counterparty_tx_dust_exposure_msat.saturating_add(htlc_dust_exposure_msat); + if counterparty_tx_dust_exposure > max_dust_htlc_exposure_msat { + log_info!(logger, "Cannot accept value that would put our exposure to tx fee dust at {} over the limit {} on counterparty commitment tx", + counterparty_tx_dust_exposure, max_dust_htlc_exposure_msat); + return Err(("Exceeded our tx fee dust exposure limit on counterparty commitment tx", 0x1000|7)) + } + } + + let exposure_dust_limit_success_sats = htlc_success_dust_limit + self.context.holder_dust_limit_satoshis; + if msg.amount_msat / 1000 < exposure_dust_limit_success_sats { + let on_holder_tx_dust_htlc_exposure_msat = htlc_stats.on_holder_tx_dust_exposure_msat + msg.amount_msat; + if on_holder_tx_dust_htlc_exposure_msat > max_dust_htlc_exposure_msat { + log_info!(logger, "Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on holder commitment tx", + on_holder_tx_dust_htlc_exposure_msat, max_dust_htlc_exposure_msat); + return Err(("Exceeded our dust exposure limit on holder commitment tx", 0x1000|7)) + } + } + + let anchor_outputs_value_msat = if self.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() { + ANCHOR_OUTPUT_VALUE_SATOSHI * 2 * 1000 + } else { + 0 + }; + + let mut removed_outbound_total_msat = 0; + for ref htlc in self.context.pending_outbound_htlcs.iter() { + if let OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(_)) = htlc.state { + 
removed_outbound_total_msat += htlc.amount_msat; + } else if let OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(_)) = htlc.state { + removed_outbound_total_msat += htlc.amount_msat; + } + } + + let pending_value_to_self_msat = + self.context.value_to_self_msat + htlc_stats.pending_inbound_htlcs_value_msat - removed_outbound_total_msat; + let pending_remote_value_msat = + self.context.channel_value_satoshis * 1000 - pending_value_to_self_msat; + + if !self.context.is_outbound() { + // `Some(())` is for the fee spike buffer we keep for the remote. This deviates from + // the spec because the fee spike buffer requirement doesn't exist on the receiver's + // side, only on the sender's. Note that with anchor outputs we are no longer as + // sensitive to fee spikes, so we need to account for them. + let htlc_candidate = HTLCCandidate::new(msg.amount_msat, HTLCInitiator::RemoteOffered); + let mut remote_fee_cost_incl_stuck_buffer_msat = self.context.next_remote_commit_tx_fee_msat(htlc_candidate, Some(())); + if !self.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() { + remote_fee_cost_incl_stuck_buffer_msat *= FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE; + } + if pending_remote_value_msat.saturating_sub(msg.amount_msat).saturating_sub(self.context.holder_selected_channel_reserve_satoshis * 1000).saturating_sub(anchor_outputs_value_msat) < remote_fee_cost_incl_stuck_buffer_msat { + log_info!(logger, "Attempting to fail HTLC due to fee spike buffer violation in channel {}. Rebalancing is required.", &self.context.channel_id()); + return Err(("Fee spike buffer violation", 0x1000|7)); + } + } + + Ok(()) + } + pub fn get_cur_holder_commitment_transaction_number(&self) -> u64 { self.context.cur_holder_commitment_transaction_number + 1 } @@ -6225,6 +6129,26 @@ impl Channel where } } + /// On startup, its possible we detect some monitor updates have actually completed (and the + /// ChannelManager was simply stale). In that case, we should simply drop them, which we do + /// here after logging them. 
+ pub fn on_startup_drop_completed_blocked_mon_updates_through(&mut self, logger: &L, loaded_mon_update_id: u64) { + let channel_id = self.context.channel_id(); + self.context.blocked_monitor_updates.retain(|update| { + if update.update.update_id <= loaded_mon_update_id { + log_info!( + logger, + "Dropping completed ChannelMonitorUpdate id {} on channel {} due to a stale ChannelManager", + update.update.update_id, + channel_id, + ); + false + } else { + true + } + }); + } + pub fn blocked_monitor_updates_pending(&self) -> usize { self.context.blocked_monitor_updates.len() } @@ -6392,8 +6316,8 @@ impl Channel where if self.context.funding_tx_confirmation_height == 0 { if tx.txid() == funding_txo.txid { let txo_idx = funding_txo.index as usize; - if txo_idx >= tx.output.len() || tx.output[txo_idx].script_pubkey != self.context.get_funding_redeemscript().to_v0_p2wsh() || - tx.output[txo_idx].value != self.context.channel_value_satoshis { + if txo_idx >= tx.output.len() || tx.output[txo_idx].script_pubkey != self.context.get_funding_redeemscript().to_p2wsh() || + tx.output[txo_idx].value.to_sat() != self.context.channel_value_satoshis { if self.context.is_outbound() { // If we generated the funding transaction and it doesn't match what it // should, the client is really broken and we should just panic and @@ -6408,7 +6332,7 @@ impl Channel where return Err(ClosureReason::ProcessingError { err: err_reason.to_owned() }); } else { if self.context.is_outbound() { - if !tx.is_coin_base() { + if !tx.is_coinbase() { for input in tx.input.iter() { if input.witness.is_empty() { // We generated a malleable funding transaction, implying we've @@ -6428,7 +6352,7 @@ impl Channel where } // If this is a coinbase transaction and not a 0-conf channel // we should update our min_depth to 100 to handle coinbase maturity - if tx.is_coin_base() && + if tx.is_coinbase() && self.context.minimum_depth.unwrap_or(0) > 0 && self.context.minimum_depth.unwrap_or(0) < COINBASE_MATURITY { self.context.minimum_depth = Some(COINBASE_MATURITY); @@ -7372,7 +7296,7 @@ impl OutboundV1Channel where SP::Target: SignerProvider { // If the funding transaction is a coinbase transaction, we need to set the minimum depth to 100. // We can skip this if it is a zero-conf channel. - if funding_transaction.is_coin_base() && + if funding_transaction.is_coinbase() && self.context.minimum_depth.unwrap_or(0) > 0 && self.context.minimum_depth.unwrap_or(0) < COINBASE_MATURITY { self.context.minimum_depth = Some(COINBASE_MATURITY); @@ -7410,6 +7334,12 @@ impl OutboundV1Channel where SP::Target: SignerProvider { Ok(self.get_open_channel(chain_hash)) } + /// Returns true if we can resume the channel by sending the [`msgs::OpenChannel`] again. 
@@ -7410,6 +7334,12 @@ impl<SP: Deref> OutboundV1Channel<SP> where SP::Target: SignerProvider {
 		Ok(self.get_open_channel(chain_hash))
 	}
 
+	/// Returns true if we can resume the channel by sending the [`msgs::OpenChannel`] again.
+	pub fn is_resumable(&self) -> bool {
+		!self.context.have_received_message() &&
+			self.context.cur_holder_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER
+	}
+
 	pub fn get_open_channel(&self, chain_hash: ChainHash) -> msgs::OpenChannel {
 		if !self.context.is_outbound() {
 			panic!("Tried to open a channel for an inbound channel?");
@@ -7645,7 +7575,7 @@ impl<SP: Deref> OutboundV1Channel<SP> where SP::Target: SignerProvider {
 		let funding_redeemscript = self.context.get_funding_redeemscript();
 		let funding_txo = self.context.get_funding_txo().unwrap();
-		let funding_txo_script = funding_redeemscript.to_v0_p2wsh();
+		let funding_txo_script = funding_redeemscript.to_p2wsh();
 		let obscure_factor = get_commitment_transaction_number_obscure_factor(&self.context.get_holder_pubkeys().payment_point,
 			&self.context.get_counterparty_pubkeys().payment_point, self.context.is_outbound());
 		let shutdown_script = self.context.shutdown_scriptpubkey.clone().map(|script| script.into_inner());
 		let mut monitor_signer = signer_provider.derive_channel_signer(self.context.channel_value_satoshis, self.context.channel_keys_id);
@@ -7678,7 +7608,7 @@ impl<SP: Deref> OutboundV1Channel<SP> where SP::Target: SignerProvider {
 		let mut channel = Channel {
 			context: self.context,
-			#[cfg(dual_funding)]
+			#[cfg(any(dual_funding, splicing))]
 			dual_funding_channel_context: None,
 		};
@@ -7752,7 +7682,7 @@ impl<SP: Deref> InboundV1Channel<SP> where SP::Target: SignerProvider {
 		F::Target: FeeEstimator,
 		L::Target: Logger,
 	{
-		let logger = WithContext::from(logger, Some(counterparty_node_id), Some(msg.common_fields.temporary_channel_id));
+		let logger = WithContext::from(logger, Some(counterparty_node_id), Some(msg.common_fields.temporary_channel_id), None);
 		// First check the channel type is known, failing before we do anything else if we don't
 		// support this channel type.
@@ -7942,7 +7872,7 @@ impl<SP: Deref> InboundV1Channel<SP> where SP::Target: SignerProvider {
 		let (counterparty_initial_commitment_tx, funding_signed) = self.context.get_funding_signed_msg(logger);
 		let funding_redeemscript = self.context.get_funding_redeemscript();
-		let funding_txo_script = funding_redeemscript.to_v0_p2wsh();
+		let funding_txo_script = funding_redeemscript.to_p2wsh();
 		let obscure_factor = get_commitment_transaction_number_obscure_factor(&self.context.get_holder_pubkeys().payment_point,
 			&self.context.get_counterparty_pubkeys().payment_point, self.context.is_outbound());
 		let shutdown_script = self.context.shutdown_scriptpubkey.clone().map(|script| script.into_inner());
 		let mut monitor_signer = signer_provider.derive_channel_signer(self.context.channel_value_satoshis, self.context.channel_keys_id);
@@ -7968,7 +7898,7 @@ impl<SP: Deref> InboundV1Channel<SP> where SP::Target: SignerProvider {
 		// `ChannelMonitor`.
 		let mut channel = Channel {
 			context: self.context,
-			#[cfg(dual_funding)]
+			#[cfg(any(dual_funding, splicing))]
 			dual_funding_channel_context: None,
 		};
 		let need_channel_ready = channel.check_get_channel_ready(0).is_some();
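Several one-line changes in these hunks are mechanical renames from the rust-bitcoin upgrade: `to_v0_p2wsh()` becomes `to_p2wsh()`, `is_coin_base()` becomes `is_coinbase()`, and `TxOut::value` is a typed `Amount` instead of a bare `u64`. A minimal sketch of the newer spellings (assuming a rust-bitcoin 0.32-era API; the helper and the empty redeemscript are placeholders, not LDK code):

```rust
use bitcoin::amount::Amount;
use bitcoin::blockdata::script::{Builder, ScriptBuf};
use bitcoin::blockdata::transaction::TxOut;

// Build a funding-style output the way the updated code spells it: the P2WSH of a
// redeemscript, with the value as a typed `Amount`. The redeemscript here is an empty
// placeholder rather than a real 2-of-2 multisig script.
fn funding_txout(redeemscript: &ScriptBuf, value_sat: u64) -> TxOut {
	TxOut {
		// Formerly `redeemscript.to_v0_p2wsh()`.
		script_pubkey: redeemscript.to_p2wsh(),
		// Formerly a bare `u64`; compare via `.to_sat()` when a u64 is needed.
		value: Amount::from_sat(value_sat),
	}
}

fn main() {
	let redeemscript = Builder::new().into_script();
	let txo = funding_txout(&redeemscript, 10_000_000);
	assert_eq!(txo.value.to_sat(), 10_000_000);
	assert!(txo.script_pubkey.is_p2wsh());
}
```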
@@ -7979,15 +7909,15 @@
 }
 
 // A not-yet-funded outbound (from holder) channel using V2 channel establishment.
-#[cfg(dual_funding)]
+#[cfg(any(dual_funding, splicing))]
 pub(super) struct OutboundV2Channel<SP: Deref> where SP::Target: SignerProvider {
 	pub context: ChannelContext<SP>,
 	pub unfunded_context: UnfundedChannelContext,
-	#[cfg(dual_funding)]
+	#[cfg(any(dual_funding, splicing))]
 	pub dual_funding_context: DualFundingChannelContext,
 }
 
-#[cfg(dual_funding)]
+#[cfg(any(dual_funding, splicing))]
 impl<SP: Deref> OutboundV2Channel<SP> where SP::Target: SignerProvider {
 	pub fn new<ES: Deref, F: Deref>(
 		fee_estimator: &LowerBoundedFeeEstimator<F>, entropy_source: &ES, signer_provider: &SP,
@@ -8103,14 +8033,14 @@ impl<SP: Deref> OutboundV2Channel<SP> where SP::Target: SignerProvider {
 }
 
 // A not-yet-funded inbound (from counterparty) channel using V2 channel establishment.
-#[cfg(dual_funding)]
+#[cfg(any(dual_funding, splicing))]
 pub(super) struct InboundV2Channel<SP: Deref> where SP::Target: SignerProvider {
 	pub context: ChannelContext<SP>,
 	pub unfunded_context: UnfundedChannelContext,
 	pub dual_funding_context: DualFundingChannelContext,
 }
 
-#[cfg(dual_funding)]
+#[cfg(any(dual_funding, splicing))]
 impl<SP: Deref> InboundV2Channel<SP> where SP::Target: SignerProvider {
 	/// Creates a new dual-funded channel from a remote side's request for one.
 	/// Assumes chain_hash has already been checked and corresponds with what we expect!
@@ -8699,7 +8629,8 @@ impl<SP: Deref> Writeable for Channel<SP> where SP::Target: SignerProvider {
 			(39, pending_outbound_blinding_points, optional_vec),
 			(41, holding_cell_blinding_points, optional_vec),
 			(43, malformed_htlcs, optional_vec), // Added in 0.0.119
-			(45, self.context.local_initiated_shutdown, option), // Added in 0.0.122
+			// 45 and 47 are reserved for async signing
+			(49, self.context.local_initiated_shutdown, option), // Added in 0.0.122
 		});
 		Ok(())
 	}
@@ -9039,7 +8970,8 @@ impl<'a, 'b, 'c, ES: Deref, SP: Deref> ReadableArgs<(&'a ES, &'b SP, u32, &'c Ch
 			(39, pending_outbound_blinding_points_opt, optional_vec),
 			(41, holding_cell_blinding_points_opt, optional_vec),
 			(43, malformed_htlcs, optional_vec), // Added in 0.0.119
-			(45, local_initiated_shutdown, option),
+			// 45 and 47 are reserved for async signing
+			(49, local_initiated_shutdown, option),
 		});
 		let (channel_keys_id, holder_signer) = if let Some(channel_keys_id) = channel_keys_id {
@@ -9275,7 +9207,7 @@ impl<'a, 'b, 'c, ES: Deref, SP: Deref> ReadableArgs<(&'a ES, &'b SP, u32, &'c Ch
 				blocked_monitor_updates: blocked_monitor_updates.unwrap(),
 			},
-			#[cfg(dual_funding)]
+			#[cfg(any(dual_funding, splicing))]
 			dual_funding_channel_context: None,
 		})
 	}
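The gate on the V2-establishment types widens from `dual_funding` alone to `any(dual_funding, splicing)`, so they are compiled whenever either flag is set. A minimal sketch of the pattern, assuming the flags are plain `--cfg` values (for example set via RUSTFLAGS) rather than Cargo features, with an illustrative struct rather than LDK's real one:

```rust
// Compiled only when built with `--cfg dual_funding` or `--cfg splicing`
// (e.g. RUSTFLAGS="--cfg splicing" cargo build); the struct is illustrative only.
#[cfg(any(dual_funding, splicing))]
struct V2Only {
	funding_feerate_sat_per_1000_weight: u32,
}

#[cfg(any(dual_funding, splicing))]
fn main() {
	let _v2 = V2Only { funding_feerate_sat_per_1000_weight: 253 };
	println!("V2 establishment types are compiled in");
}

#[cfg(not(any(dual_funding, splicing)))]
fn main() {
	println!("V2 establishment types are compiled out");
}
```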
@@ -9284,13 +9216,14 @@
 #[cfg(test)]
 mod tests {
 	use std::cmp;
+	use bitcoin::amount::Amount;
 	use bitcoin::blockdata::constants::ChainHash;
 	use bitcoin::blockdata::script::{ScriptBuf, Builder};
-	use bitcoin::blockdata::transaction::{Transaction, TxOut};
+	use bitcoin::blockdata::transaction::{Transaction, TxOut, Version};
 	use bitcoin::blockdata::opcodes;
-	use bitcoin::network::constants::Network;
+	use bitcoin::network::Network;
 	use crate::ln::onion_utils::INVALID_ONION_BLINDING;
-	use crate::ln::{PaymentHash, PaymentPreimage};
+	use crate::ln::types::{PaymentHash, PaymentPreimage};
 	use crate::ln::channel_keys::{RevocationKey, RevocationBasepoint};
 	use crate::ln::channelmanager::{self, HTLCSource, PaymentId};
 	use crate::ln::channel::InitFeatures;
@@ -9317,9 +9250,8 @@ mod tests {
 	use bitcoin::hashes::sha256::Hash as Sha256;
 	use bitcoin::hashes::Hash;
 	use bitcoin::hashes::hex::FromHex;
-	use bitcoin::hash_types::WPubkeyHash;
 	use bitcoin::blockdata::locktime::absolute::LockTime;
-	use bitcoin::address::{WitnessProgram, WitnessVersion};
+	use bitcoin::{WitnessProgram, WitnessVersion, WPubkeyHash};
 	use crate::prelude::*;
 
 	#[test]
@@ -9475,8 +9407,8 @@ mod tests {
 		// Node A --> Node B: funding created
 		let output_script = node_a_chan.context.get_funding_redeemscript();
-		let tx = Transaction { version: 1, lock_time: LockTime::ZERO, input: Vec::new(), output: vec![TxOut {
-			value: 10000000, script_pubkey: output_script.clone(),
+		let tx = Transaction { version: Version::ONE, lock_time: LockTime::ZERO, input: Vec::new(), output: vec![TxOut {
+			value: Amount::from_sat(10000000), script_pubkey: output_script.clone(),
 		}]};
 		let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 };
 		let funding_created_msg = node_a_chan.get_funding_created(tx.clone(), funding_outpoint, false, &&logger).map_err(|_| ()).unwrap();
@@ -9604,8 +9536,8 @@ mod tests {
 		// Node A --> Node B: funding created
 		let output_script = node_a_chan.context.get_funding_redeemscript();
-		let tx = Transaction { version: 1, lock_time: LockTime::ZERO, input: Vec::new(), output: vec![TxOut {
-			value: 10000000, script_pubkey: output_script.clone(),
+		let tx = Transaction { version: Version::ONE, lock_time: LockTime::ZERO, input: Vec::new(), output: vec![TxOut {
+			value: Amount::from_sat(10000000), script_pubkey: output_script.clone(),
 		}]};
 		let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 };
 		let funding_created_msg = node_a_chan.get_funding_created(tx.clone(), funding_outpoint, false, &&logger).map_err(|_| ()).unwrap();
@@ -9793,8 +9725,8 @@ mod tests {
 		// Node A --> Node B: funding created
 		let output_script = node_a_chan.context.get_funding_redeemscript();
-		let tx = Transaction { version: 1, lock_time: LockTime::ZERO, input: Vec::new(), output: vec![TxOut {
-			value: 10000000, script_pubkey: output_script.clone(),
+		let tx = Transaction { version: Version::ONE, lock_time: LockTime::ZERO, input: Vec::new(), output: vec![TxOut {
+			value: Amount::from_sat(10000000), script_pubkey: output_script.clone(),
 		}]};
 		let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 };
 		let funding_created_msg = node_a_chan.get_funding_created(tx.clone(), funding_outpoint, false, &&logger).map_err(|_| ()).unwrap();
@@ -9860,8 +9792,8 @@ mod tests {
 			&features, &outbound_chan.get_open_channel(ChainHash::using_genesis_block(network)), 7, &config, 0, &&logger, false
 		).unwrap();
 		outbound_chan.accept_channel(&inbound_chan.get_accept_channel_message(), &config.channel_handshake_limits, &features).unwrap();
-		let tx = Transaction { version: 1, lock_time: LockTime::ZERO, input: Vec::new(), output: vec![TxOut {
-			value: 10000000, script_pubkey: outbound_chan.context.get_funding_redeemscript(),
+		let tx = Transaction { version: Version::ONE, lock_time: LockTime::ZERO, input: Vec::new(), output: vec![TxOut {
+			value: Amount::from_sat(10000000), script_pubkey: outbound_chan.context.get_funding_redeemscript(),
 		}]};
 		let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 };
 		let funding_created = outbound_chan.get_funding_created(tx.clone(), funding_outpoint, false, &&logger).map_err(|_| ()).unwrap().unwrap();
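The next hunk swaps the old `Message::from_slice` plus `segwit_signature_hash` pair for `Message::from_digest` plus `p2wsh_signature_hash`. A minimal sketch of the newer calls (assuming a rust-bitcoin 0.32-era API; the helper name, dummy transaction, and amounts are placeholders, not LDK code):

```rust
use bitcoin::hashes::Hash;
use bitcoin::secp256k1::Message;
use bitcoin::sighash::{EcdsaSighashType, SighashCache};
use bitcoin::{Amount, ScriptBuf, Transaction};

// Compute the BIP143 sighash for input 0 and wrap it as a secp256k1 Message.
fn p2wsh_sighash_msg(tx: &Transaction, redeem_script: &ScriptBuf, value: Amount) -> Message {
	let sighash = SighashCache::new(tx)
		// Formerly `segwit_signature_hash(0, redeem_script, value_sat, sighash_type)`.
		.p2wsh_signature_hash(0, redeem_script, value, EcdsaSighashType::All)
		.expect("input 0 exists");
	// Formerly `Message::from_slice(&sighash[..]).unwrap()`.
	Message::from_digest(sighash.to_byte_array())
}

fn main() {
	use bitcoin::{absolute::LockTime, transaction::Version, OutPoint, Sequence, TxIn, TxOut, Witness};
	// A throwaway single-input transaction so the helper has something to hash.
	let tx = Transaction {
		version: Version::TWO,
		lock_time: LockTime::ZERO,
		input: vec![TxIn {
			previous_output: OutPoint::null(),
			script_sig: ScriptBuf::new(),
			sequence: Sequence::MAX,
			witness: Witness::new(),
		}],
		output: vec![TxOut { value: Amount::from_sat(1_000), script_pubkey: ScriptBuf::new() }],
	};
	let msg = p2wsh_sighash_msg(&tx, &ScriptBuf::new(), Amount::from_sat(2_000));
	println!("{:?}", msg);
}
```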
@@ -10118,7 +10050,7 @@ mod tests {
 				&htlc, $opt_anchors, &keys.broadcaster_delayed_payment_key, &keys.revocation_key);
 			let htlc_redeemscript = chan_utils::get_htlc_redeemscript(&htlc, $opt_anchors, &keys);
 			let htlc_sighashtype = if $opt_anchors.supports_anchors_zero_fee_htlc_tx() { EcdsaSighashType::SinglePlusAnyoneCanPay } else { EcdsaSighashType::All };
-			let htlc_sighash = Message::from_slice(&sighash::SighashCache::new(&htlc_tx).segwit_signature_hash(0, &htlc_redeemscript, htlc.amount_msat / 1000, htlc_sighashtype).unwrap()[..]).unwrap();
+			let htlc_sighash = Message::from_digest(sighash::SighashCache::new(&htlc_tx).p2wsh_signature_hash(0, &htlc_redeemscript, htlc.to_bitcoin_amount(), htlc_sighashtype).unwrap().as_raw_hash().to_byte_array());
 			assert!(secp_ctx.verify_ecdsa(&htlc_sighash, &remote_signature, &keys.countersignatory_htlc_key.to_public_key()).is_ok(), "verify counterparty htlc sig");
 
 			let mut preimage: Option<PaymentPreimage> = None;
@@ -10984,15 +10916,15 @@ mod tests {
 		// Fund the channel with a batch funding transaction.
 		let output_script = node_a_chan.context.get_funding_redeemscript();
 		let tx = Transaction {
-			version: 1,
+			version: Version::ONE,
 			lock_time: LockTime::ZERO,
 			input: Vec::new(),
 			output: vec![
 				TxOut {
-					value: 10000000, script_pubkey: output_script.clone(),
+					value: Amount::from_sat(10000000), script_pubkey: output_script.clone(),
 				},
 				TxOut {
-					value: 10000000, script_pubkey: Builder::new().into_script(),
+					value: Amount::from_sat(10000000), script_pubkey: Builder::new().into_script(),
 				},
 			]};
 		let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 };