// You may not use this file except in accordance with one or both of these
// licenses.
-use bitcoin::blockdata::script::{Script,Builder};
-use bitcoin::blockdata::transaction::{Transaction, EcdsaSighashType};
-use bitcoin::util::sighash;
+use bitcoin::blockdata::constants::ChainHash;
+use bitcoin::blockdata::script::{Script, ScriptBuf, Builder};
+use bitcoin::blockdata::transaction::Transaction;
+use bitcoin::sighash;
+use bitcoin::sighash::EcdsaSighashType;
use bitcoin::consensus::encode;
use bitcoin::hashes::Hash;
use bitcoin::secp256k1::{Secp256k1,ecdsa::Signature};
use bitcoin::secp256k1;
-use crate::ln::{PaymentPreimage, PaymentHash};
+use crate::ln::{ChannelId, PaymentPreimage, PaymentHash};
use crate::ln::features::{ChannelTypeFeatures, InitFeatures};
use crate::ln::msgs;
use crate::ln::msgs::DecodeError;
use crate::ln::script::{self, ShutdownScript};
-use crate::ln::channelmanager::{self, CounterpartyForwardingInfo, PendingHTLCStatus, HTLCSource, SentHTLCId, HTLCFailureMsg, PendingHTLCInfo, RAACommitmentOrder, BREAKDOWN_TIMEOUT, MIN_CLTV_EXPIRY_DELTA, MAX_LOCAL_BREAKDOWN_TIMEOUT};
+use crate::ln::channelmanager::{self, CounterpartyForwardingInfo, PendingHTLCStatus, HTLCSource, SentHTLCId, HTLCFailureMsg, PendingHTLCInfo, RAACommitmentOrder, BREAKDOWN_TIMEOUT, MIN_CLTV_EXPIRY_DELTA, MAX_LOCAL_BREAKDOWN_TIMEOUT, ChannelShutdownState};
use crate::ln::chan_utils::{CounterpartyCommitmentSecrets, TxCreationKeys, HTLCOutputInCommitment, htlc_success_tx_weight, htlc_timeout_tx_weight, make_funding_redeemscript, ChannelPublicKeys, CommitmentTransaction, HolderCommitmentTransaction, ChannelTransactionParameters, CounterpartyChannelTransactionParameters, MAX_HTLCS, get_commitment_transaction_number_obscure_factor, ClosingTransaction};
use crate::ln::chan_utils;
use crate::ln::onion_utils::HTLCFailReason;
use crate::chain::chaininterface::{FeeEstimator, ConfirmationTarget, LowerBoundedFeeEstimator};
use crate::chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, ChannelMonitorUpdateStep, LATENCY_GRACE_PERIOD_BLOCKS, CLOSED_CHANNEL_UPDATE_ID};
use crate::chain::transaction::{OutPoint, TransactionData};
-use crate::sign::{WriteableEcdsaChannelSigner, EntropySource, ChannelSigner, SignerProvider, NodeSigner, Recipient};
+use crate::sign::ecdsa::{EcdsaChannelSigner, WriteableEcdsaChannelSigner};
+use crate::sign::{EntropySource, ChannelSigner, SignerProvider, NodeSigner, Recipient};
use crate::events::ClosureReason;
use crate::routing::gossip::NodeId;
-use crate::util::ser::{Readable, ReadableArgs, Writeable, Writer, VecWriter};
-use crate::util::logger::Logger;
+use crate::util::ser::{Readable, ReadableArgs, Writeable, Writer};
+use crate::util::logger::{Logger, Record, WithContext};
use crate::util::errors::APIError;
-use crate::util::config::{UserConfig, ChannelConfig, LegacyChannelConfig, ChannelHandshakeConfig, ChannelHandshakeLimits};
+use crate::util::config::{UserConfig, ChannelConfig, LegacyChannelConfig, ChannelHandshakeConfig, ChannelHandshakeLimits, MaxDustHTLCExposure};
use crate::util::scid_utils::scid_from_parts;
use crate::io;
use crate::prelude::*;
use core::{cmp,mem,fmt};
+use core::convert::TryInto;
use core::ops::Deref;
#[cfg(any(test, fuzzing, debug_assertions))]
use crate::sync::Mutex;
-use bitcoin::hashes::hex::ToHex;
+use crate::sign::type_resolver::ChannelSignerType;
+
+use super::channel_keys::{DelayedPaymentBasepoint, HtlcBasepoint, RevocationBasepoint};
#[cfg(test)]
pub struct ChannelValueStat {
state: InboundHTLCState,
}
+#[cfg_attr(test, derive(Clone, Debug, PartialEq))]
enum OutboundHTLCState {
/// Added by us and included in a commitment_signed (if we were AwaitingRemoteRevoke when we
/// created it we would have put it in the holding cell instead). When they next revoke_and_ack
}
#[derive(Clone)]
+#[cfg_attr(test, derive(Debug, PartialEq))]
enum OutboundHTLCOutcome {
/// LDK version 0.0.105+ will always fill in the preimage here.
Success(Option<PaymentPreimage>),
}
}
+#[cfg_attr(test, derive(Clone, Debug, PartialEq))]
struct OutboundHTLCOutput {
htlc_id: u64,
amount_msat: u64,
payment_hash: PaymentHash,
state: OutboundHTLCState,
source: HTLCSource,
+ blinding_point: Option<PublicKey>,
+ skimmed_fee_msat: Option<u64>,
}
/// See AwaitingRemoteRevoke ChannelState for more info
+#[cfg_attr(test, derive(Clone, Debug, PartialEq))]
enum HTLCUpdateAwaitingACK {
AddHTLC { // TODO: Time out if we're getting close to cltv_expiry
// always outbound
payment_hash: PaymentHash,
source: HTLCSource,
onion_routing_packet: msgs::OnionPacket,
+ // The extra fee we're skimming off the top of this HTLC.
+ skimmed_fee_msat: Option<u64>,
+ blinding_point: Option<PublicKey>,
},
ClaimHTLC {
payment_preimage: PaymentPreimage,
},
}
-/// There are a few "states" and then a number of flags which can be applied:
-/// We first move through init with OurInitSent -> TheirInitSent -> FundingCreated -> FundingSent.
-/// TheirChannelReady and OurChannelReady then get set on FundingSent, and when both are set we
-/// move on to ChannelReady.
-/// Note that PeerDisconnected can be set on both ChannelReady and FundingSent.
-/// ChannelReady can then get all remaining flags set on it, until we finish shutdown, then we
-/// move on to ShutdownComplete, at which point most calls into this channel are disallowed.
+macro_rules! define_state_flags {
+ ($flag_type_doc: expr, $flag_type: ident, [$(($flag_doc: expr, $flag: ident, $value: expr)),+], $extra_flags: expr) => {
+ #[doc = $flag_type_doc]
+ #[derive(Copy, Clone, Debug, PartialEq, PartialOrd, Eq)]
+ struct $flag_type(u32);
+
+ impl $flag_type {
+ $(
+ #[doc = $flag_doc]
+ const $flag: $flag_type = $flag_type($value);
+ )*
+
+ /// All flags that apply to the specified [`ChannelState`] variant.
+ #[allow(unused)]
+ const ALL: $flag_type = Self($(Self::$flag.0 | )* $extra_flags);
+
+ #[allow(unused)]
+ fn new() -> Self { Self(0) }
+
+ #[allow(unused)]
+ fn from_u32(flags: u32) -> Result<Self, ()> {
+ if flags & !Self::ALL.0 != 0 {
+ Err(())
+ } else {
+ Ok($flag_type(flags))
+ }
+ }
+
+ #[allow(unused)]
+ fn is_empty(&self) -> bool { self.0 == 0 }
+
+ #[allow(unused)]
+ fn is_set(&self, flag: Self) -> bool { *self & flag == flag }
+ }
+
+ impl core::ops::Not for $flag_type {
+ type Output = Self;
+ fn not(self) -> Self::Output { Self(!self.0) }
+ }
+ impl core::ops::BitOr for $flag_type {
+ type Output = Self;
+ fn bitor(self, rhs: Self) -> Self::Output { Self(self.0 | rhs.0) }
+ }
+ impl core::ops::BitOrAssign for $flag_type {
+ fn bitor_assign(&mut self, rhs: Self) { self.0 |= rhs.0; }
+ }
+ impl core::ops::BitAnd for $flag_type {
+ type Output = Self;
+ fn bitand(self, rhs: Self) -> Self::Output { Self(self.0 & rhs.0) }
+ }
+ impl core::ops::BitAndAssign for $flag_type {
+ fn bitand_assign(&mut self, rhs: Self) { self.0 &= rhs.0; }
+ }
+ };
+ ($flag_type_doc: expr, $flag_type: ident, $flags: tt) => {
+ define_state_flags!($flag_type_doc, $flag_type, $flags, 0);
+ };
+ ($flag_type_doc: expr, FUNDED_STATE, $flag_type: ident, $flags: tt) => {
+ define_state_flags!($flag_type_doc, $flag_type, $flags, FundedStateFlags::ALL.0);
+ impl core::ops::BitOr<FundedStateFlags> for $flag_type {
+ type Output = Self;
+ fn bitor(self, rhs: FundedStateFlags) -> Self::Output { Self(self.0 | rhs.0) }
+ }
+ impl core::ops::BitOrAssign<FundedStateFlags> for $flag_type {
+ fn bitor_assign(&mut self, rhs: FundedStateFlags) { self.0 |= rhs.0; }
+ }
+ impl core::ops::BitAnd<FundedStateFlags> for $flag_type {
+ type Output = Self;
+ fn bitand(self, rhs: FundedStateFlags) -> Self::Output { Self(self.0 & rhs.0) }
+ }
+ impl core::ops::BitAndAssign<FundedStateFlags> for $flag_type {
+ fn bitand_assign(&mut self, rhs: FundedStateFlags) { self.0 &= rhs.0; }
+ }
+ impl PartialEq<FundedStateFlags> for $flag_type {
+ fn eq(&self, other: &FundedStateFlags) -> bool { self.0 == other.0 }
+ }
+ impl From<FundedStateFlags> for $flag_type {
+ fn from(flags: FundedStateFlags) -> Self { Self(flags.0) }
+ }
+ };
+}
+
+/// We declare all the states/flags here together to help determine which bits are still available
+/// to choose.
+mod state_flags {
+ pub const OUR_INIT_SENT: u32 = 1 << 0;
+ pub const THEIR_INIT_SENT: u32 = 1 << 1;
+ pub const FUNDING_NEGOTIATED: u32 = 1 << 2;
+ pub const AWAITING_CHANNEL_READY: u32 = 1 << 3;
+ pub const THEIR_CHANNEL_READY: u32 = 1 << 4;
+ pub const OUR_CHANNEL_READY: u32 = 1 << 5;
+ pub const CHANNEL_READY: u32 = 1 << 6;
+ pub const PEER_DISCONNECTED: u32 = 1 << 7;
+ pub const MONITOR_UPDATE_IN_PROGRESS: u32 = 1 << 8;
+ pub const AWAITING_REMOTE_REVOKE: u32 = 1 << 9;
+ pub const REMOTE_SHUTDOWN_SENT: u32 = 1 << 10;
+ pub const LOCAL_SHUTDOWN_SENT: u32 = 1 << 11;
+ pub const SHUTDOWN_COMPLETE: u32 = 1 << 12;
+ pub const WAITING_FOR_BATCH: u32 = 1 << 13;
+}
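+
+// Note that these bit values deliberately line up with the discriminants of the pre-refactor
+// `ChannelState` enum (e.g. `FUNDING_NEGOTIATED` == 4 == the old `FundingCreated`,
+// `AWAITING_CHANNEL_READY` == 8 == the old `FundingSent`, `CHANNEL_READY` == 64, and
+// `SHUTDOWN_COMPLETE` == 4096), keeping the serialized `u32` representation of a channel's
+// state compatible with prior versions.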
+
+define_state_flags!(
+ "Flags that apply to all [`ChannelState`] variants in which the channel is funded.",
+ FundedStateFlags, [
+ ("Indicates the remote side is considered \"disconnected\" and no updates are allowed \
+ until after we've done a `channel_reestablish` dance.", PEER_DISCONNECTED, state_flags::PEER_DISCONNECTED),
+ ("Indicates the user has told us a `ChannelMonitor` update is pending async persistence \
+ somewhere and we should pause sending any outbound messages until they've managed to \
+ complete it.", MONITOR_UPDATE_IN_PROGRESS, state_flags::MONITOR_UPDATE_IN_PROGRESS),
+ ("Indicates we received a `shutdown` message from the remote end. If set, they may not add \
+ any new HTLCs to the channel, and we are expected to respond with our own `shutdown` \
+ message when possible.", REMOTE_SHUTDOWN_SENT, state_flags::REMOTE_SHUTDOWN_SENT),
+ ("Indicates we sent a `shutdown` message. At this point, we may not add any new HTLCs to \
+ the channel.", LOCAL_SHUTDOWN_SENT, state_flags::LOCAL_SHUTDOWN_SENT)
+ ]
+);
+
+define_state_flags!(
+ "Flags that only apply to [`ChannelState::NegotiatingFunding`].",
+ NegotiatingFundingFlags, [
+ ("Indicates we have (or are prepared to) send our `open_channel`/`accept_channel` message.",
+ OUR_INIT_SENT, state_flags::OUR_INIT_SENT),
+ ("Indicates we have received their `open_channel`/`accept_channel` message.",
+ THEIR_INIT_SENT, state_flags::THEIR_INIT_SENT)
+ ]
+);
+
+define_state_flags!(
+ "Flags that only apply to [`ChannelState::AwaitingChannelReady`].",
+ FUNDED_STATE, AwaitingChannelReadyFlags, [
+ ("Indicates they sent us a `channel_ready` message. Once both `THEIR_CHANNEL_READY` and \
+ `OUR_CHANNEL_READY` are set, our state moves on to `ChannelReady`.",
+ THEIR_CHANNEL_READY, state_flags::THEIR_CHANNEL_READY),
+ ("Indicates we sent them a `channel_ready` message. Once both `THEIR_CHANNEL_READY` and \
+ `OUR_CHANNEL_READY` are set, our state moves on to `ChannelReady`.",
+ OUR_CHANNEL_READY, state_flags::OUR_CHANNEL_READY),
+ ("Indicates the channel was funded in a batch and the broadcast of the funding transaction \
+ is being held until all channels in the batch have received `funding_signed` and have \
+ their monitors persisted.", WAITING_FOR_BATCH, state_flags::WAITING_FOR_BATCH)
+ ]
+);
+
+define_state_flags!(
+ "Flags that only apply to [`ChannelState::ChannelReady`].",
+ FUNDED_STATE, ChannelReadyFlags, [
+ ("Indicates that we have sent a `commitment_signed` but are awaiting the responding \
+ `revoke_and_ack` message. During this period, we can't generate new `commitment_signed` \
+ messages as we'd be unable to determine which HTLCs they included in their `revoke_and_ack` \
+ implicit ACK, so instead we have to hold them away temporarily to be sent later.",
+ AWAITING_REMOTE_REVOKE, state_flags::AWAITING_REMOTE_REVOKE)
+ ]
+);
+
+#[derive(Copy, Clone, Debug, PartialEq, PartialOrd, Eq)]
enum ChannelState {
- /// Implies we have (or are prepared to) send our open_channel/accept_channel message
- OurInitSent = 1 << 0,
- /// Implies we have received their open_channel/accept_channel message
- TheirInitSent = 1 << 1,
- /// We have sent funding_created and are awaiting a funding_signed to advance to FundingSent.
- /// Note that this is nonsense for an inbound channel as we immediately generate funding_signed
- /// upon receipt of funding_created, so simply skip this state.
- FundingCreated = 4,
- /// Set when we have received/sent funding_created and funding_signed and are thus now waiting
- /// on the funding transaction to confirm. The ChannelReady flags are set to indicate when we
- /// and our counterparty consider the funding transaction confirmed.
- FundingSent = 8,
- /// Flag which can be set on FundingSent to indicate they sent us a channel_ready message.
- /// Once both TheirChannelReady and OurChannelReady are set, state moves on to ChannelReady.
- TheirChannelReady = 1 << 4,
- /// Flag which can be set on FundingSent to indicate we sent them a channel_ready message.
- /// Once both TheirChannelReady and OurChannelReady are set, state moves on to ChannelReady.
- OurChannelReady = 1 << 5,
- ChannelReady = 64,
- /// Flag which is set on ChannelReady and FundingSent indicating remote side is considered
- /// "disconnected" and no updates are allowed until after we've done a channel_reestablish
- /// dance.
- PeerDisconnected = 1 << 7,
- /// Flag which is set on ChannelReady, FundingCreated, and FundingSent indicating the user has
- /// told us a ChannelMonitor update is pending async persistence somewhere and we should pause
- /// sending any outbound messages until they've managed to finish.
- MonitorUpdateInProgress = 1 << 8,
- /// Flag which implies that we have sent a commitment_signed but are awaiting the responding
- /// revoke_and_ack message. During this time period, we can't generate new commitment_signed
- /// messages as then we will be unable to determine which HTLCs they included in their
- /// revoke_and_ack implicit ACK, so instead we have to hold them away temporarily to be sent
- /// later.
- /// Flag is set on ChannelReady.
- AwaitingRemoteRevoke = 1 << 9,
- /// Flag which is set on ChannelReady or FundingSent after receiving a shutdown message from
- /// the remote end. If set, they may not add any new HTLCs to the channel, and we are expected
- /// to respond with our own shutdown message when possible.
- RemoteShutdownSent = 1 << 10,
- /// Flag which is set on ChannelReady or FundingSent after sending a shutdown message. At this
- /// point, we may not add any new HTLCs to the channel.
- LocalShutdownSent = 1 << 11,
- /// We've successfully negotiated a closing_signed dance. At this point ChannelManager is about
- /// to drop us, but we store this anyway.
- ShutdownComplete = 4096,
+ /// We are negotiating the parameters required for the channel prior to funding it.
+ NegotiatingFunding(NegotiatingFundingFlags),
+ /// We have sent `funding_created` and are awaiting a `funding_signed` to advance to
+ /// `AwaitingChannelReady`. Note that this is nonsense for an inbound channel as we immediately generate
+ /// `funding_signed` upon receipt of `funding_created`, so simply skip this state.
+ FundingNegotiated,
+ /// We've received/sent `funding_created` and `funding_signed` and are thus now waiting on the
+ /// funding transaction to confirm.
+ AwaitingChannelReady(AwaitingChannelReadyFlags),
+ /// Both we and our counterparty consider the funding transaction confirmed and the channel is
+ /// now operational.
+ ChannelReady(ChannelReadyFlags),
+ /// We've successfully negotiated a `closing_signed` dance. At this point, the `ChannelManager`
+ /// is about to drop us, but we store this anyway.
+ ShutdownComplete,
+}
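+
+// A channel thus normally progresses `NegotiatingFunding` -> `FundingNegotiated` ->
+// `AwaitingChannelReady` -> `ChannelReady`, terminating in `ShutdownComplete`; as noted
+// above, inbound channels skip `FundingNegotiated` entirely.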
+
+macro_rules! impl_state_flag {
+ ($get: ident, $set: ident, $clear: ident, $state_flag: expr, [$($state: ident),+]) => {
+ #[allow(unused)]
+ fn $get(&self) -> bool {
+ match self {
+ $(
+ ChannelState::$state(flags) => flags.is_set($state_flag.into()),
+ )*
+ _ => false,
+ }
+ }
+ #[allow(unused)]
+ fn $set(&mut self) {
+ match self {
+ $(
+ ChannelState::$state(flags) => *flags |= $state_flag,
+ )*
+ _ => debug_assert!(false, "Attempted to set flag on unexpected ChannelState"),
+ }
+ }
+ #[allow(unused)]
+ fn $clear(&mut self) {
+ match self {
+ $(
+ ChannelState::$state(flags) => *flags &= !($state_flag),
+ )*
+ _ => debug_assert!(false, "Attempted to clear flag on unexpected ChannelState"),
+ }
+ }
+ };
+ ($get: ident, $set: ident, $clear: ident, $state_flag: expr, FUNDED_STATES) => {
+ impl_state_flag!($get, $set, $clear, $state_flag, [AwaitingChannelReady, ChannelReady]);
+ };
+ ($get: ident, $set: ident, $clear: ident, $state_flag: expr, $state: ident) => {
+ impl_state_flag!($get, $set, $clear, $state_flag, [$state]);
+ };
+}
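+
+// For example, the `FUNDED_STATES` arm used below expands
+// `impl_state_flag!(is_peer_disconnected, set_peer_disconnected, clear_peer_disconnected,
+// FundedStateFlags::PEER_DISCONNECTED, FUNDED_STATES)` into a getter reporting whether the
+// flag is set on either funded variant, plus a setter and clearer that `debug_assert!`
+// when invoked on any other state.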
+
+impl ChannelState {
+ fn from_u32(state: u32) -> Result<Self, ()> {
+ match state {
+ state_flags::FUNDING_NEGOTIATED => Ok(ChannelState::FundingNegotiated),
+ state_flags::SHUTDOWN_COMPLETE => Ok(ChannelState::ShutdownComplete),
+ val => {
+ if val & state_flags::AWAITING_CHANNEL_READY == state_flags::AWAITING_CHANNEL_READY {
+ AwaitingChannelReadyFlags::from_u32(val & !state_flags::AWAITING_CHANNEL_READY)
+ .map(|flags| ChannelState::AwaitingChannelReady(flags))
+ } else if val & state_flags::CHANNEL_READY == state_flags::CHANNEL_READY {
+ ChannelReadyFlags::from_u32(val & !state_flags::CHANNEL_READY)
+ .map(|flags| ChannelState::ChannelReady(flags))
+ } else if let Ok(flags) = NegotiatingFundingFlags::from_u32(val) {
+ Ok(ChannelState::NegotiatingFunding(flags))
+ } else {
+ Err(())
+ }
+ },
+ }
+ }
+
+ fn to_u32(&self) -> u32 {
+ match self {
+ ChannelState::NegotiatingFunding(flags) => flags.0,
+ ChannelState::FundingNegotiated => state_flags::FUNDING_NEGOTIATED,
+ ChannelState::AwaitingChannelReady(flags) => state_flags::AWAITING_CHANNEL_READY | flags.0,
+ ChannelState::ChannelReady(flags) => state_flags::CHANNEL_READY | flags.0,
+ ChannelState::ShutdownComplete => state_flags::SHUTDOWN_COMPLETE,
+ }
+ }
+
+ fn is_pre_funded_state(&self) -> bool {
+ matches!(self, ChannelState::NegotiatingFunding(_)|ChannelState::FundingNegotiated)
+ }
+
+ fn is_both_sides_shutdown(&self) -> bool {
+ self.is_local_shutdown_sent() && self.is_remote_shutdown_sent()
+ }
+
+ fn with_funded_state_flags_mask(&self) -> FundedStateFlags {
+ match self {
+ ChannelState::AwaitingChannelReady(flags) => FundedStateFlags((*flags & FundedStateFlags::ALL).0),
+ ChannelState::ChannelReady(flags) => FundedStateFlags((*flags & FundedStateFlags::ALL).0),
+ _ => FundedStateFlags::new(),
+ }
+ }
+
+ fn should_force_holding_cell(&self) -> bool {
+ match self {
+ ChannelState::ChannelReady(flags) =>
+ flags.is_set(ChannelReadyFlags::AWAITING_REMOTE_REVOKE) ||
+ flags.is_set(FundedStateFlags::MONITOR_UPDATE_IN_PROGRESS.into()) ||
+ flags.is_set(FundedStateFlags::PEER_DISCONNECTED.into()),
+ _ => {
+ debug_assert!(false, "The holding cell is only valid within ChannelReady");
+ false
+ },
+ }
+ }
+
+ impl_state_flag!(is_peer_disconnected, set_peer_disconnected, clear_peer_disconnected,
+ FundedStateFlags::PEER_DISCONNECTED, FUNDED_STATES);
+ impl_state_flag!(is_monitor_update_in_progress, set_monitor_update_in_progress, clear_monitor_update_in_progress,
+ FundedStateFlags::MONITOR_UPDATE_IN_PROGRESS, FUNDED_STATES);
+ impl_state_flag!(is_local_shutdown_sent, set_local_shutdown_sent, clear_local_shutdown_sent,
+ FundedStateFlags::LOCAL_SHUTDOWN_SENT, FUNDED_STATES);
+ impl_state_flag!(is_remote_shutdown_sent, set_remote_shutdown_sent, clear_remote_shutdown_sent,
+ FundedStateFlags::REMOTE_SHUTDOWN_SENT, FUNDED_STATES);
+ impl_state_flag!(is_our_channel_ready, set_our_channel_ready, clear_our_channel_ready,
+ AwaitingChannelReadyFlags::OUR_CHANNEL_READY, AwaitingChannelReady);
+ impl_state_flag!(is_their_channel_ready, set_their_channel_ready, clear_their_channel_ready,
+ AwaitingChannelReadyFlags::THEIR_CHANNEL_READY, AwaitingChannelReady);
+ impl_state_flag!(is_waiting_for_batch, set_waiting_for_batch, clear_waiting_for_batch,
+ AwaitingChannelReadyFlags::WAITING_FOR_BATCH, AwaitingChannelReady);
+ impl_state_flag!(is_awaiting_remote_revoke, set_awaiting_remote_revoke, clear_awaiting_remote_revoke,
+ ChannelReadyFlags::AWAITING_REMOTE_REVOKE, ChannelReady);
}
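+
+// As a minimal round-trip sketch: a channel that has sent but not yet received
+// `channel_ready` is `ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::OUR_CHANNEL_READY)`,
+// so `to_u32()` yields (1 << 3) | (1 << 5) == 40, and `ChannelState::from_u32(40)` recovers
+// the same variant.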
-const BOTH_SIDES_SHUTDOWN_MASK: u32 = ChannelState::LocalShutdownSent as u32 | ChannelState::RemoteShutdownSent as u32;
-const MULTI_STATE_FLAGS: u32 = BOTH_SIDES_SHUTDOWN_MASK | ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32;
pub const INITIAL_COMMITMENT_NUMBER: u64 = (1 << 48) - 1;
+pub const DEFAULT_MAX_HTLCS: u16 = 50;
+
+pub(crate) fn commitment_tx_base_weight(channel_type_features: &ChannelTypeFeatures) -> u64 {
+ const COMMITMENT_TX_BASE_WEIGHT: u64 = 724;
+ const COMMITMENT_TX_BASE_ANCHOR_WEIGHT: u64 = 1124;
+ if channel_type_features.supports_anchors_zero_fee_htlc_tx() { COMMITMENT_TX_BASE_ANCHOR_WEIGHT } else { COMMITMENT_TX_BASE_WEIGHT }
+}
+
+#[cfg(not(test))]
+const COMMITMENT_TX_WEIGHT_PER_HTLC: u64 = 172;
+#[cfg(test)]
+pub const COMMITMENT_TX_WEIGHT_PER_HTLC: u64 = 172;
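+
+// A non-anchor commitment transaction's fee is thus
+// `feerate_per_kw * (724 + num_nondust_htlcs * 172) / 1000`; e.g. at 1_000 sat/kW with two
+// non-dust HTLCs it costs 1_000 * (724 + 2 * 172) / 1_000 = 1_068 sats.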
+
+pub const ANCHOR_OUTPUT_VALUE_SATOSHI: u64 = 330;
+
+/// The percentage of the channel value `holder_max_htlc_value_in_flight_msat` used to be set to,
+/// before this was made configurable. The percentage was made configurable in LDK 0.0.107,
+/// although LDK 0.0.104+ enabled serialization of channels with a different value set for
+/// `holder_max_htlc_value_in_flight_msat`.
+pub const MAX_IN_FLIGHT_PERCENT_LEGACY: u8 = 10;
+
+/// Maximum `funding_satoshis` value according to the BOLT #2 specification, if
+/// `option_support_large_channel` (aka wumbo channels) is not supported.
+/// It's 2^24 - 1.
+pub const MAX_FUNDING_SATOSHIS_NO_WUMBO: u64 = (1 << 24) - 1;
+
+/// Total bitcoin supply in satoshis.
+pub const TOTAL_BITCOIN_SUPPLY_SATOSHIS: u64 = 21_000_000 * 1_0000_0000;
+
+/// The maximum network dust limit for standard script formats. This currently represents the
+/// minimum output value for a P2SH output before Bitcoin Core 22 considers the entire
+/// transaction non-standard and thus refuses to relay it.
+/// We also use this as the maximum counterparty `dust_limit_satoshis` allowed, given many
+/// implementations use this value for their dust limit today.
+pub const MAX_STD_OUTPUT_DUST_LIMIT_SATOSHIS: u64 = 546;
+
+/// The maximum channel dust limit we will accept from our counterparty.
+pub const MAX_CHAN_DUST_LIMIT_SATOSHIS: u64 = MAX_STD_OUTPUT_DUST_LIMIT_SATOSHIS;
+
+/// The dust limit is used for both the commitment transaction outputs as well as the closing
+/// transactions. For cooperative closing transactions, we require segwit outputs, though accept
+/// *any* segwit scripts, which are allowed to be up to 42 bytes in length.
+/// In order to avoid having to concern ourselves with standardness during the closing process, we
+/// simply require our counterparty to use a dust limit which will leave any segwit output
+/// standard.
+/// See <https://github.com/lightning/bolts/issues/905> for more details.
+pub const MIN_CHAN_DUST_LIMIT_SATOSHIS: u64 = 354;
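+
+// The value 354 follows from applying Bitcoin Core's dust rule to the largest allowed segwit
+// script: an output carrying a 42-byte script serializes to 51 bytes, plus the assumed 67
+// vbytes to spend a segwit output, times the 3 sat/vbyte dust relay feerate = 354 sats.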
+
+// Just a reasonable implementation-specific safe lower bound, higher than the dust limit.
+pub const MIN_THEIR_CHAN_RESERVE_SATOSHIS: u64 = 1000;
+
+/// Used to return a simple Error back to ChannelManager. Will get converted to a
+/// msgs::ErrorAction::SendErrorMessage or msgs::ErrorAction::IgnoreError as appropriate with our
+/// channel_id in ChannelManager.
+pub(super) enum ChannelError {
+ Ignore(String),
+ Warn(String),
+ Close(String),
+}
+
+impl fmt::Debug for ChannelError {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ match self {
+ &ChannelError::Ignore(ref e) => write!(f, "Ignore : {}", e),
+ &ChannelError::Warn(ref e) => write!(f, "Warn : {}", e),
+ &ChannelError::Close(ref e) => write!(f, "Close : {}", e),
+ }
+ }
+}
+
+impl fmt::Display for ChannelError {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ match self {
+ &ChannelError::Ignore(ref e) => write!(f, "{}", e),
+ &ChannelError::Warn(ref e) => write!(f, "{}", e),
+ &ChannelError::Close(ref e) => write!(f, "{}", e),
+ }
+ }
+}
+
+pub(super) struct WithChannelContext<'a, L: Deref> where L::Target: Logger {
+ pub logger: &'a L,
+ pub peer_id: Option<PublicKey>,
+ pub channel_id: Option<ChannelId>,
+}
+
+impl<'a, L: Deref> Logger for WithChannelContext<'a, L> where L::Target: Logger {
+ fn log(&self, mut record: Record) {
+ record.peer_id = self.peer_id;
+ record.channel_id = self.channel_id;
+ self.logger.log(record)
+ }
+}
+
+impl<'a, 'b, L: Deref> WithChannelContext<'a, L>
+where L::Target: Logger {
+ pub(super) fn from<S: Deref>(logger: &'a L, context: &'b ChannelContext<S>) -> Self
+ where S::Target: SignerProvider
+ {
+ WithChannelContext {
+ logger,
+ peer_id: Some(context.counterparty_node_id),
+ channel_id: Some(context.channel_id),
+ }
+ }
+}
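+
+// A hypothetical call site: `let logger = WithChannelContext::from(&self.logger, &chan.context);`
+// lets the usual `log_trace!`-style macros attach the peer and channel IDs to every record.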
+
+macro_rules! secp_check {
+ ($res: expr, $err: expr) => {
+ match $res {
+ Ok(thing) => thing,
+ Err(_) => return Err(ChannelError::Close($err)),
+ }
+ };
+}
+
/// The "channel disabled" bit in channel_update must be set based on whether we are connected to
/// our counterparty or not. However, we don't want to announce updates right away to avoid
/// spamming the network with updates if the connection is flapping. Instead, we "stage" updates to
htlcs_included: Vec<(HTLCOutputInCommitment, Option<&'a HTLCSource>)>, // the list of HTLCs (dust HTLCs *included*) which were not ignored when building the transaction
local_balance_msat: u64, // local balance before fees but considering dust limits
remote_balance_msat: u64, // remote balance before fees but considering dust limits
- preimages: Vec<PaymentPreimage>, // preimages for successful offered HTLCs since last commitment
+ outbound_htlc_preimages: Vec<PaymentPreimage>, // preimages for successful offered HTLCs since last commitment
+ inbound_htlc_preimages: Vec<PaymentPreimage>, // preimages for successful received HTLCs since last commitment
}
/// Used when calculating whether we or the remote can afford an additional HTLC.
}
/// The return type of get_update_fulfill_htlc_and_commit.
-pub enum UpdateFulfillCommitFetch<'a> {
+pub enum UpdateFulfillCommitFetch {
/// Indicates the HTLC fulfill is new, and either generated an update_fulfill message, placed
/// it in the holding cell, or re-generated the update_fulfill message after the same claim was
/// previously placed in the holding cell (and has since been removed).
NewClaim {
/// The ChannelMonitorUpdate which places the new payment preimage in the channel monitor
- monitor_update: &'a ChannelMonitorUpdate,
+ monitor_update: ChannelMonitorUpdate,
/// The value of the HTLC which was claimed, in msat.
htlc_value_msat: u64,
},
pub announcement_sigs: Option<msgs::AnnouncementSignatures>,
}
+/// The return value of `signer_maybe_unblocked`
+#[allow(unused)]
+pub(super) struct SignerResumeUpdates {
+ pub commitment_update: Option<msgs::CommitmentUpdate>,
+ pub funding_signed: Option<msgs::FundingSigned>,
+ pub channel_ready: Option<msgs::ChannelReady>,
+}
+
/// The return value of `channel_reestablish`
pub(super) struct ReestablishResponses {
pub channel_ready: Option<msgs::ChannelReady>,
pub shutdown_msg: Option<msgs::Shutdown>,
}
-/// The return type of `force_shutdown`
-pub(crate) type ShutdownResult = (
- Option<(PublicKey, OutPoint, ChannelMonitorUpdate)>,
- Vec<(HTLCSource, PaymentHash, PublicKey, [u8; 32])>
-);
+/// The result of a shutdown that should be handled.
+#[must_use]
+pub(crate) struct ShutdownResult {
+ /// A channel monitor update to apply.
+ pub(crate) monitor_update: Option<(PublicKey, OutPoint, ChannelMonitorUpdate)>,
+ /// A list of dropped outbound HTLCs that can safely be failed backwards immediately.
+ pub(crate) dropped_outbound_htlcs: Vec<(HTLCSource, PaymentHash, PublicKey, ChannelId)>,
+ /// An unbroadcasted batch funding transaction id. The closure of this channel should be
+ /// propagated to the remainder of the batch.
+ pub(crate) unbroadcasted_batch_funding_txid: Option<Txid>,
+ pub(crate) channel_id: ChannelId,
+ pub(crate) counterparty_node_id: PublicKey,
+}
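+
+// The `#[must_use]` above ensures callers handle the monitor update and the dropped HTLCs
+// rather than silently discarding them.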
/// If the majority of the channel's funds are to the fundee and the initiator holds only just
/// enough funds to cover their reserve value, channels are at risk of getting "stuck". Because the
/// See [`ChannelContext::sent_message_awaiting_response`] for more information.
pub(crate) const DISCONNECT_PEER_AWAITING_RESPONSE_TICKS: usize = 2;
+/// The number of ticks, counted from the unfunded channel's creation, that may elapse before an
+/// unfunded outbound/inbound channel must be promoted to a [`Channel`]. An unfunded channel
+/// exceeding this age limit will be force-closed and purged from memory.
+pub(crate) const UNFUNDED_CHANNEL_AGE_LIMIT_TICKS: usize = 60;
+
+/// Number of blocks needed for an output from a coinbase transaction to be spendable.
+pub(crate) const COINBASE_MATURITY: u32 = 100;
+
struct PendingChannelMonitorUpdate {
update: ChannelMonitorUpdate,
- /// In some cases we need to delay letting the [`ChannelMonitorUpdate`] go until after an
- /// `Event` is processed by the user. This bool indicates the [`ChannelMonitorUpdate`] is
- /// blocked on some external event and the [`ChannelManager`] will update us when we're ready.
- ///
- /// [`ChannelManager`]: super::channelmanager::ChannelManager
- blocked: bool,
}
impl_writeable_tlv_based!(PendingChannelMonitorUpdate, {
(0, update, required),
- (2, blocked, required),
});
+/// The `ChannelPhase` enum describes the current phase in life of a lightning channel with each of
+/// its variants containing an appropriate channel struct.
+pub(super) enum ChannelPhase<SP: Deref> where SP::Target: SignerProvider {
+ UnfundedOutboundV1(OutboundV1Channel<SP>),
+ UnfundedInboundV1(InboundV1Channel<SP>),
+ Funded(Channel<SP>),
+}
+
+impl<'a, SP: Deref> ChannelPhase<SP> where
+ SP::Target: SignerProvider,
+ <SP::Target as SignerProvider>::EcdsaSigner: ChannelSigner,
+{
+ pub fn context(&'a self) -> &'a ChannelContext<SP> {
+ match self {
+ ChannelPhase::Funded(chan) => &chan.context,
+ ChannelPhase::UnfundedOutboundV1(chan) => &chan.context,
+ ChannelPhase::UnfundedInboundV1(chan) => &chan.context,
+ }
+ }
+
+ pub fn context_mut(&'a mut self) -> &'a mut ChannelContext<SP> {
+ match self {
+ ChannelPhase::Funded(ref mut chan) => &mut chan.context,
+ ChannelPhase::UnfundedOutboundV1(ref mut chan) => &mut chan.context,
+ ChannelPhase::UnfundedInboundV1(ref mut chan) => &mut chan.context,
+ }
+ }
+}
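+
+// Callers can thus obtain the shared `ChannelContext` uniformly, e.g.
+// `phase.context().channel_id()`, regardless of how far the channel has progressed toward
+// being funded.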
+
+/// Contains all state common to unfunded inbound/outbound channels.
+pub(super) struct UnfundedChannelContext {
+ /// A counter tracking how many ticks have elapsed since this unfunded channel was
+	/// created. If the peer has yet to respond once this counter reaches
+	/// `UNFUNDED_CHANNEL_AGE_LIMIT_TICKS`, the channel will be force-closed and purged from memory.
+ ///
+ /// This is so that we don't keep channels around that haven't progressed to a funded state
+ /// in a timely manner.
+ unfunded_channel_age_ticks: usize,
+}
+
+impl UnfundedChannelContext {
+ /// Determines whether we should force-close and purge this unfunded channel from memory due to it
+ /// having reached the unfunded channel age limit.
+ ///
+ /// This should be called on every [`super::channelmanager::ChannelManager::timer_tick_occurred`].
+ pub fn should_expire_unfunded_channel(&mut self) -> bool {
+ self.unfunded_channel_age_ticks += 1;
+ self.unfunded_channel_age_ticks >= UNFUNDED_CHANNEL_AGE_LIMIT_TICKS
+ }
+}
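+
+// Note that `should_expire_unfunded_channel` advances the age counter as a side effect, so
+// it must be called exactly once per timer tick.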
+
/// Contains everything about the channel including state, and various flags.
-pub(super) struct ChannelContext<Signer: ChannelSigner> {
+pub(super) struct ChannelContext<SP: Deref> where SP::Target: SignerProvider {
config: LegacyChannelConfig,
// Track the previous `ChannelConfig` so that we can continue forwarding HTLCs that were
user_id: u128,
- channel_id: [u8; 32],
- temporary_channel_id: Option<[u8; 32]>, // Will be `None` for channels created prior to 0.0.115.
- channel_state: u32,
+ /// The current channel ID.
+ channel_id: ChannelId,
+	/// The temporary channel ID used during channel setup, retained even after transitioning to the final channel ID.
+ /// Will be `None` for channels created prior to 0.0.115.
+ temporary_channel_id: Option<ChannelId>,
+ channel_state: ChannelState,
// When we reach max(6 blocks, minimum_depth), we need to send an AnnouncementSigs message to
// our peer. However, we want to make sure they received it, or else rebroadcast it when we
latest_monitor_update_id: u64,
- holder_signer: Signer,
+ holder_signer: ChannelSignerType<SP>,
shutdown_scriptpubkey: Option<ShutdownScript>,
- destination_script: Script,
+ destination_script: ScriptBuf,
// Our commitment numbers start at 2^48-1 and count down, whereas the ones used in transaction
// generation start at 0 and count up...this simplifies some parts of implementation at the
cur_holder_commitment_transaction_number: u64,
cur_counterparty_commitment_transaction_number: u64,
- value_to_self_msat: u64, // Excluding all pending_htlcs, excluding fees
+ value_to_self_msat: u64, // Excluding all pending_htlcs, fees, and anchor outputs
pending_inbound_htlcs: Vec<InboundHTLCOutput>,
pending_outbound_htlcs: Vec<OutboundHTLCOutput>,
holding_cell_htlc_updates: Vec<HTLCUpdateAwaitingACK>,
monitor_pending_failures: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>,
monitor_pending_finalized_fulfills: Vec<HTLCSource>,
+ /// If we went to send a commitment update (ie some messages then [`msgs::CommitmentSigned`])
+ /// but our signer (initially) refused to give us a signature, we should retry at some point in
+ /// the future when the signer indicates it may have a signature for us.
+ ///
+ /// This flag is set in such a case. Note that we don't need to persist this as we'll end up
+ /// setting it again as a side-effect of [`Channel::channel_reestablish`].
+ signer_pending_commitment_update: bool,
+ /// Similar to [`Self::signer_pending_commitment_update`] but we're waiting to send either a
+ /// [`msgs::FundingCreated`] or [`msgs::FundingSigned`] depending on if this channel is
+ /// outbound or inbound.
+ signer_pending_funding: bool,
+
// pending_update_fee is filled when sending and receiving update_fee.
//
// Because it follows the same commitment flow as HTLCs, `FeeUpdateState` is either `Outbound`
#[cfg(not(test))]
closing_fee_limits: Option<(u64, u64)>,
- /// Flag that ensures that `accept_inbound_channel` must be called before `funding_created`
- /// is executed successfully. The reason for this flag is that when the
- /// `UserConfig::manually_accept_inbound_channels` config flag is set to true, inbound channels
- /// are required to be manually accepted by the node operator before the `msgs::AcceptChannel`
- /// message is created and sent out. During the manual accept process, `accept_inbound_channel`
- /// is called by `ChannelManager::accept_inbound_channel`.
+ /// If we remove an HTLC (or fee update), commit, and receive our counterparty's
+ /// `revoke_and_ack`, we remove all knowledge of said HTLC (or fee update). However, the latest
+ /// local commitment transaction that we can broadcast still contains the HTLC (or old fee)
+ /// until we receive a further `commitment_signed`. Thus we are not eligible for initiating the
+ /// `closing_signed` negotiation if we're expecting a counterparty `commitment_signed`.
+ ///
+ /// To ensure we don't send a `closing_signed` too early, we track this state here, waiting
+ /// until we see a `commitment_signed` before doing so.
///
- /// The flag counteracts that a counterparty node could theoretically send a
- /// `msgs::FundingCreated` message before the node operator has manually accepted an inbound
- /// channel request made by the counterparty node. That would execute `funding_created` before
- /// `accept_inbound_channel`, and `funding_created` should therefore not execute successfully.
- inbound_awaiting_accept: bool,
+ /// We don't bother to persist this - we anticipate this state won't last longer than a few
+ /// milliseconds, so any accidental force-closes here should be exceedingly rare.
+ expecting_peer_commitment_signed: bool,
/// The hash of the block in which the funding transaction was included.
funding_tx_confirmed_in: Option<BlockHash>,
pub(crate) channel_transaction_parameters: ChannelTransactionParameters,
funding_transaction: Option<Transaction>,
+ is_batch_funding: Option<()>,
counterparty_cur_commitment_point: Option<PublicKey>,
counterparty_prev_commitment_point: Option<PublicKey>,
counterparty_node_id: PublicKey,
- counterparty_shutdown_scriptpubkey: Option<Script>,
+ counterparty_shutdown_scriptpubkey: Option<ScriptBuf>,
commitment_secrets: CounterpartyCommitmentSecrets,
/// [`SignerProvider::derive_channel_signer`].
channel_keys_id: [u8; 32],
- /// When we generate [`ChannelMonitorUpdate`]s to persist, they may not be persisted immediately.
- /// If we then persist the [`channelmanager::ChannelManager`] and crash before the persistence
- /// completes we still need to be able to complete the persistence. Thus, we have to keep a
- /// copy of the [`ChannelMonitorUpdate`] here until it is complete.
- pending_monitor_updates: Vec<PendingChannelMonitorUpdate>,
+ /// If we can't release a [`ChannelMonitorUpdate`] until some external action completes, we
+ /// store it here and only release it to the `ChannelManager` once it asks for it.
+ blocked_monitor_updates: Vec<PendingChannelMonitorUpdate>,
}
-impl<Signer: ChannelSigner> ChannelContext<Signer> {
- pub(crate) fn opt_anchors(&self) -> bool {
- self.channel_transaction_parameters.opt_anchors.is_some()
- }
-
+impl<SP: Deref> ChannelContext<SP> where SP::Target: SignerProvider {
/// Allowed in any state (including after shutdown)
pub fn get_update_time_counter(&self) -> u32 {
self.update_time_counter
/// Returns true if we've ever received a message from the remote end for this Channel
pub fn have_received_message(&self) -> bool {
- self.channel_state > (ChannelState::OurInitSent as u32)
+ self.channel_state > ChannelState::NegotiatingFunding(NegotiatingFundingFlags::OUR_INIT_SENT)
}
/// Returns true if this channel is fully established and not known to be closing.
/// Allowed in any state (including after shutdown)
pub fn is_usable(&self) -> bool {
- let mask = ChannelState::ChannelReady as u32 | BOTH_SIDES_SHUTDOWN_MASK;
- (self.channel_state & mask) == (ChannelState::ChannelReady as u32) && !self.monitor_pending_channel_ready
+ matches!(self.channel_state, ChannelState::ChannelReady(_)) &&
+ !self.channel_state.is_local_shutdown_sent() &&
+ !self.channel_state.is_remote_shutdown_sent() &&
+ !self.monitor_pending_channel_ready
+ }
+
+	/// Returns the current stage of the channel's shutdown process, as a [`ChannelShutdownState`].
+ pub fn shutdown_state(&self) -> ChannelShutdownState {
+ match self.channel_state {
+ ChannelState::AwaitingChannelReady(_)|ChannelState::ChannelReady(_) =>
+ if self.channel_state.is_local_shutdown_sent() && !self.channel_state.is_remote_shutdown_sent() {
+ ChannelShutdownState::ShutdownInitiated
+ } else if (self.channel_state.is_local_shutdown_sent() || self.channel_state.is_remote_shutdown_sent()) && !self.closing_negotiation_ready() {
+ ChannelShutdownState::ResolvingHTLCs
+ } else if (self.channel_state.is_local_shutdown_sent() || self.channel_state.is_remote_shutdown_sent()) && self.closing_negotiation_ready() {
+ ChannelShutdownState::NegotiatingClosingFee
+ } else {
+ ChannelShutdownState::NotShuttingDown
+ },
+ ChannelState::ShutdownComplete => ChannelShutdownState::ShutdownComplete,
+ _ => ChannelShutdownState::NotShuttingDown,
+ }
+ }
+
+ fn closing_negotiation_ready(&self) -> bool {
+ let is_ready_to_close = match self.channel_state {
+ ChannelState::AwaitingChannelReady(flags) =>
+ flags & FundedStateFlags::ALL == FundedStateFlags::LOCAL_SHUTDOWN_SENT | FundedStateFlags::REMOTE_SHUTDOWN_SENT,
+ ChannelState::ChannelReady(flags) =>
+ flags == FundedStateFlags::LOCAL_SHUTDOWN_SENT | FundedStateFlags::REMOTE_SHUTDOWN_SENT,
+ _ => false,
+ };
+ self.pending_inbound_htlcs.is_empty() &&
+ self.pending_outbound_htlcs.is_empty() &&
+ self.pending_update_fee.is_none() &&
+ is_ready_to_close
}
/// Returns true if this channel is currently available for use. This is a superset of
/// is_usable() and considers things like the channel being temporarily disabled.
/// Allowed in any state (including after shutdown)
pub fn is_live(&self) -> bool {
- self.is_usable() && (self.channel_state & (ChannelState::PeerDisconnected as u32) == 0)
+ self.is_usable() && !self.channel_state.is_peer_disconnected()
}
// Public utilities:
- pub fn channel_id(&self) -> [u8; 32] {
+ pub fn channel_id(&self) -> ChannelId {
self.channel_id
}
// Return the `temporary_channel_id` used during channel establishment.
//
// Will return `None` for channels created prior to LDK version 0.0.115.
- pub fn temporary_channel_id(&self) -> Option<[u8; 32]> {
+ pub fn temporary_channel_id(&self) -> Option<ChannelId> {
self.temporary_channel_id
}
&self.channel_type
}
- /// Guaranteed to be Some after both ChannelReady messages have been exchanged (and, thus,
- /// is_usable() returns true).
- /// Allowed in any state (including after shutdown)
+ /// Gets the channel's `short_channel_id`.
+ ///
+ /// Will return `None` if the channel hasn't been confirmed yet.
pub fn get_short_channel_id(&self) -> Option<u64> {
self.short_channel_id
}
self.outbound_scid_alias
}
+ /// Returns the holder signer for this channel.
+ #[cfg(test)]
+ pub fn get_signer(&self) -> &ChannelSignerType<SP> {
+ return &self.holder_signer
+ }
+
/// Only allowed immediately after deserialization if get_outbound_scid_alias returns 0,
- /// indicating we were written by LDK prior to 0.0.106 which did not set outbound SCID aliases.
+	/// indicating we were written by LDK prior to 0.0.106, which did not set outbound SCID
+	/// aliases, or prior to any channel actions during `Channel` initialization.
pub fn set_outbound_scid_alias(&mut self, outbound_scid_alias: u64) {
- assert_eq!(self.outbound_scid_alias, 0);
+ debug_assert_eq!(self.outbound_scid_alias, 0);
self.outbound_scid_alias = outbound_scid_alias;
}
/// Returns the funding_txo we either got from our peer, or were given by
- /// get_outbound_funding_created.
+ /// get_funding_created.
pub fn get_funding_txo(&self) -> Option<OutPoint> {
self.channel_transaction_parameters.funding_outpoint
}
+	/// Returns the height at which our funding transaction was confirmed.
+ pub fn get_funding_tx_confirmation_height(&self) -> Option<u32> {
+ let conf_height = self.funding_tx_confirmation_height;
+ if conf_height > 0 {
+ Some(conf_height)
+ } else {
+ None
+ }
+ }
+
/// Returns the block hash in which our funding transaction was confirmed.
pub fn get_funding_tx_confirmed_in(&self) -> Option<BlockHash> {
self.funding_tx_confirmed_in
cmp::max(self.config.options.cltv_expiry_delta, MIN_CLTV_EXPIRY_DELTA)
}
- pub fn get_max_dust_htlc_exposure_msat(&self) -> u64 {
- self.config.options.max_dust_htlc_exposure_msat
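+	/// Returns the maximum configured dust exposure, resolving a feerate multiplier against the
+	/// current `OnChainSweep` feerate estimate. For example, a hypothetical
+	/// `MaxDustHTLCExposure::FeeRateMultiplier(10_000)` with an `OnChainSweep` estimate of
+	/// 2_500 sat/kW caps our dust exposure at 2_500 * 10_000 = 25_000_000 msat.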
+ pub fn get_max_dust_htlc_exposure_msat<F: Deref>(&self,
+ fee_estimator: &LowerBoundedFeeEstimator<F>) -> u64
+ where F::Target: FeeEstimator
+ {
+ match self.config.options.max_dust_htlc_exposure {
+ MaxDustHTLCExposure::FeeRateMultiplier(multiplier) => {
+ let feerate_per_kw = fee_estimator.bounded_sat_per_1000_weight(
+ ConfirmationTarget::OnChainSweep) as u64;
+ feerate_per_kw.saturating_mul(multiplier)
+ },
+ MaxDustHTLCExposure::FixedLimitMsat(limit) => limit,
+ }
}
/// Returns the previous [`ChannelConfig`] applied to this channel, if any.
// Checks whether we should emit a `ChannelPending` event.
pub(crate) fn should_emit_channel_pending_event(&mut self) -> bool {
- self.is_funding_initiated() && !self.channel_pending_event_emitted
+ self.is_funding_broadcast() && !self.channel_pending_event_emitted
}
// Returns whether we already emitted a `ChannelPending` event.
did_channel_update
}
- /// Returns true if funding_created was sent/received.
- pub fn is_funding_initiated(&self) -> bool {
- self.channel_state >= ChannelState::FundingSent as u32
+ /// Returns true if funding_signed was sent/received and the
+ /// funding transaction has been broadcast if necessary.
+ pub fn is_funding_broadcast(&self) -> bool {
+ !self.channel_state.is_pre_funded_state() &&
+ !matches!(self.channel_state, ChannelState::AwaitingChannelReady(flags) if flags.is_set(AwaitingChannelReadyFlags::WAITING_FOR_BATCH))
}
/// Transaction nomenclature is somewhat confusing here as there are many different cases - a
log_trace!(logger, "Building commitment transaction number {} (really {} xor {}) for channel {} for {}, generated by {} with fee {}...",
commitment_number, (INITIAL_COMMITMENT_NUMBER - commitment_number),
get_commitment_transaction_number_obscure_factor(&self.get_holder_pubkeys().payment_point, &self.get_counterparty_pubkeys().payment_point, self.is_outbound()),
- log_bytes!(self.channel_id), if local { "us" } else { "remote" }, if generated_by_local { "us" } else { "remote" }, feerate_per_kw);
+ &self.channel_id,
+ if local { "us" } else { "remote" }, if generated_by_local { "us" } else { "remote" }, feerate_per_kw);
macro_rules! get_htlc_in_commitment {
($htlc: expr, $offered: expr) => {
($htlc: expr, $outbound: expr, $source: expr, $state_name: expr) => {
if $outbound == local { // "offered HTLC output"
let htlc_in_tx = get_htlc_in_commitment!($htlc, true);
- let htlc_tx_fee = if self.opt_anchors() {
+ let htlc_tx_fee = if self.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
0
} else {
- feerate_per_kw as u64 * htlc_timeout_tx_weight(false) / 1000
+ feerate_per_kw as u64 * htlc_timeout_tx_weight(self.get_channel_type()) / 1000
};
if $htlc.amount_msat / 1000 >= broadcaster_dust_limit_satoshis + htlc_tx_fee {
- log_trace!(logger, " ...including {} {} HTLC {} (hash {}) with value {}", if $outbound { "outbound" } else { "inbound" }, $state_name, $htlc.htlc_id, log_bytes!($htlc.payment_hash.0), $htlc.amount_msat);
+ log_trace!(logger, " ...including {} {} HTLC {} (hash {}) with value {}", if $outbound { "outbound" } else { "inbound" }, $state_name, $htlc.htlc_id, &$htlc.payment_hash, $htlc.amount_msat);
included_non_dust_htlcs.push((htlc_in_tx, $source));
} else {
- log_trace!(logger, " ...including {} {} dust HTLC {} (hash {}) with value {} due to dust limit", if $outbound { "outbound" } else { "inbound" }, $state_name, $htlc.htlc_id, log_bytes!($htlc.payment_hash.0), $htlc.amount_msat);
+ log_trace!(logger, " ...including {} {} dust HTLC {} (hash {}) with value {} due to dust limit", if $outbound { "outbound" } else { "inbound" }, $state_name, $htlc.htlc_id, &$htlc.payment_hash, $htlc.amount_msat);
included_dust_htlcs.push((htlc_in_tx, $source));
}
} else {
let htlc_in_tx = get_htlc_in_commitment!($htlc, false);
- let htlc_tx_fee = if self.opt_anchors() {
+ let htlc_tx_fee = if self.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
0
} else {
- feerate_per_kw as u64 * htlc_success_tx_weight(false) / 1000
+ feerate_per_kw as u64 * htlc_success_tx_weight(self.get_channel_type()) / 1000
};
if $htlc.amount_msat / 1000 >= broadcaster_dust_limit_satoshis + htlc_tx_fee {
- log_trace!(logger, " ...including {} {} HTLC {} (hash {}) with value {}", if $outbound { "outbound" } else { "inbound" }, $state_name, $htlc.htlc_id, log_bytes!($htlc.payment_hash.0), $htlc.amount_msat);
+ log_trace!(logger, " ...including {} {} HTLC {} (hash {}) with value {}", if $outbound { "outbound" } else { "inbound" }, $state_name, $htlc.htlc_id, &$htlc.payment_hash, $htlc.amount_msat);
included_non_dust_htlcs.push((htlc_in_tx, $source));
} else {
- log_trace!(logger, " ...including {} {} dust HTLC {} (hash {}) with value {}", if $outbound { "outbound" } else { "inbound" }, $state_name, $htlc.htlc_id, log_bytes!($htlc.payment_hash.0), $htlc.amount_msat);
+ log_trace!(logger, " ...including {} {} dust HTLC {} (hash {}) with value {}", if $outbound { "outbound" } else { "inbound" }, $state_name, $htlc.htlc_id, &$htlc.payment_hash, $htlc.amount_msat);
included_dust_htlcs.push((htlc_in_tx, $source));
}
}
}
}
+ let mut inbound_htlc_preimages: Vec<PaymentPreimage> = Vec::new();
+
for ref htlc in self.pending_inbound_htlcs.iter() {
let (include, state_name) = match htlc.state {
InboundHTLCState::RemoteAnnounced(_) => (!generated_by_local, "RemoteAnnounced"),
add_htlc_output!(htlc, false, None, state_name);
remote_htlc_total_msat += htlc.amount_msat;
} else {
- log_trace!(logger, " ...not including inbound HTLC {} (hash {}) with value {} due to state ({})", htlc.htlc_id, log_bytes!(htlc.payment_hash.0), htlc.amount_msat, state_name);
+ log_trace!(logger, " ...not including inbound HTLC {} (hash {}) with value {} due to state ({})", htlc.htlc_id, &htlc.payment_hash, htlc.amount_msat, state_name);
match &htlc.state {
&InboundHTLCState::LocalRemoved(ref reason) => {
if generated_by_local {
- if let &InboundHTLCRemovalReason::Fulfill(_) = reason {
+ if let &InboundHTLCRemovalReason::Fulfill(preimage) = reason {
+ inbound_htlc_preimages.push(preimage);
value_to_self_msat_offset += htlc.amount_msat as i64;
}
}
}
}
- let mut preimages: Vec<PaymentPreimage> = Vec::new();
+
+ let mut outbound_htlc_preimages: Vec<PaymentPreimage> = Vec::new();
for ref htlc in self.pending_outbound_htlcs.iter() {
let (include, state_name) = match htlc.state {
};
if let Some(preimage) = preimage_opt {
- preimages.push(preimage);
+ outbound_htlc_preimages.push(preimage);
}
if include {
add_htlc_output!(htlc, true, Some(&htlc.source), state_name);
local_htlc_total_msat += htlc.amount_msat;
} else {
- log_trace!(logger, " ...not including outbound HTLC {} (hash {}) with value {} due to state ({})", htlc.htlc_id, log_bytes!(htlc.payment_hash.0), htlc.amount_msat, state_name);
+ log_trace!(logger, " ...not including outbound HTLC {} (hash {}) with value {} due to state ({})", htlc.htlc_id, &htlc.payment_hash, htlc.amount_msat, state_name);
match htlc.state {
OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(_))|OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(_)) => {
value_to_self_msat_offset -= htlc.amount_msat as i64;
broadcaster_max_commitment_tx_output.1 = cmp::max(broadcaster_max_commitment_tx_output.1, value_to_remote_msat as u64);
}
- let total_fee_sat = commit_tx_fee_sat(feerate_per_kw, included_non_dust_htlcs.len(), self.channel_transaction_parameters.opt_anchors.is_some());
- let anchors_val = if self.channel_transaction_parameters.opt_anchors.is_some() { ANCHOR_OUTPUT_VALUE_SATOSHI * 2 } else { 0 } as i64;
+ let total_fee_sat = commit_tx_fee_sat(feerate_per_kw, included_non_dust_htlcs.len(), &self.channel_transaction_parameters.channel_type_features);
+ let anchors_val = if self.channel_transaction_parameters.channel_type_features.supports_anchors_zero_fee_htlc_tx() { ANCHOR_OUTPUT_VALUE_SATOSHI * 2 } else { 0 } as i64;
let (value_to_self, value_to_remote) = if self.is_outbound() {
(value_to_self_msat / 1000 - anchors_val - total_fee_sat as i64, value_to_remote_msat / 1000)
} else {
let tx = CommitmentTransaction::new_with_auxiliary_htlc_data(commitment_number,
value_to_a as u64,
value_to_b as u64,
- self.channel_transaction_parameters.opt_anchors.is_some(),
funding_pubkey_a,
funding_pubkey_b,
keys.clone(),
htlcs_included,
local_balance_msat: value_to_self_msat as u64,
remote_balance_msat: value_to_remote_msat as u64,
- preimages
+ inbound_htlc_preimages,
+ outbound_htlc_preimages,
}
}
/// The result is a transaction which we can revoke broadcastership of (ie a "local" transaction)
/// TODO Some magic rust shit to compile-time check this?
fn build_holder_transaction_keys(&self, commitment_number: u64) -> TxCreationKeys {
- let per_commitment_point = self.holder_signer.get_per_commitment_point(commitment_number, &self.secp_ctx);
+ let per_commitment_point = self.holder_signer.as_ref().get_per_commitment_point(commitment_number, &self.secp_ctx);
let delayed_payment_base = &self.get_holder_pubkeys().delayed_payment_basepoint;
let htlc_basepoint = &self.get_holder_pubkeys().htlc_basepoint;
let counterparty_pubkeys = self.get_counterparty_pubkeys();
#[inline]
/// Creates a set of keys for build_commitment_transaction to generate a transaction which we
/// will sign and send to our counterparty.
- /// If an Err is returned, it is a ChannelError::Close (for get_outbound_funding_created)
+ /// If an Err is returned, it is a ChannelError::Close (for get_funding_created)
fn build_remote_transaction_keys(&self) -> TxCreationKeys {
//TODO: Ensure that the payment_key derived here ends up in the library users' wallet as we
//may see payments to it!
/// Gets the redeemscript for the funding transaction output (ie the funding transaction output
/// pays to get_funding_redeemscript().to_v0_p2wsh()).
- /// Panics if called before accept_channel/new_from_req
- pub fn get_funding_redeemscript(&self) -> Script {
+ /// Panics if called before accept_channel/InboundV1Channel::new
+ pub fn get_funding_redeemscript(&self) -> ScriptBuf {
make_funding_redeemscript(&self.get_holder_pubkeys().funding_pubkey, self.counterparty_funding_pubkey())
}
pub fn counterparty_forwarding_info(&self) -> Option<CounterpartyForwardingInfo> {
self.counterparty_forwarding_info.clone()
}
-}
-
-// Internal utility functions for channels
-/// Returns the value to use for `holder_max_htlc_value_in_flight_msat` as a percentage of the
-/// `channel_value_satoshis` in msat, set through
-/// [`ChannelHandshakeConfig::max_inbound_htlc_value_in_flight_percent_of_channel`]
-///
-/// The effective percentage is lower bounded by 1% and upper bounded by 100%.
-///
-/// [`ChannelHandshakeConfig::max_inbound_htlc_value_in_flight_percent_of_channel`]: crate::util::config::ChannelHandshakeConfig::max_inbound_htlc_value_in_flight_percent_of_channel
-fn get_holder_max_htlc_value_in_flight_msat(channel_value_satoshis: u64, config: &ChannelHandshakeConfig) -> u64 {
- let configured_percent = if config.max_inbound_htlc_value_in_flight_percent_of_channel < 1 {
- 1
- } else if config.max_inbound_htlc_value_in_flight_percent_of_channel > 100 {
- 100
- } else {
- config.max_inbound_htlc_value_in_flight_percent_of_channel as u64
- };
- channel_value_satoshis * 10 * configured_percent
-}
+	/// Returns an `HTLCStats` describing the pending inbound HTLCs.
+ fn get_inbound_pending_htlc_stats(&self, outbound_feerate_update: Option<u32>) -> HTLCStats {
+ let context = self;
+ let mut stats = HTLCStats {
+ pending_htlcs: context.pending_inbound_htlcs.len() as u32,
+ pending_htlcs_value_msat: 0,
+ on_counterparty_tx_dust_exposure_msat: 0,
+ on_holder_tx_dust_exposure_msat: 0,
+ holding_cell_msat: 0,
+ on_holder_tx_holding_cell_htlcs_count: 0,
+ };
-/// Returns a minimum channel reserve value the remote needs to maintain,
-/// required by us according to the configured or default
-/// [`ChannelHandshakeConfig::their_channel_reserve_proportional_millionths`]
-///
-/// Guaranteed to return a value no larger than channel_value_satoshis
-///
-/// This is used both for outbound and inbound channels and has lower bound
-/// of `MIN_THEIR_CHAN_RESERVE_SATOSHIS`.
-pub(crate) fn get_holder_selected_channel_reserve_satoshis(channel_value_satoshis: u64, config: &UserConfig) -> u64 {
- let calculated_reserve = channel_value_satoshis.saturating_mul(config.channel_handshake_config.their_channel_reserve_proportional_millionths as u64) / 1_000_000;
- cmp::min(channel_value_satoshis, cmp::max(calculated_reserve, MIN_THEIR_CHAN_RESERVE_SATOSHIS))
-}
-
-/// This is for legacy reasons, present for forward-compatibility.
-/// LDK versions older than 0.0.104 don't know how read/handle values other than default
-/// from storage. Hence, we use this function to not persist default values of
-/// `holder_selected_channel_reserve_satoshis` for channels into storage.
-pub(crate) fn get_legacy_default_holder_selected_channel_reserve_satoshis(channel_value_satoshis: u64) -> u64 {
- let (q, _) = channel_value_satoshis.overflowing_div(100);
- cmp::min(channel_value_satoshis, cmp::max(q, 1000))
-}
-
-// Get the fee cost in SATS of a commitment tx with a given number of HTLC outputs.
-// Note that num_htlcs should not include dust HTLCs.
-#[inline]
-fn commit_tx_fee_sat(feerate_per_kw: u32, num_htlcs: usize, opt_anchors: bool) -> u64 {
- feerate_per_kw as u64 * (commitment_tx_base_weight(opt_anchors) + num_htlcs as u64 * COMMITMENT_TX_WEIGHT_PER_HTLC) / 1000
-}
+ let (htlc_timeout_dust_limit, htlc_success_dust_limit) = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
+ (0, 0)
+ } else {
+ let dust_buffer_feerate = context.get_dust_buffer_feerate(outbound_feerate_update) as u64;
+ (dust_buffer_feerate * htlc_timeout_tx_weight(context.get_channel_type()) / 1000,
+ dust_buffer_feerate * htlc_success_tx_weight(context.get_channel_type()) / 1000)
+ };
+ let counterparty_dust_limit_timeout_sat = htlc_timeout_dust_limit + context.counterparty_dust_limit_satoshis;
+ let holder_dust_limit_success_sat = htlc_success_dust_limit + context.holder_dust_limit_satoshis;
+ for ref htlc in context.pending_inbound_htlcs.iter() {
+ stats.pending_htlcs_value_msat += htlc.amount_msat;
+ if htlc.amount_msat / 1000 < counterparty_dust_limit_timeout_sat {
+ stats.on_counterparty_tx_dust_exposure_msat += htlc.amount_msat;
+ }
+ if htlc.amount_msat / 1000 < holder_dust_limit_success_sat {
+ stats.on_holder_tx_dust_exposure_msat += htlc.amount_msat;
+ }
+ }
+ stats
+ }
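+
+ // Illustrative arithmetic (assumed numbers, not from this changeset): with a
+ // dust buffer feerate of 2530 sat/kw on a non-anchor channel, whose BOLT #3
+ // HTLC-timeout weight is 663 WU, the timeout addend is
+ // 2530 * 663 / 1000 = 1677 sat, so an inbound HTLC below
+ // `counterparty_dust_limit_satoshis + 1677` sat counts towards
+ // `on_counterparty_tx_dust_exposure_msat`.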
-// TODO: We should refactor this to be an Inbound/OutboundChannel until initial setup handshaking
-// has been completed, and then turn into a Channel to get compiler-time enforcement of things like
-// calling channel_id() before we're set up or things like get_outbound_funding_signed on an
-// inbound channel.
-//
-// Holder designates channel data owned for the benefit of the user client.
-// Counterparty designates channel data owned by the another channel participant entity.
-pub(super) struct Channel<Signer: ChannelSigner> {
- pub context: ChannelContext<Signer>,
-}
+ /// Returns an `HTLCStats` describing the pending outbound HTLCs, *including* pending adds in our holding cell.
+ fn get_outbound_pending_htlc_stats(&self, outbound_feerate_update: Option<u32>) -> HTLCStats {
+ let context = self;
+ let mut stats = HTLCStats {
+ pending_htlcs: context.pending_outbound_htlcs.len() as u32,
+ pending_htlcs_value_msat: 0,
+ on_counterparty_tx_dust_exposure_msat: 0,
+ on_holder_tx_dust_exposure_msat: 0,
+ holding_cell_msat: 0,
+ on_holder_tx_holding_cell_htlcs_count: 0,
+ };
-#[cfg(any(test, fuzzing))]
-struct CommitmentTxInfoCached {
- fee: u64,
- total_pending_htlcs: usize,
- next_holder_htlc_id: u64,
- next_counterparty_htlc_id: u64,
- feerate: u32,
-}
+ let (htlc_timeout_dust_limit, htlc_success_dust_limit) = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
+ (0, 0)
+ } else {
+ let dust_buffer_feerate = context.get_dust_buffer_feerate(outbound_feerate_update) as u64;
+ (dust_buffer_feerate * htlc_timeout_tx_weight(context.get_channel_type()) / 1000,
+ dust_buffer_feerate * htlc_success_tx_weight(context.get_channel_type()) / 1000)
+ };
+ let counterparty_dust_limit_success_sat = htlc_success_dust_limit + context.counterparty_dust_limit_satoshis;
+ let holder_dust_limit_timeout_sat = htlc_timeout_dust_limit + context.holder_dust_limit_satoshis;
+ for ref htlc in context.pending_outbound_htlcs.iter() {
+ stats.pending_htlcs_value_msat += htlc.amount_msat;
+ if htlc.amount_msat / 1000 < counterparty_dust_limit_success_sat {
+ stats.on_counterparty_tx_dust_exposure_msat += htlc.amount_msat;
+ }
+ if htlc.amount_msat / 1000 < holder_dust_limit_timeout_sat {
+ stats.on_holder_tx_dust_exposure_msat += htlc.amount_msat;
+ }
+ }
-pub const DEFAULT_MAX_HTLCS: u16 = 50;
+ for update in context.holding_cell_htlc_updates.iter() {
+ if let &HTLCUpdateAwaitingACK::AddHTLC { ref amount_msat, .. } = update {
+ stats.pending_htlcs += 1;
+ stats.pending_htlcs_value_msat += amount_msat;
+ stats.holding_cell_msat += amount_msat;
+ if *amount_msat / 1000 < counterparty_dust_limit_success_sat {
+ stats.on_counterparty_tx_dust_exposure_msat += amount_msat;
+ }
+ if *amount_msat / 1000 < holder_dust_limit_timeout_sat {
+ stats.on_holder_tx_dust_exposure_msat += amount_msat;
+ } else {
+ stats.on_holder_tx_holding_cell_htlcs_count += 1;
+ }
+ }
+ }
+ stats
+ }
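+
+ // Note that only holding-cell adds at or above the holder's dust threshold are
+ // counted in `on_holder_tx_holding_cell_htlcs_count`; dust-level adds instead
+ // accumulate into the dust-exposure totals above.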
-pub(crate) fn commitment_tx_base_weight(opt_anchors: bool) -> u64 {
- const COMMITMENT_TX_BASE_WEIGHT: u64 = 724;
- const COMMITMENT_TX_BASE_ANCHOR_WEIGHT: u64 = 1124;
- if opt_anchors { COMMITMENT_TX_BASE_ANCHOR_WEIGHT } else { COMMITMENT_TX_BASE_WEIGHT }
-}
+ /// Get the available balances, see [`AvailableBalances`]'s fields for more info.
+ /// Doesn't bother handling the
+ /// if-we-removed-it-already-but-haven't-fully-resolved-they-can-still-send-an-inbound-HTLC
+ /// corner case properly.
+ pub fn get_available_balances<F: Deref>(&self, fee_estimator: &LowerBoundedFeeEstimator<F>)
+ -> AvailableBalances
+ where F::Target: FeeEstimator
+ {
+ let context = &self;
+ // Note that we have to handle overflow due to the above case.
+ let inbound_stats = context.get_inbound_pending_htlc_stats(None);
+ let outbound_stats = context.get_outbound_pending_htlc_stats(None);
-#[cfg(not(test))]
-const COMMITMENT_TX_WEIGHT_PER_HTLC: u64 = 172;
-#[cfg(test)]
-pub const COMMITMENT_TX_WEIGHT_PER_HTLC: u64 = 172;
+ let mut balance_msat = context.value_to_self_msat;
+ for ref htlc in context.pending_inbound_htlcs.iter() {
+ if let InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::Fulfill(_)) = htlc.state {
+ balance_msat += htlc.amount_msat;
+ }
+ }
+ balance_msat -= outbound_stats.pending_htlcs_value_msat;
-pub const ANCHOR_OUTPUT_VALUE_SATOSHI: u64 = 330;
+ let outbound_capacity_msat = context.value_to_self_msat
+ .saturating_sub(outbound_stats.pending_htlcs_value_msat)
+ .saturating_sub(
+ context.counterparty_selected_channel_reserve_satoshis.unwrap_or(0) * 1000);
-/// The percentage of the channel value `holder_max_htlc_value_in_flight_msat` used to be set to,
-/// before this was made configurable. The percentage was made configurable in LDK 0.0.107,
-/// although LDK 0.0.104+ enabled serialization of channels with a different value set for
-/// `holder_max_htlc_value_in_flight_msat`.
-pub const MAX_IN_FLIGHT_PERCENT_LEGACY: u8 = 10;
+ let mut available_capacity_msat = outbound_capacity_msat;
-/// Maximum `funding_satoshis` value according to the BOLT #2 specification, if
-/// `option_support_large_channel` (aka wumbo channels) is not supported.
-/// It's 2^24 - 1.
-pub const MAX_FUNDING_SATOSHIS_NO_WUMBO: u64 = (1 << 24) - 1;
+ let anchor_outputs_value_msat = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
+ ANCHOR_OUTPUT_VALUE_SATOSHI * 2 * 1000
+ } else {
+ 0
+ };
+ if context.is_outbound() {
+ // We should mind channel commit tx fee when computing how much of the available capacity
+ // can be used in the next htlc. Mirrors the logic in send_htlc.
+ //
+ // The fee depends on whether the amount we will be sending is above dust or not,
+ // and the answer will in turn change the amount itself — making it a circular
+ // dependency.
+ // This complicates the computation around dust-values, up to the one-htlc-value.
+ let mut real_dust_limit_timeout_sat = context.holder_dust_limit_satoshis;
+ if !context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
+ real_dust_limit_timeout_sat += context.feerate_per_kw as u64 * htlc_timeout_tx_weight(context.get_channel_type()) / 1000;
+ }
-/// Total bitcoin supply in satoshis.
-pub const TOTAL_BITCOIN_SUPPLY_SATOSHIS: u64 = 21_000_000 * 1_0000_0000;
+ let htlc_above_dust = HTLCCandidate::new(real_dust_limit_timeout_sat * 1000, HTLCInitiator::LocalOffered);
+ let mut max_reserved_commit_tx_fee_msat = context.next_local_commit_tx_fee_msat(htlc_above_dust, Some(()));
+ let htlc_dust = HTLCCandidate::new(real_dust_limit_timeout_sat * 1000 - 1, HTLCInitiator::LocalOffered);
+ let mut min_reserved_commit_tx_fee_msat = context.next_local_commit_tx_fee_msat(htlc_dust, Some(()));
+ if !context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
+ max_reserved_commit_tx_fee_msat *= FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE;
+ min_reserved_commit_tx_fee_msat *= FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE;
+ }
-/// The maximum network dust limit for standard script formats. This currently represents the
-/// minimum output value for a P2SH output before Bitcoin Core 22 considers the entire
-/// transaction non-standard and thus refuses to relay it.
-/// We also use this as the maximum counterparty `dust_limit_satoshis` allowed, given many
-/// implementations use this value for their dust limit today.
-pub const MAX_STD_OUTPUT_DUST_LIMIT_SATOSHIS: u64 = 546;
+ // We will first subtract the fee as if we were above-dust. Then, if the resulting
+ // value ends up being below dust, we have this fee available again. In that case,
+ // match the value to right-below-dust.
+ let mut capacity_minus_commitment_fee_msat: i64 = available_capacity_msat as i64 -
+ max_reserved_commit_tx_fee_msat as i64 - anchor_outputs_value_msat as i64;
+ if capacity_minus_commitment_fee_msat < (real_dust_limit_timeout_sat as i64) * 1000 {
+ let one_htlc_difference_msat = max_reserved_commit_tx_fee_msat - min_reserved_commit_tx_fee_msat;
+ debug_assert!(one_htlc_difference_msat != 0);
+ capacity_minus_commitment_fee_msat += one_htlc_difference_msat as i64;
+ capacity_minus_commitment_fee_msat = cmp::min(real_dust_limit_timeout_sat as i64 * 1000 - 1, capacity_minus_commitment_fee_msat);
+ available_capacity_msat = cmp::max(0, cmp::min(capacity_minus_commitment_fee_msat, available_capacity_msat as i64)) as u64;
+ } else {
+ available_capacity_msat = capacity_minus_commitment_fee_msat as u64;
+ }
+ } else {
+ // If the channel is inbound (i.e. counterparty pays the fee), we need to make sure
+ // sending a new HTLC won't reduce their balance below our reserve threshold.
+ let mut real_dust_limit_success_sat = context.counterparty_dust_limit_satoshis;
+ if !context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
+ real_dust_limit_success_sat += context.feerate_per_kw as u64 * htlc_success_tx_weight(context.get_channel_type()) / 1000;
+ }
-/// The maximum channel dust limit we will accept from our counterparty.
-pub const MAX_CHAN_DUST_LIMIT_SATOSHIS: u64 = MAX_STD_OUTPUT_DUST_LIMIT_SATOSHIS;
+ let htlc_above_dust = HTLCCandidate::new(real_dust_limit_success_sat * 1000, HTLCInitiator::LocalOffered);
+ let max_reserved_commit_tx_fee_msat = context.next_remote_commit_tx_fee_msat(htlc_above_dust, None);
-/// The dust limit is used for both the commitment transaction outputs as well as the closing
-/// transactions. For cooperative closing transactions, we require segwit outputs, though accept
-/// *any* segwit scripts, which are allowed to be up to 42 bytes in length.
-/// In order to avoid having to concern ourselves with standardness during the closing process, we
-/// simply require our counterparty to use a dust limit which will leave any segwit output
-/// standard.
-/// See <https://github.com/lightning/bolts/issues/905> for more details.
-pub const MIN_CHAN_DUST_LIMIT_SATOSHIS: u64 = 354;
+ let holder_selected_chan_reserve_msat = context.holder_selected_channel_reserve_satoshis * 1000;
+ let remote_balance_msat = (context.channel_value_satoshis * 1000 - context.value_to_self_msat)
+ .saturating_sub(inbound_stats.pending_htlcs_value_msat);
-// Just a reasonable implementation-specific safe lower bound, higher than the dust limit.
-pub const MIN_THEIR_CHAN_RESERVE_SATOSHIS: u64 = 1000;
+ if remote_balance_msat < max_reserved_commit_tx_fee_msat + holder_selected_chan_reserve_msat + anchor_outputs_value_msat {
+ // If another HTLC's fee would reduce the remote's balance below the reserve limit
+ // we've selected for them, we can only send dust HTLCs.
+ available_capacity_msat = cmp::min(available_capacity_msat, real_dust_limit_success_sat * 1000 - 1);
+ }
+ }
-/// Used to return a simple Error back to ChannelManager. Will get converted to a
-/// msgs::ErrorAction::SendErrorMessage or msgs::ErrorAction::IgnoreError as appropriate with our
-/// channel_id in ChannelManager.
-pub(super) enum ChannelError {
- Ignore(String),
- Warn(String),
- Close(String),
-}
+ let mut next_outbound_htlc_minimum_msat = context.counterparty_htlc_minimum_msat;
-impl fmt::Debug for ChannelError {
- fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
- match self {
- &ChannelError::Ignore(ref e) => write!(f, "Ignore : {}", e),
- &ChannelError::Warn(ref e) => write!(f, "Warn : {}", e),
- &ChannelError::Close(ref e) => write!(f, "Close : {}", e),
- }
- }
-}
+ // If we get close to our maximum dust exposure, we end up in a situation where we can send
+ // between zero and the remaining dust exposure limit, OR above the dust limit.
+ // Because we cannot express this as a simple min/max, we prefer to tell the user they can
+ // send above the dust limit (as the router can always overpay to meet the dust limit).
+ let mut remaining_msat_below_dust_exposure_limit = None;
+ let mut dust_exposure_dust_limit_msat = 0;
+ let max_dust_htlc_exposure_msat = context.get_max_dust_htlc_exposure_msat(fee_estimator);
-macro_rules! secp_check {
- ($res: expr, $err: expr) => {
- match $res {
- Ok(thing) => thing,
- Err(_) => return Err(ChannelError::Close($err)),
+ let (htlc_success_dust_limit, htlc_timeout_dust_limit) = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
+ (context.counterparty_dust_limit_satoshis, context.holder_dust_limit_satoshis)
+ } else {
+ let dust_buffer_feerate = context.get_dust_buffer_feerate(None) as u64;
+ (context.counterparty_dust_limit_satoshis + dust_buffer_feerate * htlc_success_tx_weight(context.get_channel_type()) / 1000,
+ context.holder_dust_limit_satoshis + dust_buffer_feerate * htlc_timeout_tx_weight(context.get_channel_type()) / 1000)
+ };
+ let on_counterparty_dust_htlc_exposure_msat = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat;
+ if on_counterparty_dust_htlc_exposure_msat as i64 + htlc_success_dust_limit as i64 * 1000 - 1 > max_dust_htlc_exposure_msat.try_into().unwrap_or(i64::max_value()) {
+ remaining_msat_below_dust_exposure_limit =
+ Some(max_dust_htlc_exposure_msat.saturating_sub(on_counterparty_dust_htlc_exposure_msat));
+ dust_exposure_dust_limit_msat = cmp::max(dust_exposure_dust_limit_msat, htlc_success_dust_limit * 1000);
}
- };
-}
-impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
- fn get_initial_channel_type(config: &UserConfig, their_features: &InitFeatures) -> ChannelTypeFeatures {
- // The default channel type (ie the first one we try) depends on whether the channel is
- // public - if it is, we just go with `only_static_remotekey` as it's the only option
- // available. If it's private, we first try `scid_privacy` as it provides better privacy
- // with no other changes, and fall back to `only_static_remotekey`.
- let mut ret = ChannelTypeFeatures::only_static_remote_key();
- if !config.channel_handshake_config.announced_channel &&
- config.channel_handshake_config.negotiate_scid_privacy &&
- their_features.supports_scid_privacy() {
- ret.set_scid_privacy_required();
+ let on_holder_dust_htlc_exposure_msat = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat;
+ if on_holder_dust_htlc_exposure_msat as i64 + htlc_timeout_dust_limit as i64 * 1000 - 1 > max_dust_htlc_exposure_msat.try_into().unwrap_or(i64::max_value()) {
+ remaining_msat_below_dust_exposure_limit = Some(cmp::min(
+ remaining_msat_below_dust_exposure_limit.unwrap_or(u64::max_value()),
+ max_dust_htlc_exposure_msat.saturating_sub(on_holder_dust_htlc_exposure_msat)));
+ dust_exposure_dust_limit_msat = cmp::max(dust_exposure_dust_limit_msat, htlc_timeout_dust_limit * 1000);
}
- // Optionally, if the user would like to negotiate the `anchors_zero_fee_htlc_tx` option, we
- // set it now. If they don't understand it, we'll fall back to our default of
- // `only_static_remotekey`.
- #[cfg(anchors)]
- { // Attributes are not allowed on if expressions on our current MSRV of 1.41.
- if config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx &&
- their_features.supports_anchors_zero_fee_htlc_tx() {
- ret.set_anchors_zero_fee_htlc_tx_required();
+ if let Some(remaining_limit_msat) = remaining_msat_below_dust_exposure_limit {
+ if available_capacity_msat < dust_exposure_dust_limit_msat {
+ available_capacity_msat = cmp::min(available_capacity_msat, remaining_limit_msat);
+ } else {
+ next_outbound_htlc_minimum_msat = cmp::max(next_outbound_htlc_minimum_msat, dust_exposure_dust_limit_msat);
}
}
- ret
- }
+ available_capacity_msat = cmp::min(available_capacity_msat,
+ context.counterparty_max_htlc_value_in_flight_msat - outbound_stats.pending_htlcs_value_msat);
- /// If we receive an error message, it may only be a rejection of the channel type we tried,
- /// not of our ability to open any channel at all. Thus, on error, we should first call this
- /// and see if we get a new `OpenChannel` message, otherwise the channel is failed.
- pub(crate) fn maybe_handle_error_without_close(&mut self, chain_hash: BlockHash) -> Result<msgs::OpenChannel, ()> {
- if !self.context.is_outbound() || self.context.channel_state != ChannelState::OurInitSent as u32 { return Err(()); }
- if self.context.channel_type == ChannelTypeFeatures::only_static_remote_key() {
- // We've exhausted our options
- return Err(());
+ if outbound_stats.pending_htlcs + 1 > context.counterparty_max_accepted_htlcs as u32 {
+ available_capacity_msat = 0;
}
- // We support opening a few different types of channels. Try removing our additional
- // features one by one until we've either arrived at our default or the counterparty has
- // accepted one.
- //
- // Due to the order below, we may not negotiate `option_anchors_zero_fee_htlc_tx` if the
- // counterparty doesn't support `option_scid_privacy`. Since `get_initial_channel_type`
- // checks whether the counterparty supports every feature, this would only happen if the
- // counterparty is advertising the feature, but rejecting channels proposing the feature for
- // whatever reason.
- if self.context.channel_type.supports_anchors_zero_fee_htlc_tx() {
- self.context.channel_type.clear_anchors_zero_fee_htlc_tx();
- assert!(self.context.channel_transaction_parameters.opt_non_zero_fee_anchors.is_none());
- self.context.channel_transaction_parameters.opt_anchors = None;
- } else if self.context.channel_type.supports_scid_privacy() {
- self.context.channel_type.clear_scid_privacy();
- } else {
- self.context.channel_type = ChannelTypeFeatures::only_static_remote_key();
+
+ AvailableBalances {
+ inbound_capacity_msat: cmp::max(context.channel_value_satoshis as i64 * 1000
+ - context.value_to_self_msat as i64
+ - context.get_inbound_pending_htlc_stats(None).pending_htlcs_value_msat as i64
+ - context.holder_selected_channel_reserve_satoshis as i64 * 1000,
+ 0) as u64,
+ outbound_capacity_msat,
+ next_outbound_htlc_limit_msat: available_capacity_msat,
+ next_outbound_htlc_minimum_msat,
+ balance_msat,
}
- Ok(self.get_open_channel(chain_hash))
}
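+
+ // Illustrative caller sketch (hypothetical names, not from this changeset):
+ // send-path code should bound a candidate HTLC by `next_outbound_htlc_limit_msat`
+ // rather than `outbound_capacity_msat`, as only the former accounts for the
+ // commitment-fee reserve and dust-exposure caps computed above:
+ //
+ // let balances = context.get_available_balances(&fee_estimator);
+ // if amount_msat > balances.next_outbound_htlc_limit_msat { /* reject the HTLC */ }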
- // Constructors:
- pub fn new_outbound<ES: Deref, SP: Deref, F: Deref>(
- fee_estimator: &LowerBoundedFeeEstimator<F>, entropy_source: &ES, signer_provider: &SP, counterparty_node_id: PublicKey, their_features: &InitFeatures,
- channel_value_satoshis: u64, push_msat: u64, user_id: u128, config: &UserConfig, current_chain_height: u32,
- outbound_scid_alias: u64
- ) -> Result<Channel<Signer>, APIError>
- where ES::Target: EntropySource,
- SP::Target: SignerProvider<Signer = Signer>,
- F::Target: FeeEstimator,
- {
- let holder_selected_contest_delay = config.channel_handshake_config.our_to_self_delay;
- let channel_keys_id = signer_provider.generate_channel_keys_id(false, channel_value_satoshis, user_id);
- let holder_signer = signer_provider.derive_channel_signer(channel_value_satoshis, channel_keys_id);
- let pubkeys = holder_signer.pubkeys().clone();
+ pub fn get_holder_counterparty_selected_channel_reserve_satoshis(&self) -> (u64, Option<u64>) {
+ let context = &self;
+ (context.holder_selected_channel_reserve_satoshis, context.counterparty_selected_channel_reserve_satoshis)
+ }
- if !their_features.supports_wumbo() && channel_value_satoshis > MAX_FUNDING_SATOSHIS_NO_WUMBO {
- return Err(APIError::APIMisuseError{err: format!("funding_value must not exceed {}, it was {}", MAX_FUNDING_SATOSHIS_NO_WUMBO, channel_value_satoshis)});
+ /// Get the commitment tx fee for the holder's (i.e. our) next commitment transaction based on the
+ /// number of pending HTLCs that are on track to be in our next commitment tx.
+ ///
+ /// Optionally includes the `HTLCCandidate` given by `htlc` and an additional non-dust HTLC if
+ /// `fee_spike_buffer_htlc` is `Some`.
+ ///
+ /// The first extra HTLC is useful for determining whether we can accept a further HTLC; the
+ /// second allows for creating a buffer to ensure a further HTLC can always be accepted/added.
+ ///
+ /// Dust HTLCs are excluded.
+ fn next_local_commit_tx_fee_msat(&self, htlc: HTLCCandidate, fee_spike_buffer_htlc: Option<()>) -> u64 {
+ let context = &self;
+ assert!(context.is_outbound());
+
+ let (htlc_success_dust_limit, htlc_timeout_dust_limit) = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
+ (0, 0)
+ } else {
+ (context.feerate_per_kw as u64 * htlc_success_tx_weight(context.get_channel_type()) / 1000,
+ context.feerate_per_kw as u64 * htlc_timeout_tx_weight(context.get_channel_type()) / 1000)
+ };
+ let real_dust_limit_success_sat = htlc_success_dust_limit + context.holder_dust_limit_satoshis;
+ let real_dust_limit_timeout_sat = htlc_timeout_dust_limit + context.holder_dust_limit_satoshis;
+
+ let mut addl_htlcs = 0;
+ if fee_spike_buffer_htlc.is_some() { addl_htlcs += 1; }
+ match htlc.origin {
+ HTLCInitiator::LocalOffered => {
+ if htlc.amount_msat / 1000 >= real_dust_limit_timeout_sat {
+ addl_htlcs += 1;
+ }
+ },
+ HTLCInitiator::RemoteOffered => {
+ if htlc.amount_msat / 1000 >= real_dust_limit_success_sat {
+ addl_htlcs += 1;
+ }
+ }
}
- if channel_value_satoshis >= TOTAL_BITCOIN_SUPPLY_SATOSHIS {
- return Err(APIError::APIMisuseError{err: format!("funding_value must be smaller than the total bitcoin supply, it was {}", channel_value_satoshis)});
+
+ let mut included_htlcs = 0;
+ for ref htlc in context.pending_inbound_htlcs.iter() {
+ if htlc.amount_msat / 1000 < real_dust_limit_success_sat {
+ continue
+ }
+ // We include LocalRemoved HTLCs here because we may still need to broadcast a commitment
+ // transaction including this HTLC if it times out before they RAA.
+ included_htlcs += 1;
}
- let channel_value_msat = channel_value_satoshis * 1000;
- if push_msat > channel_value_msat {
- return Err(APIError::APIMisuseError { err: format!("Push value ({}) was larger than channel_value ({})", push_msat, channel_value_msat) });
+
+ for ref htlc in context.pending_outbound_htlcs.iter() {
+ if htlc.amount_msat / 1000 < real_dust_limit_timeout_sat {
+ continue
+ }
+ match htlc.state {
+ OutboundHTLCState::LocalAnnounced {..} => included_htlcs += 1,
+ OutboundHTLCState::Committed => included_htlcs += 1,
+ OutboundHTLCState::RemoteRemoved {..} => included_htlcs += 1,
+ // We don't include AwaitingRemoteRevokeToRemove HTLCs because our next commitment
+ // transaction won't be generated until they send us their next RAA, which will mean
+ // dropping any HTLCs in this state.
+ _ => {},
+ }
}
- if holder_selected_contest_delay < BREAKDOWN_TIMEOUT {
- return Err(APIError::APIMisuseError {err: format!("Configured with an unreasonable our_to_self_delay ({}) putting user funds at risks", holder_selected_contest_delay)});
+
+ for htlc in context.holding_cell_htlc_updates.iter() {
+ match htlc {
+ &HTLCUpdateAwaitingACK::AddHTLC { amount_msat, .. } => {
+ if amount_msat / 1000 < real_dust_limit_timeout_sat {
+ continue
+ }
+ included_htlcs += 1
+ },
+ _ => {}, // Don't include claims/fails that are awaiting ack, because once we get the
+ // ack we're guaranteed to never include them in commitment txs anymore.
+ }
}
- let holder_selected_channel_reserve_satoshis = get_holder_selected_channel_reserve_satoshis(channel_value_satoshis, config);
- if holder_selected_channel_reserve_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
- // Protocol level safety check in place, although it should never happen because
- // of `MIN_THEIR_CHAN_RESERVE_SATOSHIS`
- return Err(APIError::APIMisuseError { err: format!("Holder selected channel reserve below implemention limit dust_limit_satoshis {}", holder_selected_channel_reserve_satoshis) });
+
+ let num_htlcs = included_htlcs + addl_htlcs;
+ let res = commit_tx_fee_msat(context.feerate_per_kw, num_htlcs, &context.channel_type);
+ #[cfg(any(test, fuzzing))]
+ {
+ let mut fee = res;
+ if fee_spike_buffer_htlc.is_some() {
+ fee = commit_tx_fee_msat(context.feerate_per_kw, num_htlcs - 1, &context.channel_type);
+ }
+ let total_pending_htlcs = context.pending_inbound_htlcs.len() + context.pending_outbound_htlcs.len()
+ + context.holding_cell_htlc_updates.len();
+ let commitment_tx_info = CommitmentTxInfoCached {
+ fee,
+ total_pending_htlcs,
+ next_holder_htlc_id: match htlc.origin {
+ HTLCInitiator::LocalOffered => context.next_holder_htlc_id + 1,
+ HTLCInitiator::RemoteOffered => context.next_holder_htlc_id,
+ },
+ next_counterparty_htlc_id: match htlc.origin {
+ HTLCInitiator::LocalOffered => context.next_counterparty_htlc_id,
+ HTLCInitiator::RemoteOffered => context.next_counterparty_htlc_id + 1,
+ },
+ feerate: context.feerate_per_kw,
+ };
+ *context.next_local_commitment_tx_fee_info_cached.lock().unwrap() = Some(commitment_tx_info);
}
+ res
+ }
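+
+ // Illustrative arithmetic (assumed numbers, not from this changeset): on a
+ // non-anchor channel at 2500 sat/kw with two non-dust HTLCs already counted,
+ // a non-dust `htlc` candidate and a fee-spike buffer HTLC, `num_htlcs` is 4
+ // and the fee is (724 + 4 * 172) * 2500 / 1000 * 1000 = 3_530_000 msat.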
- let channel_type = Self::get_initial_channel_type(&config, their_features);
- debug_assert!(channel_type.is_subset(&channelmanager::provided_channel_type_features(&config)));
+ /// Get the commitment tx fee for the remote's next commitment transaction based on the number of
+ /// pending HTLCs that are on track to be in their next commitment tx.
+ ///
+ /// Optionally includes the `HTLCCandidate` given by `htlc` and an additional non-dust HTLC if
+ /// `fee_spike_buffer_htlc` is `Some`.
+ ///
+ /// The first extra HTLC is useful for determining whether we can accept a further HTLC; the
+ /// second allows for creating a buffer to ensure a further HTLC can always be accepted/added.
+ ///
+ /// Dust HTLCs are excluded.
+ fn next_remote_commit_tx_fee_msat(&self, htlc: HTLCCandidate, fee_spike_buffer_htlc: Option<()>) -> u64 {
+ let context = &self;
+ assert!(!context.is_outbound());
- let feerate = fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::Normal);
+ let (htlc_success_dust_limit, htlc_timeout_dust_limit) = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
+ (0, 0)
+ } else {
+ (context.feerate_per_kw as u64 * htlc_success_tx_weight(context.get_channel_type()) / 1000,
+ context.feerate_per_kw as u64 * htlc_timeout_tx_weight(context.get_channel_type()) / 1000)
+ };
+ let real_dust_limit_success_sat = htlc_success_dust_limit + context.counterparty_dust_limit_satoshis;
+ let real_dust_limit_timeout_sat = htlc_timeout_dust_limit + context.counterparty_dust_limit_satoshis;
- let value_to_self_msat = channel_value_satoshis * 1000 - push_msat;
- let commitment_tx_fee = Self::commit_tx_fee_msat(feerate, MIN_AFFORDABLE_HTLC_COUNT, channel_type.requires_anchors_zero_fee_htlc_tx());
- if value_to_self_msat < commitment_tx_fee {
- return Err(APIError::APIMisuseError{ err: format!("Funding amount ({}) can't even pay fee for initial commitment transaction fee of {}.", value_to_self_msat / 1000, commitment_tx_fee / 1000) });
+ let mut addl_htlcs = 0;
+ if fee_spike_buffer_htlc.is_some() { addl_htlcs += 1; }
+ match htlc.origin {
+ HTLCInitiator::LocalOffered => {
+ if htlc.amount_msat / 1000 >= real_dust_limit_success_sat {
+ addl_htlcs += 1;
+ }
+ },
+ HTLCInitiator::RemoteOffered => {
+ if htlc.amount_msat / 1000 >= real_dust_limit_timeout_sat {
+ addl_htlcs += 1;
+ }
+ }
}
- let mut secp_ctx = Secp256k1::new();
- secp_ctx.seeded_randomize(&entropy_source.get_secure_random_bytes());
+ // When calculating the set of HTLCs which will be included in their next commitment_signed, all
+ // non-dust inbound HTLCs are included (every inbound state implies inclusion), while outbound
+ // HTLCs are only counted in certain states, see below.
+ let mut included_htlcs = 0;
+ for ref htlc in context.pending_inbound_htlcs.iter() {
+ if htlc.amount_msat / 1000 <= real_dust_limit_timeout_sat {
+ continue
+ }
+ included_htlcs += 1;
+ }
- let shutdown_scriptpubkey = if config.channel_handshake_config.commit_upfront_shutdown_pubkey {
- match signer_provider.get_shutdown_scriptpubkey() {
- Ok(scriptpubkey) => Some(scriptpubkey),
- Err(_) => return Err(APIError::ChannelUnavailable { err: "Failed to get shutdown scriptpubkey".to_owned()}),
+ for ref htlc in context.pending_outbound_htlcs.iter() {
+ if htlc.amount_msat / 1000 <= real_dust_limit_success_sat {
+ continue
}
- } else { None };
+ // Only count outbound HTLCs which may still appear in their next commitment_signed:
+ // anything announced or committed, plus removals which have not yet been irrevocably
+ // committed via an exchanged revoke_and_ack.
+ match htlc.state {
+ OutboundHTLCState::Committed => included_htlcs += 1,
+ OutboundHTLCState::RemoteRemoved {..} => included_htlcs += 1,
+ OutboundHTLCState::LocalAnnounced { .. } => included_htlcs += 1,
+ _ => {},
+ }
+ }
- if let Some(shutdown_scriptpubkey) = &shutdown_scriptpubkey {
- if !shutdown_scriptpubkey.is_compatible(&their_features) {
- return Err(APIError::IncompatibleShutdownScript { script: shutdown_scriptpubkey.clone() });
+ let num_htlcs = included_htlcs + addl_htlcs;
+ let res = commit_tx_fee_msat(context.feerate_per_kw, num_htlcs, &context.channel_type);
+ #[cfg(any(test, fuzzing))]
+ {
+ let mut fee = res;
+ if fee_spike_buffer_htlc.is_some() {
+ fee = commit_tx_fee_msat(context.feerate_per_kw, num_htlcs - 1, &context.channel_type);
}
+ let total_pending_htlcs = context.pending_inbound_htlcs.len() + context.pending_outbound_htlcs.len();
+ let commitment_tx_info = CommitmentTxInfoCached {
+ fee,
+ total_pending_htlcs,
+ next_holder_htlc_id: match htlc.origin {
+ HTLCInitiator::LocalOffered => context.next_holder_htlc_id + 1,
+ HTLCInitiator::RemoteOffered => context.next_holder_htlc_id,
+ },
+ next_counterparty_htlc_id: match htlc.origin {
+ HTLCInitiator::LocalOffered => context.next_counterparty_htlc_id,
+ HTLCInitiator::RemoteOffered => context.next_counterparty_htlc_id + 1,
+ },
+ feerate: context.feerate_per_kw,
+ };
+ *context.next_remote_commitment_tx_fee_info_cached.lock().unwrap() = Some(commitment_tx_info);
}
+ res
+ }
- let destination_script = match signer_provider.get_destination_script() {
- Ok(script) => script,
- Err(_) => return Err(APIError::ChannelUnavailable { err: "Failed to get destination script".to_owned()}),
- };
+ fn if_unbroadcasted_funding<F, O>(&self, f: F) -> Option<O>
+ where F: Fn() -> Option<O> {
+ match self.channel_state {
+ ChannelState::FundingNegotiated => f(),
+ ChannelState::AwaitingChannelReady(flags) => if flags.is_set(AwaitingChannelReadyFlags::WAITING_FOR_BATCH) {
+ f()
+ } else {
+ None
+ },
+ _ => None,
+ }
+ }
- let temporary_channel_id = entropy_source.get_secure_random_bytes();
+ /// Returns the transaction if there is a pending funding transaction that is yet to be
+ /// broadcast.
+ pub fn unbroadcasted_funding(&self) -> Option<Transaction> {
+ self.if_unbroadcasted_funding(|| self.funding_transaction.clone())
+ }
- Ok(Channel {
- context: ChannelContext {
- user_id,
+ /// Returns the transaction ID if there is a pending funding transaction that is yet to be
+ /// broadcast.
+ pub fn unbroadcasted_funding_txid(&self) -> Option<Txid> {
+ self.if_unbroadcasted_funding(||
+ self.channel_transaction_parameters.funding_outpoint.map(|txo| txo.txid)
+ )
+ }
- config: LegacyChannelConfig {
- options: config.channel_config.clone(),
- announced_channel: config.channel_handshake_config.announced_channel,
- commit_upfront_shutdown_pubkey: config.channel_handshake_config.commit_upfront_shutdown_pubkey,
- },
-
- prev_config: None,
-
- inbound_handshake_limits_override: Some(config.channel_handshake_limits.clone()),
-
- channel_id: temporary_channel_id,
- temporary_channel_id: Some(temporary_channel_id),
- channel_state: ChannelState::OurInitSent as u32,
- announcement_sigs_state: AnnouncementSigsState::NotSent,
- secp_ctx,
- channel_value_satoshis,
-
- latest_monitor_update_id: 0,
-
- holder_signer,
- shutdown_scriptpubkey,
- destination_script,
-
- cur_holder_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
- cur_counterparty_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
- value_to_self_msat,
-
- pending_inbound_htlcs: Vec::new(),
- pending_outbound_htlcs: Vec::new(),
- holding_cell_htlc_updates: Vec::new(),
- pending_update_fee: None,
- holding_cell_update_fee: None,
- next_holder_htlc_id: 0,
- next_counterparty_htlc_id: 0,
- update_time_counter: 1,
-
- resend_order: RAACommitmentOrder::CommitmentFirst,
-
- monitor_pending_channel_ready: false,
- monitor_pending_revoke_and_ack: false,
- monitor_pending_commitment_signed: false,
- monitor_pending_forwards: Vec::new(),
- monitor_pending_failures: Vec::new(),
- monitor_pending_finalized_fulfills: Vec::new(),
-
- #[cfg(debug_assertions)]
- holder_max_commitment_tx_output: Mutex::new((channel_value_satoshis * 1000 - push_msat, push_msat)),
- #[cfg(debug_assertions)]
- counterparty_max_commitment_tx_output: Mutex::new((channel_value_satoshis * 1000 - push_msat, push_msat)),
-
- last_sent_closing_fee: None,
- pending_counterparty_closing_signed: None,
- closing_fee_limits: None,
- target_closing_feerate_sats_per_kw: None,
-
- inbound_awaiting_accept: false,
-
- funding_tx_confirmed_in: None,
- funding_tx_confirmation_height: 0,
- short_channel_id: None,
- channel_creation_height: current_chain_height,
+ /// Returns whether the channel is funded in a batch.
+ pub fn is_batch_funding(&self) -> bool {
+ self.is_batch_funding.is_some()
+ }
- feerate_per_kw: feerate,
- counterparty_dust_limit_satoshis: 0,
- holder_dust_limit_satoshis: MIN_CHAN_DUST_LIMIT_SATOSHIS,
- counterparty_max_htlc_value_in_flight_msat: 0,
- holder_max_htlc_value_in_flight_msat: get_holder_max_htlc_value_in_flight_msat(channel_value_satoshis, &config.channel_handshake_config),
- counterparty_selected_channel_reserve_satoshis: None, // Filled in in accept_channel
- holder_selected_channel_reserve_satoshis,
- counterparty_htlc_minimum_msat: 0,
- holder_htlc_minimum_msat: if config.channel_handshake_config.our_htlc_minimum_msat == 0 { 1 } else { config.channel_handshake_config.our_htlc_minimum_msat },
- counterparty_max_accepted_htlcs: 0,
- holder_max_accepted_htlcs: cmp::min(config.channel_handshake_config.our_max_accepted_htlcs, MAX_HTLCS),
- minimum_depth: None, // Filled in in accept_channel
+ /// Returns the transaction ID if there is a pending batch funding transaction that is yet to be
+ /// broadcast.
+ pub fn unbroadcasted_batch_funding_txid(&self) -> Option<Txid> {
+ self.unbroadcasted_funding_txid().filter(|_| self.is_batch_funding())
+ }
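+
+ // Illustrative caller sketch (hypothetical, not from this changeset): when
+ // force-closing a channel funded in a batch, the batch txid lets the caller
+ // also abort the other channels sharing the unbroadcast funding transaction:
+ //
+ // if let Some(batch_txid) = context.unbroadcasted_batch_funding_txid() {
+ //     /* fail the remaining channels funded by `batch_txid` */
+ // }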
- counterparty_forwarding_info: None,
+ /// Gets the latest commitment transaction and any dependent transactions for relay (forcing
+ /// shutdown of this channel - no more calls into this Channel may be made afterwards except
+ /// those explicitly stated to be allowed after shutdown completes, e.g. some simple getters).
+ /// Also returns the list of payment_hashes for HTLCs which we can safely fail backwards
+ /// immediately (others we will have to allow to time out).
+ pub fn force_shutdown(&mut self, should_broadcast: bool) -> ShutdownResult {
+ // Note that we MUST only generate a monitor update that indicates force-closure - we're
+ // called during initialization prior to the chain_monitor in the encompassing ChannelManager
+ // being fully configured in some cases. Thus, it's likely any monitor events we generate will
+ // be delayed in being processed! See the docs for `ChannelManagerReadArgs` for more.
+ assert!(!matches!(self.channel_state, ChannelState::ShutdownComplete));
- channel_transaction_parameters: ChannelTransactionParameters {
- holder_pubkeys: pubkeys,
- holder_selected_contest_delay: config.channel_handshake_config.our_to_self_delay,
- is_outbound_from_holder: true,
- counterparty_parameters: None,
- funding_outpoint: None,
- opt_anchors: if channel_type.requires_anchors_zero_fee_htlc_tx() { Some(()) } else { None },
- opt_non_zero_fee_anchors: None
+ // We go ahead and "free" any holding cell HTLCs or HTLCs we haven't yet committed to and
+ // return them to fail the payment.
+ let mut dropped_outbound_htlcs = Vec::with_capacity(self.holding_cell_htlc_updates.len());
+ let counterparty_node_id = self.get_counterparty_node_id();
+ for htlc_update in self.holding_cell_htlc_updates.drain(..) {
+ match htlc_update {
+ HTLCUpdateAwaitingACK::AddHTLC { source, payment_hash, .. } => {
+ dropped_outbound_htlcs.push((source, payment_hash, counterparty_node_id, self.channel_id));
},
- funding_transaction: None,
+ _ => {}
+ }
+ }
+ let monitor_update = if let Some(funding_txo) = self.get_funding_txo() {
+ // If we haven't yet exchanged funding signatures (i.e. channel_state < AwaitingChannelReady),
+ // returning a channel monitor update here would imply a channel monitor update before
+ // we even registered the channel monitor to begin with, which is invalid.
+ // Thus, if we aren't actually at a point where we could conceivably broadcast the
+ // funding transaction, don't return a funding txo (which prevents providing the
+ // monitor update to the user, even if we return one).
+ // See test_duplicate_chan_id and test_pre_lockin_no_chan_closed_update for more.
+ let generate_monitor_update = match self.channel_state {
+ ChannelState::AwaitingChannelReady(_)|ChannelState::ChannelReady(_)|ChannelState::ShutdownComplete => true,
+ _ => false,
+ };
+ if generate_monitor_update {
+ self.latest_monitor_update_id = CLOSED_CHANNEL_UPDATE_ID;
+ Some((self.get_counterparty_node_id(), funding_txo, ChannelMonitorUpdate {
+ update_id: self.latest_monitor_update_id,
+ updates: vec![ChannelMonitorUpdateStep::ChannelForceClosed { should_broadcast }],
+ }))
+ } else { None }
+ } else { None };
+ let unbroadcasted_batch_funding_txid = self.unbroadcasted_batch_funding_txid();
- counterparty_cur_commitment_point: None,
- counterparty_prev_commitment_point: None,
- counterparty_node_id,
+ self.channel_state = ChannelState::ShutdownComplete;
+ self.update_time_counter += 1;
+ ShutdownResult {
+ monitor_update,
+ dropped_outbound_htlcs,
+ unbroadcasted_batch_funding_txid,
+ channel_id: self.channel_id,
+ counterparty_node_id: self.counterparty_node_id,
+ }
+ }
- counterparty_shutdown_scriptpubkey: None,
+ /// Only allowed after [`Self::channel_transaction_parameters`] is set.
+ fn get_funding_signed_msg<L: Deref>(&mut self, logger: &L) -> (CommitmentTransaction, Option<msgs::FundingSigned>) where L::Target: Logger {
+ let counterparty_keys = self.build_remote_transaction_keys();
+ let counterparty_initial_commitment_tx = self.build_commitment_transaction(self.cur_counterparty_commitment_transaction_number + 1, &counterparty_keys, false, false, logger).tx;
- commitment_secrets: CounterpartyCommitmentSecrets::new(),
+ let counterparty_trusted_tx = counterparty_initial_commitment_tx.trust();
+ let counterparty_initial_bitcoin_tx = counterparty_trusted_tx.built_transaction();
+ log_trace!(logger, "Initial counterparty tx for channel {} is: txid {} tx {}",
+ &self.channel_id(), counterparty_initial_bitcoin_tx.txid, encode::serialize_hex(&counterparty_initial_bitcoin_tx.transaction));
+
+ match &self.holder_signer {
+ // TODO (arik): move match into calling method for Taproot
+ ChannelSignerType::Ecdsa(ecdsa) => {
+ let funding_signed = ecdsa.sign_counterparty_commitment(&counterparty_initial_commitment_tx, Vec::new(), Vec::new(), &self.secp_ctx)
+ .map(|(signature, _)| msgs::FundingSigned {
+ channel_id: self.channel_id(),
+ signature,
+ #[cfg(taproot)]
+ partial_signature_with_nonce: None,
+ })
+ .ok();
+
+ if funding_signed.is_none() {
+ log_trace!(logger, "Counterparty commitment signature not available for funding_signed message; setting signer_pending_funding");
+ self.signer_pending_funding = true;
+ } else if self.signer_pending_funding {
+ log_trace!(logger, "Counterparty commitment signature available for funding_signed message; clearing signer_pending_funding");
+ self.signer_pending_funding = false;
+ }
- channel_update_status: ChannelUpdateStatus::Enabled,
- closing_signed_in_flight: false,
+ // We sign "counterparty" commitment transaction, allowing them to broadcast the tx if they wish.
+ (counterparty_initial_commitment_tx, funding_signed)
+ },
+ // TODO (taproot|arik)
+ #[cfg(taproot)]
+ _ => todo!()
+ }
+ }
+}
- announcement_sigs: None,
+// Internal utility functions for channels
- #[cfg(any(test, fuzzing))]
- next_local_commitment_tx_fee_info_cached: Mutex::new(None),
- #[cfg(any(test, fuzzing))]
- next_remote_commitment_tx_fee_info_cached: Mutex::new(None),
+/// Returns the value to use for `holder_max_htlc_value_in_flight_msat` as a percentage of the
+/// `channel_value_satoshis` in msat, set through
+/// [`ChannelHandshakeConfig::max_inbound_htlc_value_in_flight_percent_of_channel`]
+///
+/// The effective percentage is lower bounded by 1% and upper bounded by 100%.
+///
+/// [`ChannelHandshakeConfig::max_inbound_htlc_value_in_flight_percent_of_channel`]: crate::util::config::ChannelHandshakeConfig::max_inbound_htlc_value_in_flight_percent_of_channel
+fn get_holder_max_htlc_value_in_flight_msat(channel_value_satoshis: u64, config: &ChannelHandshakeConfig) -> u64 {
+ let configured_percent = if config.max_inbound_htlc_value_in_flight_percent_of_channel < 1 {
+ 1
+ } else if config.max_inbound_htlc_value_in_flight_percent_of_channel > 100 {
+ 100
+ } else {
+ config.max_inbound_htlc_value_in_flight_percent_of_channel as u64
+ };
+ channel_value_satoshis * 10 * configured_percent
+}
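+
+// Illustrative sketch (not part of the changeset above): a worked example of
+// the cap arithmetic. `channel_value_satoshis * 10 * percent` equals
+// `channel_value_satoshis * 1000 * percent / 100`, i.e. the configured
+// percentage of the channel value expressed in msat.
+#[cfg(test)]
+#[test]
+fn example_holder_max_htlc_value_in_flight_msat() {
+ let mut config = ChannelHandshakeConfig::default();
+ config.max_inbound_htlc_value_in_flight_percent_of_channel = 10;
+ // 10% of a 1_000_000 sat channel is 100_000 sat, i.e. 100_000_000 msat.
+ assert_eq!(get_holder_max_htlc_value_in_flight_msat(1_000_000, &config), 100_000_000);
+ // Out-of-range configurations are clamped to the [1, 100] percent range.
+ config.max_inbound_htlc_value_in_flight_percent_of_channel = 0;
+ assert_eq!(get_holder_max_htlc_value_in_flight_msat(1_000_000, &config), 10_000_000);
+}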
- workaround_lnd_bug_4006: None,
- sent_message_awaiting_response: None,
+/// Returns a minimum channel reserve value the remote needs to maintain,
+/// required by us according to the configured or default
+/// [`ChannelHandshakeConfig::their_channel_reserve_proportional_millionths`]
+///
+/// Guaranteed to return a value no larger than channel_value_satoshis
+///
+/// This is used both for outbound and inbound channels and has lower bound
+/// of `MIN_THEIR_CHAN_RESERVE_SATOSHIS`.
+pub(crate) fn get_holder_selected_channel_reserve_satoshis(channel_value_satoshis: u64, config: &UserConfig) -> u64 {
+ let calculated_reserve = channel_value_satoshis.saturating_mul(config.channel_handshake_config.their_channel_reserve_proportional_millionths as u64) / 1_000_000;
+ cmp::min(channel_value_satoshis, cmp::max(calculated_reserve, MIN_THEIR_CHAN_RESERVE_SATOSHIS))
+}
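+
+// Illustrative sketch (not part of the changeset above): the reserve is the
+// configured fraction of the channel value, floored at
+// `MIN_THEIR_CHAN_RESERVE_SATOSHIS` (1000 sat) and capped at the channel value.
+#[cfg(test)]
+#[test]
+fn example_holder_selected_channel_reserve() {
+ let mut config = UserConfig::default();
+ config.channel_handshake_config.their_channel_reserve_proportional_millionths = 10_000; // 1%
+ assert_eq!(get_holder_selected_channel_reserve_satoshis(1_000_000, &config), 10_000);
+ // 1% of 50_000 sat is 500 sat, below the floor, so the floor applies.
+ assert_eq!(get_holder_selected_channel_reserve_satoshis(50_000, &config), 1_000);
+}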
- latest_inbound_scid_alias: None,
- outbound_scid_alias,
+/// This is for legacy reasons, present for forward-compatibility.
+/// LDK versions older than 0.0.104 don't know how to read/handle values other than default
+/// from storage. Hence, we use this function to not persist default values of
+/// `holder_selected_channel_reserve_satoshis` for channels into storage.
+pub(crate) fn get_legacy_default_holder_selected_channel_reserve_satoshis(channel_value_satoshis: u64) -> u64 {
+ let (q, _) = channel_value_satoshis.overflowing_div(100);
+ cmp::min(channel_value_satoshis, cmp::max(q, 1000))
+}
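+
+// Illustrative sketch (not part of the changeset above): the legacy default is
+// 1% of the channel value, floored at 1000 sat but never above the value itself.
+#[cfg(test)]
+#[test]
+fn example_legacy_default_holder_selected_channel_reserve() {
+ assert_eq!(get_legacy_default_holder_selected_channel_reserve_satoshis(250_000), 2_500);
+ assert_eq!(get_legacy_default_holder_selected_channel_reserve_satoshis(50_000), 1_000);
+ // For tiny channels the cap at the channel value itself applies.
+ assert_eq!(get_legacy_default_holder_selected_channel_reserve_satoshis(500), 500);
+}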
- channel_pending_event_emitted: false,
- channel_ready_event_emitted: false,
+// Get the fee cost in SATS of a commitment tx with a given number of HTLC outputs.
+// Note that num_htlcs should not include dust HTLCs.
+#[inline]
+fn commit_tx_fee_sat(feerate_per_kw: u32, num_htlcs: usize, channel_type_features: &ChannelTypeFeatures) -> u64 {
+ feerate_per_kw as u64 * (commitment_tx_base_weight(channel_type_features) + num_htlcs as u64 * COMMITMENT_TX_WEIGHT_PER_HTLC) / 1000
+}
- #[cfg(any(test, fuzzing))]
- historical_inbound_htlc_fulfills: HashSet::new(),
+// Get the fee cost in MSATS of a commitment tx with a given number of HTLC outputs.
+// Note that num_htlcs should not include dust HTLCs.
+pub(crate) fn commit_tx_fee_msat(feerate_per_kw: u32, num_htlcs: usize, channel_type_features: &ChannelTypeFeatures) -> u64 {
+ // Note that we need to divide before multiplying to round properly,
+ // since the lowest denomination of bitcoin on-chain is the satoshi.
+ (commitment_tx_base_weight(channel_type_features) + num_htlcs as u64 * COMMITMENT_TX_WEIGHT_PER_HTLC) * feerate_per_kw as u64 / 1000 * 1000
+}
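+
+// Illustrative sketch (not part of the changeset above): a worked example of
+// the rounding note. At 253 sat/kw with one non-dust HTLC on a non-anchor
+// channel, the weight is 724 + 172 = 896 WU, so the fee truncates to
+// 896 * 253 / 1000 = 226 sat, reported as 226_000 msat.
+#[cfg(test)]
+#[test]
+fn example_commit_tx_fee_rounding() {
+ let features = ChannelTypeFeatures::only_static_remote_key();
+ assert_eq!(commit_tx_fee_sat(253, 1, &features), 226);
+ assert_eq!(commit_tx_fee_msat(253, 1, &features), 226_000);
+}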
- channel_type,
- channel_keys_id,
+// Holder designates channel data owned for the benefit of the user client.
+// Counterparty designates channel data owned by another channel participant entity.
+pub(super) struct Channel<SP: Deref> where SP::Target: SignerProvider {
+ pub context: ChannelContext<SP>,
+}
- pending_monitor_updates: Vec::new(),
- }
- })
- }
+#[cfg(any(test, fuzzing))]
+struct CommitmentTxInfoCached {
+ fee: u64,
+ total_pending_htlcs: usize,
+ next_holder_htlc_id: u64,
+ next_counterparty_htlc_id: u64,
+ feerate: u32,
+}
- fn check_remote_fee<F: Deref, L: Deref>(fee_estimator: &LowerBoundedFeeEstimator<F>,
- feerate_per_kw: u32, cur_feerate_per_kw: Option<u32>, logger: &L)
- -> Result<(), ChannelError> where F::Target: FeeEstimator, L::Target: Logger,
+impl<SP: Deref> Channel<SP> where
+ SP::Target: SignerProvider,
+ <SP::Target as SignerProvider>::EcdsaSigner: WriteableEcdsaChannelSigner
+{
+ fn check_remote_fee<F: Deref, L: Deref>(
+ channel_type: &ChannelTypeFeatures, fee_estimator: &LowerBoundedFeeEstimator<F>,
+ feerate_per_kw: u32, cur_feerate_per_kw: Option<u32>, logger: &L
+ ) -> Result<(), ChannelError> where F::Target: FeeEstimator, L::Target: Logger,
{
- // We only bound the fee updates on the upper side to prevent completely absurd feerates,
- // always accepting up to 25 sat/vByte or 10x our fee estimator's "High Priority" fee.
- // We generally don't care too much if they set the feerate to something very high, but it
- // could result in the channel being useless due to everything being dust.
- let upper_limit = cmp::max(250 * 25,
- fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::HighPriority) as u64 * 10);
- if feerate_per_kw as u64 > upper_limit {
- return Err(ChannelError::Close(format!("Peer's feerate much too high. Actual: {}. Our expected upper limit: {}", feerate_per_kw, upper_limit)));
- }
- let lower_limit = fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::Background);
- // Some fee estimators round up to the next full sat/vbyte (ie 250 sats per kw), causing
- // occasional issues with feerate disagreements between an initiator that wants a feerate
- // of 1.1 sat/vbyte and a receiver that wants 1.1 rounded up to 2. Thus, we always add 250
- // sat/kw before the comparison here.
- if feerate_per_kw + 250 < lower_limit {
+ let lower_limit_conf_target = if channel_type.supports_anchors_zero_fee_htlc_tx() {
+ ConfirmationTarget::MinAllowedAnchorChannelRemoteFee
+ } else {
+ ConfirmationTarget::MinAllowedNonAnchorChannelRemoteFee
+ };
+ let lower_limit = fee_estimator.bounded_sat_per_1000_weight(lower_limit_conf_target);
+ if feerate_per_kw < lower_limit {
if let Some(cur_feerate) = cur_feerate_per_kw {
if feerate_per_kw > cur_feerate {
log_warn!(logger,
return Ok(());
}
}
- return Err(ChannelError::Close(format!("Peer's feerate much too low. Actual: {}. Our expected lower limit: {} (- 250)", feerate_per_kw, lower_limit)));
+ return Err(ChannelError::Close(format!("Peer's feerate much too low. Actual: {}. Our expected lower limit: {}", feerate_per_kw, lower_limit)));
}
Ok(())
}
- /// Creates a new channel from a remote sides' request for one.
- /// Assumes chain_hash has already been checked and corresponds with what we expect!
- pub fn new_from_req<ES: Deref, SP: Deref, F: Deref, L: Deref>(
- fee_estimator: &LowerBoundedFeeEstimator<F>, entropy_source: &ES, signer_provider: &SP,
- counterparty_node_id: PublicKey, our_supported_features: &ChannelTypeFeatures,
- their_features: &InitFeatures, msg: &msgs::OpenChannel, user_id: u128, config: &UserConfig,
- current_chain_height: u32, logger: &L, outbound_scid_alias: u64
- ) -> Result<Channel<Signer>, ChannelError>
- where ES::Target: EntropySource,
- SP::Target: SignerProvider<Signer = Signer>,
- F::Target: FeeEstimator,
- L::Target: Logger,
- {
- let announced_channel = if (msg.channel_flags & 1) == 1 { true } else { false };
+ #[inline]
+ fn get_closing_scriptpubkey(&self) -> ScriptBuf {
+ // The shutdown scriptpubkey is set on channel opening when option_upfront_shutdown_script
+ // is signaled. Otherwise, it is set when sending a shutdown message. Calling this method
+ // outside of those situations will panic, as the scriptpubkey will not yet be set.
+ self.context.shutdown_scriptpubkey.clone().unwrap().into_inner()
+ }
- // First check the channel type is known, failing before we do anything else if we don't
- // support this channel type.
- let channel_type = if let Some(channel_type) = &msg.channel_type {
- if channel_type.supports_any_optional_bits() {
- return Err(ChannelError::Close("Channel Type field contained optional bits - this is not allowed".to_owned()));
- }
+ #[inline]
+ fn get_closing_transaction_weight(&self, a_scriptpubkey: Option<&Script>, b_scriptpubkey: Option<&Script>) -> u64 {
+ let mut ret =
+ (4 + // version
+ 1 + // input count
+ 36 + // prevout
+ 1 + // script length (0)
+ 4 + // sequence
+ 1 + // output count
+ 4 // lock time
+ )*4 + // * 4 for non-witness parts
+ 2 + // witness marker and flag
+ 1 + // witness element count
+ 4 + // 4 element lengths (2 sigs, multisig dummy, and witness script)
+ self.context.get_funding_redeemscript().len() as u64 + // funding witness script
+ 2*(1 + 71); // two signatures + sighash type flags
+ if let Some(spk) = a_scriptpubkey {
+ ret += ((8+1) + // output values and script length
+ spk.len() as u64) * 4; // scriptpubkey and witness multiplier
+ }
+ if let Some(spk) = b_scriptpubkey {
+ ret += ((8+1) + // output values and script length
+ spk.len() as u64) * 4; // scriptpubkey and witness multiplier
+ }
+ ret
+ }
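+
+ // Illustrative arithmetic (assumed values, not from this changeset): with the
+ // standard 71-byte 2-of-2 funding redeemscript and both outputs present as
+ // P2WPKH (22-byte scripts), this evaluates to
+ // 51 * 4 + 2 + 1 + 4 + 71 + 2 * 72 + 2 * (9 + 22) * 4 = 674 weight units.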
- // We only support the channel types defined by the `ChannelManager` in
- // `provided_channel_type_features`. The channel type must always support
- // `static_remote_key`.
- if !channel_type.requires_static_remote_key() {
- return Err(ChannelError::Close("Channel Type was not understood - we require static remote key".to_owned()));
- }
- // Make sure we support all of the features behind the channel type.
- if !channel_type.is_subset(our_supported_features) {
- return Err(ChannelError::Close("Channel Type contains unsupported features".to_owned()));
- }
- if channel_type.requires_scid_privacy() && announced_channel {
- return Err(ChannelError::Close("SCID Alias/Privacy Channel Type cannot be set on a public channel".to_owned()));
- }
- channel_type.clone()
- } else {
- let channel_type = ChannelTypeFeatures::from_init(&their_features);
- if channel_type != ChannelTypeFeatures::only_static_remote_key() {
- return Err(ChannelError::Close("Only static_remote_key is supported for non-negotiated channel types".to_owned()));
- }
- channel_type
- };
- let opt_anchors = channel_type.supports_anchors_zero_fee_htlc_tx();
+ #[inline]
+ fn build_closing_transaction(&self, proposed_total_fee_satoshis: u64, skip_remote_output: bool) -> (ClosingTransaction, u64) {
+ assert!(self.context.pending_inbound_htlcs.is_empty());
+ assert!(self.context.pending_outbound_htlcs.is_empty());
+ assert!(self.context.pending_update_fee.is_none());
- let channel_keys_id = signer_provider.generate_channel_keys_id(true, msg.funding_satoshis, user_id);
- let holder_signer = signer_provider.derive_channel_signer(msg.funding_satoshis, channel_keys_id);
- let pubkeys = holder_signer.pubkeys().clone();
- let counterparty_pubkeys = ChannelPublicKeys {
- funding_pubkey: msg.funding_pubkey,
- revocation_basepoint: msg.revocation_basepoint,
- payment_point: msg.payment_point,
- delayed_payment_basepoint: msg.delayed_payment_basepoint,
- htlc_basepoint: msg.htlc_basepoint
- };
+ let mut total_fee_satoshis = proposed_total_fee_satoshis;
+ let mut value_to_holder: i64 = (self.context.value_to_self_msat as i64) / 1000 - if self.context.is_outbound() { total_fee_satoshis as i64 } else { 0 };
+ let mut value_to_counterparty: i64 = ((self.context.channel_value_satoshis * 1000 - self.context.value_to_self_msat) as i64 / 1000) - if self.context.is_outbound() { 0 } else { total_fee_satoshis as i64 };
- if config.channel_handshake_config.our_to_self_delay < BREAKDOWN_TIMEOUT {
- return Err(ChannelError::Close(format!("Configured with an unreasonable our_to_self_delay ({}) putting user funds at risks. It must be greater than {}", config.channel_handshake_config.our_to_self_delay, BREAKDOWN_TIMEOUT)));
+ if value_to_holder < 0 {
+ assert!(self.context.is_outbound());
+ total_fee_satoshis += (-value_to_holder) as u64;
+ } else if value_to_counterparty < 0 {
+ assert!(!self.context.is_outbound());
+ total_fee_satoshis += (-value_to_counterparty) as u64;
}
- // Check sanity of message fields:
- if msg.funding_satoshis > config.channel_handshake_limits.max_funding_satoshis {
- return Err(ChannelError::Close(format!("Per our config, funding must be at most {}. It was {}", config.channel_handshake_limits.max_funding_satoshis, msg.funding_satoshis)));
- }
- if msg.funding_satoshis >= TOTAL_BITCOIN_SUPPLY_SATOSHIS {
- return Err(ChannelError::Close(format!("Funding must be smaller than the total bitcoin supply. It was {}", msg.funding_satoshis)));
- }
- if msg.channel_reserve_satoshis > msg.funding_satoshis {
- return Err(ChannelError::Close(format!("Bogus channel_reserve_satoshis ({}). Must be not greater than funding_satoshis: {}", msg.channel_reserve_satoshis, msg.funding_satoshis)));
- }
- let full_channel_value_msat = (msg.funding_satoshis - msg.channel_reserve_satoshis) * 1000;
- if msg.push_msat > full_channel_value_msat {
- return Err(ChannelError::Close(format!("push_msat {} was larger than channel amount minus reserve ({})", msg.push_msat, full_channel_value_msat)));
- }
- if msg.dust_limit_satoshis > msg.funding_satoshis {
- return Err(ChannelError::Close(format!("dust_limit_satoshis {} was larger than funding_satoshis {}. Peer never wants payout outputs?", msg.dust_limit_satoshis, msg.funding_satoshis)));
- }
- if msg.htlc_minimum_msat >= full_channel_value_msat {
- return Err(ChannelError::Close(format!("Minimum htlc value ({}) was larger than full channel value ({})", msg.htlc_minimum_msat, full_channel_value_msat)));
+ if skip_remote_output || value_to_counterparty as u64 <= self.context.holder_dust_limit_satoshis {
+ value_to_counterparty = 0;
}
- Channel::<Signer>::check_remote_fee(fee_estimator, msg.feerate_per_kw, None, logger)?;
- let max_counterparty_selected_contest_delay = u16::min(config.channel_handshake_limits.their_to_self_delay, MAX_LOCAL_BREAKDOWN_TIMEOUT);
- if msg.to_self_delay > max_counterparty_selected_contest_delay {
- return Err(ChannelError::Close(format!("They wanted our payments to be delayed by a needlessly long period. Upper limit: {}. Actual: {}", max_counterparty_selected_contest_delay, msg.to_self_delay)));
- }
- if msg.max_accepted_htlcs < 1 {
- return Err(ChannelError::Close("0 max_accepted_htlcs makes for a useless channel".to_owned()));
- }
- if msg.max_accepted_htlcs > MAX_HTLCS {
- return Err(ChannelError::Close(format!("max_accepted_htlcs was {}. It must not be larger than {}", msg.max_accepted_htlcs, MAX_HTLCS)));
+ if value_to_holder as u64 <= self.context.holder_dust_limit_satoshis {
+ value_to_holder = 0;
}
- // Now check against optional parameters as set by config...
- if msg.funding_satoshis < config.channel_handshake_limits.min_funding_satoshis {
- return Err(ChannelError::Close(format!("Funding satoshis ({}) is less than the user specified limit ({})", msg.funding_satoshis, config.channel_handshake_limits.min_funding_satoshis)));
- }
- if msg.htlc_minimum_msat > config.channel_handshake_limits.max_htlc_minimum_msat {
- return Err(ChannelError::Close(format!("htlc_minimum_msat ({}) is higher than the user specified limit ({})", msg.htlc_minimum_msat, config.channel_handshake_limits.max_htlc_minimum_msat)));
- }
- if msg.max_htlc_value_in_flight_msat < config.channel_handshake_limits.min_max_htlc_value_in_flight_msat {
- return Err(ChannelError::Close(format!("max_htlc_value_in_flight_msat ({}) is less than the user specified limit ({})", msg.max_htlc_value_in_flight_msat, config.channel_handshake_limits.min_max_htlc_value_in_flight_msat)));
- }
- if msg.channel_reserve_satoshis > config.channel_handshake_limits.max_channel_reserve_satoshis {
- return Err(ChannelError::Close(format!("channel_reserve_satoshis ({}) is higher than the user specified limit ({})", msg.channel_reserve_satoshis, config.channel_handshake_limits.max_channel_reserve_satoshis)));
- }
- if msg.max_accepted_htlcs < config.channel_handshake_limits.min_max_accepted_htlcs {
- return Err(ChannelError::Close(format!("max_accepted_htlcs ({}) is less than the user specified limit ({})", msg.max_accepted_htlcs, config.channel_handshake_limits.min_max_accepted_htlcs)));
- }
- if msg.dust_limit_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
- return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is less than the implementation limit ({})", msg.dust_limit_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS)));
- }
- if msg.dust_limit_satoshis > MAX_CHAN_DUST_LIMIT_SATOSHIS {
- return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is greater than the implementation limit ({})", msg.dust_limit_satoshis, MAX_CHAN_DUST_LIMIT_SATOSHIS)));
- }
+ assert!(self.context.shutdown_scriptpubkey.is_some());
+ let holder_shutdown_script = self.get_closing_scriptpubkey();
+ let counterparty_shutdown_script = self.context.counterparty_shutdown_scriptpubkey.clone().unwrap();
+ let funding_outpoint = self.funding_outpoint().into_bitcoin_outpoint();
- // Convert things into internal flags and prep our state:
+ let closing_transaction = ClosingTransaction::new(value_to_holder as u64, value_to_counterparty as u64, holder_shutdown_script, counterparty_shutdown_script, funding_outpoint);
+ (closing_transaction, total_fee_satoshis)
+ }
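+ // For example (illustrative numbers only) for the dust rounding above: with
+ // holder_dust_limit_satoshis = 546 and value_to_counterparty = 500, the counterparty
+ // output is dropped entirely and those 500 sats are implicitly left to the miner as
+ // additional fee, since `ClosingTransaction::new` omits any output whose value is zero.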
- if config.channel_handshake_limits.force_announced_channel_preference {
- if config.channel_handshake_config.announced_channel != announced_channel {
- return Err(ChannelError::Close("Peer tried to open channel but their announcement preference is different from ours".to_owned()));
- }
- }
+ fn funding_outpoint(&self) -> OutPoint {
+ self.context.channel_transaction_parameters.funding_outpoint.unwrap()
+ }
- let holder_selected_channel_reserve_satoshis = get_holder_selected_channel_reserve_satoshis(msg.funding_satoshis, config);
- if holder_selected_channel_reserve_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
- // Protocol level safety check in place, although it should never happen because
- // of `MIN_THEIR_CHAN_RESERVE_SATOSHIS`
- return Err(ChannelError::Close(format!("Suitable channel reserve not found. remote_channel_reserve was ({}). dust_limit_satoshis is ({}).", holder_selected_channel_reserve_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS)));
- }
- if holder_selected_channel_reserve_satoshis * 1000 >= full_channel_value_msat {
- return Err(ChannelError::Close(format!("Suitable channel reserve not found. remote_channel_reserve was ({})msats. Channel value is ({} - {})msats.", holder_selected_channel_reserve_satoshis * 1000, full_channel_value_msat, msg.push_msat)));
- }
- if msg.channel_reserve_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
- log_debug!(logger, "channel_reserve_satoshis ({}) is smaller than our dust limit ({}). We can broadcast stale states without any risk, implying this channel is very insecure for our counterparty.",
- msg.channel_reserve_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS);
- }
- if holder_selected_channel_reserve_satoshis < msg.dust_limit_satoshis {
- return Err(ChannelError::Close(format!("Dust limit ({}) too high for the channel reserve we require the remote to keep ({})", msg.dust_limit_satoshis, holder_selected_channel_reserve_satoshis)));
+ /// Claims an HTLC while we're disconnected from a peer, dropping the [`ChannelMonitorUpdate`]
+ /// entirely.
+ ///
+ /// The [`ChannelMonitor`] for this channel MUST be updated out-of-band with the preimage
+ /// provided (i.e. without calling [`crate::chain::Watch::update_channel`]).
+ ///
+ /// The HTLC claim will end up in the holding cell (because the caller must ensure the peer is
+ /// disconnected).
+ pub fn claim_htlc_while_disconnected_dropping_mon_update<L: Deref>
+ (&mut self, htlc_id_arg: u64, payment_preimage_arg: PaymentPreimage, logger: &L)
+ where L::Target: Logger {
+ // Assert that we'll add the HTLC claim to the holding cell in `get_update_fulfill_htlc`
+ // (see equivalent if condition there).
+ assert!(self.context.channel_state.should_force_holding_cell());
+ let mon_update_id = self.context.latest_monitor_update_id; // Forget the ChannelMonitor update
+ let fulfill_resp = self.get_update_fulfill_htlc(htlc_id_arg, payment_preimage_arg, logger);
+ self.context.latest_monitor_update_id = mon_update_id;
+ if let UpdateFulfillFetch::NewClaim { msg, .. } = fulfill_resp {
+ assert!(msg.is_none()); // The HTLC must have ended up in the holding cell.
}
+ }
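+ // A minimal caller sketch for `claim_htlc_while_disconnected_dropping_mon_update`
+ // (hypothetical, abbreviated arguments), assuming the preimage was already handed to the
+ // ChannelMonitor out-of-band as required above:
+ //
+ //   monitor.provide_payment_preimage(&payment_hash, &payment_preimage, /* ... */);
+ //   chan.claim_htlc_while_disconnected_dropping_mon_update(htlc_id, payment_preimage, &logger);
+ //   // The claim now waits in the holding cell until the peer reconnects.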
- // check if the funder's amount for the initial commitment tx is sufficient
- // for full fee payment plus a few HTLCs to ensure the channel will be useful.
- let funders_amount_msat = msg.funding_satoshis * 1000 - msg.push_msat;
- let commitment_tx_fee = Self::commit_tx_fee_msat(msg.feerate_per_kw, MIN_AFFORDABLE_HTLC_COUNT, opt_anchors) / 1000;
- if funders_amount_msat / 1000 < commitment_tx_fee {
- return Err(ChannelError::Close(format!("Funding amount ({} sats) can't even pay fee for initial commitment transaction fee of {} sats.", funders_amount_msat / 1000, commitment_tx_fee)));
+ fn get_update_fulfill_htlc<L: Deref>(&mut self, htlc_id_arg: u64, payment_preimage_arg: PaymentPreimage, logger: &L) -> UpdateFulfillFetch where L::Target: Logger {
+ // Either ChannelReady got set (which means it won't be unset) or there is no way any
+ // caller thought we could have something claimed (because we wouldn't have accepted an
+ // incoming HTLC in the first place). If we got to ShutdownComplete, callers aren't
+ // allowed to call us, either.
+ if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
+ panic!("Was asked to fulfill an HTLC when channel was not in an operational state");
}
- let to_remote_satoshis = funders_amount_msat / 1000 - commitment_tx_fee;
- // While it's reasonable for us to not meet the channel reserve initially (if they don't
- // want to push much to us), our counterparty should always have more than our reserve.
- if to_remote_satoshis < holder_selected_channel_reserve_satoshis {
- return Err(ChannelError::Close("Insufficient funding amount for initial reserve".to_owned()));
- }
+ // ChannelManager may generate duplicate claims/fails due to HTLC update events from
+ // on-chain ChannelMonitors during block rescan. Ideally we'd figure out a way to drop
+ // these, but for now we just have to treat them as normal.
- let counterparty_shutdown_scriptpubkey = if their_features.supports_upfront_shutdown_script() {
- match &msg.shutdown_scriptpubkey {
- &Some(ref script) => {
- // Peer is signaling upfront_shutdown and has opt-out with a 0-length script. We don't enforce anything
- if script.len() == 0 {
- None
- } else {
- if !script::is_bolt2_compliant(&script, their_features) {
- return Err(ChannelError::Close(format!("Peer is signaling upfront_shutdown but has provided an unacceptable scriptpubkey format: {}", script)))
+ let mut pending_idx = core::usize::MAX;
+ let mut htlc_value_msat = 0;
+ for (idx, htlc) in self.context.pending_inbound_htlcs.iter().enumerate() {
+ if htlc.htlc_id == htlc_id_arg {
+ debug_assert_eq!(htlc.payment_hash, PaymentHash(Sha256::hash(&payment_preimage_arg.0[..]).to_byte_array()));
+ log_debug!(logger, "Claiming inbound HTLC id {} with payment hash {} with preimage {}",
+ htlc.htlc_id, htlc.payment_hash, payment_preimage_arg);
+ match htlc.state {
+ InboundHTLCState::Committed => {},
+ InboundHTLCState::LocalRemoved(ref reason) => {
+ if let &InboundHTLCRemovalReason::Fulfill(_) = reason {
+ } else {
+ log_warn!(logger, "Have preimage and want to fulfill HTLC with payment hash {} we already failed against channel {}", &htlc.payment_hash, &self.context.channel_id());
+ debug_assert!(false, "Tried to fulfill an HTLC that was already failed");
}
- Some(script.clone())
+ return UpdateFulfillFetch::DuplicateClaim {};
+ },
+ _ => {
+ debug_assert!(false, "Have an inbound HTLC we tried to claim before it was fully committed to");
+ // Don't return in release mode here so that we can update channel_monitor
}
- },
- // Peer is signaling upfront shutdown but don't opt-out with correct mechanism (a.k.a 0-length script). Peer looks buggy, we fail the channel
- &None => {
- return Err(ChannelError::Close("Peer is signaling upfront_shutdown but we don't get any script. Use 0-length script to opt-out".to_owned()));
}
+ pending_idx = idx;
+ htlc_value_msat = htlc.amount_msat;
+ break;
}
- } else { None };
+ }
+ if pending_idx == core::usize::MAX {
+ #[cfg(any(test, fuzzing))]
+ // If we failed to find an HTLC to fulfill, make sure it was previously fulfilled and
+ // this is simply a duplicate claim, not previously failed and we lost funds.
+ debug_assert!(self.context.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
+ return UpdateFulfillFetch::DuplicateClaim {};
+ }
- let shutdown_scriptpubkey = if config.channel_handshake_config.commit_upfront_shutdown_pubkey {
- match signer_provider.get_shutdown_scriptpubkey() {
- Ok(scriptpubkey) => Some(scriptpubkey),
- Err(_) => return Err(ChannelError::Close("Failed to get upfront shutdown scriptpubkey".to_owned())),
+ // Now update local state:
+ //
+ // We have to put the payment_preimage in the channel_monitor right away here to ensure we
+ // can claim it even if the channel hits the chain before we see their next commitment.
+ self.context.latest_monitor_update_id += 1;
+ let monitor_update = ChannelMonitorUpdate {
+ update_id: self.context.latest_monitor_update_id,
+ updates: vec![ChannelMonitorUpdateStep::PaymentPreimage {
+ payment_preimage: payment_preimage_arg.clone(),
+ }],
+ };
+
+ if self.context.channel_state.should_force_holding_cell() {
+ // Note that this condition is the same as the assertion in
+ // `claim_htlc_while_disconnected_dropping_mon_update` and must match exactly -
+ // `claim_htlc_while_disconnected_dropping_mon_update` would not work correctly if we
+ // do not get into this branch.
+ for pending_update in self.context.holding_cell_htlc_updates.iter() {
+ match pending_update {
+ &HTLCUpdateAwaitingACK::ClaimHTLC { htlc_id, .. } => {
+ if htlc_id_arg == htlc_id {
+ // Make sure we don't leave latest_monitor_update_id incremented here:
+ self.context.latest_monitor_update_id -= 1;
+ #[cfg(any(test, fuzzing))]
+ debug_assert!(self.context.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
+ return UpdateFulfillFetch::DuplicateClaim {};
+ }
+ },
+ &HTLCUpdateAwaitingACK::FailHTLC { htlc_id, .. } => {
+ if htlc_id_arg == htlc_id {
+ log_warn!(logger, "Have preimage and want to fulfill HTLC with pending failure against channel {}", &self.context.channel_id());
+ // TODO: We may actually be able to switch to a fulfill here, though it's
+ // rare enough that it may not be worth the complexity burden.
+ debug_assert!(false, "Tried to fulfill an HTLC that was already failed");
+ return UpdateFulfillFetch::NewClaim { monitor_update, htlc_value_msat, msg: None };
+ }
+ },
+ _ => {}
+ }
}
- } else { None };
+ log_trace!(logger, "Adding HTLC claim to holding_cell in channel {}! Current state: {}", &self.context.channel_id(), self.context.channel_state.to_u32());
+ self.context.holding_cell_htlc_updates.push(HTLCUpdateAwaitingACK::ClaimHTLC {
+ payment_preimage: payment_preimage_arg, htlc_id: htlc_id_arg,
+ });
+ #[cfg(any(test, fuzzing))]
+ self.context.historical_inbound_htlc_fulfills.insert(htlc_id_arg);
+ return UpdateFulfillFetch::NewClaim { monitor_update, htlc_value_msat, msg: None };
+ }
+ #[cfg(any(test, fuzzing))]
+ self.context.historical_inbound_htlc_fulfills.insert(htlc_id_arg);
- if let Some(shutdown_scriptpubkey) = &shutdown_scriptpubkey {
- if !shutdown_scriptpubkey.is_compatible(&their_features) {
- return Err(ChannelError::Close(format!("Provided a scriptpubkey format not accepted by peer: {}", shutdown_scriptpubkey)));
+ {
+ let htlc = &mut self.context.pending_inbound_htlcs[pending_idx];
+ if let InboundHTLCState::Committed = htlc.state {
+ } else {
+ debug_assert!(false, "Have an inbound HTLC we tried to claim before it was fully committed to");
+ return UpdateFulfillFetch::NewClaim { monitor_update, htlc_value_msat, msg: None };
}
+ log_trace!(logger, "Upgrading HTLC {} to LocalRemoved with a Fulfill in channel {}!", &htlc.payment_hash, &self.context.channel_id);
+ htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::Fulfill(payment_preimage_arg.clone()));
}
- let destination_script = match signer_provider.get_destination_script() {
- Ok(script) => script,
- Err(_) => return Err(ChannelError::Close("Failed to get destination script".to_owned())),
- };
-
- let mut secp_ctx = Secp256k1::new();
- secp_ctx.seeded_randomize(&entropy_source.get_secure_random_bytes());
+ UpdateFulfillFetch::NewClaim {
+ monitor_update,
+ htlc_value_msat,
+ msg: Some(msgs::UpdateFulfillHTLC {
+ channel_id: self.context.channel_id(),
+ htlc_id: htlc_id_arg,
+ payment_preimage: payment_preimage_arg,
+ }),
+ }
+ }
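+ // A rough sketch of how the result is consumed (hedged; see
+ // `get_update_fulfill_htlc_and_commit` below for the actual in-tree caller):
+ //
+ //   match self.get_update_fulfill_htlc(htlc_id, preimage, &logger) {
+ //       UpdateFulfillFetch::NewClaim { monitor_update, msg: Some(_), .. } =>
+ //           { /* persist monitor_update, then send the update_fulfill_htlc message */ },
+ //       UpdateFulfillFetch::NewClaim { monitor_update, msg: None, .. } =>
+ //           { /* persist monitor_update; the claim sits in the holding cell */ },
+ //       UpdateFulfillFetch::DuplicateClaim {} => { /* nothing further to do */ },
+ //   }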
- let chan = Channel {
- context: ChannelContext {
- user_id,
+ pub fn get_update_fulfill_htlc_and_commit<L: Deref>(&mut self, htlc_id: u64, payment_preimage: PaymentPreimage, logger: &L) -> UpdateFulfillCommitFetch where L::Target: Logger {
+ let release_cs_monitor = self.context.blocked_monitor_updates.is_empty();
+ match self.get_update_fulfill_htlc(htlc_id, payment_preimage, logger) {
+ UpdateFulfillFetch::NewClaim { mut monitor_update, htlc_value_msat, msg } => {
+ // Even if we aren't supposed to let new monitor updates with commitment state
+ // updates run, we still need to push the preimage ChannelMonitorUpdateStep no
+ // matter what. Sadly, to push a new monitor update which applies before others
+ // already queued, we have to insert it into the pending queue and update the
+ // update_ids of all the following monitor updates.
+ if release_cs_monitor && msg.is_some() {
+ let mut additional_update = self.build_commitment_no_status_check(logger);
+ // build_commitment_no_status_check may bump latest_monitor_update_id but we want
+ // the ids to be strictly increasing by one, so decrement it here.
+ self.context.latest_monitor_update_id = monitor_update.update_id;
+ monitor_update.updates.append(&mut additional_update.updates);
+ } else {
+ let new_mon_id = self.context.blocked_monitor_updates.get(0)
+ .map(|upd| upd.update.update_id).unwrap_or(monitor_update.update_id);
+ monitor_update.update_id = new_mon_id;
+ for held_update in self.context.blocked_monitor_updates.iter_mut() {
+ held_update.update.update_id += 1;
+ }
+ if msg.is_some() {
+ debug_assert!(false, "If there is a pending blocked monitor we should have MonitorUpdateInProgress set");
+ let update = self.build_commitment_no_status_check(logger);
+ self.context.blocked_monitor_updates.push(PendingChannelMonitorUpdate {
+ update,
+ });
+ }
+ }
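+ // Worked example of the update_id bookkeeping above (illustrative ids): if the new
+ // preimage update was assigned id 9 while `blocked_monitor_updates` already held ids
+ // [7, 8], the preimage update takes over id 7 and the blocked updates are renumbered to
+ // [8, 9], keeping update_ids strictly increasing in the order they will be applied.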
- config: LegacyChannelConfig {
- options: config.channel_config.clone(),
- announced_channel,
- commit_upfront_shutdown_pubkey: config.channel_handshake_config.commit_upfront_shutdown_pubkey,
- },
+ self.monitor_updating_paused(false, msg.is_some(), false, Vec::new(), Vec::new(), Vec::new());
+ UpdateFulfillCommitFetch::NewClaim { monitor_update, htlc_value_msat, }
+ },
+ UpdateFulfillFetch::DuplicateClaim {} => UpdateFulfillCommitFetch::DuplicateClaim {},
+ }
+ }
- prev_config: None,
+ /// We can only have one resolution per HTLC. In some cases around reconnect, we may fulfill
+ /// an HTLC more than once or fulfill once and then attempt to fail after reconnect. We cannot,
+ /// however, fail more than once as we wait for an upstream failure to be irrevocably committed
+ /// before we fail backwards.
+ ///
+ /// If we do fail twice, we `debug_assert!(false)` and return `Ok(())`. Thus, this will always
+ /// return `Ok(_)` if preconditions are met. In any case, `Err`s will only be
+ /// [`ChannelError::Ignore`].
+ pub fn queue_fail_htlc<L: Deref>(&mut self, htlc_id_arg: u64, err_packet: msgs::OnionErrorPacket, logger: &L)
+ -> Result<(), ChannelError> where L::Target: Logger {
+ self.fail_htlc(htlc_id_arg, err_packet, true, logger)
+ .map(|msg_opt| assert!(msg_opt.is_none(), "We forced holding cell?"))
+ }
- inbound_handshake_limits_override: None,
+ /// We can only have one resolution per HTLC. In some cases around reconnect, we may fulfill
+ /// an HTLC more than once or fulfill once and then attempt to fail after reconnect. We cannot,
+ /// however, fail more than once as we wait for an upstream failure to be irrevocably committed
+ /// before we fail backwards.
+ ///
+ /// If we do fail twice, we `debug_assert!(false)` and return `Ok(None)`. Thus, this will always
+ /// return `Ok(_)` if preconditions are met. In any case, `Err`s will only be
+ /// [`ChannelError::Ignore`].
+ fn fail_htlc<L: Deref>(&mut self, htlc_id_arg: u64, err_packet: msgs::OnionErrorPacket, mut force_holding_cell: bool, logger: &L)
+ -> Result<Option<msgs::UpdateFailHTLC>, ChannelError> where L::Target: Logger {
+ if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
+ panic!("Was asked to fail an HTLC when channel was not in an operational state");
+ }
- temporary_channel_id: Some(msg.temporary_channel_id),
- channel_id: msg.temporary_channel_id,
- channel_state: (ChannelState::OurInitSent as u32) | (ChannelState::TheirInitSent as u32),
- announcement_sigs_state: AnnouncementSigsState::NotSent,
- secp_ctx,
+ // ChannelManager may generate duplicate claims/fails due to HTLC update events from
+ // on-chain ChannelMonitors during block rescan. Ideally we'd figure out a way to drop
+ // these, but for now we just have to treat them as normal.
- latest_monitor_update_id: 0,
+ let mut pending_idx = core::usize::MAX;
+ for (idx, htlc) in self.context.pending_inbound_htlcs.iter().enumerate() {
+ if htlc.htlc_id == htlc_id_arg {
+ match htlc.state {
+ InboundHTLCState::Committed => {},
+ InboundHTLCState::LocalRemoved(ref reason) => {
+ if let &InboundHTLCRemovalReason::Fulfill(_) = reason {
+ } else {
+ debug_assert!(false, "Tried to fail an HTLC that was already failed");
+ }
+ return Ok(None);
+ },
+ _ => {
+ debug_assert!(false, "Have an inbound HTLC we tried to claim before it was fully committed to");
+ return Err(ChannelError::Ignore(format!("Unable to find a pending HTLC which matched the given HTLC ID ({})", htlc.htlc_id)));
+ }
+ }
+ pending_idx = idx;
+ }
+ }
+ if pending_idx == core::usize::MAX {
+ #[cfg(any(test, fuzzing))]
+ // If we failed to find an HTLC to fail, make sure it was previously fulfilled and this
+ // is simply a duplicate fail, not previously failed and we failed-back too early.
+ debug_assert!(self.context.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
+ return Ok(None);
+ }
- holder_signer,
- shutdown_scriptpubkey,
- destination_script,
+ if self.context.channel_state.should_force_holding_cell() {
+ debug_assert!(force_holding_cell, "fail_htlc is only called with !force_holding_cell when emptying the holding cell, so we shouldn't end up back in it!");
+ force_holding_cell = true;
+ }
- cur_holder_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
- cur_counterparty_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
- value_to_self_msat: msg.push_msat,
+ // Now update local state:
+ if force_holding_cell {
+ for pending_update in self.context.holding_cell_htlc_updates.iter() {
+ match pending_update {
+ &HTLCUpdateAwaitingACK::ClaimHTLC { htlc_id, .. } => {
+ if htlc_id_arg == htlc_id {
+ #[cfg(any(test, fuzzing))]
+ debug_assert!(self.context.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
+ return Ok(None);
+ }
+ },
+ &HTLCUpdateAwaitingACK::FailHTLC { htlc_id, .. } => {
+ if htlc_id_arg == htlc_id {
+ debug_assert!(false, "Tried to fail an HTLC that was already failed");
+ return Err(ChannelError::Ignore("Unable to find a pending HTLC which matched the given HTLC ID".to_owned()));
+ }
+ },
+ _ => {}
+ }
+ }
+ log_trace!(logger, "Placing failure for HTLC ID {} in holding cell in channel {}.", htlc_id_arg, &self.context.channel_id());
+ self.context.holding_cell_htlc_updates.push(HTLCUpdateAwaitingACK::FailHTLC {
+ htlc_id: htlc_id_arg,
+ err_packet,
+ });
+ return Ok(None);
+ }
- pending_inbound_htlcs: Vec::new(),
- pending_outbound_htlcs: Vec::new(),
- holding_cell_htlc_updates: Vec::new(),
- pending_update_fee: None,
- holding_cell_update_fee: None,
- next_holder_htlc_id: 0,
- next_counterparty_htlc_id: 0,
- update_time_counter: 1,
+ log_trace!(logger, "Failing HTLC ID {} back with a update_fail_htlc message in channel {}.", htlc_id_arg, &self.context.channel_id());
+ {
+ let htlc = &mut self.context.pending_inbound_htlcs[pending_idx];
+ htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailRelay(err_packet.clone()));
+ }
- resend_order: RAACommitmentOrder::CommitmentFirst,
+ Ok(Some(msgs::UpdateFailHTLC {
+ channel_id: self.context.channel_id(),
+ htlc_id: htlc_id_arg,
+ reason: err_packet
+ }))
+ }
- monitor_pending_channel_ready: false,
- monitor_pending_revoke_and_ack: false,
- monitor_pending_commitment_signed: false,
- monitor_pending_forwards: Vec::new(),
- monitor_pending_failures: Vec::new(),
- monitor_pending_finalized_fulfills: Vec::new(),
+ // Message handlers:
+ /// Updates the state of the channel to indicate that all channels in the batch have received
+ /// funding_signed and persisted their monitors.
+ /// The funding transaction is consequently allowed to be broadcast, and the channel can be
+ /// treated as a non-batch channel going forward.
+ pub fn set_batch_ready(&mut self) {
+ self.context.is_batch_funding = None;
+ self.context.channel_state.clear_waiting_for_batch();
+ }
- #[cfg(debug_assertions)]
- holder_max_commitment_tx_output: Mutex::new((msg.push_msat, msg.funding_satoshis * 1000 - msg.push_msat)),
- #[cfg(debug_assertions)]
- counterparty_max_commitment_tx_output: Mutex::new((msg.push_msat, msg.funding_satoshis * 1000 - msg.push_msat)),
+ /// Handles a channel_ready message from our peer. If we've already sent our channel_ready
+ /// and the channel is now usable (and public), this may generate an announcement_signatures to
+ /// reply with.
+ pub fn channel_ready<NS: Deref, L: Deref>(
+ &mut self, msg: &msgs::ChannelReady, node_signer: &NS, chain_hash: ChainHash,
+ user_config: &UserConfig, best_block: &BestBlock, logger: &L
+ ) -> Result<Option<msgs::AnnouncementSignatures>, ChannelError>
+ where
+ NS::Target: NodeSigner,
+ L::Target: Logger
+ {
+ if self.context.channel_state.is_peer_disconnected() {
+ self.context.workaround_lnd_bug_4006 = Some(msg.clone());
+ return Err(ChannelError::Ignore("Peer sent channel_ready when we needed a channel_reestablish. The peer is likely lnd, see https://github.com/lightningnetwork/lnd/issues/4006".to_owned()));
+ }
- last_sent_closing_fee: None,
- pending_counterparty_closing_signed: None,
- closing_fee_limits: None,
- target_closing_feerate_sats_per_kw: None,
+ if let Some(scid_alias) = msg.short_channel_id_alias {
+ if Some(scid_alias) != self.context.short_channel_id {
+ // The scid alias provided can be used to route payments *from* our counterparty,
+ // i.e. can be used for inbound payments and provided in invoices, but is not used
+ // when routing outbound payments.
+ self.context.latest_inbound_scid_alias = Some(scid_alias);
+ }
+ }
- inbound_awaiting_accept: true,
+ // Our channel_ready shouldn't have been sent if we are waiting for other channels in the
+ // batch, but we can still receive channel_ready messages from the counterparty.
+ let mut check_reconnection = false;
+ match &self.context.channel_state {
+ ChannelState::AwaitingChannelReady(flags) => {
+ let flags = *flags & !FundedStateFlags::ALL;
+ debug_assert!(!flags.is_set(AwaitingChannelReadyFlags::OUR_CHANNEL_READY) || !flags.is_set(AwaitingChannelReadyFlags::WAITING_FOR_BATCH));
+ if flags & !AwaitingChannelReadyFlags::WAITING_FOR_BATCH == AwaitingChannelReadyFlags::THEIR_CHANNEL_READY {
+ // If we reconnected before sending our `channel_ready` they may still resend theirs.
+ check_reconnection = true;
+ } else if (flags & !AwaitingChannelReadyFlags::WAITING_FOR_BATCH).is_empty() {
+ self.context.channel_state.set_their_channel_ready();
+ } else if flags == AwaitingChannelReadyFlags::OUR_CHANNEL_READY {
+ self.context.channel_state = ChannelState::ChannelReady(self.context.channel_state.with_funded_state_flags_mask().into());
+ self.context.update_time_counter += 1;
+ } else {
+ // We're in `WAITING_FOR_BATCH`, so we must wait until all channels in the batch are ready.
+ debug_assert!(flags.is_set(AwaitingChannelReadyFlags::WAITING_FOR_BATCH));
+ }
+ }
+ // If we reconnected before sending our `channel_ready` they may still resend theirs.
+ ChannelState::ChannelReady(_) => check_reconnection = true,
+ _ => return Err(ChannelError::Close("Peer sent a channel_ready at a strange time".to_owned())),
+ }
+ if check_reconnection {
+ // They probably disconnected/reconnected and re-sent the channel_ready, which is
+ // required, or they're sending a fresh SCID alias.
+ let expected_point =
+ if self.context.cur_counterparty_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 1 {
+ // If they haven't ever sent an updated point, the point they send should match
+ // the current one.
+ self.context.counterparty_cur_commitment_point
+ } else if self.context.cur_counterparty_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 2 {
+ // If we've advanced the commitment number once, the second commitment point is
+ // at `counterparty_prev_commitment_point`, which is not yet revoked.
+ debug_assert!(self.context.counterparty_prev_commitment_point.is_some());
+ self.context.counterparty_prev_commitment_point
+ } else {
+ // If they have sent updated points, channel_ready is always supposed to match
+ // their "first" point, which we re-derive here.
+ Some(PublicKey::from_secret_key(&self.context.secp_ctx, &SecretKey::from_slice(
+ &self.context.commitment_secrets.get_secret(INITIAL_COMMITMENT_NUMBER - 1).expect("We should have all prev secrets available")
+ ).expect("We already advanced, so previous secret keys should have been validated already")))
+ };
+ if expected_point != Some(msg.next_per_commitment_point) {
+ return Err(ChannelError::Close("Peer sent a reconnect channel_ready with a different point".to_owned()));
+ }
+ return Ok(None);
+ }
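+ // Recap of the retransmit check above: if the counterparty has never updated their point we
+ // expect `counterparty_cur_commitment_point`; after exactly one update we expect
+ // `counterparty_prev_commitment_point`; after two or more we re-derive their "first" point
+ // from the revealed commitment secret at index INITIAL_COMMITMENT_NUMBER - 1.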
- funding_tx_confirmed_in: None,
- funding_tx_confirmation_height: 0,
- short_channel_id: None,
- channel_creation_height: current_chain_height,
+ self.context.counterparty_prev_commitment_point = self.context.counterparty_cur_commitment_point;
+ self.context.counterparty_cur_commitment_point = Some(msg.next_per_commitment_point);
- feerate_per_kw: msg.feerate_per_kw,
- channel_value_satoshis: msg.funding_satoshis,
- counterparty_dust_limit_satoshis: msg.dust_limit_satoshis,
- holder_dust_limit_satoshis: MIN_CHAN_DUST_LIMIT_SATOSHIS,
- counterparty_max_htlc_value_in_flight_msat: cmp::min(msg.max_htlc_value_in_flight_msat, msg.funding_satoshis * 1000),
- holder_max_htlc_value_in_flight_msat: get_holder_max_htlc_value_in_flight_msat(msg.funding_satoshis, &config.channel_handshake_config),
- counterparty_selected_channel_reserve_satoshis: Some(msg.channel_reserve_satoshis),
- holder_selected_channel_reserve_satoshis,
- counterparty_htlc_minimum_msat: msg.htlc_minimum_msat,
- holder_htlc_minimum_msat: if config.channel_handshake_config.our_htlc_minimum_msat == 0 { 1 } else { config.channel_handshake_config.our_htlc_minimum_msat },
- counterparty_max_accepted_htlcs: msg.max_accepted_htlcs,
- holder_max_accepted_htlcs: cmp::min(config.channel_handshake_config.our_max_accepted_htlcs, MAX_HTLCS),
- minimum_depth: Some(cmp::max(config.channel_handshake_config.minimum_depth, 1)),
+ log_info!(logger, "Received channel_ready from peer for channel {}", &self.context.channel_id());
- counterparty_forwarding_info: None,
+ Ok(self.get_announcement_sigs(node_signer, chain_hash, user_config, best_block.height(), logger))
+ }
- channel_transaction_parameters: ChannelTransactionParameters {
- holder_pubkeys: pubkeys,
- holder_selected_contest_delay: config.channel_handshake_config.our_to_self_delay,
- is_outbound_from_holder: false,
- counterparty_parameters: Some(CounterpartyChannelTransactionParameters {
- selected_contest_delay: msg.to_self_delay,
- pubkeys: counterparty_pubkeys,
- }),
- funding_outpoint: None,
- opt_anchors: if opt_anchors { Some(()) } else { None },
- opt_non_zero_fee_anchors: None
- },
- funding_transaction: None,
-
- counterparty_cur_commitment_point: Some(msg.first_per_commitment_point),
- counterparty_prev_commitment_point: None,
- counterparty_node_id,
-
- counterparty_shutdown_scriptpubkey,
-
- commitment_secrets: CounterpartyCommitmentSecrets::new(),
-
- channel_update_status: ChannelUpdateStatus::Enabled,
- closing_signed_in_flight: false,
+ pub fn update_add_htlc<F, FE: Deref, L: Deref>(
+ &mut self, msg: &msgs::UpdateAddHTLC, mut pending_forward_status: PendingHTLCStatus,
+ create_pending_htlc_status: F, fee_estimator: &LowerBoundedFeeEstimator<FE>, logger: &L
+ ) -> Result<(), ChannelError>
+ where F: for<'a> Fn(&'a Self, PendingHTLCStatus, u16) -> PendingHTLCStatus,
+ FE::Target: FeeEstimator, L::Target: Logger,
+ {
+ if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
+ return Err(ChannelError::Close("Got add HTLC message when channel was not in an operational state".to_owned()));
+ }
+ // We can't accept HTLCs sent after we've sent a shutdown.
+ if self.context.channel_state.is_local_shutdown_sent() {
+ pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x4000|8);
+ }
+ // If the remote has sent a shutdown prior to adding this HTLC, then they are in violation of the spec.
+ if self.context.channel_state.is_remote_shutdown_sent() {
+ return Err(ChannelError::Close("Got add HTLC message when channel was not in an operational state".to_owned()));
+ }
+ if self.context.channel_state.is_peer_disconnected() {
+ return Err(ChannelError::Close("Peer sent update_add_htlc when we needed a channel_reestablish".to_owned()));
+ }
+ if msg.amount_msat > self.context.channel_value_satoshis * 1000 {
+ return Err(ChannelError::Close("Remote side tried to send more than the total value of the channel".to_owned()));
+ }
+ if msg.amount_msat == 0 {
+ return Err(ChannelError::Close("Remote side tried to send a 0-msat HTLC".to_owned()));
+ }
+ if msg.amount_msat < self.context.holder_htlc_minimum_msat {
+ return Err(ChannelError::Close(format!("Remote side tried to send less than our minimum HTLC value. Lower limit: ({}). Actual: ({})", self.context.holder_htlc_minimum_msat, msg.amount_msat)));
+ }
- announcement_sigs: None,
+ let inbound_stats = self.context.get_inbound_pending_htlc_stats(None);
+ let outbound_stats = self.context.get_outbound_pending_htlc_stats(None);
+ if inbound_stats.pending_htlcs + 1 > self.context.holder_max_accepted_htlcs as u32 {
+ return Err(ChannelError::Close(format!("Remote tried to push more than our max accepted HTLCs ({})", self.context.holder_max_accepted_htlcs)));
+ }
+ if inbound_stats.pending_htlcs_value_msat + msg.amount_msat > self.context.holder_max_htlc_value_in_flight_msat {
+ return Err(ChannelError::Close(format!("Remote HTLC add would put them over our max HTLC value ({})", self.context.holder_max_htlc_value_in_flight_msat)));
+ }
- #[cfg(any(test, fuzzing))]
- next_local_commitment_tx_fee_info_cached: Mutex::new(None),
- #[cfg(any(test, fuzzing))]
- next_remote_commitment_tx_fee_info_cached: Mutex::new(None),
+ // Check holder_selected_channel_reserve_satoshis (we're getting paid, so they have to at
+ // least keep the reserve_satoshis we told them to always hold as a direct payment, so that
+ // they lose something if we punish them for broadcasting an old state).
+ // Note that we don't really care about having a small/no to_remote output in our local
+ // commitment transactions, as the purpose of the channel reserve is to ensure we can
+ // punish *them* if they misbehave, so we discount any outbound HTLCs which will not be
+ // present in the next commitment transaction we send them (at least for fulfilled ones,
+ // failed ones won't modify value_to_self).
+ // Note that we will send HTLCs which another instance of rust-lightning would think
+ // violate the reserve value if we do not do this (as we forget inbound HTLCs from the
+ // Channel state once they are no longer present in the next received commitment
+ // transaction).
+ let mut removed_outbound_total_msat = 0;
+ for ref htlc in self.context.pending_outbound_htlcs.iter() {
+ if let OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(_)) = htlc.state {
+ removed_outbound_total_msat += htlc.amount_msat;
+ } else if let OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(_)) = htlc.state {
+ removed_outbound_total_msat += htlc.amount_msat;
+ }
+ }
- workaround_lnd_bug_4006: None,
- sent_message_awaiting_response: None,
+ let max_dust_htlc_exposure_msat = self.context.get_max_dust_htlc_exposure_msat(fee_estimator);
+ let (htlc_timeout_dust_limit, htlc_success_dust_limit) = if self.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
+ (0, 0)
+ } else {
+ let dust_buffer_feerate = self.context.get_dust_buffer_feerate(None) as u64;
+ (dust_buffer_feerate * htlc_timeout_tx_weight(self.context.get_channel_type()) / 1000,
+ dust_buffer_feerate * htlc_success_tx_weight(self.context.get_channel_type()) / 1000)
+ };
+ let exposure_dust_limit_timeout_sats = htlc_timeout_dust_limit + self.context.counterparty_dust_limit_satoshis;
+ if msg.amount_msat / 1000 < exposure_dust_limit_timeout_sats {
+ let on_counterparty_tx_dust_htlc_exposure_msat = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat + msg.amount_msat;
+ if on_counterparty_tx_dust_htlc_exposure_msat > max_dust_htlc_exposure_msat {
+ log_info!(logger, "Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on counterparty commitment tx",
+ on_counterparty_tx_dust_htlc_exposure_msat, max_dust_htlc_exposure_msat);
+ pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x1000|7);
+ }
+ }
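+ // e.g. (illustrative, non-anchor figures): with a 2_500 sat/kW dust buffer feerate and a
+ // 663-weight HTLC-timeout transaction, htlc_timeout_dust_limit = 2_500 * 663 / 1_000 = 1_657
+ // sats; adding the 546-sat counterparty dust limit, any HTLC under 2_203 sats counts toward
+ // our dust exposure on the counterparty's commitment transaction in the check above.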
- latest_inbound_scid_alias: None,
- outbound_scid_alias,
+ let exposure_dust_limit_success_sats = htlc_success_dust_limit + self.context.holder_dust_limit_satoshis;
+ if msg.amount_msat / 1000 < exposure_dust_limit_success_sats {
+ let on_holder_tx_dust_htlc_exposure_msat = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat + msg.amount_msat;
+ if on_holder_tx_dust_htlc_exposure_msat > max_dust_htlc_exposure_msat {
+ log_info!(logger, "Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on holder commitment tx",
+ on_holder_tx_dust_htlc_exposure_msat, max_dust_htlc_exposure_msat);
+ pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x1000|7);
+ }
+ }
- channel_pending_event_emitted: false,
- channel_ready_event_emitted: false,
+ let pending_value_to_self_msat =
+ self.context.value_to_self_msat + inbound_stats.pending_htlcs_value_msat - removed_outbound_total_msat;
+ let pending_remote_value_msat =
+ self.context.channel_value_satoshis * 1000 - pending_value_to_self_msat;
+ if pending_remote_value_msat < msg.amount_msat {
+ return Err(ChannelError::Close("Remote HTLC add would overdraw remaining funds".to_owned()));
+ }
- #[cfg(any(test, fuzzing))]
- historical_inbound_htlc_fulfills: HashSet::new(),
+ // Check that the remote can afford to pay for this HTLC on-chain at the current
+ // feerate_per_kw, while maintaining their channel reserve (as required by the spec).
+ {
+ let remote_commit_tx_fee_msat = if self.context.is_outbound() { 0 } else {
+ let htlc_candidate = HTLCCandidate::new(msg.amount_msat, HTLCInitiator::RemoteOffered);
+ self.context.next_remote_commit_tx_fee_msat(htlc_candidate, None) // Don't include the extra fee spike buffer HTLC in calculations
+ };
+ let anchor_outputs_value_msat = if !self.context.is_outbound() && self.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
+ ANCHOR_OUTPUT_VALUE_SATOSHI * 2 * 1000
+ } else {
+ 0
+ };
+ if pending_remote_value_msat.saturating_sub(msg.amount_msat).saturating_sub(anchor_outputs_value_msat) < remote_commit_tx_fee_msat {
+ return Err(ChannelError::Close("Remote HTLC add would not leave enough to pay for fees".to_owned()));
+ };
+ if pending_remote_value_msat.saturating_sub(msg.amount_msat).saturating_sub(remote_commit_tx_fee_msat).saturating_sub(anchor_outputs_value_msat) < self.context.holder_selected_channel_reserve_satoshis * 1000 {
+ return Err(ChannelError::Close("Remote HTLC add would put them under remote reserve value".to_owned()));
+ }
+ }
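+ // e.g. (illustrative): if pending_remote_value_msat = 1_000_000, the incoming HTLC is
+ // 800_000 msat, there are no anchor outputs and the commitment fee is 150_000 msat, then
+ // 1_000_000 - 800_000 - 150_000 = 50_000 msat remains, and the add is rejected unless
+ // that still covers holder_selected_channel_reserve_satoshis * 1000.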
- channel_type,
- channel_keys_id,
+ let anchor_outputs_value_msat = if self.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
+ ANCHOR_OUTPUT_VALUE_SATOSHI * 2 * 1000
+ } else {
+ 0
+ };
+ if !self.context.is_outbound() {
+ // `Some(())` is for the fee spike buffer we keep for the remote. This deviates from
+ // the spec because the fee spike buffer requirement doesn't exist on the receiver's
+ // side, only on the sender's. Note that with anchor outputs we are no longer as
+ // sensitive to fee spikes, so the buffer multiple is only applied on non-anchor channels.
+ let htlc_candidate = HTLCCandidate::new(msg.amount_msat, HTLCInitiator::RemoteOffered);
+ let mut remote_fee_cost_incl_stuck_buffer_msat = self.context.next_remote_commit_tx_fee_msat(htlc_candidate, Some(()));
+ if !self.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
+ remote_fee_cost_incl_stuck_buffer_msat *= FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE;
+ }
+ if pending_remote_value_msat.saturating_sub(msg.amount_msat).saturating_sub(self.context.holder_selected_channel_reserve_satoshis * 1000).saturating_sub(anchor_outputs_value_msat) < remote_fee_cost_incl_stuck_buffer_msat {
+ // Note that if the pending_forward_status is not updated here, then it's because we're already failing
+ // the HTLC, i.e. its status is already set to failing.
+ log_info!(logger, "Attempting to fail HTLC due to fee spike buffer violation in channel {}. Rebalancing is required.", &self.context.channel_id());
+ pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x1000|7);
+ }
+ } else {
+ // Check that they won't violate our local required channel reserve by adding this HTLC.
+ let htlc_candidate = HTLCCandidate::new(msg.amount_msat, HTLCInitiator::RemoteOffered);
+ let local_commit_tx_fee_msat = self.context.next_local_commit_tx_fee_msat(htlc_candidate, None);
+ if self.context.value_to_self_msat < self.context.counterparty_selected_channel_reserve_satoshis.unwrap() * 1000 + local_commit_tx_fee_msat + anchor_outputs_value_msat {
+ return Err(ChannelError::Close("Cannot accept HTLC that would put our balance under counterparty-announced channel reserve value".to_owned()));
+ }
+ }
+ if self.context.next_counterparty_htlc_id != msg.htlc_id {
+ return Err(ChannelError::Close(format!("Remote skipped HTLC ID (skipped ID: {})", self.context.next_counterparty_htlc_id)));
+ }
+ if msg.cltv_expiry >= 500000000 {
+ return Err(ChannelError::Close("Remote provided CLTV expiry in seconds instead of block height".to_owned()));
+ }
- pending_monitor_updates: Vec::new(),
+ if self.context.channel_state.is_local_shutdown_sent() {
+ if let PendingHTLCStatus::Forward(_) = pending_forward_status {
+ panic!("ChannelManager shouldn't be trying to add a forwardable HTLC after we've started closing");
}
- };
+ }
- Ok(chan)
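+ // Every check above runs before any channel state is mutated, so a rejected
+ // update_add_htlc leaves `next_counterparty_htlc_id` and `pending_inbound_htlcs`
+ // untouched and the resulting `ChannelError::Close` can be acted on safely.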
+ // Now update local state:
+ self.context.next_counterparty_htlc_id += 1;
+ self.context.pending_inbound_htlcs.push(InboundHTLCOutput {
+ htlc_id: msg.htlc_id,
+ amount_msat: msg.amount_msat,
+ payment_hash: msg.payment_hash,
+ cltv_expiry: msg.cltv_expiry,
+ state: InboundHTLCState::RemoteAnnounced(pending_forward_status),
+ });
+ Ok(())
}
+ /// Marks an outbound HTLC as removed once we have received an
+ /// update_fulfill/update_fail/update_fail_malformed message for it from our peer.
#[inline]
- fn get_closing_scriptpubkey(&self) -> Script {
- // The shutdown scriptpubkey is set on channel opening when option_upfront_shutdown_script
- // is signaled. Otherwise, it is set when sending a shutdown message. Calling this method
- // outside of those situations will fail.
- self.context.shutdown_scriptpubkey.clone().unwrap().into_inner()
+ fn mark_outbound_htlc_removed(&mut self, htlc_id: u64, check_preimage: Option<PaymentPreimage>, fail_reason: Option<HTLCFailReason>) -> Result<&OutboundHTLCOutput, ChannelError> {
+ assert!(!(check_preimage.is_some() && fail_reason.is_some()), "cannot fail while we have a preimage");
+ for htlc in self.context.pending_outbound_htlcs.iter_mut() {
+ if htlc.htlc_id == htlc_id {
+ let outcome = match check_preimage {
+ None => fail_reason.into(),
+ Some(payment_preimage) => {
+ let payment_hash = PaymentHash(Sha256::hash(&payment_preimage.0[..]).to_byte_array());
+ if payment_hash != htlc.payment_hash {
+ return Err(ChannelError::Close(format!("Remote tried to fulfill HTLC ({}) with an incorrect preimage", htlc_id)));
+ }
+ OutboundHTLCOutcome::Success(Some(payment_preimage))
+ }
+ };
+ match htlc.state {
+ OutboundHTLCState::LocalAnnounced(_) =>
+ return Err(ChannelError::Close(format!("Remote tried to fulfill/fail HTLC ({}) before it had been committed", htlc_id))),
+ OutboundHTLCState::Committed => {
+ htlc.state = OutboundHTLCState::RemoteRemoved(outcome);
+ },
+ OutboundHTLCState::AwaitingRemoteRevokeToRemove(_) | OutboundHTLCState::AwaitingRemovedRemoteRevoke(_) | OutboundHTLCState::RemoteRemoved(_) =>
+ return Err(ChannelError::Close(format!("Remote tried to fulfill/fail HTLC ({}) that they'd already fulfilled/failed", htlc_id))),
+ }
+ return Ok(htlc);
+ }
+ }
+ Err(ChannelError::Close("Remote tried to fulfill/fail an HTLC we couldn't find".to_owned()))
}
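+ // State recap for the resolution above: only a `Committed` outbound HTLC may move to
+ // `RemoteRemoved(outcome)`. A still-`LocalAnnounced` HTLC was never committed by the
+ // peer, and one already in `RemoteRemoved`/`AwaitingRemoteRevokeToRemove`/
+ // `AwaitingRemovedRemoteRevoke` was already resolved, so both cases are protocol
+ // violations that close the channel.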
- #[inline]
- fn get_closing_transaction_weight(&self, a_scriptpubkey: Option<&Script>, b_scriptpubkey: Option<&Script>) -> u64 {
- let mut ret =
- (4 + // version
- 1 + // input count
- 36 + // prevout
- 1 + // script length (0)
- 4 + // sequence
- 1 + // output count
- 4 // lock time
- )*4 + // * 4 for non-witness parts
- 2 + // witness marker and flag
- 1 + // witness element count
- 4 + // 4 element lengths (2 sigs, multisig dummy, and witness script)
- self.context.get_funding_redeemscript().len() as u64 + // funding witness script
- 2*(1 + 71); // two signatures + sighash type flags
- if let Some(spk) = a_scriptpubkey {
- ret += ((8+1) + // output values and script length
- spk.len() as u64) * 4; // scriptpubkey and witness multiplier
+ pub fn update_fulfill_htlc(&mut self, msg: &msgs::UpdateFulfillHTLC) -> Result<(HTLCSource, u64), ChannelError> {
+ if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
+ return Err(ChannelError::Close("Got fulfill HTLC message when channel was not in an operational state".to_owned()));
}
- if let Some(spk) = b_scriptpubkey {
- ret += ((8+1) + // output values and script length
- spk.len() as u64) * 4; // scriptpubkey and witness multiplier
+ if self.context.channel_state.is_peer_disconnected() {
+ return Err(ChannelError::Close("Peer sent update_fulfill_htlc when we needed a channel_reestablish".to_owned()));
}
- ret
+
+ self.mark_outbound_htlc_removed(msg.htlc_id, Some(msg.payment_preimage), None).map(|htlc| (htlc.source.clone(), htlc.amount_msat))
}
- #[inline]
- fn build_closing_transaction(&self, proposed_total_fee_satoshis: u64, skip_remote_output: bool) -> (ClosingTransaction, u64) {
- assert!(self.context.pending_inbound_htlcs.is_empty());
- assert!(self.context.pending_outbound_htlcs.is_empty());
- assert!(self.context.pending_update_fee.is_none());
+ pub fn update_fail_htlc(&mut self, msg: &msgs::UpdateFailHTLC, fail_reason: HTLCFailReason) -> Result<(), ChannelError> {
+ if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
+ return Err(ChannelError::Close("Got fail HTLC message when channel was not in an operational state".to_owned()));
+ }
+ if self.context.channel_state.is_peer_disconnected() {
+ return Err(ChannelError::Close("Peer sent update_fail_htlc when we needed a channel_reestablish".to_owned()));
+ }
- let mut total_fee_satoshis = proposed_total_fee_satoshis;
- let mut value_to_holder: i64 = (self.context.value_to_self_msat as i64) / 1000 - if self.context.is_outbound() { total_fee_satoshis as i64 } else { 0 };
- let mut value_to_counterparty: i64 = ((self.context.channel_value_satoshis * 1000 - self.context.value_to_self_msat) as i64 / 1000) - if self.context.is_outbound() { 0 } else { total_fee_satoshis as i64 };
+ self.mark_outbound_htlc_removed(msg.htlc_id, None, Some(fail_reason))?;
+ Ok(())
+ }
- if value_to_holder < 0 {
- assert!(self.context.is_outbound());
- total_fee_satoshis += (-value_to_holder) as u64;
- } else if value_to_counterparty < 0 {
- assert!(!self.context.is_outbound());
- total_fee_satoshis += (-value_to_counterparty) as u64;
+ pub fn update_fail_malformed_htlc(&mut self, msg: &msgs::UpdateFailMalformedHTLC, fail_reason: HTLCFailReason) -> Result<(), ChannelError> {
+ if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
+ return Err(ChannelError::Close("Got fail malformed HTLC message when channel was not in an operational state".to_owned()));
}
-
- if skip_remote_output || value_to_counterparty as u64 <= self.context.holder_dust_limit_satoshis {
- value_to_counterparty = 0;
+ if self.context.channel_state.is_peer_disconnected() {
+ return Err(ChannelError::Close("Peer sent update_fail_malformed_htlc when we needed a channel_reestablish".to_owned()));
}
- if value_to_holder as u64 <= self.context.holder_dust_limit_satoshis {
- value_to_holder = 0;
+ self.mark_outbound_htlc_removed(msg.htlc_id, None, Some(fail_reason))?;
+ Ok(())
+ }
+
+ pub fn commitment_signed<L: Deref>(&mut self, msg: &msgs::CommitmentSigned, logger: &L) -> Result<Option<ChannelMonitorUpdate>, ChannelError>
+ where L::Target: Logger
+ {
+ if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
+ return Err(ChannelError::Close("Got commitment signed message when channel was not in an operational state".to_owned()));
+ }
+ if self.context.channel_state.is_peer_disconnected() {
+ return Err(ChannelError::Close("Peer sent commitment_signed when we needed a channel_reestablish".to_owned()));
+ }
+ if self.context.channel_state.is_both_sides_shutdown() && self.context.last_sent_closing_fee.is_some() {
+ return Err(ChannelError::Close("Peer sent commitment_signed after we'd started exchanging closing_signeds".to_owned()));
}
- assert!(self.context.shutdown_scriptpubkey.is_some());
- let holder_shutdown_script = self.get_closing_scriptpubkey();
- let counterparty_shutdown_script = self.context.counterparty_shutdown_scriptpubkey.clone().unwrap();
- let funding_outpoint = self.funding_outpoint().into_bitcoin_outpoint();
+ let funding_script = self.context.get_funding_redeemscript();
- let closing_transaction = ClosingTransaction::new(value_to_holder as u64, value_to_counterparty as u64, holder_shutdown_script, counterparty_shutdown_script, funding_outpoint);
- (closing_transaction, total_fee_satoshis)
- }
+ let keys = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number);
- fn funding_outpoint(&self) -> OutPoint {
- self.context.channel_transaction_parameters.funding_outpoint.unwrap()
- }
+ let commitment_stats = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &keys, true, false, logger);
+ let commitment_txid = {
+ let trusted_tx = commitment_stats.tx.trust();
+ let bitcoin_tx = trusted_tx.built_transaction();
+ let sighash = bitcoin_tx.get_sighash_all(&funding_script, self.context.channel_value_satoshis);
- /// Claims an HTLC while we're disconnected from a peer, dropping the [`ChannelMonitorUpdate`]
- /// entirely.
- ///
- /// The [`ChannelMonitor`] for this channel MUST be updated out-of-band with the preimage
- /// provided (i.e. without calling [`crate::chain::Watch::update_channel`]).
- ///
- /// The HTLC claim will end up in the holding cell (because the caller must ensure the peer is
- /// disconnected).
- pub fn claim_htlc_while_disconnected_dropping_mon_update<L: Deref>
- (&mut self, htlc_id_arg: u64, payment_preimage_arg: PaymentPreimage, logger: &L)
- where L::Target: Logger {
- // Assert that we'll add the HTLC claim to the holding cell in `get_update_fulfill_htlc`
- // (see equivalent if condition there).
- assert!(self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32) != 0);
- let mon_update_id = self.context.latest_monitor_update_id; // Forget the ChannelMonitor update
- let fulfill_resp = self.get_update_fulfill_htlc(htlc_id_arg, payment_preimage_arg, logger);
- self.context.latest_monitor_update_id = mon_update_id;
- if let UpdateFulfillFetch::NewClaim { msg, .. } = fulfill_resp {
- assert!(msg.is_none()); // The HTLC must have ended up in the holding cell.
- }
- }
+ log_trace!(logger, "Checking commitment tx signature {} by key {} against tx {} (sighash {}) with redeemscript {} in channel {}",
+ log_bytes!(msg.signature.serialize_compact()[..]),
+ log_bytes!(self.context.counterparty_funding_pubkey().serialize()), encode::serialize_hex(&bitcoin_tx.transaction),
+ log_bytes!(sighash[..]), encode::serialize_hex(&funding_script), &self.context.channel_id());
+ if let Err(_) = self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, &self.context.counterparty_funding_pubkey()) {
+ return Err(ChannelError::Close("Invalid commitment tx signature from peer".to_owned()));
+ }
+ bitcoin_tx.txid
+ };
+ let mut htlcs_cloned: Vec<_> = commitment_stats.htlcs_included.iter().map(|htlc| (htlc.0.clone(), htlc.1.map(|h| h.clone()))).collect();
- fn get_update_fulfill_htlc<L: Deref>(&mut self, htlc_id_arg: u64, payment_preimage_arg: PaymentPreimage, logger: &L) -> UpdateFulfillFetch where L::Target: Logger {
- // Either ChannelReady got set (which means it won't be unset) or there is no way any
- // caller thought we could have something claimed (cause we wouldn't have accepted in an
- // incoming HTLC anyway). If we got to ShutdownComplete, callers aren't allowed to call us,
- // either.
- if (self.context.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) {
- panic!("Was asked to fulfill an HTLC when channel was not in an operational state");
+ // If our counterparty updated the channel fee in this commitment transaction, check that
+ // they can actually afford the new fee now.
+ let update_fee = if let Some((_, update_state)) = self.context.pending_update_fee {
+ update_state == FeeUpdateState::RemoteAnnounced
+ } else { false };
+ if update_fee {
+ debug_assert!(!self.context.is_outbound());
+ let counterparty_reserve_we_require_msat = self.context.holder_selected_channel_reserve_satoshis * 1000;
+ if commitment_stats.remote_balance_msat < commitment_stats.total_fee_sat * 1000 + counterparty_reserve_we_require_msat {
+ return Err(ChannelError::Close("Funding remote cannot afford proposed new fee".to_owned()));
+ }
}
- assert_eq!(self.context.channel_state & ChannelState::ShutdownComplete as u32, 0);
-
- let payment_hash_calc = PaymentHash(Sha256::hash(&payment_preimage_arg.0[..]).into_inner());
-
- // ChannelManager may generate duplicate claims/fails due to HTLC update events from
- // on-chain ChannelsMonitors during block rescan. Ideally we'd figure out a way to drop
- // these, but for now we just have to treat them as normal.
-
- let mut pending_idx = core::usize::MAX;
- let mut htlc_value_msat = 0;
- for (idx, htlc) in self.context.pending_inbound_htlcs.iter().enumerate() {
- if htlc.htlc_id == htlc_id_arg {
- assert_eq!(htlc.payment_hash, payment_hash_calc);
- match htlc.state {
- InboundHTLCState::Committed => {},
- InboundHTLCState::LocalRemoved(ref reason) => {
- if let &InboundHTLCRemovalReason::Fulfill(_) = reason {
- } else {
- log_warn!(logger, "Have preimage and want to fulfill HTLC with payment hash {} we already failed against channel {}", log_bytes!(htlc.payment_hash.0), log_bytes!(self.context.channel_id()));
- debug_assert!(false, "Tried to fulfill an HTLC that was already failed");
+ #[cfg(any(test, fuzzing))]
+ {
+ if self.context.is_outbound() {
+ let projected_commit_tx_info = self.context.next_local_commitment_tx_fee_info_cached.lock().unwrap().take();
+ *self.context.next_remote_commitment_tx_fee_info_cached.lock().unwrap() = None;
+ if let Some(info) = projected_commit_tx_info {
+ let total_pending_htlcs = self.context.pending_inbound_htlcs.len() + self.context.pending_outbound_htlcs.len()
+ + self.context.holding_cell_htlc_updates.len();
+ if info.total_pending_htlcs == total_pending_htlcs
+ && info.next_holder_htlc_id == self.context.next_holder_htlc_id
+ && info.next_counterparty_htlc_id == self.context.next_counterparty_htlc_id
+ && info.feerate == self.context.feerate_per_kw {
+ assert_eq!(commitment_stats.total_fee_sat, info.fee / 1000);
}
- return UpdateFulfillFetch::DuplicateClaim {};
- },
- _ => {
- debug_assert!(false, "Have an inbound HTLC we tried to claim before it was fully committed to");
- // Don't return in release mode here so that we can update channel_monitor
- }
}
- pending_idx = idx;
- htlc_value_msat = htlc.amount_msat;
- break;
}
}
- if pending_idx == core::usize::MAX {
- #[cfg(any(test, fuzzing))]
- // If we failed to find an HTLC to fulfill, make sure it was previously fulfilled and
- // this is simply a duplicate claim, not previously failed and we lost funds.
- debug_assert!(self.context.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
- return UpdateFulfillFetch::DuplicateClaim {};
+
+ if msg.htlc_signatures.len() != commitment_stats.num_nondust_htlcs {
+ return Err(ChannelError::Close(format!("Got wrong number of HTLC signatures ({}) from remote. It must be {}", msg.htlc_signatures.len(), commitment_stats.num_nondust_htlcs)));
}
- // Now update local state:
- //
- // We have to put the payment_preimage in the channel_monitor right away here to ensure we
- // can claim it even if the channel hits the chain before we see their next commitment.
- self.context.latest_monitor_update_id += 1;
- let monitor_update = ChannelMonitorUpdate {
- update_id: self.context.latest_monitor_update_id,
- updates: vec![ChannelMonitorUpdateStep::PaymentPreimage {
- payment_preimage: payment_preimage_arg.clone(),
- }],
- };
+ // Up to LDK 0.0.115, HTLC information was required to be duplicated in the
+ // `htlcs_and_sigs` vec and in the `holder_commitment_tx` itself, both of which were passed
+ // in the `ChannelMonitorUpdate`. In 0.0.115, support for having a separate set of
+ // outbound-non-dust-HTLCSources in the `ChannelMonitorUpdate` was added, however for
+ // backwards compatibility, we never use it in production. To provide test coverage, here,
+ // we randomly decide (in test/fuzzing builds) to use the new vec sometimes.
+ #[allow(unused_assignments, unused_mut)]
+ let mut separate_nondust_htlc_sources = false;
+ #[cfg(all(feature = "std", any(test, fuzzing)))] {
+ use core::hash::{BuildHasher, Hasher};
+ // Get a random value using the only std API to do so - the DefaultHasher
+ let rand_val = std::collections::hash_map::RandomState::new().build_hasher().finish();
+ separate_nondust_htlc_sources = rand_val % 2 == 0;
+ }
- if (self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32)) != 0 {
- // Note that this condition is the same as the assertion in
- // `claim_htlc_while_disconnected_dropping_mon_update` and must match exactly -
- // `claim_htlc_while_disconnected_dropping_mon_update` would not work correctly if we
- // do not not get into this branch.
- for pending_update in self.context.holding_cell_htlc_updates.iter() {
- match pending_update {
- &HTLCUpdateAwaitingACK::ClaimHTLC { htlc_id, .. } => {
- if htlc_id_arg == htlc_id {
- // Make sure we don't leave latest_monitor_update_id incremented here:
- self.context.latest_monitor_update_id -= 1;
- #[cfg(any(test, fuzzing))]
- debug_assert!(self.context.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
- return UpdateFulfillFetch::DuplicateClaim {};
- }
- },
- &HTLCUpdateAwaitingACK::FailHTLC { htlc_id, .. } => {
- if htlc_id_arg == htlc_id {
- log_warn!(logger, "Have preimage and want to fulfill HTLC with pending failure against channel {}", log_bytes!(self.context.channel_id()));
- // TODO: We may actually be able to switch to a fulfill here, though its
- // rare enough it may not be worth the complexity burden.
- debug_assert!(false, "Tried to fulfill an HTLC that was already failed");
- return UpdateFulfillFetch::NewClaim { monitor_update, htlc_value_msat, msg: None };
- }
- },
- _ => {}
+ let mut nondust_htlc_sources = Vec::with_capacity(htlcs_cloned.len());
+ let mut htlcs_and_sigs = Vec::with_capacity(htlcs_cloned.len());
+ for (idx, (htlc, mut source_opt)) in htlcs_cloned.drain(..).enumerate() {
+ if let Some(_) = htlc.transaction_output_index {
+ let htlc_tx = chan_utils::build_htlc_transaction(&commitment_txid, commitment_stats.feerate_per_kw,
+ self.context.get_counterparty_selected_contest_delay().unwrap(), &htlc, &self.context.channel_type,
+ &keys.broadcaster_delayed_payment_key, &keys.revocation_key);
+
+ let htlc_redeemscript = chan_utils::get_htlc_redeemscript(&htlc, &self.context.channel_type, &keys);
+ let htlc_sighashtype = if self.context.channel_type.supports_anchors_zero_fee_htlc_tx() { EcdsaSighashType::SinglePlusAnyoneCanPay } else { EcdsaSighashType::All };
+ let htlc_sighash = hash_to_message!(&sighash::SighashCache::new(&htlc_tx).segwit_signature_hash(0, &htlc_redeemscript, htlc.amount_msat / 1000, htlc_sighashtype).unwrap()[..]);
+ log_trace!(logger, "Checking HTLC tx signature {} by key {} against tx {} (sighash {}) with redeemscript {} in channel {}.",
+ log_bytes!(msg.htlc_signatures[idx].serialize_compact()[..]), log_bytes!(keys.countersignatory_htlc_key.to_public_key().serialize()),
+ encode::serialize_hex(&htlc_tx), log_bytes!(htlc_sighash[..]), encode::serialize_hex(&htlc_redeemscript), &self.context.channel_id());
+ if let Err(_) = self.context.secp_ctx.verify_ecdsa(&htlc_sighash, &msg.htlc_signatures[idx], &keys.countersignatory_htlc_key.to_public_key()) {
+ return Err(ChannelError::Close("Invalid HTLC tx signature from peer".to_owned()));
+ }
+ if !separate_nondust_htlc_sources {
+ htlcs_and_sigs.push((htlc, Some(msg.htlc_signatures[idx]), source_opt.take()));
}
+ } else {
+ htlcs_and_sigs.push((htlc, None, source_opt.take()));
}
- log_trace!(logger, "Adding HTLC claim to holding_cell in channel {}! Current state: {}", log_bytes!(self.context.channel_id()), self.context.channel_state);
- self.context.holding_cell_htlc_updates.push(HTLCUpdateAwaitingACK::ClaimHTLC {
- payment_preimage: payment_preimage_arg, htlc_id: htlc_id_arg,
- });
- #[cfg(any(test, fuzzing))]
- self.context.historical_inbound_htlc_fulfills.insert(htlc_id_arg);
- return UpdateFulfillFetch::NewClaim { monitor_update, htlc_value_msat, msg: None };
+ if separate_nondust_htlc_sources {
+ if let Some(source) = source_opt.take() {
+ nondust_htlc_sources.push(source);
+ }
+ }
+ debug_assert!(source_opt.is_none(), "HTLCSource should have been put somewhere");
}
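
// The loop above applies a verify-then-reject pattern to each counterparty HTLC
// signature. The same pattern in isolation, as a sketch (names are illustrative,
// not LDK's API):
use bitcoin::secp256k1::{ecdsa::Signature, Message, PublicKey, Secp256k1, VerifyOnly};

fn check_counterparty_sig(
    secp_ctx: &Secp256k1<VerifyOnly>, sighash: &Message, sig: &Signature, key: &PublicKey,
) -> Result<(), String> {
    // verify_ecdsa returns Err for an invalid signature; surface that as a hard error,
    // mirroring the ChannelError::Close mapping above.
    secp_ctx.verify_ecdsa(sighash, sig, key)
        .map_err(|_| "Invalid HTLC tx signature from peer".to_owned())
}
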
- #[cfg(any(test, fuzzing))]
- self.context.historical_inbound_htlc_fulfills.insert(htlc_id_arg);
- {
- let htlc = &mut self.context.pending_inbound_htlcs[pending_idx];
- if let InboundHTLCState::Committed = htlc.state {
- } else {
- debug_assert!(false, "Have an inbound HTLC we tried to claim before it was fully committed to");
- return UpdateFulfillFetch::NewClaim { monitor_update, htlc_value_msat, msg: None };
+ let holder_commitment_tx = HolderCommitmentTransaction::new(
+ commitment_stats.tx,
+ msg.signature,
+ msg.htlc_signatures.clone(),
+ &self.context.get_holder_pubkeys().funding_pubkey,
+ self.context.counterparty_funding_pubkey()
+ );
+
+ self.context.holder_signer.as_ref().validate_holder_commitment(&holder_commitment_tx, commitment_stats.outbound_htlc_preimages)
+ .map_err(|_| ChannelError::Close("Failed to validate our commitment".to_owned()))?;
+
+ // Update state now that we've passed all the can-fail calls...
+ let mut need_commitment = false;
+ if let &mut Some((_, ref mut update_state)) = &mut self.context.pending_update_fee {
+ if *update_state == FeeUpdateState::RemoteAnnounced {
+ *update_state = FeeUpdateState::AwaitingRemoteRevokeToAnnounce;
+ need_commitment = true;
}
- log_trace!(logger, "Upgrading HTLC {} to LocalRemoved with a Fulfill in channel {}!", log_bytes!(htlc.payment_hash.0), log_bytes!(self.context.channel_id));
- htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::Fulfill(payment_preimage_arg.clone()));
}
- UpdateFulfillFetch::NewClaim {
- monitor_update,
- htlc_value_msat,
- msg: Some(msgs::UpdateFulfillHTLC {
- channel_id: self.context.channel_id(),
- htlc_id: htlc_id_arg,
- payment_preimage: payment_preimage_arg,
- }),
+ for htlc in self.context.pending_inbound_htlcs.iter_mut() {
+ let new_forward = if let &InboundHTLCState::RemoteAnnounced(ref forward_info) = &htlc.state {
+ Some(forward_info.clone())
+ } else { None };
+ if let Some(forward_info) = new_forward {
+ log_trace!(logger, "Updating HTLC {} to AwaitingRemoteRevokeToAnnounce due to commitment_signed in channel {}.",
+ &htlc.payment_hash, &self.context.channel_id);
+ htlc.state = InboundHTLCState::AwaitingRemoteRevokeToAnnounce(forward_info);
+ need_commitment = true;
+ }
+ }
+ let mut claimed_htlcs = Vec::new();
+ for htlc in self.context.pending_outbound_htlcs.iter_mut() {
+ if let &mut OutboundHTLCState::RemoteRemoved(ref mut outcome) = &mut htlc.state {
+ log_trace!(logger, "Updating HTLC {} to AwaitingRemoteRevokeToRemove due to commitment_signed in channel {}.",
+ &htlc.payment_hash, &self.context.channel_id);
+ // Grab the preimage, if it exists, instead of cloning
+ let mut reason = OutboundHTLCOutcome::Success(None);
+ mem::swap(outcome, &mut reason);
+ if let OutboundHTLCOutcome::Success(Some(preimage)) = reason {
+ // If a user (a) receives an HTLC claim using LDK 0.0.104 or before, then (b)
+ // upgrades to LDK 0.0.114 or later before the HTLC is fully resolved, we could
+ // have a `Success(None)` reason. In this case we could forget some HTLC
+ // claims, but such an upgrade is unlikely and including claimed HTLCs here
+ // fixes a bug which the user was exposed to on 0.0.104 when they started the
+ // claim anyway.
+ claimed_htlcs.push((SentHTLCId::from_source(&htlc.source), preimage));
+ }
+ htlc.state = OutboundHTLCState::AwaitingRemoteRevokeToRemove(reason);
+ need_commitment = true;
+ }
}
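
// "Grab the preimage ... instead of cloning" above uses a swap trick: move a value out
// from behind a &mut reference by swapping in a cheap placeholder. A self-contained
// sketch with illustrative types:
use core::mem;

enum Outcome { Success(Option<[u8; 32]>), Failure(&'static str) }

fn take_outcome(slot: &mut Outcome) -> Outcome {
    // Success(None) is cheap to construct, so it serves as the placeholder, exactly as
    // in the loop above.
    let mut taken = Outcome::Success(None);
    mem::swap(slot, &mut taken);
    taken
}
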
- }
- pub fn get_update_fulfill_htlc_and_commit<L: Deref>(&mut self, htlc_id: u64, payment_preimage: PaymentPreimage, logger: &L) -> UpdateFulfillCommitFetch where L::Target: Logger {
- let release_cs_monitor = self.context.pending_monitor_updates.iter().all(|upd| !upd.blocked);
- match self.get_update_fulfill_htlc(htlc_id, payment_preimage, logger) {
- UpdateFulfillFetch::NewClaim { mut monitor_update, htlc_value_msat, msg } => {
- // Even if we aren't supposed to let new monitor updates with commitment state
- // updates run, we still need to push the preimage ChannelMonitorUpdateStep no
- // matter what. Sadly, to push a new monitor update which flies before others
- // already queued, we have to insert it into the pending queue and update the
- // update_ids of all the following monitors.
- let unblocked_update_pos = if release_cs_monitor && msg.is_some() {
- let mut additional_update = self.build_commitment_no_status_check(logger);
- // build_commitment_no_status_check may bump latest_monitor_id but we want them
- // to be strictly increasing by one, so decrement it here.
- self.context.latest_monitor_update_id = monitor_update.update_id;
- monitor_update.updates.append(&mut additional_update.updates);
- self.context.pending_monitor_updates.push(PendingChannelMonitorUpdate {
- update: monitor_update, blocked: false,
- });
- self.context.pending_monitor_updates.len() - 1
- } else {
- let insert_pos = self.context.pending_monitor_updates.iter().position(|upd| upd.blocked)
- .unwrap_or(self.context.pending_monitor_updates.len());
- let new_mon_id = self.context.pending_monitor_updates.get(insert_pos)
- .map(|upd| upd.update.update_id).unwrap_or(monitor_update.update_id);
- monitor_update.update_id = new_mon_id;
- self.context.pending_monitor_updates.insert(insert_pos, PendingChannelMonitorUpdate {
- update: monitor_update, blocked: false,
- });
- for held_update in self.context.pending_monitor_updates.iter_mut().skip(insert_pos + 1) {
- held_update.update.update_id += 1;
- }
- if msg.is_some() {
- debug_assert!(false, "If there is a pending blocked monitor we should have MonitorUpdateInProgress set");
- let update = self.build_commitment_no_status_check(logger);
- self.context.pending_monitor_updates.push(PendingChannelMonitorUpdate {
- update, blocked: true,
- });
- }
- insert_pos
- };
- self.monitor_updating_paused(false, msg.is_some(), false, Vec::new(), Vec::new(), Vec::new());
- UpdateFulfillCommitFetch::NewClaim {
- monitor_update: &self.context.pending_monitor_updates.get(unblocked_update_pos)
- .expect("We just pushed the monitor update").update,
- htlc_value_msat,
- }
- },
- UpdateFulfillFetch::DuplicateClaim {} => UpdateFulfillCommitFetch::DuplicateClaim {},
+ self.context.latest_monitor_update_id += 1;
+ let mut monitor_update = ChannelMonitorUpdate {
+ update_id: self.context.latest_monitor_update_id,
+ updates: vec![ChannelMonitorUpdateStep::LatestHolderCommitmentTXInfo {
+ commitment_tx: holder_commitment_tx,
+ htlc_outputs: htlcs_and_sigs,
+ claimed_htlcs,
+ nondust_htlc_sources,
+ }]
+ };
+
+ self.context.cur_holder_commitment_transaction_number -= 1;
+ self.context.expecting_peer_commitment_signed = false;
+ // Note that if we need_commitment & !AwaitingRemoteRevoke we'll call
+ // build_commitment_no_status_check() next which will reset this to RAAFirst.
+ self.context.resend_order = RAACommitmentOrder::CommitmentFirst;
+
+ if self.context.channel_state.is_monitor_update_in_progress() {
+ // In case we initially failed monitor updating without requiring a response, we need
+ // to make sure the RAA gets sent first.
+ self.context.monitor_pending_revoke_and_ack = true;
+ if need_commitment && !self.context.channel_state.is_awaiting_remote_revoke() {
+ // If we were going to send a commitment_signed after the RAA, go ahead and do all
+ // the corresponding HTLC status updates so that
+ // get_last_commitment_update_for_send includes the right HTLCs.
+ self.context.monitor_pending_commitment_signed = true;
+ let mut additional_update = self.build_commitment_no_status_check(logger);
+ // build_commitment_no_status_check may bump latest_monitor_id but we want them to be
+ // strictly increasing by one, so decrement it here.
+ self.context.latest_monitor_update_id = monitor_update.update_id;
+ monitor_update.updates.append(&mut additional_update.updates);
+ }
+ log_debug!(logger, "Received valid commitment_signed from peer in channel {}, updated HTLC state but awaiting a monitor update resolution to reply.",
+ &self.context.channel_id);
+ return Ok(self.push_ret_blockable_mon_update(monitor_update));
}
+
+ let need_commitment_signed = if need_commitment && !self.context.channel_state.is_awaiting_remote_revoke() {
+ // If we're AwaitingRemoteRevoke we can't send a new commitment here, but that's ok -
+ // we'll send one right away when we get the revoke_and_ack when we
+ // free_holding_cell_htlcs().
+ let mut additional_update = self.build_commitment_no_status_check(logger);
+ // build_commitment_no_status_check may bump latest_monitor_id but we want them to be
+ // strictly increasing by one, so decrement it here.
+ self.context.latest_monitor_update_id = monitor_update.update_id;
+ monitor_update.updates.append(&mut additional_update.updates);
+ true
+ } else { false };
+
+ log_debug!(logger, "Received valid commitment_signed from peer in channel {}, updating HTLC state and responding with{} a revoke_and_ack.",
+ &self.context.channel_id(), if need_commitment_signed { " our own commitment_signed and" } else { "" });
+ self.monitor_updating_paused(true, need_commitment_signed, false, Vec::new(), Vec::new(), Vec::new());
+ return Ok(self.push_ret_blockable_mon_update(monitor_update));
}
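
// A recurring convention in the function above (and in several places below): when a
// second monitor update is built back-to-back with a first, its steps are folded into
// the first and the shared counter is rewound, so update_ids grow by exactly one per
// queued update. A sketch with a stand-in type (not LDK's `ChannelMonitorUpdate`):
struct MonUpdate { update_id: u64, updates: Vec<&'static str> }

fn fold_updates(latest_update_id: &mut u64, first: &mut MonUpdate, mut second: MonUpdate) {
    // Building `second` bumped the shared counter; rewind it so ids stay contiguous,
    // then append its steps onto the first update.
    *latest_update_id = first.update_id;
    first.updates.append(&mut second.updates);
}
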
- /// We can only have one resolution per HTLC. In some cases around reconnect, we may fulfill
- /// an HTLC more than once or fulfill once and then attempt to fail after reconnect. We cannot,
- /// however, fail more than once as we wait for an upstream failure to be irrevocably committed
- /// before we fail backwards.
- ///
- /// If we do fail twice, we `debug_assert!(false)` and return `Ok(None)`. Thus, this will always
- /// return `Ok(_)` if preconditions are met. In any case, `Err`s will only be
- /// [`ChannelError::Ignore`].
- pub fn queue_fail_htlc<L: Deref>(&mut self, htlc_id_arg: u64, err_packet: msgs::OnionErrorPacket, logger: &L)
- -> Result<(), ChannelError> where L::Target: Logger {
- self.fail_htlc(htlc_id_arg, err_packet, true, logger)
- .map(|msg_opt| assert!(msg_opt.is_none(), "We forced holding cell?"))
+ /// Public version of `free_holding_cell_htlcs` below, checking relevant preconditions first.
+ /// If we're not in a state where freeing the holding cell makes sense, this is a no-op and
+ /// returns `(None, Vec::new())`.
+ pub fn maybe_free_holding_cell_htlcs<F: Deref, L: Deref>(
+ &mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
+ ) -> (Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>)
+ where F::Target: FeeEstimator, L::Target: Logger
+ {
+ if matches!(self.context.channel_state, ChannelState::ChannelReady(_)) && !self.context.channel_state.should_force_holding_cell() {
+ self.free_holding_cell_htlcs(fee_estimator, logger)
+ } else { (None, Vec::new()) }
}
- /// We can only have one resolution per HTLC. In some cases around reconnect, we may fulfill
- /// an HTLC more than once or fulfill once and then attempt to fail after reconnect. We cannot,
- /// however, fail more than once as we wait for an upstream failure to be irrevocably committed
- /// before we fail backwards.
- ///
- /// If we do fail twice, we `debug_assert!(false)` and return `Ok(None)`. Thus, this will always
- /// return `Ok(_)` if preconditions are met. In any case, `Err`s will only be
- /// [`ChannelError::Ignore`].
- fn fail_htlc<L: Deref>(&mut self, htlc_id_arg: u64, err_packet: msgs::OnionErrorPacket, mut force_holding_cell: bool, logger: &L)
- -> Result<Option<msgs::UpdateFailHTLC>, ChannelError> where L::Target: Logger {
- if (self.context.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) {
- panic!("Was asked to fail an HTLC when channel was not in an operational state");
- }
- assert_eq!(self.context.channel_state & ChannelState::ShutdownComplete as u32, 0);
+ /// Frees any pending commitment updates in the holding cell, generating the relevant messages
+ /// for our counterparty.
+ fn free_holding_cell_htlcs<F: Deref, L: Deref>(
+ &mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
+ ) -> (Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>)
+ where F::Target: FeeEstimator, L::Target: Logger
+ {
+ assert!(!self.context.channel_state.is_monitor_update_in_progress());
+ if self.context.holding_cell_htlc_updates.len() != 0 || self.context.holding_cell_update_fee.is_some() {
+ log_trace!(logger, "Freeing holding cell with {} HTLC updates{} in channel {}", self.context.holding_cell_htlc_updates.len(),
+ if self.context.holding_cell_update_fee.is_some() { " and a fee update" } else { "" }, &self.context.channel_id());
- // ChannelManager may generate duplicate claims/fails due to HTLC update events from
- // on-chain ChannelsMonitors during block rescan. Ideally we'd figure out a way to drop
- // these, but for now we just have to treat them as normal.
+ let mut monitor_update = ChannelMonitorUpdate {
+ update_id: self.context.latest_monitor_update_id + 1, // We don't increment this yet!
+ updates: Vec::new(),
+ };
- let mut pending_idx = core::usize::MAX;
- for (idx, htlc) in self.context.pending_inbound_htlcs.iter().enumerate() {
- if htlc.htlc_id == htlc_id_arg {
- match htlc.state {
- InboundHTLCState::Committed => {},
- InboundHTLCState::LocalRemoved(ref reason) => {
- if let &InboundHTLCRemovalReason::Fulfill(_) = reason {
- } else {
- debug_assert!(false, "Tried to fail an HTLC that was already failed");
+ let mut htlc_updates = Vec::new();
+ mem::swap(&mut htlc_updates, &mut self.context.holding_cell_htlc_updates);
+ let mut update_add_count = 0;
+ let mut update_fulfill_count = 0;
+ let mut update_fail_count = 0;
+ let mut htlcs_to_fail = Vec::new();
+ for htlc_update in htlc_updates.drain(..) {
+ // Note that this *can* fail, though if it does it should be due to rather-rare
+ // conditions (fee races with adding too many outputs, pushing our total payments
+ // just over the limit). In case it's less rare than I anticipate, we may want to
+ // revisit handling this case better and maybe fulfilling some of the HTLCs while
+ // attempting to rebalance channels.
+ match &htlc_update {
+ &HTLCUpdateAwaitingACK::AddHTLC {
+ amount_msat, cltv_expiry, ref payment_hash, ref source, ref onion_routing_packet,
+ skimmed_fee_msat, blinding_point, ..
+ } => {
+ match self.send_htlc(
+ amount_msat, *payment_hash, cltv_expiry, source.clone(), onion_routing_packet.clone(),
+ false, skimmed_fee_msat, blinding_point, fee_estimator, logger
+ ) {
+ Ok(_) => update_add_count += 1,
+ Err(e) => {
+ match e {
+ ChannelError::Ignore(ref msg) => {
+ log_info!(logger, "Failed to send HTLC with payment_hash {} due to {} in channel {}", &payment_hash, msg, &self.context.channel_id());
+ // If we fail to send here, then this HTLC should
+ // be failed backwards. Failing to send here
+ // indicates that this HTLC may keep being put back
+ // into the holding cell without ever being
+ // successfully forwarded/failed/fulfilled, causing
+ // our counterparty to eventually close on us.
+ htlcs_to_fail.push((source.clone(), *payment_hash));
+ },
+ _ => {
+ panic!("Got a non-IgnoreError action trying to send holding cell HTLC");
+ },
+ }
+ }
}
- return Ok(None);
},
- _ => {
- debug_assert!(false, "Have an inbound HTLC we tried to claim before it was fully committed to");
- return Err(ChannelError::Ignore(format!("Unable to find a pending HTLC which matched the given HTLC ID ({})", htlc.htlc_id)));
- }
- }
- pending_idx = idx;
- }
- }
- if pending_idx == core::usize::MAX {
- #[cfg(any(test, fuzzing))]
- // If we failed to find an HTLC to fail, make sure it was previously fulfilled and this
- // is simply a duplicate fail, not previously failed and we failed-back too early.
- debug_assert!(self.context.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
- return Ok(None);
- }
-
- if (self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32)) != 0 {
- debug_assert!(force_holding_cell, "!force_holding_cell is only called when emptying the holding cell, so we shouldn't end up back in it!");
- force_holding_cell = true;
- }
-
- // Now update local state:
- if force_holding_cell {
- for pending_update in self.context.holding_cell_htlc_updates.iter() {
- match pending_update {
- &HTLCUpdateAwaitingACK::ClaimHTLC { htlc_id, .. } => {
- if htlc_id_arg == htlc_id {
- #[cfg(any(test, fuzzing))]
- debug_assert!(self.context.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
- return Ok(None);
- }
+ &HTLCUpdateAwaitingACK::ClaimHTLC { ref payment_preimage, htlc_id, .. } => {
+ // If an HTLC claim was previously added to the holding cell (via
+ // `get_update_fulfill_htlc`) then generating the claim message itself must
+ // not fail - any in-between attempts to claim the HTLC will have resulted
+ // in it hitting the holding cell again and we cannot change the state of a
+ // holding cell HTLC from fulfill to anything else.
+ let mut additional_monitor_update =
+ if let UpdateFulfillFetch::NewClaim { monitor_update, .. } =
+ self.get_update_fulfill_htlc(htlc_id, *payment_preimage, logger)
+ { monitor_update } else { unreachable!() };
+ update_fulfill_count += 1;
+ monitor_update.updates.append(&mut additional_monitor_update.updates);
},
- &HTLCUpdateAwaitingACK::FailHTLC { htlc_id, .. } => {
- if htlc_id_arg == htlc_id {
- debug_assert!(false, "Tried to fail an HTLC that was already failed");
- return Err(ChannelError::Ignore("Unable to find a pending HTLC which matched the given HTLC ID".to_owned()));
+ &HTLCUpdateAwaitingACK::FailHTLC { htlc_id, ref err_packet } => {
+ match self.fail_htlc(htlc_id, err_packet.clone(), false, logger) {
+ Ok(update_fail_msg_option) => {
+ // If an HTLC failure was previously added to the holding cell (via
+ // `queue_fail_htlc`) then generating the fail message itself must
+ // not fail - we should never end up in a state where we double-fail
+ // an HTLC or fail-then-claim an HTLC as it indicates we didn't wait
+ // for a full revocation before failing.
+ debug_assert!(update_fail_msg_option.is_some());
+ update_fail_count += 1;
+ },
+ Err(e) => {
+ if let ChannelError::Ignore(_) = e {}
+ else {
+ panic!("Got a non-IgnoreError action trying to fail holding cell HTLC");
+ }
+ }
}
},
- _ => {}
}
}
- log_trace!(logger, "Placing failure for HTLC ID {} in holding cell in channel {}.", htlc_id_arg, log_bytes!(self.context.channel_id()));
- self.context.holding_cell_htlc_updates.push(HTLCUpdateAwaitingACK::FailHTLC {
- htlc_id: htlc_id_arg,
- err_packet,
- });
- return Ok(None);
- }
-
- log_trace!(logger, "Failing HTLC ID {} back with a update_fail_htlc message in channel {}.", htlc_id_arg, log_bytes!(self.context.channel_id()));
- {
- let htlc = &mut self.context.pending_inbound_htlcs[pending_idx];
- htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailRelay(err_packet.clone()));
- }
+ if update_add_count == 0 && update_fulfill_count == 0 && update_fail_count == 0 && self.context.holding_cell_update_fee.is_none() {
+ return (None, htlcs_to_fail);
+ }
+ let update_fee = if let Some(feerate) = self.context.holding_cell_update_fee.take() {
+ self.send_update_fee(feerate, false, fee_estimator, logger)
+ } else {
+ None
+ };
- Ok(Some(msgs::UpdateFailHTLC {
- channel_id: self.context.channel_id(),
- htlc_id: htlc_id_arg,
- reason: err_packet
- }))
- }
+ let mut additional_update = self.build_commitment_no_status_check(logger);
+ // build_commitment_no_status_check and get_update_fulfill_htlc may bump latest_monitor_id
+ // but we want them to be strictly increasing by one, so reset it here.
+ self.context.latest_monitor_update_id = monitor_update.update_id;
+ monitor_update.updates.append(&mut additional_update.updates);
- // Message handlers:
+ log_debug!(logger, "Freeing holding cell in channel {} resulted in {}{} HTLCs added, {} HTLCs fulfilled, and {} HTLCs failed.",
+ &self.context.channel_id(), if update_fee.is_some() { "a fee update, " } else { "" },
+ update_add_count, update_fulfill_count, update_fail_count);
- pub fn accept_channel(&mut self, msg: &msgs::AcceptChannel, default_limits: &ChannelHandshakeLimits, their_features: &InitFeatures) -> Result<(), ChannelError> {
- let peer_limits = if let Some(ref limits) = self.context.inbound_handshake_limits_override { limits } else { default_limits };
+ self.monitor_updating_paused(false, true, false, Vec::new(), Vec::new(), Vec::new());
+ (self.push_ret_blockable_mon_update(monitor_update), htlcs_to_fail)
+ } else {
+ (None, Vec::new())
+ }
+ }
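
// free_holding_cell_htlcs first swaps the holding cell out into a local Vec because the
// per-item handlers (send_htlc, get_update_fulfill_htlc, fail_htlc) each take &mut self,
// which would conflict with iterating a field of self. The same borrow dance in
// miniature (illustrative types only):
use core::mem;

struct Holding { cell: Vec<u64> }

impl Holding {
    fn handle(&mut self, _item: u64) { /* free to mutate self here */ }

    fn free_all(&mut self) {
        let mut updates = Vec::new();
        mem::swap(&mut updates, &mut self.cell); // leaves self.cell empty
        for item in updates.drain(..) {
            self.handle(item); // no outstanding borrow of self.cell
        }
    }
}
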
- // Check sanity of message fields:
- if !self.context.is_outbound() {
- return Err(ChannelError::Close("Got an accept_channel message from an inbound peer".to_owned()));
+ /// Handles receiving a remote's revoke_and_ack. Note that we may return a new
+ /// commitment_signed message here in case we had pending outbound HTLCs to add which were
+ /// waiting on this revoke_and_ack. The generation of this new commitment_signed may also fail,
+ /// generating an appropriate error *after* the channel state has been updated based on the
+ /// revoke_and_ack message.
+ pub fn revoke_and_ack<F: Deref, L: Deref>(&mut self, msg: &msgs::RevokeAndACK,
+ fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L, hold_mon_update: bool,
+ ) -> Result<(Vec<(HTLCSource, PaymentHash)>, Option<ChannelMonitorUpdate>), ChannelError>
+ where F::Target: FeeEstimator, L::Target: Logger,
+ {
+ if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
+ return Err(ChannelError::Close("Got revoke/ACK message when channel was not in an operational state".to_owned()));
}
- if self.context.channel_state != ChannelState::OurInitSent as u32 {
- return Err(ChannelError::Close("Got an accept_channel message at a strange time".to_owned()));
+ if self.context.channel_state.is_peer_disconnected() {
+ return Err(ChannelError::Close("Peer sent revoke_and_ack when we needed a channel_reestablish".to_owned()));
}
- if msg.dust_limit_satoshis > 21000000 * 100000000 {
- return Err(ChannelError::Close(format!("Peer never wants payout outputs? dust_limit_satoshis was {}", msg.dust_limit_satoshis)));
+ if self.context.channel_state.is_both_sides_shutdown() && self.context.last_sent_closing_fee.is_some() {
+ return Err(ChannelError::Close("Peer sent revoke_and_ack after we'd started exchanging closing_signeds".to_owned()));
}
- if msg.channel_reserve_satoshis > self.context.channel_value_satoshis {
- return Err(ChannelError::Close(format!("Bogus channel_reserve_satoshis ({}). Must not be greater than ({})", msg.channel_reserve_satoshis, self.context.channel_value_satoshis)));
- }
- if msg.dust_limit_satoshis > self.context.holder_selected_channel_reserve_satoshis {
- return Err(ChannelError::Close(format!("Dust limit ({}) is bigger than our channel reserve ({})", msg.dust_limit_satoshis, self.context.holder_selected_channel_reserve_satoshis)));
- }
- if msg.channel_reserve_satoshis > self.context.channel_value_satoshis - self.context.holder_selected_channel_reserve_satoshis {
- return Err(ChannelError::Close(format!("Bogus channel_reserve_satoshis ({}). Must not be greater than channel value minus our reserve ({})",
- msg.channel_reserve_satoshis, self.context.channel_value_satoshis - self.context.holder_selected_channel_reserve_satoshis)));
- }
- let full_channel_value_msat = (self.context.channel_value_satoshis - msg.channel_reserve_satoshis) * 1000;
- if msg.htlc_minimum_msat >= full_channel_value_msat {
- return Err(ChannelError::Close(format!("Minimum htlc value ({}) is full channel value ({})", msg.htlc_minimum_msat, full_channel_value_msat)));
- }
- let max_delay_acceptable = u16::min(peer_limits.their_to_self_delay, MAX_LOCAL_BREAKDOWN_TIMEOUT);
- if msg.to_self_delay > max_delay_acceptable {
- return Err(ChannelError::Close(format!("They wanted our payments to be delayed by a needlessly long period. Upper limit: {}. Actual: {}", max_delay_acceptable, msg.to_self_delay)));
- }
- if msg.max_accepted_htlcs < 1 {
- return Err(ChannelError::Close("0 max_accepted_htlcs makes for a useless channel".to_owned()));
- }
- if msg.max_accepted_htlcs > MAX_HTLCS {
- return Err(ChannelError::Close(format!("max_accepted_htlcs was {}. It must not be larger than {}", msg.max_accepted_htlcs, MAX_HTLCS)));
+
+ let secret = secp_check!(SecretKey::from_slice(&msg.per_commitment_secret), "Peer provided an invalid per_commitment_secret".to_owned());
+
+ if let Some(counterparty_prev_commitment_point) = self.context.counterparty_prev_commitment_point {
+ if PublicKey::from_secret_key(&self.context.secp_ctx, &secret) != counterparty_prev_commitment_point {
+ return Err(ChannelError::Close("Got a revoke commitment secret which didn't correspond to their current pubkey".to_owned()));
+ }
}
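
// The check above enforces that the revealed per-commitment secret is the private key
// behind the commitment point the peer previously advertised. The same check in
// isolation (illustrative helper, not LDK's API):
use bitcoin::secp256k1::{All, PublicKey, Secp256k1, SecretKey};

fn secret_matches_point(
    secp_ctx: &Secp256k1<All>, revealed: &SecretKey, advertised: &PublicKey,
) -> bool {
    PublicKey::from_secret_key(secp_ctx, revealed) == *advertised
}
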
- // Now check against optional parameters as set by config...
- if msg.htlc_minimum_msat > peer_limits.max_htlc_minimum_msat {
- return Err(ChannelError::Close(format!("htlc_minimum_msat ({}) is higher than the user specified limit ({})", msg.htlc_minimum_msat, peer_limits.max_htlc_minimum_msat)));
- }
- if msg.max_htlc_value_in_flight_msat < peer_limits.min_max_htlc_value_in_flight_msat {
- return Err(ChannelError::Close(format!("max_htlc_value_in_flight_msat ({}) is less than the user specified limit ({})", msg.max_htlc_value_in_flight_msat, peer_limits.min_max_htlc_value_in_flight_msat)));
- }
- if msg.channel_reserve_satoshis > peer_limits.max_channel_reserve_satoshis {
- return Err(ChannelError::Close(format!("channel_reserve_satoshis ({}) is higher than the user specified limit ({})", msg.channel_reserve_satoshis, peer_limits.max_channel_reserve_satoshis)));
- }
- if msg.max_accepted_htlcs < peer_limits.min_max_accepted_htlcs {
- return Err(ChannelError::Close(format!("max_accepted_htlcs ({}) is less than the user specified limit ({})", msg.max_accepted_htlcs, peer_limits.min_max_accepted_htlcs)));
- }
- if msg.dust_limit_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
- return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is less than the implementation limit ({})", msg.dust_limit_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS)));
- }
- if msg.dust_limit_satoshis > MAX_CHAN_DUST_LIMIT_SATOSHIS {
- return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is greater than the implementation limit ({})", msg.dust_limit_satoshis, MAX_CHAN_DUST_LIMIT_SATOSHIS)));
+ if !self.context.channel_state.is_awaiting_remote_revoke() {
+ // Our counterparty seems to have burned their coins to us (by revoking a state when we
+ // haven't given them a new commitment transaction to broadcast). We should probably
+ // take advantage of this by updating our channel monitor, sending them an error, and
+ // waiting for them to broadcast their latest (now-revoked) claim. But that would be a
+ // lot of work, and there's some chance this is all a misunderstanding anyway.
+ // We have to do *something*, though, since our signer may get mad at us for otherwise
+ // jumping a remote commitment number, so best to just force-close and move on.
+ return Err(ChannelError::Close("Received an unexpected revoke_and_ack".to_owned()));
}
- if msg.minimum_depth > peer_limits.max_minimum_depth {
- return Err(ChannelError::Close(format!("We consider the minimum depth to be unreasonably large. Expected minimum: ({}). Actual: ({})", peer_limits.max_minimum_depth, msg.minimum_depth)));
+
+ #[cfg(any(test, fuzzing))]
+ {
+ *self.context.next_local_commitment_tx_fee_info_cached.lock().unwrap() = None;
+ *self.context.next_remote_commitment_tx_fee_info_cached.lock().unwrap() = None;
}
- if let Some(ty) = &msg.channel_type {
- if *ty != self.context.channel_type {
- return Err(ChannelError::Close("Channel Type in accept_channel didn't match the one sent in open_channel.".to_owned()));
- }
- } else if their_features.supports_channel_type() {
- // Assume they've accepted the channel type as they said they understand it.
- } else {
- let channel_type = ChannelTypeFeatures::from_init(&their_features);
- if channel_type != ChannelTypeFeatures::only_static_remote_key() {
- return Err(ChannelError::Close("Only static_remote_key is supported for non-negotiated channel types".to_owned()));
- }
- self.context.channel_type = channel_type;
+ match &self.context.holder_signer {
+ ChannelSignerType::Ecdsa(ecdsa) => {
+ ecdsa.validate_counterparty_revocation(
+ self.context.cur_counterparty_commitment_transaction_number + 1,
+ &secret
+ ).map_err(|_| ChannelError::Close("Failed to validate revocation from peer".to_owned()))?;
+ },
+ // TODO (taproot|arik)
+ #[cfg(taproot)]
+ _ => todo!()
+ };
+
+ self.context.commitment_secrets.provide_secret(self.context.cur_counterparty_commitment_transaction_number + 1, msg.per_commitment_secret)
+ .map_err(|_| ChannelError::Close("Previous secrets did not match new one".to_owned()))?;
+ self.context.latest_monitor_update_id += 1;
+ let mut monitor_update = ChannelMonitorUpdate {
+ update_id: self.context.latest_monitor_update_id,
+ updates: vec![ChannelMonitorUpdateStep::CommitmentSecret {
+ idx: self.context.cur_counterparty_commitment_transaction_number + 1,
+ secret: msg.per_commitment_secret,
+ }],
+ };
+
+ // Update state now that we've passed all the can-fail calls...
+ // (note that we may still fail to generate the new commitment_signed message, but that's
+ // OK, we step the channel here and *then* if the new generation fails we can fail the
+ // channel based on that, but stepping stuff here should be safe either way.)
+ self.context.channel_state.clear_awaiting_remote_revoke();
+ self.context.sent_message_awaiting_response = None;
+ self.context.counterparty_prev_commitment_point = self.context.counterparty_cur_commitment_point;
+ self.context.counterparty_cur_commitment_point = Some(msg.next_per_commitment_point);
+ self.context.cur_counterparty_commitment_transaction_number -= 1;
+
+ if self.context.announcement_sigs_state == AnnouncementSigsState::Committed {
+ self.context.announcement_sigs_state = AnnouncementSigsState::PeerReceived;
}
- let counterparty_shutdown_scriptpubkey = if their_features.supports_upfront_shutdown_script() {
- match &msg.shutdown_scriptpubkey {
- &Some(ref script) => {
- // Peer is signaling upfront_shutdown and has opt-out with a 0-length script. We don't enforce anything
- if script.len() == 0 {
- None
+ log_trace!(logger, "Updating HTLCs on receipt of RAA in channel {}...", &self.context.channel_id());
+ let mut to_forward_infos = Vec::new();
+ let mut revoked_htlcs = Vec::new();
+ let mut finalized_claimed_htlcs = Vec::new();
+ let mut update_fail_htlcs = Vec::new();
+ let mut update_fail_malformed_htlcs = Vec::new();
+ let mut require_commitment = false;
+ let mut value_to_self_msat_diff: i64 = 0;
+
+ {
+ // Take references explicitly so that we can hold multiple references to self.context.
+ let pending_inbound_htlcs: &mut Vec<_> = &mut self.context.pending_inbound_htlcs;
+ let pending_outbound_htlcs: &mut Vec<_> = &mut self.context.pending_outbound_htlcs;
+ let expecting_peer_commitment_signed = &mut self.context.expecting_peer_commitment_signed;
+
+ // We really shouldn't have two passes here, but retain gives a non-mutable ref (Rust bug)
+ pending_inbound_htlcs.retain(|htlc| {
+ if let &InboundHTLCState::LocalRemoved(ref reason) = &htlc.state {
+ log_trace!(logger, " ...removing inbound LocalRemoved {}", &htlc.payment_hash);
+ if let &InboundHTLCRemovalReason::Fulfill(_) = reason {
+ value_to_self_msat_diff += htlc.amount_msat as i64;
+ }
+ *expecting_peer_commitment_signed = true;
+ false
+ } else { true }
+ });
+ pending_outbound_htlcs.retain(|htlc| {
+ if let &OutboundHTLCState::AwaitingRemovedRemoteRevoke(ref outcome) = &htlc.state {
+ log_trace!(logger, " ...removing outbound AwaitingRemovedRemoteRevoke {}", &htlc.payment_hash);
+ if let OutboundHTLCOutcome::Failure(reason) = outcome.clone() { // We really want take() here, but, again, non-mut ref :(
+ revoked_htlcs.push((htlc.source.clone(), htlc.payment_hash, reason));
} else {
- if !script::is_bolt2_compliant(&script, their_features) {
- return Err(ChannelError::Close(format!("Peer is signaling upfront_shutdown but has provided an unacceptable scriptpubkey format: {}", script)));
+ finalized_claimed_htlcs.push(htlc.source.clone());
+ // They fulfilled, so we sent them money
+ value_to_self_msat_diff -= htlc.amount_msat as i64;
+ }
+ false
+ } else { true }
+ });
+ for htlc in pending_inbound_htlcs.iter_mut() {
+ let swap = if let &InboundHTLCState::AwaitingRemoteRevokeToAnnounce(_) = &htlc.state {
+ true
+ } else if let &InboundHTLCState::AwaitingAnnouncedRemoteRevoke(_) = &htlc.state {
+ true
+ } else { false };
+ if swap {
+ let mut state = InboundHTLCState::Committed;
+ mem::swap(&mut state, &mut htlc.state);
+
+ if let InboundHTLCState::AwaitingRemoteRevokeToAnnounce(forward_info) = state {
+ log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce {} to AwaitingAnnouncedRemoteRevoke", &htlc.payment_hash);
+ htlc.state = InboundHTLCState::AwaitingAnnouncedRemoteRevoke(forward_info);
+ require_commitment = true;
+ } else if let InboundHTLCState::AwaitingAnnouncedRemoteRevoke(forward_info) = state {
+ match forward_info {
+ PendingHTLCStatus::Fail(fail_msg) => {
+ log_trace!(logger, " ...promoting inbound AwaitingAnnouncedRemoteRevoke {} to LocalRemoved due to PendingHTLCStatus indicating failure", &htlc.payment_hash);
+ require_commitment = true;
+ match fail_msg {
+ HTLCFailureMsg::Relay(msg) => {
+ htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailRelay(msg.reason.clone()));
+ update_fail_htlcs.push(msg)
+ },
+ HTLCFailureMsg::Malformed(msg) => {
+ htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailMalformed((msg.sha256_of_onion, msg.failure_code)));
+ update_fail_malformed_htlcs.push(msg)
+ },
+ }
+ },
+ PendingHTLCStatus::Forward(forward_info) => {
+ log_trace!(logger, " ...promoting inbound AwaitingAnnouncedRemoteRevoke {} to Committed", &htlc.payment_hash);
+ to_forward_infos.push((forward_info, htlc.htlc_id));
+ htlc.state = InboundHTLCState::Committed;
+ }
}
- Some(script.clone())
}
- },
- // Peer is signaling upfront shutdown but don't opt-out with correct mechanism (a.k.a 0-length script). Peer looks buggy, we fail the channel
- &None => {
- return Err(ChannelError::Close("Peer is signaling upfront_shutdown but we don't get any script. Use 0-length script to opt-out".to_owned()));
}
}
- } else { None };
+ for htlc in pending_outbound_htlcs.iter_mut() {
+ if let OutboundHTLCState::LocalAnnounced(_) = htlc.state {
+ log_trace!(logger, " ...promoting outbound LocalAnnounced {} to Committed", &htlc.payment_hash);
+ htlc.state = OutboundHTLCState::Committed;
+ *expecting_peer_commitment_signed = true;
+ }
+ if let &mut OutboundHTLCState::AwaitingRemoteRevokeToRemove(ref mut outcome) = &mut htlc.state {
+ log_trace!(logger, " ...promoting outbound AwaitingRemoteRevokeToRemove {} to AwaitingRemovedRemoteRevoke", &htlc.payment_hash);
+ // Grab the preimage, if it exists, instead of cloning
+ let mut reason = OutboundHTLCOutcome::Success(None);
+ mem::swap(outcome, &mut reason);
+ htlc.state = OutboundHTLCState::AwaitingRemovedRemoteRevoke(reason);
+ require_commitment = true;
+ }
+ }
+ }
+ self.context.value_to_self_msat = (self.context.value_to_self_msat as i64 + value_to_self_msat_diff) as u64;
- self.context.counterparty_dust_limit_satoshis = msg.dust_limit_satoshis;
- self.context.counterparty_max_htlc_value_in_flight_msat = cmp::min(msg.max_htlc_value_in_flight_msat, self.context.channel_value_satoshis * 1000);
- self.context.counterparty_selected_channel_reserve_satoshis = Some(msg.channel_reserve_satoshis);
- self.context.counterparty_htlc_minimum_msat = msg.htlc_minimum_msat;
- self.context.counterparty_max_accepted_htlcs = msg.max_accepted_htlcs;
+ if let Some((feerate, update_state)) = self.context.pending_update_fee {
+ match update_state {
+ FeeUpdateState::Outbound => {
+ debug_assert!(self.context.is_outbound());
+ log_trace!(logger, " ...promoting outbound fee update {} to Committed", feerate);
+ self.context.feerate_per_kw = feerate;
+ self.context.pending_update_fee = None;
+ self.context.expecting_peer_commitment_signed = true;
+ },
+ FeeUpdateState::RemoteAnnounced => { debug_assert!(!self.context.is_outbound()); },
+ FeeUpdateState::AwaitingRemoteRevokeToAnnounce => {
+ debug_assert!(!self.context.is_outbound());
+ log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce fee update {} to Committed", feerate);
+ require_commitment = true;
+ self.context.feerate_per_kw = feerate;
+ self.context.pending_update_fee = None;
+ },
+ }
+ }
- if peer_limits.trust_own_funding_0conf {
- self.context.minimum_depth = Some(msg.minimum_depth);
- } else {
- self.context.minimum_depth = Some(cmp::max(1, msg.minimum_depth));
+ let release_monitor = self.context.blocked_monitor_updates.is_empty() && !hold_mon_update;
+ let release_state_str =
+ if hold_mon_update { "Holding" } else if release_monitor { "Releasing" } else { "Blocked" };
+ macro_rules! return_with_htlcs_to_fail {
+ ($htlcs_to_fail: expr) => {
+ if !release_monitor {
+ self.context.blocked_monitor_updates.push(PendingChannelMonitorUpdate {
+ update: monitor_update,
+ });
+ return Ok(($htlcs_to_fail, None));
+ } else {
+ return Ok(($htlcs_to_fail, Some(monitor_update)));
+ }
+ }
}
- let counterparty_pubkeys = ChannelPublicKeys {
- funding_pubkey: msg.funding_pubkey,
- revocation_basepoint: msg.revocation_basepoint,
- payment_point: msg.payment_point,
- delayed_payment_basepoint: msg.delayed_payment_basepoint,
- htlc_basepoint: msg.htlc_basepoint
- };
+ if self.context.channel_state.is_monitor_update_in_progress() {
+ // We can't actually generate a new commitment transaction (including by freeing
+ // holding cells) while we can't update the monitor, so we just return what we have.
+ if require_commitment {
+ self.context.monitor_pending_commitment_signed = true;
+ // When the monitor updating is restored we'll call
+ // get_last_commitment_update_for_send(), which does not update state, but we're
+ // definitely now awaiting a remote revoke before we can step forward any more, so
+ // set it here.
+ let mut additional_update = self.build_commitment_no_status_check(logger);
+ // build_commitment_no_status_check may bump latest_monitor_id but we want them to be
+ // strictly increasing by one, so decrement it here.
+ self.context.latest_monitor_update_id = monitor_update.update_id;
+ monitor_update.updates.append(&mut additional_update.updates);
+ }
+ self.context.monitor_pending_forwards.append(&mut to_forward_infos);
+ self.context.monitor_pending_failures.append(&mut revoked_htlcs);
+ self.context.monitor_pending_finalized_fulfills.append(&mut finalized_claimed_htlcs);
+ log_debug!(logger, "Received a valid revoke_and_ack for channel {} but awaiting a monitor update resolution to reply.", &self.context.channel_id());
+ return_with_htlcs_to_fail!(Vec::new());
+ }
- self.context.channel_transaction_parameters.counterparty_parameters = Some(CounterpartyChannelTransactionParameters {
- selected_contest_delay: msg.to_self_delay,
- pubkeys: counterparty_pubkeys,
- });
+ match self.free_holding_cell_htlcs(fee_estimator, logger) {
+ (Some(mut additional_update), htlcs_to_fail) => {
+ // free_holding_cell_htlcs may bump latest_monitor_id multiple times but we want them to be
+ // strictly increasing by one, so decrement it here.
+ self.context.latest_monitor_update_id = monitor_update.update_id;
+ monitor_update.updates.append(&mut additional_update.updates);
- self.context.counterparty_cur_commitment_point = Some(msg.first_per_commitment_point);
- self.context.counterparty_shutdown_scriptpubkey = counterparty_shutdown_scriptpubkey;
+ log_debug!(logger, "Received a valid revoke_and_ack for channel {} with holding cell HTLCs freed. {} monitor update.",
+ &self.context.channel_id(), release_state_str);
- self.context.channel_state = ChannelState::OurInitSent as u32 | ChannelState::TheirInitSent as u32;
- self.context.inbound_handshake_limits_override = None; // We're done enforcing limits on our peer's handshake now.
+ self.monitor_updating_paused(false, true, false, to_forward_infos, revoked_htlcs, finalized_claimed_htlcs);
+ return_with_htlcs_to_fail!(htlcs_to_fail);
+ },
+ (None, htlcs_to_fail) => {
+ if require_commitment {
+ let mut additional_update = self.build_commitment_no_status_check(logger);
- Ok(())
- }
+ // build_commitment_no_status_check may bump latest_monitor_id but we want them to be
+ // strictly increasing by one, so decrement it here.
+ self.context.latest_monitor_update_id = monitor_update.update_id;
+ monitor_update.updates.append(&mut additional_update.updates);
- fn funding_created_signature<L: Deref>(&mut self, sig: &Signature, logger: &L) -> Result<(Txid, CommitmentTransaction, Signature), ChannelError> where L::Target: Logger {
- let funding_script = self.context.get_funding_redeemscript();
+ log_debug!(logger, "Received a valid revoke_and_ack for channel {}. Responding with a commitment update with {} HTLCs failed. {} monitor update.",
+ &self.context.channel_id(),
+ update_fail_htlcs.len() + update_fail_malformed_htlcs.len(),
+ release_state_str);
- let keys = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number);
- let initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &keys, true, false, logger).tx;
- {
- let trusted_tx = initial_commitment_tx.trust();
- let initial_commitment_bitcoin_tx = trusted_tx.built_transaction();
- let sighash = initial_commitment_bitcoin_tx.get_sighash_all(&funding_script, self.context.channel_value_satoshis);
- // They sign the holder commitment transaction...
- log_trace!(logger, "Checking funding_created tx signature {} by key {} against tx {} (sighash {}) with redeemscript {} for channel {}.",
- log_bytes!(sig.serialize_compact()[..]), log_bytes!(self.context.counterparty_funding_pubkey().serialize()),
- encode::serialize_hex(&initial_commitment_bitcoin_tx.transaction), log_bytes!(sighash[..]),
- encode::serialize_hex(&funding_script), log_bytes!(self.context.channel_id()));
- secp_check!(self.context.secp_ctx.verify_ecdsa(&sighash, &sig, self.context.counterparty_funding_pubkey()), "Invalid funding_created signature from peer".to_owned());
- }
-
- let counterparty_keys = self.context.build_remote_transaction_keys();
- let counterparty_initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, false, logger).tx;
-
- let counterparty_trusted_tx = counterparty_initial_commitment_tx.trust();
- let counterparty_initial_bitcoin_tx = counterparty_trusted_tx.built_transaction();
- log_trace!(logger, "Initial counterparty tx for channel {} is: txid {} tx {}",
- log_bytes!(self.context.channel_id()), counterparty_initial_bitcoin_tx.txid, encode::serialize_hex(&counterparty_initial_bitcoin_tx.transaction));
+ self.monitor_updating_paused(false, true, false, to_forward_infos, revoked_htlcs, finalized_claimed_htlcs);
+ return_with_htlcs_to_fail!(htlcs_to_fail);
+ } else {
+ log_debug!(logger, "Received a valid revoke_and_ack for channel {} with no reply necessary. {} monitor update.",
+ &self.context.channel_id(), release_state_str);
- let counterparty_signature = self.context.holder_signer.sign_counterparty_commitment(&counterparty_initial_commitment_tx, Vec::new(), &self.context.secp_ctx)
- .map_err(|_| ChannelError::Close("Failed to get signatures for new commitment_signed".to_owned()))?.0;
+ self.monitor_updating_paused(false, false, false, to_forward_infos, revoked_htlcs, finalized_claimed_htlcs);
+ return_with_htlcs_to_fail!(htlcs_to_fail);
+ }
+ }
+ }
+ }
- // We sign "counterparty" commitment transaction, allowing them to broadcast the tx if they wish.
- Ok((counterparty_initial_bitcoin_tx.txid, initial_commitment_tx, counterparty_signature))
+ /// Queues up an outbound update fee by placing it in the holding cell. You should call
+ /// [`Self::maybe_free_holding_cell_htlcs`] in order to actually generate and send the
+ /// commitment update.
+ pub fn queue_update_fee<F: Deref, L: Deref>(&mut self, feerate_per_kw: u32,
+ fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L)
+ where F::Target: FeeEstimator, L::Target: Logger
+ {
+ let msg_opt = self.send_update_fee(feerate_per_kw, true, fee_estimator, logger);
+ assert!(msg_opt.is_none(), "We forced holding cell?");
}
- pub fn funding_created<SP: Deref, L: Deref>(
- &mut self, msg: &msgs::FundingCreated, best_block: BestBlock, signer_provider: &SP, logger: &L
- ) -> Result<(msgs::FundingSigned, ChannelMonitor<Signer>), ChannelError>
- where
- SP::Target: SignerProvider<Signer = Signer>,
- L::Target: Logger
+ /// Adds a pending fee update to this channel. See the doc for `send_htlc` for further
+ /// details on the optionality of the return value.
+ /// If our balance is too low to cover the cost of the next commitment transaction at the
+ /// new feerate, the update is cancelled.
+ ///
+ /// You MUST call [`Self::send_commitment_no_state_update`] prior to any other calls on this
+ /// [`Channel`] if `force_holding_cell` is false.
+ fn send_update_fee<F: Deref, L: Deref>(
+ &mut self, feerate_per_kw: u32, mut force_holding_cell: bool,
+ fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
+ ) -> Option<msgs::UpdateFee>
+ where F::Target: FeeEstimator, L::Target: Logger
{
- if self.context.is_outbound() {
- return Err(ChannelError::Close("Received funding_created for an outbound channel?".to_owned()));
- }
- if self.context.channel_state != (ChannelState::OurInitSent as u32 | ChannelState::TheirInitSent as u32) {
- // BOLT 2 says that if we disconnect before we send funding_signed we SHOULD NOT
- // remember the channel, so it's safe to just send an error_message here and drop the
- // channel.
- return Err(ChannelError::Close("Received funding_created after we got the channel!".to_owned()));
+ if !self.context.is_outbound() {
+ panic!("Cannot send fee from inbound channel");
}
- if self.context.inbound_awaiting_accept {
- return Err(ChannelError::Close("FundingCreated message received before the channel was accepted".to_owned()));
+ if !self.context.is_usable() {
+ panic!("Cannot update fee until channel is fully established and we haven't started shutting down");
}
- if self.context.commitment_secrets.get_min_seen_secret() != (1 << 48) ||
- self.context.cur_counterparty_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER ||
- self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
- panic!("Should not have advanced channel commitment tx numbers prior to funding_created");
+ if !self.context.is_live() {
+ panic!("Cannot update fee while peer is disconnected/we're awaiting a monitor update (ChannelManager should have caught this)");
}
- let funding_txo = OutPoint { txid: msg.funding_txid, index: msg.funding_output_index };
- self.context.channel_transaction_parameters.funding_outpoint = Some(funding_txo);
- // This is an externally observable change before we finish all our checks. In particular
- // funding_created_signature may fail.
- self.context.holder_signer.provide_channel_parameters(&self.context.channel_transaction_parameters);
-
- let (counterparty_initial_commitment_txid, initial_commitment_tx, signature) = match self.funding_created_signature(&msg.signature, logger) {
- Ok(res) => res,
- Err(ChannelError::Close(e)) => {
- self.context.channel_transaction_parameters.funding_outpoint = None;
- return Err(ChannelError::Close(e));
- },
- Err(e) => {
- // The only error we know how to handle is ChannelError::Close, so we fall over here
- // to make sure we don't continue with an inconsistent state.
- panic!("unexpected error type from funding_created_signature {:?}", e);
- }
- };
-
- let holder_commitment_tx = HolderCommitmentTransaction::new(
- initial_commitment_tx,
- msg.signature,
- Vec::new(),
- &self.context.get_holder_pubkeys().funding_pubkey,
- self.context.counterparty_funding_pubkey()
- );
-
- self.context.holder_signer.validate_holder_commitment(&holder_commitment_tx, Vec::new())
- .map_err(|_| ChannelError::Close("Failed to validate our commitment".to_owned()))?;
-
- // Now that we're past error-generating stuff, update our local state:
-
- let funding_redeemscript = self.context.get_funding_redeemscript();
- let funding_txo_script = funding_redeemscript.to_v0_p2wsh();
- let obscure_factor = get_commitment_transaction_number_obscure_factor(&self.context.get_holder_pubkeys().payment_point, &self.context.get_counterparty_pubkeys().payment_point, self.context.is_outbound());
- let shutdown_script = self.context.shutdown_scriptpubkey.clone().map(|script| script.into_inner());
- let mut monitor_signer = signer_provider.derive_channel_signer(self.context.channel_value_satoshis, self.context.channel_keys_id);
- monitor_signer.provide_channel_parameters(&self.context.channel_transaction_parameters);
- let channel_monitor = ChannelMonitor::new(self.context.secp_ctx.clone(), monitor_signer,
- shutdown_script, self.context.get_holder_selected_contest_delay(),
- &self.context.destination_script, (funding_txo, funding_txo_script.clone()),
- &self.context.channel_transaction_parameters,
- funding_redeemscript.clone(), self.context.channel_value_satoshis,
- obscure_factor,
- holder_commitment_tx, best_block, self.context.counterparty_node_id);
+ // Before proposing a feerate update, check that we can actually afford the new fee.
+ let inbound_stats = self.context.get_inbound_pending_htlc_stats(Some(feerate_per_kw));
+ let outbound_stats = self.context.get_outbound_pending_htlc_stats(Some(feerate_per_kw));
+ let keys = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number);
+ let commitment_stats = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &keys, true, true, logger);
+ let buffer_fee_msat = commit_tx_fee_sat(feerate_per_kw, commitment_stats.num_nondust_htlcs + outbound_stats.on_holder_tx_holding_cell_htlcs_count as usize + CONCURRENT_INBOUND_HTLC_FEE_BUFFER as usize, self.context.get_channel_type()) * 1000;
+ let holder_balance_msat = commitment_stats.local_balance_msat - outbound_stats.holding_cell_msat;
+ if holder_balance_msat < buffer_fee_msat + self.context.counterparty_selected_channel_reserve_satoshis.unwrap() * 1000 {
+ //TODO: auto-close after a number of failures?
+ log_debug!(logger, "Cannot afford to send new feerate at {}", feerate_per_kw);
+ return None;
+ }
- channel_monitor.provide_latest_counterparty_commitment_tx(counterparty_initial_commitment_txid, Vec::new(), self.context.cur_counterparty_commitment_transaction_number, self.context.counterparty_cur_commitment_point.unwrap(), logger);
+ // Note that we evaluate the pending-HTLC "preemptive" trimmed-to-dust threshold at the proposed `feerate_per_kw`.
+ let holder_tx_dust_exposure = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat;
+ let counterparty_tx_dust_exposure = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat;
+ let max_dust_htlc_exposure_msat = self.context.get_max_dust_htlc_exposure_msat(fee_estimator);
+ if holder_tx_dust_exposure > max_dust_htlc_exposure_msat {
+ log_debug!(logger, "Cannot afford to send new feerate at {} without infringing max dust htlc exposure", feerate_per_kw);
+ return None;
+ }
+ if counterparty_tx_dust_exposure > max_dust_htlc_exposure_msat {
+ log_debug!(logger, "Cannot afford to send new feerate at {} without infringing max dust htlc exposure", feerate_per_kw);
+ return None;
+ }
- self.context.channel_state = ChannelState::FundingSent as u32;
- self.context.channel_id = funding_txo.to_channel_id();
- self.context.cur_counterparty_commitment_transaction_number -= 1;
- self.context.cur_holder_commitment_transaction_number -= 1;
+ if self.context.channel_state.is_awaiting_remote_revoke() || self.context.channel_state.is_monitor_update_in_progress() {
+ force_holding_cell = true;
+ }
- log_info!(logger, "Generated funding_signed for peer for channel {}", log_bytes!(self.context.channel_id()));
+ if force_holding_cell {
+ self.context.holding_cell_update_fee = Some(feerate_per_kw);
+ return None;
+ }
- let need_channel_ready = self.check_get_channel_ready(0).is_some();
- self.monitor_updating_paused(false, false, need_channel_ready, Vec::new(), Vec::new(), Vec::new());
+ debug_assert!(self.context.pending_update_fee.is_none());
+ self.context.pending_update_fee = Some((feerate_per_kw, FeeUpdateState::Outbound));
- Ok((msgs::FundingSigned {
+ Some(msgs::UpdateFee {
channel_id: self.context.channel_id,
- signature,
- #[cfg(taproot)]
- partial_signature_with_nonce: None,
- }, channel_monitor))
+ feerate_per_kw,
+ })
}
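
// For reference, the buffer computation above prices a commitment transaction at the
// proposed feerate. Feerates are quoted in sats per 1000 weight units, so the fee
// reduces to weight * feerate_per_kw / 1000. A sketch using the BOLT 3 non-anchor
// weights (constants shown for illustration; the real helper also accounts for the
// channel type):
const COMMITMENT_TX_BASE_WEIGHT: u64 = 724;
const COMMITMENT_TX_WEIGHT_PER_HTLC: u64 = 172;

fn commit_tx_fee_sat_sketch(feerate_per_kw: u32, num_nondust_htlcs: usize) -> u64 {
    let weight = COMMITMENT_TX_BASE_WEIGHT
        + num_nondust_htlcs as u64 * COMMITMENT_TX_WEIGHT_PER_HTLC;
    feerate_per_kw as u64 * weight / 1000
}
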
- /// Handles a funding_signed message from the remote end.
- /// If this call is successful, broadcast the funding transaction (and not before!)
- pub fn funding_signed<SP: Deref, L: Deref>(
- &mut self, msg: &msgs::FundingSigned, best_block: BestBlock, signer_provider: &SP, logger: &L
- ) -> Result<ChannelMonitor<Signer>, ChannelError>
- where
- SP::Target: SignerProvider<Signer = Signer>,
- L::Target: Logger
- {
- if !self.context.is_outbound() {
- return Err(ChannelError::Close("Received funding_signed for an inbound channel?".to_owned()));
- }
- if self.context.channel_state & !(ChannelState::MonitorUpdateInProgress as u32) != ChannelState::FundingCreated as u32 {
- return Err(ChannelError::Close("Received funding_signed in strange state!".to_owned()));
+ /// Removes any uncommitted inbound HTLCs and resets the state of uncommitted outbound HTLC
+ /// updates, to be used on peer disconnection. After this, update_*_htlc messages need to be
+ /// resent.
+ /// No further message handling calls may be made until a channel_reestablish dance has
+ /// completed.
+ /// May return `Err(())`, which implies [`ChannelContext::force_shutdown`] should be called immediately.
+ pub fn remove_uncommitted_htlcs_and_mark_paused<L: Deref>(&mut self, logger: &L) -> Result<(), ()> where L::Target: Logger {
+ assert!(!matches!(self.context.channel_state, ChannelState::ShutdownComplete));
+ if self.context.channel_state.is_pre_funded_state() {
+ return Err(())
}
- if self.context.commitment_secrets.get_min_seen_secret() != (1 << 48) ||
- self.context.cur_counterparty_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER ||
- self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
- panic!("Should not have advanced channel commitment tx numbers prior to funding_created");
+
+ if self.context.channel_state.is_peer_disconnected() {
+ // While the below code should be idempotent, it's simpler to just return early, as
+ // redundant disconnect events can fire, though they should be rare.
+ return Ok(());
}
- let funding_script = self.context.get_funding_redeemscript();
+ if self.context.announcement_sigs_state == AnnouncementSigsState::MessageSent || self.context.announcement_sigs_state == AnnouncementSigsState::Committed {
+ self.context.announcement_sigs_state = AnnouncementSigsState::NotSent;
+ }
- let counterparty_keys = self.context.build_remote_transaction_keys();
- let counterparty_initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, false, logger).tx;
- let counterparty_trusted_tx = counterparty_initial_commitment_tx.trust();
- let counterparty_initial_bitcoin_tx = counterparty_trusted_tx.built_transaction();
+ // Upon reconnect we have to start the closing_signed dance over, but shutdown messages
+ // will be retransmitted.
+ self.context.last_sent_closing_fee = None;
+ self.context.pending_counterparty_closing_signed = None;
+ self.context.closing_fee_limits = None;
- log_trace!(logger, "Initial counterparty tx for channel {} is: txid {} tx {}",
- log_bytes!(self.context.channel_id()), counterparty_initial_bitcoin_tx.txid, encode::serialize_hex(&counterparty_initial_bitcoin_tx.transaction));
+ let mut inbound_drop_count = 0;
+ self.context.pending_inbound_htlcs.retain(|htlc| {
+ match htlc.state {
+ InboundHTLCState::RemoteAnnounced(_) => {
+ // They sent us an update_add_htlc but we never got the commitment_signed.
+ // We'll tell them what commitment_signed we're expecting next and they'll drop
+ // this HTLC accordingly
+ inbound_drop_count += 1;
+ false
+ },
+ InboundHTLCState::AwaitingRemoteRevokeToAnnounce(_)|InboundHTLCState::AwaitingAnnouncedRemoteRevoke(_) => {
+ // We received a commitment_signed updating this HTLC and (at least hopefully)
+ // sent a revoke_and_ack (which we can re-transmit) and have heard nothing
+ // in response to it yet, so don't touch it.
+ true
+ },
+ InboundHTLCState::Committed => true,
+ InboundHTLCState::LocalRemoved(_) => {
+ // We (hopefully) sent a commitment_signed updating this HTLC (which we can
+ // re-transmit if needed) and they may have even sent a revoke_and_ack back
+ // (that we missed). Keep this around for now and if they tell us they missed
+ // the commitment_signed we can re-transmit the update then.
+ true
+ },
+ }
+ });
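+ // The dropped HTLCs consumed counterparty HTLC IDs, and the peer will reuse those IDs
+ // when it re-sends the update_add_htlcs after reconnection, so wind the counter back.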
+ self.context.next_counterparty_htlc_id -= inbound_drop_count;
- let holder_signer = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number);
- let initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &holder_signer, true, false, logger).tx;
- {
- let trusted_tx = initial_commitment_tx.trust();
- let initial_commitment_bitcoin_tx = trusted_tx.built_transaction();
- let sighash = initial_commitment_bitcoin_tx.get_sighash_all(&funding_script, self.context.channel_value_satoshis);
- // They sign our commitment transaction, allowing us to broadcast the tx if we wish.
- if let Err(_) = self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, &self.context.get_counterparty_pubkeys().funding_pubkey) {
- return Err(ChannelError::Close("Invalid funding_signed signature from peer".to_owned()));
+ if let Some((_, update_state)) = self.context.pending_update_fee {
+ if update_state == FeeUpdateState::RemoteAnnounced {
+ debug_assert!(!self.context.is_outbound());
+ self.context.pending_update_fee = None;
}
}
- let holder_commitment_tx = HolderCommitmentTransaction::new(
- initial_commitment_tx,
- msg.signature,
- Vec::new(),
- &self.context.get_holder_pubkeys().funding_pubkey,
- self.context.counterparty_funding_pubkey()
- );
-
- self.context.holder_signer.validate_holder_commitment(&holder_commitment_tx, Vec::new())
- .map_err(|_| ChannelError::Close("Failed to validate our commitment".to_owned()))?;
-
-
- let funding_redeemscript = self.context.get_funding_redeemscript();
- let funding_txo = self.context.get_funding_txo().unwrap();
- let funding_txo_script = funding_redeemscript.to_v0_p2wsh();
- let obscure_factor = get_commitment_transaction_number_obscure_factor(&self.context.get_holder_pubkeys().payment_point, &self.context.get_counterparty_pubkeys().payment_point, self.context.is_outbound());
- let shutdown_script = self.context.shutdown_scriptpubkey.clone().map(|script| script.into_inner());
- let mut monitor_signer = signer_provider.derive_channel_signer(self.context.channel_value_satoshis, self.context.channel_keys_id);
- monitor_signer.provide_channel_parameters(&self.context.channel_transaction_parameters);
- let channel_monitor = ChannelMonitor::new(self.context.secp_ctx.clone(), monitor_signer,
- shutdown_script, self.context.get_holder_selected_contest_delay(),
- &self.context.destination_script, (funding_txo, funding_txo_script),
- &self.context.channel_transaction_parameters,
- funding_redeemscript.clone(), self.context.channel_value_satoshis,
- obscure_factor,
- holder_commitment_tx, best_block, self.context.counterparty_node_id);
-
- channel_monitor.provide_latest_counterparty_commitment_tx(counterparty_initial_bitcoin_tx.txid, Vec::new(), self.context.cur_counterparty_commitment_transaction_number, self.context.counterparty_cur_commitment_point.unwrap(), logger);
+ for htlc in self.context.pending_outbound_htlcs.iter_mut() {
+ if let OutboundHTLCState::RemoteRemoved(_) = htlc.state {
+ // They sent us an update to remove this but haven't yet sent the corresponding
+ // commitment_signed, we need to move it back to Committed and they can re-send
+ // the update upon reconnection.
+ htlc.state = OutboundHTLCState::Committed;
+ }
+ }
- assert_eq!(self.context.channel_state & (ChannelState::MonitorUpdateInProgress as u32), 0); // We have no had any monitor(s) yet to fail update!
- self.context.channel_state = ChannelState::FundingSent as u32;
- self.context.cur_holder_commitment_transaction_number -= 1;
- self.context.cur_counterparty_commitment_transaction_number -= 1;
+ self.context.sent_message_awaiting_response = None;
- log_info!(logger, "Received funding_signed from peer for channel {}", log_bytes!(self.context.channel_id()));
+ self.context.channel_state.set_peer_disconnected();
+ log_trace!(logger, "Peer disconnection resulted in {} remote-announced HTLC drops on channel {}", inbound_drop_count, &self.context.channel_id());
+ Ok(())
+ }
- let need_channel_ready = self.check_get_channel_ready(0).is_some();
- self.monitor_updating_paused(false, false, need_channel_ready, Vec::new(), Vec::new(), Vec::new());
- Ok(channel_monitor)
+ /// Indicates that a ChannelMonitor update is in progress and has not yet been fully persisted.
+ /// This must be called before we return the [`ChannelMonitorUpdate`] back to the
+ /// [`ChannelManager`], which will call [`Self::monitor_updating_restored`] once the monitor
+ /// update completes (potentially immediately).
+ /// The messages which were generated with the monitor update must *not* have been sent to the
+ /// remote end, and must instead have been dropped. They will be regenerated when
+ /// [`Self::monitor_updating_restored`] is called.
+ ///
+ /// [`ChannelManager`]: super::channelmanager::ChannelManager
+ /// [`chain::Watch`]: crate::chain::Watch
+ /// [`ChannelMonitorUpdateStatus::InProgress`]: crate::chain::ChannelMonitorUpdateStatus::InProgress
+ fn monitor_updating_paused(&mut self, resend_raa: bool, resend_commitment: bool,
+ resend_channel_ready: bool, mut pending_forwards: Vec<(PendingHTLCInfo, u64)>,
+ mut pending_fails: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>,
+ mut pending_finalized_claimed_htlcs: Vec<HTLCSource>
+ ) {
+ self.context.monitor_pending_revoke_and_ack |= resend_raa;
+ self.context.monitor_pending_commitment_signed |= resend_commitment;
+ self.context.monitor_pending_channel_ready |= resend_channel_ready;
+ self.context.monitor_pending_forwards.append(&mut pending_forwards);
+ self.context.monitor_pending_failures.append(&mut pending_fails);
+ self.context.monitor_pending_finalized_fulfills.append(&mut pending_finalized_claimed_htlcs);
+ self.context.channel_state.set_monitor_update_in_progress();
}
- /// Handles a channel_ready message from our peer. If we've already sent our channel_ready
- /// and the channel is now usable (and public), this may generate an announcement_signatures to
- /// reply with.
- pub fn channel_ready<NS: Deref, L: Deref>(
- &mut self, msg: &msgs::ChannelReady, node_signer: &NS, genesis_block_hash: BlockHash,
- user_config: &UserConfig, best_block: &BestBlock, logger: &L
- ) -> Result<Option<msgs::AnnouncementSignatures>, ChannelError>
+ /// Indicates that the latest ChannelMonitor update has been committed by the client
+ /// successfully and we should restore normal operation. Returns messages which should be sent
+ /// to the remote side.
+ pub fn monitor_updating_restored<L: Deref, NS: Deref>(
+ &mut self, logger: &L, node_signer: &NS, chain_hash: ChainHash,
+ user_config: &UserConfig, best_block_height: u32
+ ) -> MonitorRestoreUpdates
where
- NS::Target: NodeSigner,
- L::Target: Logger
+ L::Target: Logger,
+ NS::Target: NodeSigner
{
- if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
- self.context.workaround_lnd_bug_4006 = Some(msg.clone());
- return Err(ChannelError::Ignore("Peer sent channel_ready when we needed a channel_reestablish. The peer is likely lnd, see https://github.com/lightningnetwork/lnd/issues/4006".to_owned()));
- }
+ assert!(self.context.channel_state.is_monitor_update_in_progress());
+ self.context.channel_state.clear_monitor_update_in_progress();
- if let Some(scid_alias) = msg.short_channel_id_alias {
- if Some(scid_alias) != self.context.short_channel_id {
- // The scid alias provided can be used to route payments *from* our counterparty,
- // i.e. can be used for inbound payments and provided in invoices, but is not used
- // when routing outbound payments.
- self.context.latest_inbound_scid_alias = Some(scid_alias);
- }
+ // If we're past (or at) the AwaitingChannelReady stage on an outbound channel, try to
+ // (re-)broadcast the funding transaction as we may have declined to broadcast it when we
+ // first received the funding_signed.
+ let mut funding_broadcastable =
+ if self.context.is_outbound() &&
+ (matches!(self.context.channel_state, ChannelState::AwaitingChannelReady(flags) if !flags.is_set(AwaitingChannelReadyFlags::WAITING_FOR_BATCH)) ||
+ matches!(self.context.channel_state, ChannelState::ChannelReady(_)))
+ {
+ self.context.funding_transaction.take()
+ } else { None };
+ // That said, if the funding transaction is already confirmed (ie we're active with a
+ // minimum_depth over 0) don't bother re-broadcasting the confirmed funding tx.
+ if matches!(self.context.channel_state, ChannelState::ChannelReady(_)) && self.context.minimum_depth != Some(0) {
+ funding_broadcastable = None;
}
- let non_shutdown_state = self.context.channel_state & (!MULTI_STATE_FLAGS);
+ // We will never broadcast the funding transaction when we're in MonitorUpdateInProgress
+ // (and we assume the user never directly broadcasts the funding transaction, instead
+ // waiting for us to do it). Thus, we can only ever hit monitor_pending_channel_ready when we're
+ // * an inbound channel that failed to persist the monitor on funding_created and we got
+ // the funding transaction confirmed before the monitor was persisted, or
+ // * a 0-conf channel where we intended to send the channel_ready before any broadcast at all.
+ let channel_ready = if self.context.monitor_pending_channel_ready {
+ assert!(!self.context.is_outbound() || self.context.minimum_depth == Some(0),
+ "Funding transaction broadcast by the local client before it should have - LDK didn't do it!");
+ self.context.monitor_pending_channel_ready = false;
+ let next_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
+ Some(msgs::ChannelReady {
+ channel_id: self.context.channel_id(),
+ next_per_commitment_point,
+ short_channel_id_alias: Some(self.context.outbound_scid_alias),
+ })
+ } else { None };
- if non_shutdown_state == ChannelState::FundingSent as u32 {
- self.context.channel_state |= ChannelState::TheirChannelReady as u32;
- } else if non_shutdown_state == (ChannelState::FundingSent as u32 | ChannelState::OurChannelReady as u32) {
- self.context.channel_state = ChannelState::ChannelReady as u32 | (self.context.channel_state & MULTI_STATE_FLAGS);
- self.context.update_time_counter += 1;
- } else if self.context.channel_state & (ChannelState::ChannelReady as u32) != 0 ||
- // If we reconnected before sending our `channel_ready` they may still resend theirs:
- (self.context.channel_state & (ChannelState::FundingSent as u32 | ChannelState::TheirChannelReady as u32) ==
- (ChannelState::FundingSent as u32 | ChannelState::TheirChannelReady as u32))
- {
- // They probably disconnected/reconnected and re-sent the channel_ready, which is
- // required, or they're sending a fresh SCID alias.
- let expected_point =
- if self.context.cur_counterparty_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 1 {
- // If they haven't ever sent an updated point, the point they send should match
- // the current one.
- self.context.counterparty_cur_commitment_point
- } else if self.context.cur_counterparty_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 2 {
- // If we've advanced the commitment number once, the second commitment point is
- // at `counterparty_prev_commitment_point`, which is not yet revoked.
- debug_assert!(self.context.counterparty_prev_commitment_point.is_some());
- self.context.counterparty_prev_commitment_point
- } else {
- // If they have sent updated points, channel_ready is always supposed to match
- // their "first" point, which we re-derive here.
- Some(PublicKey::from_secret_key(&self.context.secp_ctx, &SecretKey::from_slice(
- &self.context.commitment_secrets.get_secret(INITIAL_COMMITMENT_NUMBER - 1).expect("We should have all prev secrets available")
- ).expect("We already advanced, so previous secret keys should have been validated already")))
- };
- if expected_point != Some(msg.next_per_commitment_point) {
- return Err(ChannelError::Close("Peer sent a reconnect channel_ready with a different point".to_owned()));
- }
- return Ok(None);
- } else {
- return Err(ChannelError::Close("Peer sent a channel_ready at a strange time".to_owned()));
- }
+ let announcement_sigs = self.get_announcement_sigs(node_signer, chain_hash, user_config, best_block_height, logger);
- self.context.counterparty_prev_commitment_point = self.context.counterparty_cur_commitment_point;
- self.context.counterparty_cur_commitment_point = Some(msg.next_per_commitment_point);
+ let mut accepted_htlcs = Vec::new();
+ mem::swap(&mut accepted_htlcs, &mut self.context.monitor_pending_forwards);
+ let mut failed_htlcs = Vec::new();
+ mem::swap(&mut failed_htlcs, &mut self.context.monitor_pending_failures);
+ let mut finalized_claimed_htlcs = Vec::new();
+ mem::swap(&mut finalized_claimed_htlcs, &mut self.context.monitor_pending_finalized_fulfills);
- log_info!(logger, "Received channel_ready from peer for channel {}", log_bytes!(self.context.channel_id()));
+ if self.context.channel_state.is_peer_disconnected() {
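+ // If the peer disconnected while the monitor update was in flight, don't hand back an
+ // RAA or commitment update now; they'll be regenerated as part of channel_reestablish.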
+ self.context.monitor_pending_revoke_and_ack = false;
+ self.context.monitor_pending_commitment_signed = false;
+ return MonitorRestoreUpdates {
+ raa: None, commitment_update: None, order: RAACommitmentOrder::RevokeAndACKFirst,
+ accepted_htlcs, failed_htlcs, finalized_claimed_htlcs, funding_broadcastable, channel_ready, announcement_sigs
+ };
+ }
- Ok(self.get_announcement_sigs(node_signer, genesis_block_hash, user_config, best_block.height(), logger))
- }
+ let raa = if self.context.monitor_pending_revoke_and_ack {
+ Some(self.get_last_revoke_and_ack())
+ } else { None };
+ let commitment_update = if self.context.monitor_pending_commitment_signed {
+ self.get_last_commitment_update_for_send(logger).ok()
+ } else { None };
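+ // Re-sending a commitment_signed obligates the peer to respond with a revoke_and_ack,
+ // so note that we're awaiting a response, letting us disconnect stale peers.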
+ if commitment_update.is_some() {
+ self.mark_awaiting_response();
+ }
- /// Returns transaction if there is pending funding transaction that is yet to broadcast
- pub fn unbroadcasted_funding(&self) -> Option<Transaction> {
- if self.context.channel_state & (ChannelState::FundingCreated as u32) != 0 {
- self.context.funding_transaction.clone()
- } else {
- None
+ self.context.monitor_pending_revoke_and_ack = false;
+ self.context.monitor_pending_commitment_signed = false;
+ let order = self.context.resend_order.clone();
+ log_debug!(logger, "Restored monitor updating in channel {} resulting in {}{} commitment update and {} RAA, with {} first",
+ &self.context.channel_id(), if funding_broadcastable.is_some() { "a funding broadcastable, " } else { "" },
+ if commitment_update.is_some() { "a" } else { "no" }, if raa.is_some() { "an" } else { "no" },
+ match order { RAACommitmentOrder::CommitmentFirst => "commitment", RAACommitmentOrder::RevokeAndACKFirst => "RAA"});
+ MonitorRestoreUpdates {
+ raa, commitment_update, order, accepted_htlcs, failed_htlcs, finalized_claimed_htlcs, funding_broadcastable, channel_ready, announcement_sigs
}
}
- /// Returns a HTLCStats about inbound pending htlcs
- fn get_inbound_pending_htlc_stats(&self, outbound_feerate_update: Option<u32>) -> HTLCStats {
- let context = &self.context;
- let mut stats = HTLCStats {
- pending_htlcs: context.pending_inbound_htlcs.len() as u32,
- pending_htlcs_value_msat: 0,
- on_counterparty_tx_dust_exposure_msat: 0,
- on_holder_tx_dust_exposure_msat: 0,
- holding_cell_msat: 0,
- on_holder_tx_holding_cell_htlcs_count: 0,
- };
+ pub fn update_fee<F: Deref, L: Deref>(&mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, msg: &msgs::UpdateFee, logger: &L) -> Result<(), ChannelError>
+ where F::Target: FeeEstimator, L::Target: Logger
+ {
+ if self.context.is_outbound() {
+ return Err(ChannelError::Close("Non-funding remote tried to update channel fee".to_owned()));
+ }
+ if self.context.channel_state.is_peer_disconnected() {
+ return Err(ChannelError::Close("Peer sent update_fee when we needed a channel_reestablish".to_owned()));
+ }
+ Channel::<SP>::check_remote_fee(&self.context.channel_type, fee_estimator, msg.feerate_per_kw, Some(self.context.feerate_per_kw), logger)?;
- let (htlc_timeout_dust_limit, htlc_success_dust_limit) = if context.opt_anchors() {
- (0, 0)
- } else {
- let dust_buffer_feerate = context.get_dust_buffer_feerate(outbound_feerate_update) as u64;
- (dust_buffer_feerate * htlc_timeout_tx_weight(false) / 1000,
- dust_buffer_feerate * htlc_success_tx_weight(false) / 1000)
- };
- let counterparty_dust_limit_timeout_sat = htlc_timeout_dust_limit + context.counterparty_dust_limit_satoshis;
- let holder_dust_limit_success_sat = htlc_success_dust_limit + context.holder_dust_limit_satoshis;
- for ref htlc in context.pending_inbound_htlcs.iter() {
- stats.pending_htlcs_value_msat += htlc.amount_msat;
- if htlc.amount_msat / 1000 < counterparty_dust_limit_timeout_sat {
- stats.on_counterparty_tx_dust_exposure_msat += htlc.amount_msat;
+ self.context.pending_update_fee = Some((msg.feerate_per_kw, FeeUpdateState::RemoteAnnounced));
+ self.context.update_time_counter += 1;
+ // Check that we won't be pushed over our dust exposure limit by the feerate increase.
+ if !self.context.channel_type.supports_anchors_zero_fee_htlc_tx() {
+ let inbound_stats = self.context.get_inbound_pending_htlc_stats(None);
+ let outbound_stats = self.context.get_outbound_pending_htlc_stats(None);
+ let holder_tx_dust_exposure = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat;
+ let counterparty_tx_dust_exposure = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat;
+ let max_dust_htlc_exposure_msat = self.context.get_max_dust_htlc_exposure_msat(fee_estimator);
+ if holder_tx_dust_exposure > max_dust_htlc_exposure_msat {
+ return Err(ChannelError::Close(format!("Peer sent update_fee with a feerate ({}) which may over-expose us to dust-in-flight on our own transactions (totaling {} msat)",
+ msg.feerate_per_kw, holder_tx_dust_exposure)));
}
- if htlc.amount_msat / 1000 < holder_dust_limit_success_sat {
- stats.on_holder_tx_dust_exposure_msat += htlc.amount_msat;
+ if counterparty_tx_dust_exposure > max_dust_htlc_exposure_msat {
+ return Err(ChannelError::Close(format!("Peer sent update_fee with a feerate ({}) which may over-expose us to dust-in-flight on our counterparty's transactions (totaling {} msat)",
+ msg.feerate_per_kw, counterparty_tx_dust_exposure)));
}
}
- stats
+ Ok(())
}
- /// Returns a HTLCStats about pending outbound htlcs, *including* pending adds in our holding cell.
- fn get_outbound_pending_htlc_stats(&self, outbound_feerate_update: Option<u32>) -> HTLCStats {
- let context = &self.context;
- let mut stats = HTLCStats {
- pending_htlcs: context.pending_outbound_htlcs.len() as u32,
- pending_htlcs_value_msat: 0,
- on_counterparty_tx_dust_exposure_msat: 0,
- on_holder_tx_dust_exposure_msat: 0,
- holding_cell_msat: 0,
- on_holder_tx_holding_cell_htlcs_count: 0,
- };
+ /// Indicates that the signer may have some signatures for us, so we should retry if we're
+ /// blocked.
+ #[allow(unused)]
+ pub fn signer_maybe_unblocked<L: Deref>(&mut self, logger: &L) -> SignerResumeUpdates where L::Target: Logger {
+ let commitment_update = if self.context.signer_pending_commitment_update {
+ self.get_last_commitment_update_for_send(logger).ok()
+ } else { None };
+ let funding_signed = if self.context.signer_pending_funding && !self.context.is_outbound() {
+ self.context.get_funding_signed_msg(logger).1
+ } else { None };
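+ // If we just produced the funding_signed, we may also be able to send channel_ready
+ // immediately (e.g. on a 0conf channel).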
+ let channel_ready = if funding_signed.is_some() {
+ self.check_get_channel_ready(0)
+ } else { None };
- let (htlc_timeout_dust_limit, htlc_success_dust_limit) = if context.opt_anchors() {
- (0, 0)
- } else {
- let dust_buffer_feerate = context.get_dust_buffer_feerate(outbound_feerate_update) as u64;
- (dust_buffer_feerate * htlc_timeout_tx_weight(false) / 1000,
- dust_buffer_feerate * htlc_success_tx_weight(false) / 1000)
- };
- let counterparty_dust_limit_success_sat = htlc_success_dust_limit + context.counterparty_dust_limit_satoshis;
- let holder_dust_limit_timeout_sat = htlc_timeout_dust_limit + context.holder_dust_limit_satoshis;
- for ref htlc in context.pending_outbound_htlcs.iter() {
- stats.pending_htlcs_value_msat += htlc.amount_msat;
- if htlc.amount_msat / 1000 < counterparty_dust_limit_success_sat {
- stats.on_counterparty_tx_dust_exposure_msat += htlc.amount_msat;
- }
- if htlc.amount_msat / 1000 < holder_dust_limit_timeout_sat {
- stats.on_holder_tx_dust_exposure_msat += htlc.amount_msat;
- }
- }
+ log_trace!(logger, "Signer unblocked with {} commitment_update, {} funding_signed and {} channel_ready",
+ if commitment_update.is_some() { "a" } else { "no" },
+ if funding_signed.is_some() { "a" } else { "no" },
+ if channel_ready.is_some() { "a" } else { "no" });
- for update in context.holding_cell_htlc_updates.iter() {
- if let &HTLCUpdateAwaitingACK::AddHTLC { ref amount_msat, .. } = update {
- stats.pending_htlcs += 1;
- stats.pending_htlcs_value_msat += amount_msat;
- stats.holding_cell_msat += amount_msat;
- if *amount_msat / 1000 < counterparty_dust_limit_success_sat {
- stats.on_counterparty_tx_dust_exposure_msat += amount_msat;
- }
- if *amount_msat / 1000 < holder_dust_limit_timeout_sat {
- stats.on_holder_tx_dust_exposure_msat += amount_msat;
- } else {
- stats.on_holder_tx_holding_cell_htlcs_count += 1;
- }
- }
+ SignerResumeUpdates {
+ commitment_update,
+ funding_signed,
+ channel_ready,
}
- stats
}
- /// Get the available balances, see [`AvailableBalances`]'s fields for more info.
- /// Doesn't bother handling the
- /// if-we-removed-it-already-but-haven't-fully-resolved-they-can-still-send-an-inbound-HTLC
- /// corner case properly.
- pub fn get_available_balances(&self) -> AvailableBalances {
- let context = &self.context;
- // Note that we have to handle overflow due to the above case.
- let inbound_stats = self.get_inbound_pending_htlc_stats(None);
- let outbound_stats = self.get_outbound_pending_htlc_stats(None);
-
- let mut balance_msat = context.value_to_self_msat;
- for ref htlc in context.pending_inbound_htlcs.iter() {
- if let InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::Fulfill(_)) = htlc.state {
- balance_msat += htlc.amount_msat;
- }
+ fn get_last_revoke_and_ack(&self) -> msgs::RevokeAndACK {
+ let next_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
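+ // Commitment numbers count down from INITIAL_COMMITMENT_NUMBER: `cur` is our next
+ // commitment, `cur + 1` our current one, and `cur + 2` the old state being revoked here.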
+ let per_commitment_secret = self.context.holder_signer.as_ref().release_commitment_secret(self.context.cur_holder_commitment_transaction_number + 2);
+ msgs::RevokeAndACK {
+ channel_id: self.context.channel_id,
+ per_commitment_secret,
+ next_per_commitment_point,
+ #[cfg(taproot)]
+ next_local_nonce: None,
}
- balance_msat -= outbound_stats.pending_htlcs_value_msat;
-
- let outbound_capacity_msat = context.value_to_self_msat
- .saturating_sub(outbound_stats.pending_htlcs_value_msat)
- .saturating_sub(
- context.counterparty_selected_channel_reserve_satoshis.unwrap_or(0) * 1000);
+ }
- let mut available_capacity_msat = outbound_capacity_msat;
+ /// Gets the last commitment update for immediate sending to our peer.
+ fn get_last_commitment_update_for_send<L: Deref>(&mut self, logger: &L) -> Result<msgs::CommitmentUpdate, ()> where L::Target: Logger {
+ let mut update_add_htlcs = Vec::new();
+ let mut update_fulfill_htlcs = Vec::new();
+ let mut update_fail_htlcs = Vec::new();
+ let mut update_fail_malformed_htlcs = Vec::new();
- if context.is_outbound() {
- // We should mind channel commit tx fee when computing how much of the available capacity
- // can be used in the next htlc. Mirrors the logic in send_htlc.
- //
- // The fee depends on whether the amount we will be sending is above dust or not,
- // and the answer will in turn change the amount itself — making it a circular
- // dependency.
- // This complicates the computation around dust-values, up to the one-htlc-value.
- let mut real_dust_limit_timeout_sat = context.holder_dust_limit_satoshis;
- if !context.opt_anchors() {
- real_dust_limit_timeout_sat += context.feerate_per_kw as u64 * htlc_timeout_tx_weight(false) / 1000;
+ for htlc in self.context.pending_outbound_htlcs.iter() {
+ if let &OutboundHTLCState::LocalAnnounced(ref onion_packet) = &htlc.state {
+ update_add_htlcs.push(msgs::UpdateAddHTLC {
+ channel_id: self.context.channel_id(),
+ htlc_id: htlc.htlc_id,
+ amount_msat: htlc.amount_msat,
+ payment_hash: htlc.payment_hash,
+ cltv_expiry: htlc.cltv_expiry,
+ onion_routing_packet: (**onion_packet).clone(),
+ skimmed_fee_msat: htlc.skimmed_fee_msat,
+ blinding_point: htlc.blinding_point,
+ });
}
+ }
- let htlc_above_dust = HTLCCandidate::new(real_dust_limit_timeout_sat * 1000, HTLCInitiator::LocalOffered);
- let max_reserved_commit_tx_fee_msat = FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE * self.next_local_commit_tx_fee_msat(htlc_above_dust, Some(()));
- let htlc_dust = HTLCCandidate::new(real_dust_limit_timeout_sat * 1000 - 1, HTLCInitiator::LocalOffered);
- let min_reserved_commit_tx_fee_msat = FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE * self.next_local_commit_tx_fee_msat(htlc_dust, Some(()));
-
- // We will first subtract the fee as if we were above-dust. Then, if the resulting
- // value ends up being below dust, we have this fee available again. In that case,
- // match the value to right-below-dust.
- let mut capacity_minus_commitment_fee_msat: i64 = (available_capacity_msat as i64) - (max_reserved_commit_tx_fee_msat as i64);
- if capacity_minus_commitment_fee_msat < (real_dust_limit_timeout_sat as i64) * 1000 {
- let one_htlc_difference_msat = max_reserved_commit_tx_fee_msat - min_reserved_commit_tx_fee_msat;
- debug_assert!(one_htlc_difference_msat != 0);
- capacity_minus_commitment_fee_msat += one_htlc_difference_msat as i64;
- capacity_minus_commitment_fee_msat = cmp::min(real_dust_limit_timeout_sat as i64 * 1000 - 1, capacity_minus_commitment_fee_msat);
- available_capacity_msat = cmp::max(0, cmp::min(capacity_minus_commitment_fee_msat, available_capacity_msat as i64)) as u64;
- } else {
- available_capacity_msat = capacity_minus_commitment_fee_msat as u64;
- }
- } else {
- // If the channel is inbound (i.e. counterparty pays the fee), we need to make sure
- // sending a new HTLC won't reduce their balance below our reserve threshold.
- let mut real_dust_limit_success_sat = context.counterparty_dust_limit_satoshis;
- if !context.opt_anchors() {
- real_dust_limit_success_sat += context.feerate_per_kw as u64 * htlc_success_tx_weight(false) / 1000;
- }
-
- let htlc_above_dust = HTLCCandidate::new(real_dust_limit_success_sat * 1000, HTLCInitiator::LocalOffered);
- let max_reserved_commit_tx_fee_msat = self.next_remote_commit_tx_fee_msat(htlc_above_dust, None);
-
- let holder_selected_chan_reserve_msat = context.holder_selected_channel_reserve_satoshis * 1000;
- let remote_balance_msat = (context.channel_value_satoshis * 1000 - context.value_to_self_msat)
- .saturating_sub(inbound_stats.pending_htlcs_value_msat);
-
- if remote_balance_msat < max_reserved_commit_tx_fee_msat + holder_selected_chan_reserve_msat {
- // If another HTLC's fee would reduce the remote's balance below the reserve limit
- // we've selected for them, we can only send dust HTLCs.
- available_capacity_msat = cmp::min(available_capacity_msat, real_dust_limit_success_sat * 1000 - 1);
+ for htlc in self.context.pending_inbound_htlcs.iter() {
+ if let &InboundHTLCState::LocalRemoved(ref reason) = &htlc.state {
+ match reason {
+ &InboundHTLCRemovalReason::FailRelay(ref err_packet) => {
+ update_fail_htlcs.push(msgs::UpdateFailHTLC {
+ channel_id: self.context.channel_id(),
+ htlc_id: htlc.htlc_id,
+ reason: err_packet.clone()
+ });
+ },
+ &InboundHTLCRemovalReason::FailMalformed((ref sha256_of_onion, ref failure_code)) => {
+ update_fail_malformed_htlcs.push(msgs::UpdateFailMalformedHTLC {
+ channel_id: self.context.channel_id(),
+ htlc_id: htlc.htlc_id,
+ sha256_of_onion: sha256_of_onion.clone(),
+ failure_code: failure_code.clone(),
+ });
+ },
+ &InboundHTLCRemovalReason::Fulfill(ref payment_preimage) => {
+ update_fulfill_htlcs.push(msgs::UpdateFulfillHTLC {
+ channel_id: self.context.channel_id(),
+ htlc_id: htlc.htlc_id,
+ payment_preimage: payment_preimage.clone(),
+ });
+ },
+ }
}
}
- let mut next_outbound_htlc_minimum_msat = context.counterparty_htlc_minimum_msat;
-
- // If we get close to our maximum dust exposure, we end up in a situation where we can send
- // between zero and the remaining dust exposure limit remaining OR above the dust limit.
- // Because we cannot express this as a simple min/max, we prefer to tell the user they can
- // send above the dust limit (as the router can always overpay to meet the dust limit).
- let mut remaining_msat_below_dust_exposure_limit = None;
- let mut dust_exposure_dust_limit_msat = 0;
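+ // Only the channel funder ever sends update_fee, and then only if a fee update is still
+ // pending.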
+ let update_fee = if self.context.is_outbound() && self.context.pending_update_fee.is_some() {
+ Some(msgs::UpdateFee {
+ channel_id: self.context.channel_id(),
+ feerate_per_kw: self.context.pending_update_fee.unwrap().0,
+ })
+ } else { None };
- let (htlc_success_dust_limit, htlc_timeout_dust_limit) = if context.opt_anchors() {
- (context.counterparty_dust_limit_satoshis, context.holder_dust_limit_satoshis)
+ log_trace!(logger, "Regenerating latest commitment update in channel {} with{} {} update_adds, {} update_fulfills, {} update_fails, and {} update_fail_malformeds",
+ &self.context.channel_id(), if update_fee.is_some() { " update_fee," } else { "" },
+ update_add_htlcs.len(), update_fulfill_htlcs.len(), update_fail_htlcs.len(), update_fail_malformed_htlcs.len());
+ let commitment_signed = if let Ok(update) = self.send_commitment_no_state_update(logger).map(|(cu, _)| cu) {
+ if self.context.signer_pending_commitment_update {
+ log_trace!(logger, "Commitment update generated: clearing signer_pending_commitment_update");
+ self.context.signer_pending_commitment_update = false;
+ }
+ update
} else {
- let dust_buffer_feerate = context.get_dust_buffer_feerate(None) as u64;
- (context.counterparty_dust_limit_satoshis + dust_buffer_feerate * htlc_success_tx_weight(false) / 1000,
- context.holder_dust_limit_satoshis + dust_buffer_feerate * htlc_timeout_tx_weight(false) / 1000)
+ if !self.context.signer_pending_commitment_update {
+ log_trace!(logger, "Commitment update awaiting signer: setting signer_pending_commitment_update");
+ self.context.signer_pending_commitment_update = true;
+ }
+ return Err(());
};
- let on_counterparty_dust_htlc_exposure_msat = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat;
- if on_counterparty_dust_htlc_exposure_msat as i64 + htlc_success_dust_limit as i64 * 1000 - 1 > context.get_max_dust_htlc_exposure_msat() as i64 {
- remaining_msat_below_dust_exposure_limit =
- Some(context.get_max_dust_htlc_exposure_msat().saturating_sub(on_counterparty_dust_htlc_exposure_msat));
- dust_exposure_dust_limit_msat = cmp::max(dust_exposure_dust_limit_msat, htlc_success_dust_limit * 1000);
- }
+ Ok(msgs::CommitmentUpdate {
+ update_add_htlcs, update_fulfill_htlcs, update_fail_htlcs, update_fail_malformed_htlcs, update_fee,
+ commitment_signed,
+ })
+ }
- let on_holder_dust_htlc_exposure_msat = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat;
- if on_holder_dust_htlc_exposure_msat as i64 + htlc_timeout_dust_limit as i64 * 1000 - 1 > context.get_max_dust_htlc_exposure_msat() as i64 {
- remaining_msat_below_dust_exposure_limit = Some(cmp::min(
- remaining_msat_below_dust_exposure_limit.unwrap_or(u64::max_value()),
- context.get_max_dust_htlc_exposure_msat().saturating_sub(on_holder_dust_htlc_exposure_msat)));
- dust_exposure_dust_limit_msat = cmp::max(dust_exposure_dust_limit_msat, htlc_timeout_dust_limit * 1000);
- }
+ /// Gets the `Shutdown` message we should send our peer on reconnect, if any.
+ pub fn get_outbound_shutdown(&self) -> Option<msgs::Shutdown> {
+ if self.context.channel_state.is_local_shutdown_sent() {
+ assert!(self.context.shutdown_scriptpubkey.is_some());
+ Some(msgs::Shutdown {
+ channel_id: self.context.channel_id,
+ scriptpubkey: self.get_closing_scriptpubkey(),
+ })
+ } else { None }
+ }
- if let Some(remaining_limit_msat) = remaining_msat_below_dust_exposure_limit {
- if available_capacity_msat < dust_exposure_dust_limit_msat {
- available_capacity_msat = cmp::min(available_capacity_msat, remaining_limit_msat);
- } else {
- next_outbound_htlc_minimum_msat = cmp::max(next_outbound_htlc_minimum_msat, dust_exposure_dust_limit_msat);
- }
+ /// May panic if some calls other than message-handling calls (which will all Err immediately)
+ /// have been called between [`Self::remove_uncommitted_htlcs_and_mark_paused`] and this call.
+ ///
+ /// Some links printed in log lines are included here to check them during build (when run with
+ /// `cargo doc --document-private-items`):
+ /// [`super::channelmanager::ChannelManager::force_close_without_broadcasting_txn`] and
+ /// [`super::channelmanager::ChannelManager::force_close_all_channels_without_broadcasting_txn`].
+ pub fn channel_reestablish<L: Deref, NS: Deref>(
+ &mut self, msg: &msgs::ChannelReestablish, logger: &L, node_signer: &NS,
+ chain_hash: ChainHash, user_config: &UserConfig, best_block: &BestBlock
+ ) -> Result<ReestablishResponses, ChannelError>
+ where
+ L::Target: Logger,
+ NS::Target: NodeSigner
+ {
+ if !self.context.channel_state.is_peer_disconnected() {
+ // While BOLT 2 doesn't indicate explicitly we should error this channel here, it
+ // almost certainly indicates we are going to end up out-of-sync in some way, so we
+ // just close here instead of trying to recover.
+ return Err(ChannelError::Close("Peer sent a loose channel_reestablish not after reconnect".to_owned()));
}
- available_capacity_msat = cmp::min(available_capacity_msat,
- context.counterparty_max_htlc_value_in_flight_msat - outbound_stats.pending_htlcs_value_msat);
-
- if outbound_stats.pending_htlcs + 1 > context.counterparty_max_accepted_htlcs as u32 {
- available_capacity_msat = 0;
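+ // The wire counters count upwards (commitments/revocations seen so far), so values at or
+ // above INITIAL_COMMITMENT_NUMBER (2^48 - 1), or a zero next_local_commitment_number,
+ // cannot be legitimate.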
+ if msg.next_local_commitment_number >= INITIAL_COMMITMENT_NUMBER || msg.next_remote_commitment_number >= INITIAL_COMMITMENT_NUMBER ||
+ msg.next_local_commitment_number == 0 {
+ return Err(ChannelError::Close("Peer sent an invalid channel_reestablish to force close in a non-standard way".to_owned()));
}
- AvailableBalances {
- inbound_capacity_msat: cmp::max(context.channel_value_satoshis as i64 * 1000
- - context.value_to_self_msat as i64
- - self.get_inbound_pending_htlc_stats(None).pending_htlcs_value_msat as i64
- - context.holder_selected_channel_reserve_satoshis as i64 * 1000,
- 0) as u64,
- outbound_capacity_msat,
- next_outbound_htlc_limit_msat: available_capacity_msat,
- next_outbound_htlc_minimum_msat,
- balance_msat,
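+ // Translate our countdown-style holder commitment number into the count-up convention
+ // used on the wire so we can compare against the peer's claims directly.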
+ let our_commitment_transaction = INITIAL_COMMITMENT_NUMBER - self.context.cur_holder_commitment_transaction_number - 1;
+ if msg.next_remote_commitment_number > 0 {
+ let expected_point = self.context.holder_signer.as_ref().get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - msg.next_remote_commitment_number + 1, &self.context.secp_ctx);
+ let given_secret = SecretKey::from_slice(&msg.your_last_per_commitment_secret)
+ .map_err(|_| ChannelError::Close("Peer sent a garbage channel_reestablish with unparseable secret key".to_owned()))?;
+ if expected_point != PublicKey::from_secret_key(&self.context.secp_ctx, &given_secret) {
+ return Err(ChannelError::Close("Peer sent a garbage channel_reestablish with secret key not matching the commitment height provided".to_owned()));
+ }
+ if msg.next_remote_commitment_number > our_commitment_transaction {
+ macro_rules! log_and_panic {
+ ($err_msg: expr) => {
+ log_error!(logger, $err_msg, &self.context.channel_id, log_pubkey!(self.context.counterparty_node_id));
+ panic!($err_msg, &self.context.channel_id, log_pubkey!(self.context.counterparty_node_id));
+ }
+ }
+ log_and_panic!("We have fallen behind - we have received proof that if we broadcast our counterparty is going to claim all our funds.\n\
+ This implies you have restarted with lost ChannelMonitor and ChannelManager state, the first of which is a violation of the LDK chain::Watch requirements.\n\
+ More specifically, this means you have a bug in your implementation that can cause loss of funds, or you are running with an old backup, which is unsafe.\n\
+ If you have restored from an old backup and wish to force-close channels and return to operation, you should start up, call\n\
+ ChannelManager::force_close_without_broadcasting_txn on channel {} with counterparty {} or\n\
+ ChannelManager::force_close_all_channels_without_broadcasting_txn, then reconnect to peer(s).\n\
+ Note that due to a long-standing bug in lnd you may have to reach out to peers running lnd-based nodes to ask them to manually force-close channels\n\
+ See https://github.com/lightningdevkit/rust-lightning/issues/1565 for more info.");
+ }
}
- }
- pub fn get_holder_counterparty_selected_channel_reserve_satoshis(&self) -> (u64, Option<u64>) {
- let context = &self.context;
- (context.holder_selected_channel_reserve_satoshis, context.counterparty_selected_channel_reserve_satoshis)
- }
+ // Before we change the state of the channel, we check if the peer is sending a very old
+ // commitment transaction number; if so, we send a warning message.
+ if msg.next_remote_commitment_number + 1 < our_commitment_transaction {
+ return Err(ChannelError::Warn(format!(
+ "Peer attempted to reestablish channel with a very old local commitment transaction: {} (received) vs {} (expected)",
+ msg.next_remote_commitment_number,
+ our_commitment_transaction
+ )));
+ }
- // Get the fee cost in MSATS of a commitment tx with a given number of HTLC outputs.
- // Note that num_htlcs should not include dust HTLCs.
- fn commit_tx_fee_msat(feerate_per_kw: u32, num_htlcs: usize, opt_anchors: bool) -> u64 {
- // Note that we need to divide before multiplying to round properly,
- // since the lowest denomination of bitcoin on-chain is the satoshi.
- (commitment_tx_base_weight(opt_anchors) + num_htlcs as u64 * COMMITMENT_TX_WEIGHT_PER_HTLC) * feerate_per_kw as u64 / 1000 * 1000
- }
+ // Go ahead and unmark PeerDisconnected as various calls we may make check for it (and all
+ // remaining cases either succeed or ErrorMessage-fail).
+ self.context.channel_state.clear_peer_disconnected();
+ self.context.sent_message_awaiting_response = None;
- /// Get the commitment tx fee for the local's (i.e. our) next commitment transaction based on the
- /// number of pending HTLCs that are on track to be in our next commitment tx.
- ///
- /// Optionally includes the `HTLCCandidate` given by `htlc` and an additional non-dust HTLC if
- /// `fee_spike_buffer_htlc` is `Some`.
- ///
- /// The first extra HTLC is useful for determining whether we can accept a further HTLC, the
- /// second allows for creating a buffer to ensure a further HTLC can always be accepted/added.
- ///
- /// Dust HTLCs are excluded.
- fn next_local_commit_tx_fee_msat(&self, htlc: HTLCCandidate, fee_spike_buffer_htlc: Option<()>) -> u64 {
- let context = &self.context;
- assert!(context.is_outbound());
+ let shutdown_msg = self.get_outbound_shutdown();
- let (htlc_success_dust_limit, htlc_timeout_dust_limit) = if context.opt_anchors() {
- (0, 0)
- } else {
- (context.feerate_per_kw as u64 * htlc_success_tx_weight(false) / 1000,
- context.feerate_per_kw as u64 * htlc_timeout_tx_weight(false) / 1000)
- };
- let real_dust_limit_success_sat = htlc_success_dust_limit + context.holder_dust_limit_satoshis;
- let real_dust_limit_timeout_sat = htlc_timeout_dust_limit + context.holder_dust_limit_satoshis;
+ let announcement_sigs = self.get_announcement_sigs(node_signer, chain_hash, user_config, best_block.height(), logger);
- let mut addl_htlcs = 0;
- if fee_spike_buffer_htlc.is_some() { addl_htlcs += 1; }
- match htlc.origin {
- HTLCInitiator::LocalOffered => {
- if htlc.amount_msat / 1000 >= real_dust_limit_timeout_sat {
- addl_htlcs += 1;
- }
- },
- HTLCInitiator::RemoteOffered => {
- if htlc.amount_msat / 1000 >= real_dust_limit_success_sat {
- addl_htlcs += 1;
+ if matches!(self.context.channel_state, ChannelState::AwaitingChannelReady(_)) {
+ // If we're waiting on a monitor update, we shouldn't re-send any channel_ready's.
+ if !self.context.channel_state.is_our_channel_ready() ||
+ self.context.channel_state.is_monitor_update_in_progress() {
+ if msg.next_remote_commitment_number != 0 {
+ return Err(ChannelError::Close("Peer claimed they saw a revoke_and_ack but we haven't sent channel_ready yet".to_owned()));
}
+ // Short circuit the whole handler as there is nothing we can resend them
+ return Ok(ReestablishResponses {
+ channel_ready: None,
+ raa: None, commitment_update: None,
+ order: RAACommitmentOrder::CommitmentFirst,
+ shutdown_msg, announcement_sigs,
+ });
}
- }
- let mut included_htlcs = 0;
- for ref htlc in context.pending_inbound_htlcs.iter() {
- if htlc.amount_msat / 1000 < real_dust_limit_success_sat {
- continue
- }
- // We include LocalRemoved HTLCs here because we may still need to broadcast a commitment
- // transaction including this HTLC if it times out before they RAA.
- included_htlcs += 1;
+ // We have OurChannelReady set!
+ let next_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
+ return Ok(ReestablishResponses {
+ channel_ready: Some(msgs::ChannelReady {
+ channel_id: self.context.channel_id(),
+ next_per_commitment_point,
+ short_channel_id_alias: Some(self.context.outbound_scid_alias),
+ }),
+ raa: None, commitment_update: None,
+ order: RAACommitmentOrder::CommitmentFirst,
+ shutdown_msg, announcement_sigs,
+ });
}
- for ref htlc in context.pending_outbound_htlcs.iter() {
- if htlc.amount_msat / 1000 < real_dust_limit_timeout_sat {
- continue
- }
- match htlc.state {
- OutboundHTLCState::LocalAnnounced {..} => included_htlcs += 1,
- OutboundHTLCState::Committed => included_htlcs += 1,
- OutboundHTLCState::RemoteRemoved {..} => included_htlcs += 1,
- // We don't include AwaitingRemoteRevokeToRemove HTLCs because our next commitment
- // transaction won't be generated until they send us their next RAA, which will mean
- // dropping any HTLCs in this state.
- _ => {},
+ let required_revoke = if msg.next_remote_commitment_number == our_commitment_transaction {
+ // Remote isn't waiting on any RevokeAndACK from us!
+ // Note that if we need to repeat our ChannelReady we'll do that in the next if block.
+ None
+ } else if msg.next_remote_commitment_number + 1 == our_commitment_transaction {
+ if self.context.channel_state.is_monitor_update_in_progress() {
+ self.context.monitor_pending_revoke_and_ack = true;
+ None
+ } else {
+ Some(self.get_last_revoke_and_ack())
}
- }
+ } else {
+ debug_assert!(false, "All values should have been handled in the four cases above");
+ return Err(ChannelError::Close(format!(
+ "Peer attempted to reestablish channel expecting a future local commitment transaction: {} (received) vs {} (expected)",
+ msg.next_remote_commitment_number,
+ our_commitment_transaction
+ )));
+ };
- for htlc in context.holding_cell_htlc_updates.iter() {
- match htlc {
- &HTLCUpdateAwaitingACK::AddHTLC { amount_msat, .. } => {
- if amount_msat / 1000 < real_dust_limit_timeout_sat {
- continue
- }
- included_htlcs += 1
- },
- _ => {}, // Don't include claims/fails that are awaiting ack, because once we get the
- // ack we're guaranteed to never include them in commitment txs anymore.
- }
+ // We increment cur_counterparty_commitment_transaction_number only upon receipt of
+ // revoke_and_ack, not on sending commitment_signed, so we add one if we have
+ // AwaitingRemoteRevoke set, which indicates we sent a commitment_signed but haven't gotten
+ // the corresponding revoke_and_ack back yet.
+ let is_awaiting_remote_revoke = self.context.channel_state.is_awaiting_remote_revoke();
+ if is_awaiting_remote_revoke && !self.is_awaiting_monitor_update() {
+ self.mark_awaiting_response();
}
+ let next_counterparty_commitment_number = INITIAL_COMMITMENT_NUMBER - self.context.cur_counterparty_commitment_transaction_number + if is_awaiting_remote_revoke { 1 } else { 0 };
- let num_htlcs = included_htlcs + addl_htlcs;
- let res = Self::commit_tx_fee_msat(context.feerate_per_kw, num_htlcs, context.opt_anchors());
- #[cfg(any(test, fuzzing))]
- {
- let mut fee = res;
- if fee_spike_buffer_htlc.is_some() {
- fee = Self::commit_tx_fee_msat(context.feerate_per_kw, num_htlcs - 1, context.opt_anchors());
- }
- let total_pending_htlcs = context.pending_inbound_htlcs.len() + context.pending_outbound_htlcs.len()
- + context.holding_cell_htlc_updates.len();
- let commitment_tx_info = CommitmentTxInfoCached {
- fee,
- total_pending_htlcs,
- next_holder_htlc_id: match htlc.origin {
- HTLCInitiator::LocalOffered => context.next_holder_htlc_id + 1,
- HTLCInitiator::RemoteOffered => context.next_holder_htlc_id,
- },
- next_counterparty_htlc_id: match htlc.origin {
- HTLCInitiator::LocalOffered => context.next_counterparty_htlc_id,
- HTLCInitiator::RemoteOffered => context.next_counterparty_htlc_id + 1,
- },
- feerate: context.feerate_per_kw,
- };
- *context.next_local_commitment_tx_fee_info_cached.lock().unwrap() = Some(commitment_tx_info);
- }
- res
- }
-
- /// Get the commitment tx fee for the remote's next commitment transaction based on the number of
- /// pending HTLCs that are on track to be in their next commitment tx
- ///
- /// Optionally includes the `HTLCCandidate` given by `htlc` and an additional non-dust HTLC if
- /// `fee_spike_buffer_htlc` is `Some`.
- ///
- /// The first extra HTLC is useful for determining whether we can accept a further HTLC, the
- /// second allows for creating a buffer to ensure a further HTLC can always be accepted/added.
- ///
- /// Dust HTLCs are excluded.
- fn next_remote_commit_tx_fee_msat(&self, htlc: HTLCCandidate, fee_spike_buffer_htlc: Option<()>) -> u64 {
- let context = &self.context;
- assert!(!context.is_outbound());
+ let channel_ready = if msg.next_local_commitment_number == 1 && INITIAL_COMMITMENT_NUMBER - self.context.cur_holder_commitment_transaction_number == 1 {
+ // We should never have to worry about MonitorUpdateInProgress resending ChannelReady
+ let next_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
+ Some(msgs::ChannelReady {
+ channel_id: self.context.channel_id(),
+ next_per_commitment_point,
+ short_channel_id_alias: Some(self.context.outbound_scid_alias),
+ })
+ } else { None };
- let (htlc_success_dust_limit, htlc_timeout_dust_limit) = if context.opt_anchors() {
- (0, 0)
- } else {
- (context.feerate_per_kw as u64 * htlc_success_tx_weight(false) / 1000,
- context.feerate_per_kw as u64 * htlc_timeout_tx_weight(false) / 1000)
- };
- let real_dust_limit_success_sat = htlc_success_dust_limit + context.counterparty_dust_limit_satoshis;
- let real_dust_limit_timeout_sat = htlc_timeout_dust_limit + context.counterparty_dust_limit_satoshis;
+ if msg.next_local_commitment_number == next_counterparty_commitment_number {
+ if required_revoke.is_some() {
+ log_debug!(logger, "Reconnected channel {} with only lost outbound RAA", &self.context.channel_id());
+ } else {
+ log_debug!(logger, "Reconnected channel {} with no loss", &self.context.channel_id());
+ }
- let mut addl_htlcs = 0;
- if fee_spike_buffer_htlc.is_some() { addl_htlcs += 1; }
- match htlc.origin {
- HTLCInitiator::LocalOffered => {
- if htlc.amount_msat / 1000 >= real_dust_limit_success_sat {
- addl_htlcs += 1;
- }
- },
- HTLCInitiator::RemoteOffered => {
- if htlc.amount_msat / 1000 >= real_dust_limit_timeout_sat {
- addl_htlcs += 1;
- }
+ Ok(ReestablishResponses {
+ channel_ready, shutdown_msg, announcement_sigs,
+ raa: required_revoke,
+ commitment_update: None,
+ order: self.context.resend_order.clone(),
+ })
+ } else if msg.next_local_commitment_number == next_counterparty_commitment_number - 1 {
+ if required_revoke.is_some() {
+ log_debug!(logger, "Reconnected channel {} with lost outbound RAA and lost remote commitment tx", &self.context.channel_id());
+ } else {
+ log_debug!(logger, "Reconnected channel {} with only lost remote commitment tx", &self.context.channel_id());
}
- }
- // When calculating the set of HTLCs which will be included in their next commitment_signed, all
- // non-dust inbound HTLCs are included (as all states imply it will be included) and only
- // committed outbound HTLCs, see below.
- let mut included_htlcs = 0;
- for ref htlc in context.pending_inbound_htlcs.iter() {
- if htlc.amount_msat / 1000 <= real_dust_limit_timeout_sat {
- continue
+ if self.context.channel_state.is_monitor_update_in_progress() {
+ self.context.monitor_pending_commitment_signed = true;
+ Ok(ReestablishResponses {
+ channel_ready, shutdown_msg, announcement_sigs,
+ commitment_update: None, raa: None,
+ order: self.context.resend_order.clone(),
+ })
+ } else {
+ Ok(ReestablishResponses {
+ channel_ready, shutdown_msg, announcement_sigs,
+ raa: required_revoke,
+ commitment_update: self.get_last_commitment_update_for_send(logger).ok(),
+ order: self.context.resend_order.clone(),
+ })
}
- included_htlcs += 1;
+ } else if msg.next_local_commitment_number < next_counterparty_commitment_number {
+ Err(ChannelError::Close(format!(
+ "Peer attempted to reestablish channel with a very old remote commitment transaction: {} (received) vs {} (expected)",
+ msg.next_local_commitment_number,
+ next_counterparty_commitment_number,
+ )))
+ } else {
+ Err(ChannelError::Close(format!(
+ "Peer attempted to reestablish channel with a future remote commitment transaction: {} (received) vs {} (expected)",
+ msg.next_local_commitment_number,
+ next_counterparty_commitment_number,
+ )))
}
+ }
- for ref htlc in context.pending_outbound_htlcs.iter() {
- if htlc.amount_msat / 1000 <= real_dust_limit_success_sat {
- continue
- }
- // We only include outbound HTLCs if it will not be included in their next commitment_signed,
- // i.e. if they've responded to us with an RAA after announcement.
- match htlc.state {
- OutboundHTLCState::Committed => included_htlcs += 1,
- OutboundHTLCState::RemoteRemoved {..} => included_htlcs += 1,
- OutboundHTLCState::LocalAnnounced { .. } => included_htlcs += 1,
- _ => {},
- }
+ /// Calculates and returns our minimum and maximum closing transaction fee amounts, in whole
+ /// satoshis. The amounts remain consistent unless a peer disconnects/reconnects or we restart,
+ /// at which point they will be recalculated.
+ fn calculate_closing_fee_limits<F: Deref>(&mut self, fee_estimator: &LowerBoundedFeeEstimator<F>)
+ -> (u64, u64)
+ where F::Target: FeeEstimator
+ {
+ if let Some((min, max)) = self.context.closing_fee_limits { return (min, max); }
+
+ // Propose a range from our ChannelCloseMinimum feerate to our NonAnchorChannelFee
+ // feerate plus our force_close_avoidance_max_fee_satoshis.
+ // If we fail to come to consensus, we'll have to force-close.
+ let mut proposed_feerate = fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::ChannelCloseMinimum);
+ // Use NonAnchorChannelFee because this should be an estimate for a channel close
+ // that we don't expect to need fee bumping
+ let normal_feerate = fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::NonAnchorChannelFee);
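+ // Only the channel funder pays the closing fee, so as the non-funder we have no reason
+ // to cap the proposed feerate from above.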
+ let mut proposed_max_feerate = if self.context.is_outbound() { normal_feerate } else { u32::max_value() };
+
+ // The spec requires that (when the channel does not have anchors) we only send absolute
+ // channel fees no greater than the absolute channel fee on the current commitment
+ // transaction. It's unclear *which* commitment transaction this refers to, and there isn't
+ // a very good reason to apply such a limit in any case. We don't bother doing so, risking
+ // some force-closure by old nodes, but we wanted to close the channel anyway.
+
+ if let Some(target_feerate) = self.context.target_closing_feerate_sats_per_kw {
+ let min_feerate = if self.context.is_outbound() { target_feerate } else { cmp::min(self.context.feerate_per_kw, target_feerate) };
+ proposed_feerate = cmp::max(proposed_feerate, min_feerate);
+ proposed_max_feerate = cmp::max(proposed_max_feerate, min_feerate);
}
- let num_htlcs = included_htlcs + addl_htlcs;
- let res = Self::commit_tx_fee_msat(context.feerate_per_kw, num_htlcs, context.opt_anchors());
- #[cfg(any(test, fuzzing))]
- {
- let mut fee = res;
- if fee_spike_buffer_htlc.is_some() {
- fee = Self::commit_tx_fee_msat(context.feerate_per_kw, num_htlcs - 1, context.opt_anchors());
- }
- let total_pending_htlcs = context.pending_inbound_htlcs.len() + context.pending_outbound_htlcs.len();
- let commitment_tx_info = CommitmentTxInfoCached {
- fee,
- total_pending_htlcs,
- next_holder_htlc_id: match htlc.origin {
- HTLCInitiator::LocalOffered => context.next_holder_htlc_id + 1,
- HTLCInitiator::RemoteOffered => context.next_holder_htlc_id,
- },
- next_counterparty_htlc_id: match htlc.origin {
- HTLCInitiator::LocalOffered => context.next_counterparty_htlc_id,
- HTLCInitiator::RemoteOffered => context.next_counterparty_htlc_id + 1,
- },
- feerate: context.feerate_per_kw,
+ // Note that technically we could end up with a lower minimum fee if one side's balance is
+ // below our dust limit, causing the output to disappear. We don't bother handling this
+ // case, however, as this should only happen if a channel is closed before any (material)
+ // payments have been made on it. This may cause slight fee overpayment and/or failure to
+ // come to consensus with our counterparty on appropriate fees, however it should be a
+ // relatively rare case. We can revisit this later, though note that in order to determine
+ // if the funder's output is dust we have to know the absolute fee we're going to use.
+ let tx_weight = self.get_closing_transaction_weight(Some(&self.get_closing_scriptpubkey()), Some(self.context.counterparty_shutdown_scriptpubkey.as_ref().unwrap()));
+ let proposed_total_fee_satoshis = proposed_feerate as u64 * tx_weight / 1000;
+ let proposed_max_total_fee_satoshis = if self.context.is_outbound() {
+ // We always add force_close_avoidance_max_fee_satoshis to our normal
+ // feerate-calculated fee, but allow the max to be overridden if we're using a
+ // target feerate-calculated fee.
+ cmp::max(normal_feerate as u64 * tx_weight / 1000 + self.context.config.options.force_close_avoidance_max_fee_satoshis,
+ proposed_max_feerate as u64 * tx_weight / 1000)
+ } else {
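+ // As the non-funder, simply bound the fee by what the funder can actually pay: the
+ // channel value less our own balance, rounded up to a whole satoshi.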
+ self.context.channel_value_satoshis - (self.context.value_to_self_msat + 999) / 1000
};
- *context.next_remote_commitment_tx_fee_info_cached.lock().unwrap() = Some(commitment_tx_info);
- }
- res
+
+ self.context.closing_fee_limits = Some((proposed_total_fee_satoshis, proposed_max_total_fee_satoshis));
+ self.context.closing_fee_limits.clone().unwrap()
}
- pub fn update_add_htlc<F, L: Deref>(&mut self, msg: &msgs::UpdateAddHTLC, mut pending_forward_status: PendingHTLCStatus, create_pending_htlc_status: F, logger: &L) -> Result<(), ChannelError>
- where F: for<'a> Fn(&'a Self, PendingHTLCStatus, u16) -> PendingHTLCStatus, L::Target: Logger {
- // We can't accept HTLCs sent after we've sent a shutdown.
- let local_sent_shutdown = (self.context.channel_state & (ChannelState::ChannelReady as u32 | ChannelState::LocalShutdownSent as u32)) != (ChannelState::ChannelReady as u32);
- if local_sent_shutdown {
- pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x4000|8);
- }
- // If the remote has sent a shutdown prior to adding this HTLC, then they are in violation of the spec.
- let remote_sent_shutdown = (self.context.channel_state & (ChannelState::ChannelReady as u32 | ChannelState::RemoteShutdownSent as u32)) != (ChannelState::ChannelReady as u32);
- if remote_sent_shutdown {
- return Err(ChannelError::Close("Got add HTLC message when channel was not in an operational state".to_owned()));
- }
- if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
- return Err(ChannelError::Close("Peer sent update_add_htlc when we needed a channel_reestablish".to_owned()));
- }
- if msg.amount_msat > self.context.channel_value_satoshis * 1000 {
- return Err(ChannelError::Close("Remote side tried to send more than the total value of the channel".to_owned()));
- }
- if msg.amount_msat == 0 {
- return Err(ChannelError::Close("Remote side tried to send a 0-msat HTLC".to_owned()));
- }
- if msg.amount_msat < self.context.holder_htlc_minimum_msat {
- return Err(ChannelError::Close(format!("Remote side tried to send less than our minimum HTLC value. Lower limit: ({}). Actual: ({})", self.context.holder_htlc_minimum_msat, msg.amount_msat)));
- }
+ /// Returns true if we're ready to commence the `closing_signed` negotiation phase. This is
+ /// true after both sides have exchanged a `shutdown` message and all HTLCs have been
+ /// drained. At this point if we're the funder we should send the initial `closing_signed`,
+ /// and in any case shutdown should complete within a reasonable timeframe.
+ fn closing_negotiation_ready(&self) -> bool {
+ self.context.closing_negotiation_ready()
+ }
- let inbound_stats = self.get_inbound_pending_htlc_stats(None);
- let outbound_stats = self.get_outbound_pending_htlc_stats(None);
- if inbound_stats.pending_htlcs + 1 > self.context.holder_max_accepted_htlcs as u32 {
- return Err(ChannelError::Close(format!("Remote tried to push more than our max accepted HTLCs ({})", self.context.holder_max_accepted_htlcs)));
- }
- if inbound_stats.pending_htlcs_value_msat + msg.amount_msat > self.context.holder_max_htlc_value_in_flight_msat {
- return Err(ChannelError::Close(format!("Remote HTLC add would put them over our max HTLC value ({})", self.context.holder_max_htlc_value_in_flight_msat)));
- }
- // Check holder_selected_channel_reserve_satoshis (we're getting paid, so they have to at least meet
- // the reserve_satoshis we told them to always have as direct payment so that they lose
- // something if we punish them for broadcasting an old state).
- // Note that we don't really care about having a small/no to_remote output in our local
- // commitment transactions, as the purpose of the channel reserve is to ensure we can
- // punish *them* if they misbehave, so we discount any outbound HTLCs which will not be
- // present in the next commitment transaction we send them (at least for fulfilled ones,
- // failed ones won't modify value_to_self).
- // Note that we will send HTLCs which another instance of rust-lightning would think
- // violate the reserve value if we do not do this (as we forget inbound HTLCs from the
- // Channel state once they will not be present in the next received commitment
- // transaction).
- let mut removed_outbound_total_msat = 0;
- for ref htlc in self.context.pending_outbound_htlcs.iter() {
- if let OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(_)) = htlc.state {
- removed_outbound_total_msat += htlc.amount_msat;
- } else if let OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(_)) = htlc.state {
- removed_outbound_total_msat += htlc.amount_msat;
+ /// Checks if the `closing_signed` negotiation is making appropriate progress, possibly
+ /// returning an `Err` if no progress is being made and the channel should be force-closed
+ /// instead. Should be called on a one-minute timer.
+ pub fn timer_check_closing_negotiation_progress(&mut self) -> Result<(), ChannelError> {
+ if self.closing_negotiation_ready() {
+ if self.context.closing_signed_in_flight {
+ return Err(ChannelError::Close("closing_signed negotiation failed to finish within two timer ticks".to_owned()));
+ } else {
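+ // First tick with negotiation ready: arm the flag. If it's still set when the next
+ // tick fires, the branch above errors, which the caller treats as a force-close.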
+ self.context.closing_signed_in_flight = true;
}
}
+ Ok(())
+ }
- let (htlc_timeout_dust_limit, htlc_success_dust_limit) = if self.context.opt_anchors() {
- (0, 0)
- } else {
- let dust_buffer_feerate = self.context.get_dust_buffer_feerate(None) as u64;
- (dust_buffer_feerate * htlc_timeout_tx_weight(false) / 1000,
- dust_buffer_feerate * htlc_success_tx_weight(false) / 1000)
- };
- let exposure_dust_limit_timeout_sats = htlc_timeout_dust_limit + self.context.counterparty_dust_limit_satoshis;
- if msg.amount_msat / 1000 < exposure_dust_limit_timeout_sats {
- let on_counterparty_tx_dust_htlc_exposure_msat = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat + msg.amount_msat;
- if on_counterparty_tx_dust_htlc_exposure_msat > self.context.get_max_dust_htlc_exposure_msat() {
- log_info!(logger, "Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on counterparty commitment tx",
- on_counterparty_tx_dust_htlc_exposure_msat, self.context.get_max_dust_htlc_exposure_msat());
- pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x1000|7);
- }
+ pub fn maybe_propose_closing_signed<F: Deref, L: Deref>(
+ &mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L)
+ -> Result<(Option<msgs::ClosingSigned>, Option<Transaction>, Option<ShutdownResult>), ChannelError>
+ where F::Target: FeeEstimator, L::Target: Logger
+ {
+ // If we're waiting on a monitor persistence, that implies we're also waiting to send some
+ // message to our counterparty (probably a `revoke_and_ack`). In such a case, we shouldn't
+ // initiate `closing_signed` negotiation until we're clear of all pending messages. Note
+ // that closing_negotiation_ready checks this case (as well as a few others).
+ if self.context.last_sent_closing_fee.is_some() || !self.closing_negotiation_ready() {
+ return Ok((None, None, None));
}
- let exposure_dust_limit_success_sats = htlc_success_dust_limit + self.context.holder_dust_limit_satoshis;
- if msg.amount_msat / 1000 < exposure_dust_limit_success_sats {
- let on_holder_tx_dust_htlc_exposure_msat = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat + msg.amount_msat;
- if on_holder_tx_dust_htlc_exposure_msat > self.context.get_max_dust_htlc_exposure_msat() {
- log_info!(logger, "Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on holder commitment tx",
- on_holder_tx_dust_htlc_exposure_msat, self.context.get_max_dust_htlc_exposure_msat());
- pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x1000|7);
+ if !self.context.is_outbound() {
+ if let Some(msg) = &self.context.pending_counterparty_closing_signed.take() {
+ return self.closing_signed(fee_estimator, &msg);
}
+ return Ok((None, None, None));
}
- let pending_value_to_self_msat =
- self.context.value_to_self_msat + inbound_stats.pending_htlcs_value_msat - removed_outbound_total_msat;
- let pending_remote_value_msat =
- self.context.channel_value_satoshis * 1000 - pending_value_to_self_msat;
- if pending_remote_value_msat < msg.amount_msat {
- return Err(ChannelError::Close("Remote HTLC add would overdraw remaining funds".to_owned()));
+ // If we're waiting on a counterparty `commitment_signed` to clear some updates from our
+ // local commitment transaction, we can't yet initiate `closing_signed` negotiation.
+ if self.context.expecting_peer_commitment_signed {
+ return Ok((None, None, None));
}
- // Check that the remote can afford to pay for this HTLC on-chain at the current
- // feerate_per_kw, while maintaining their channel reserve (as required by the spec).
- let remote_commit_tx_fee_msat = if self.context.is_outbound() { 0 } else {
- let htlc_candidate = HTLCCandidate::new(msg.amount_msat, HTLCInitiator::RemoteOffered);
- self.next_remote_commit_tx_fee_msat(htlc_candidate, None) // Don't include the extra fee spike buffer HTLC in calculations
- };
- if pending_remote_value_msat - msg.amount_msat < remote_commit_tx_fee_msat {
- return Err(ChannelError::Close("Remote HTLC add would not leave enough to pay for fees".to_owned()));
- };
+ let (our_min_fee, our_max_fee) = self.calculate_closing_fee_limits(fee_estimator);
+
+ assert!(self.context.shutdown_scriptpubkey.is_some());
+ let (closing_tx, total_fee_satoshis) = self.build_closing_transaction(our_min_fee, false);
+ log_trace!(logger, "Proposing initial closing_signed for our counterparty with a fee range of {}-{} sat (with initial proposal {} sats)",
+ our_min_fee, our_max_fee, total_fee_satoshis);
- if pending_remote_value_msat - msg.amount_msat - remote_commit_tx_fee_msat < self.context.holder_selected_channel_reserve_satoshis * 1000 {
- return Err(ChannelError::Close("Remote HTLC add would put them under remote reserve value".to_owned()));
+ match &self.context.holder_signer {
+ ChannelSignerType::Ecdsa(ecdsa) => {
+ let sig = ecdsa
+ .sign_closing_transaction(&closing_tx, &self.context.secp_ctx)
+ .map_err(|()| ChannelError::Close("Failed to get signature for closing transaction.".to_owned()))?;
+
+ self.context.last_sent_closing_fee = Some((total_fee_satoshis, sig.clone()));
+ Ok((Some(msgs::ClosingSigned {
+ channel_id: self.context.channel_id,
+ fee_satoshis: total_fee_satoshis,
+ signature: sig,
+ fee_range: Some(msgs::ClosingSignedFeeRange {
+ min_fee_satoshis: our_min_fee,
+ max_fee_satoshis: our_max_fee,
+ }),
+ }), None, None))
+ },
+ // TODO (taproot|arik)
+ #[cfg(taproot)]
+ _ => todo!()
}
+ }
- if !self.context.is_outbound() {
- // `2 *` and `Some(())` are for the fee spike buffer we keep for the remote. This deviates from
- // the spec because in the spec, the fee spike buffer requirement doesn't exist on the
- // receiver's side, only on the sender's.
- // Note that when we eventually remove support for fee updates and switch to anchor output
- // fees, we will drop the `2 *`, since we will no longer be as sensitive to fee spikes. But, keep
- // the extra htlc when calculating the next remote commitment transaction fee as we should
- // still be able to afford adding this HTLC plus one more future HTLC, regardless of being
- // sensitive to fee spikes.
- let htlc_candidate = HTLCCandidate::new(msg.amount_msat, HTLCInitiator::RemoteOffered);
- let remote_fee_cost_incl_stuck_buffer_msat = 2 * self.next_remote_commit_tx_fee_msat(htlc_candidate, Some(()));
- if pending_remote_value_msat - msg.amount_msat - self.context.holder_selected_channel_reserve_satoshis * 1000 < remote_fee_cost_incl_stuck_buffer_msat {
- // Note that if the pending_forward_status is not updated here, then it's because we're already failing
- // the HTLC, i.e. its status is already set to failing.
- log_info!(logger, "Attempting to fail HTLC due to fee spike buffer violation in channel {}. Rebalancing is required.", log_bytes!(self.context.channel_id()));
- pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x1000|7);
- }
+ // Marks a channel as waiting for a response from the counterparty. If a response is not
+ // received within [`DISCONNECT_PEER_AWAITING_RESPONSE_TICKS`] timer ticks after we send our
+ // own message, we'll attempt a reconnection.
+ fn mark_awaiting_response(&mut self) {
+ self.context.sent_message_awaiting_response = Some(0);
+ }
+
+ /// Determines whether we should disconnect the counterparty due to not receiving a response
+ /// within our expected timeframe.
+ ///
+ /// This should be called on every [`super::channelmanager::ChannelManager::timer_tick_occurred`].
+ pub fn should_disconnect_peer_awaiting_response(&mut self) -> bool {
+ let ticks_elapsed = if let Some(ticks_elapsed) = self.context.sent_message_awaiting_response.as_mut() {
+ ticks_elapsed
} else {
- // Check that they won't violate our local required channel reserve by adding this HTLC.
- let htlc_candidate = HTLCCandidate::new(msg.amount_msat, HTLCInitiator::RemoteOffered);
- let local_commit_tx_fee_msat = self.next_local_commit_tx_fee_msat(htlc_candidate, None);
- if self.context.value_to_self_msat < self.context.counterparty_selected_channel_reserve_satoshis.unwrap() * 1000 + local_commit_tx_fee_msat {
- return Err(ChannelError::Close("Cannot accept HTLC that would put our balance under counterparty-announced channel reserve value".to_owned()));
- }
+ // Don't disconnect when we're not waiting on a response.
+ return false;
+ };
+ *ticks_elapsed += 1;
+ *ticks_elapsed >= DISCONNECT_PEER_AWAITING_RESPONSE_TICKS
+ }
+
+ pub fn shutdown(
+ &mut self, signer_provider: &SP, their_features: &InitFeatures, msg: &msgs::Shutdown
+ ) -> Result<(Option<msgs::Shutdown>, Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>), ChannelError>
+ {
+ if self.context.channel_state.is_peer_disconnected() {
+ return Err(ChannelError::Close("Peer sent shutdown when we needed a channel_reestablish".to_owned()));
}
- if self.context.next_counterparty_htlc_id != msg.htlc_id {
- return Err(ChannelError::Close(format!("Remote skipped HTLC ID (skipped ID: {})", self.context.next_counterparty_htlc_id)));
+ if self.context.channel_state.is_pre_funded_state() {
+ // The spec says we should fail the connection rather than the channel, but that's
+ // nonsense: there are plenty of reasons you may want to fail a channel pre-funding, and
+ // the spec says you can do so via an error message without a connection failure anyway...
+ return Err(ChannelError::Close("Peer sent shutdown pre-funding generation".to_owned()));
}
- if msg.cltv_expiry >= 500000000 {
- return Err(ChannelError::Close("Remote provided CLTV expiry in seconds instead of block height".to_owned()));
+ for htlc in self.context.pending_inbound_htlcs.iter() {
+ if let InboundHTLCState::RemoteAnnounced(_) = htlc.state {
+ return Err(ChannelError::Close("Got shutdown with remote pending HTLCs".to_owned()));
+ }
}
+ assert!(!matches!(self.context.channel_state, ChannelState::ShutdownComplete));
- if self.context.channel_state & ChannelState::LocalShutdownSent as u32 != 0 {
- if let PendingHTLCStatus::Forward(_) = pending_forward_status {
- panic!("ChannelManager shouldn't be trying to add a forwardable HTLC after we've started closing");
+ if !script::is_bolt2_compliant(&msg.scriptpubkey, their_features) {
+ return Err(ChannelError::Warn(format!("Got a nonstandard scriptpubkey ({}) from remote peer", msg.scriptpubkey.to_hex_string())));
+ }
+
+ if self.context.counterparty_shutdown_scriptpubkey.is_some() {
+ if Some(&msg.scriptpubkey) != self.context.counterparty_shutdown_scriptpubkey.as_ref() {
+ return Err(ChannelError::Warn(format!("Got shutdown request with a scriptpubkey ({}) which did not match their previous scriptpubkey.", msg.scriptpubkey.to_hex_string())));
}
+ } else {
+ self.context.counterparty_shutdown_scriptpubkey = Some(msg.scriptpubkey.clone());
}
- // Now update local state:
- self.context.next_counterparty_htlc_id += 1;
- self.context.pending_inbound_htlcs.push(InboundHTLCOutput {
- htlc_id: msg.htlc_id,
- amount_msat: msg.amount_msat,
- payment_hash: msg.payment_hash,
- cltv_expiry: msg.cltv_expiry,
- state: InboundHTLCState::RemoteAnnounced(pending_forward_status),
- });
- Ok(())
- }
+ // If we have any LocalAnnounced updates we'll probably just get back an update_fail_htlc
+ // immediately after the commitment dance, but we can send a Shutdown because we won't send
+ // any further commitment updates after we set LocalShutdownSent.
+ let send_shutdown = !self.context.channel_state.is_local_shutdown_sent();
- /// Marks an outbound HTLC which we have received update_fail/fulfill/malformed
- #[inline]
- fn mark_outbound_htlc_removed(&mut self, htlc_id: u64, check_preimage: Option<PaymentPreimage>, fail_reason: Option<HTLCFailReason>) -> Result<&OutboundHTLCOutput, ChannelError> {
- assert!(!(check_preimage.is_some() && fail_reason.is_some()), "cannot fail while we have a preimage");
- for htlc in self.context.pending_outbound_htlcs.iter_mut() {
- if htlc.htlc_id == htlc_id {
- let outcome = match check_preimage {
- None => fail_reason.into(),
- Some(payment_preimage) => {
- let payment_hash = PaymentHash(Sha256::hash(&payment_preimage.0[..]).into_inner());
- if payment_hash != htlc.payment_hash {
- return Err(ChannelError::Close(format!("Remote tried to fulfill HTLC ({}) with an incorrect preimage", htlc_id)));
- }
- OutboundHTLCOutcome::Success(Some(payment_preimage))
- }
+ let update_shutdown_script = match self.context.shutdown_scriptpubkey {
+ Some(_) => false,
+ None => {
+ assert!(send_shutdown);
+ let shutdown_scriptpubkey = match signer_provider.get_shutdown_scriptpubkey() {
+ Ok(scriptpubkey) => scriptpubkey,
+ Err(_) => return Err(ChannelError::Close("Failed to get shutdown scriptpubkey".to_owned())),
};
- match htlc.state {
- OutboundHTLCState::LocalAnnounced(_) =>
- return Err(ChannelError::Close(format!("Remote tried to fulfill/fail HTLC ({}) before it had been committed", htlc_id))),
- OutboundHTLCState::Committed => {
- htlc.state = OutboundHTLCState::RemoteRemoved(outcome);
- },
- OutboundHTLCState::AwaitingRemoteRevokeToRemove(_) | OutboundHTLCState::AwaitingRemovedRemoteRevoke(_) | OutboundHTLCState::RemoteRemoved(_) =>
- return Err(ChannelError::Close(format!("Remote tried to fulfill/fail HTLC ({}) that they'd already fulfilled/failed", htlc_id))),
+ if !shutdown_scriptpubkey.is_compatible(their_features) {
+ return Err(ChannelError::Close(format!("Provided a scriptpubkey format not accepted by peer: {}", shutdown_scriptpubkey)));
}
- return Ok(htlc);
+ self.context.shutdown_scriptpubkey = Some(shutdown_scriptpubkey);
+ true
+ },
+ };
+
+ // From here on out, we may not fail!
+
+ self.context.channel_state.set_remote_shutdown_sent();
+ self.context.update_time_counter += 1;
+
+ let monitor_update = if update_shutdown_script {
+ self.context.latest_monitor_update_id += 1;
+ let monitor_update = ChannelMonitorUpdate {
+ update_id: self.context.latest_monitor_update_id,
+ updates: vec![ChannelMonitorUpdateStep::ShutdownScript {
+ scriptpubkey: self.get_closing_scriptpubkey(),
+ }],
+ };
+ self.monitor_updating_paused(false, false, false, Vec::new(), Vec::new(), Vec::new());
+ self.push_ret_blockable_mon_update(monitor_update)
+ } else { None };
+ let shutdown = if send_shutdown {
+ Some(msgs::Shutdown {
+ channel_id: self.context.channel_id,
+ scriptpubkey: self.get_closing_scriptpubkey(),
+ })
+ } else { None };
+
+ // We can't send our shutdown until we've committed all of our pending HTLCs, but the
+ // remote side is unlikely to accept any new HTLCs, so we go ahead and "free" any holding
+ // cell HTLCs and return them to fail the payment.
+ self.context.holding_cell_update_fee = None;
+ let mut dropped_outbound_htlcs = Vec::with_capacity(self.context.holding_cell_htlc_updates.len());
+ self.context.holding_cell_htlc_updates.retain(|htlc_update| {
+ match htlc_update {
+ &HTLCUpdateAwaitingACK::AddHTLC { ref payment_hash, ref source, .. } => {
+ dropped_outbound_htlcs.push((source.clone(), payment_hash.clone()));
+ false
+ },
+ _ => true
}
- }
- Err(ChannelError::Close("Remote tried to fulfill/fail an HTLC we couldn't find".to_owned()))
- }
+ });
- pub fn update_fulfill_htlc(&mut self, msg: &msgs::UpdateFulfillHTLC) -> Result<(HTLCSource, u64), ChannelError> {
- if (self.context.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) {
- return Err(ChannelError::Close("Got fulfill HTLC message when channel was not in an operational state".to_owned()));
- }
- if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
- return Err(ChannelError::Close("Peer sent update_fulfill_htlc when we needed a channel_reestablish".to_owned()));
- }
+ self.context.channel_state.set_local_shutdown_sent();
+ self.context.update_time_counter += 1;
- self.mark_outbound_htlc_removed(msg.htlc_id, Some(msg.payment_preimage), None).map(|htlc| (htlc.source.clone(), htlc.amount_msat))
+ Ok((shutdown, monitor_update, dropped_outbound_htlcs))
}
- pub fn update_fail_htlc(&mut self, msg: &msgs::UpdateFailHTLC, fail_reason: HTLCFailReason) -> Result<(), ChannelError> {
- if (self.context.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) {
- return Err(ChannelError::Close("Got fail HTLC message when channel was not in an operational state".to_owned()));
- }
- if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
- return Err(ChannelError::Close("Peer sent update_fail_htlc when we needed a channel_reestablish".to_owned()));
- }
+ fn build_signed_closing_transaction(&self, closing_tx: &ClosingTransaction, counterparty_sig: &Signature, sig: &Signature) -> Transaction {
+ let mut tx = closing_tx.trust().built_transaction().clone();
- self.mark_outbound_htlc_removed(msg.htlc_id, None, Some(fail_reason))?;
- Ok(())
- }
+ tx.input[0].witness.push(Vec::new()); // First is the multisig dummy
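+ // (OP_CHECKMULTISIG pops one element more than it needs off the stack due to the
+ // well-known off-by-one bug, hence the empty element pushed first.)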
- pub fn update_fail_malformed_htlc(&mut self, msg: &msgs::UpdateFailMalformedHTLC, fail_reason: HTLCFailReason) -> Result<(), ChannelError> {
- if (self.context.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) {
- return Err(ChannelError::Close("Got fail malformed HTLC message when channel was not in an operational state".to_owned()));
- }
- if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
- return Err(ChannelError::Close("Peer sent update_fail_malformed_htlc when we needed a channel_reestablish".to_owned()));
+ let funding_key = self.context.get_holder_pubkeys().funding_pubkey.serialize();
+ let counterparty_funding_key = self.context.counterparty_funding_pubkey().serialize();
+ let mut holder_sig = sig.serialize_der().to_vec();
+ holder_sig.push(EcdsaSighashType::All as u8);
+ let mut cp_sig = counterparty_sig.serialize_der().to_vec();
+ cp_sig.push(EcdsaSighashType::All as u8);
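+ // BOLT 3 lists the pubkeys in the funding output's 2-of-2 CHECKMULTISIG in
+ // lexicographic order, so the signatures must be pushed in the matching order.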
+ if funding_key[..] < counterparty_funding_key[..] {
+ tx.input[0].witness.push(holder_sig);
+ tx.input[0].witness.push(cp_sig);
+ } else {
+ tx.input[0].witness.push(cp_sig);
+ tx.input[0].witness.push(holder_sig);
}
- self.mark_outbound_htlc_removed(msg.htlc_id, None, Some(fail_reason))?;
- Ok(())
+ tx.input[0].witness.push(self.context.get_funding_redeemscript().into_bytes());
+ tx
}
- pub fn commitment_signed<L: Deref>(&mut self, msg: &msgs::CommitmentSigned, logger: &L) -> Result<Option<&ChannelMonitorUpdate>, ChannelError>
- where L::Target: Logger
+ pub fn closing_signed<F: Deref>(
+ &mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, msg: &msgs::ClosingSigned)
+ -> Result<(Option<msgs::ClosingSigned>, Option<Transaction>, Option<ShutdownResult>), ChannelError>
+ where F::Target: FeeEstimator
{
- if (self.context.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) {
- return Err(ChannelError::Close("Got commitment signed message when channel was not in an operational state".to_owned()));
+ if !self.context.channel_state.is_both_sides_shutdown() {
+ return Err(ChannelError::Close("Remote end sent us a closing_signed before both sides provided a shutdown".to_owned()));
}
- if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
- return Err(ChannelError::Close("Peer sent commitment_signed when we needed a channel_reestablish".to_owned()));
+ if self.context.channel_state.is_peer_disconnected() {
+ return Err(ChannelError::Close("Peer sent closing_signed when we needed a channel_reestablish".to_owned()));
}
- if self.context.channel_state & BOTH_SIDES_SHUTDOWN_MASK == BOTH_SIDES_SHUTDOWN_MASK && self.context.last_sent_closing_fee.is_some() {
- return Err(ChannelError::Close("Peer sent commitment_signed after we'd started exchanging closing_signeds".to_owned()));
+ if !self.context.pending_inbound_htlcs.is_empty() || !self.context.pending_outbound_htlcs.is_empty() {
+ return Err(ChannelError::Close("Remote end sent us a closing_signed while there were still pending HTLCs".to_owned()));
+ }
+ if msg.fee_satoshis > TOTAL_BITCOIN_SUPPLY_SATOSHIS { // this is required to stop potential overflow in build_closing_transaction
+ return Err(ChannelError::Close("Remote tried to send us a closing tx with > 21 million BTC fee".to_owned()));
}
- let funding_script = self.context.get_funding_redeemscript();
-
- let keys = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number);
+ if self.context.is_outbound() && self.context.last_sent_closing_fee.is_none() {
+ return Err(ChannelError::Close("Remote tried to send a closing_signed when we were supposed to propose the first one".to_owned()));
+ }
- let commitment_stats = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &keys, true, false, logger);
- let commitment_txid = {
- let trusted_tx = commitment_stats.tx.trust();
- let bitcoin_tx = trusted_tx.built_transaction();
- let sighash = bitcoin_tx.get_sighash_all(&funding_script, self.context.channel_value_satoshis);
+ if self.context.channel_state.is_monitor_update_in_progress() {
+ self.context.pending_counterparty_closing_signed = Some(msg.clone());
+ return Ok((None, None, None));
+ }
- log_trace!(logger, "Checking commitment tx signature {} by key {} against tx {} (sighash {}) with redeemscript {} in channel {}",
- log_bytes!(msg.signature.serialize_compact()[..]),
- log_bytes!(self.context.counterparty_funding_pubkey().serialize()), encode::serialize_hex(&bitcoin_tx.transaction),
- log_bytes!(sighash[..]), encode::serialize_hex(&funding_script), log_bytes!(self.context.channel_id()));
- if let Err(_) = self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, &self.context.counterparty_funding_pubkey()) {
- return Err(ChannelError::Close("Invalid commitment tx signature from peer".to_owned()));
- }
- bitcoin_tx.txid
- };
- let mut htlcs_cloned: Vec<_> = commitment_stats.htlcs_included.iter().map(|htlc| (htlc.0.clone(), htlc.1.map(|h| h.clone()))).collect();
+ let funding_redeemscript = self.context.get_funding_redeemscript();
+ let (mut closing_tx, used_total_fee) = self.build_closing_transaction(msg.fee_satoshis, false);
+ if used_total_fee != msg.fee_satoshis {
+ return Err(ChannelError::Close(format!("Remote sent us a closing_signed with a fee other than the value they can claim. Fee in message: {}. Actual closing tx fee: {}", msg.fee_satoshis, used_total_fee)));
+ }
+ let sighash = closing_tx.trust().get_sighash_all(&funding_redeemscript, self.context.channel_value_satoshis);
- // If our counterparty updated the channel fee in this commitment transaction, check that
- // they can actually afford the new fee now.
- let update_fee = if let Some((_, update_state)) = self.context.pending_update_fee {
- update_state == FeeUpdateState::RemoteAnnounced
- } else { false };
- if update_fee {
- debug_assert!(!self.context.is_outbound());
- let counterparty_reserve_we_require_msat = self.context.holder_selected_channel_reserve_satoshis * 1000;
- if commitment_stats.remote_balance_msat < commitment_stats.total_fee_sat * 1000 + counterparty_reserve_we_require_msat {
- return Err(ChannelError::Close("Funding remote cannot afford proposed new fee".to_owned()));
+ match self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, &self.context.get_counterparty_pubkeys().funding_pubkey) {
+ Ok(_) => {},
+ Err(_e) => {
+ // The remote end may have decided to revoke their output due to inconsistent dust
+ // limits, so check for that case by re-checking the signature here.
+ closing_tx = self.build_closing_transaction(msg.fee_satoshis, true).0;
+ let sighash = closing_tx.trust().get_sighash_all(&funding_redeemscript, self.context.channel_value_satoshis);
+ secp_check!(self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, self.context.counterparty_funding_pubkey()), "Invalid closing tx signature from peer".to_owned());
+ },
+ };
+
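+ // A non-witness output below MAX_STD_OUTPUT_DUST_LIMIT_SATOSHIS (546 sats, the largest
+ // dust threshold among standard non-witness script formats) would make the closing
+ // transaction non-standard and thus unrelayable by default node policy.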
+ for outp in closing_tx.trust().built_transaction().output.iter() {
+ if !outp.script_pubkey.is_witness_program() && outp.value < MAX_STD_OUTPUT_DUST_LIMIT_SATOSHIS {
+ return Err(ChannelError::Close("Remote sent us a closing_signed with a dust output. Always use segwit closing scripts!".to_owned()));
}
}
- #[cfg(any(test, fuzzing))]
- {
- if self.context.is_outbound() {
- let projected_commit_tx_info = self.context.next_local_commitment_tx_fee_info_cached.lock().unwrap().take();
- *self.context.next_remote_commitment_tx_fee_info_cached.lock().unwrap() = None;
- if let Some(info) = projected_commit_tx_info {
- let total_pending_htlcs = self.context.pending_inbound_htlcs.len() + self.context.pending_outbound_htlcs.len()
- + self.context.holding_cell_htlc_updates.len();
- if info.total_pending_htlcs == total_pending_htlcs
- && info.next_holder_htlc_id == self.context.next_holder_htlc_id
- && info.next_counterparty_htlc_id == self.context.next_counterparty_htlc_id
- && info.feerate == self.context.feerate_per_kw {
- assert_eq!(commitment_stats.total_fee_sat, info.fee / 1000);
- }
- }
+
+ assert!(self.context.shutdown_scriptpubkey.is_some());
+ if let Some((last_fee, sig)) = self.context.last_sent_closing_fee {
+ if last_fee == msg.fee_satoshis {
+ let shutdown_result = ShutdownResult {
+ monitor_update: None,
+ dropped_outbound_htlcs: Vec::new(),
+ unbroadcasted_batch_funding_txid: self.context.unbroadcasted_batch_funding_txid(),
+ channel_id: self.context.channel_id,
+ counterparty_node_id: self.context.counterparty_node_id,
+ };
+ let tx = self.build_signed_closing_transaction(&mut closing_tx, &msg.signature, &sig);
+ self.context.channel_state = ChannelState::ShutdownComplete;
+ self.context.update_time_counter += 1;
+ return Ok((None, Some(tx), Some(shutdown_result)));
}
}
- if msg.htlc_signatures.len() != commitment_stats.num_nondust_htlcs {
- return Err(ChannelError::Close(format!("Got wrong number of HTLC signatures ({}) from remote. It must be {}", msg.htlc_signatures.len(), commitment_stats.num_nondust_htlcs)));
- }
+ let (our_min_fee, our_max_fee) = self.calculate_closing_fee_limits(fee_estimator);
- // Up to LDK 0.0.115, HTLC information was required to be duplicated in the
- // `htlcs_and_sigs` vec and in the `holder_commitment_tx` itself, both of which were passed
- // in the `ChannelMonitorUpdate`. In 0.0.115, support for having a separate set of
- // outbound-non-dust-HTLCSources in the `ChannelMonitorUpdate` was added, however for
- // backwards compatibility, we never use it in production. To provide test coverage, here,
- // we randomly decide (in test/fuzzing builds) to use the new vec sometimes.
- #[allow(unused_assignments, unused_mut)]
- let mut separate_nondust_htlc_sources = false;
- #[cfg(all(feature = "std", any(test, fuzzing)))] {
- use core::hash::{BuildHasher, Hasher};
- // Get a random value using the only std API to do so - the DefaultHasher
- let rand_val = std::collections::hash_map::RandomState::new().build_hasher().finish();
- separate_nondust_htlc_sources = rand_val % 2 == 0;
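+ // Note: `propose_fee!` expands to a `return` from `closing_signed` itself, so each
+ // invocation below both sends our (possibly final) proposal and exits this handler.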
+ macro_rules! propose_fee {
+ ($new_fee: expr) => {
+ let (closing_tx, used_fee) = if $new_fee == msg.fee_satoshis {
+ (closing_tx, $new_fee)
+ } else {
+ self.build_closing_transaction($new_fee, false)
+ };
+
+ return match &self.context.holder_signer {
+ ChannelSignerType::Ecdsa(ecdsa) => {
+ let sig = ecdsa
+ .sign_closing_transaction(&closing_tx, &self.context.secp_ctx)
+ .map_err(|_| ChannelError::Close("External signer refused to sign closing transaction".to_owned()))?;
+ let (signed_tx, shutdown_result) = if $new_fee == msg.fee_satoshis {
+ let shutdown_result = ShutdownResult {
+ monitor_update: None,
+ dropped_outbound_htlcs: Vec::new(),
+ unbroadcasted_batch_funding_txid: self.context.unbroadcasted_batch_funding_txid(),
+ channel_id: self.context.channel_id,
+ counterparty_node_id: self.context.counterparty_node_id,
+ };
+ self.context.channel_state = ChannelState::ShutdownComplete;
+ self.context.update_time_counter += 1;
+ let tx = self.build_signed_closing_transaction(&closing_tx, &msg.signature, &sig);
+ (Some(tx), Some(shutdown_result))
+ } else {
+ (None, None)
+ };
+
+ self.context.last_sent_closing_fee = Some((used_fee, sig.clone()));
+ Ok((Some(msgs::ClosingSigned {
+ channel_id: self.context.channel_id,
+ fee_satoshis: used_fee,
+ signature: sig,
+ fee_range: Some(msgs::ClosingSignedFeeRange {
+ min_fee_satoshis: our_min_fee,
+ max_fee_satoshis: our_max_fee,
+ }),
+ }), signed_tx, shutdown_result))
+ },
+ // TODO (taproot|arik)
+ #[cfg(taproot)]
+ _ => todo!()
+ }
+ }
}
- let mut nondust_htlc_sources = Vec::with_capacity(htlcs_cloned.len());
- let mut htlcs_and_sigs = Vec::with_capacity(htlcs_cloned.len());
- for (idx, (htlc, mut source_opt)) in htlcs_cloned.drain(..).enumerate() {
- if let Some(_) = htlc.transaction_output_index {
- let htlc_tx = chan_utils::build_htlc_transaction(&commitment_txid, commitment_stats.feerate_per_kw,
- self.context.get_counterparty_selected_contest_delay().unwrap(), &htlc, self.context.opt_anchors(),
- false, &keys.broadcaster_delayed_payment_key, &keys.revocation_key);
+ if let Some(msgs::ClosingSignedFeeRange { min_fee_satoshis, max_fee_satoshis }) = msg.fee_range {
+ if msg.fee_satoshis < min_fee_satoshis || msg.fee_satoshis > max_fee_satoshis {
+ return Err(ChannelError::Close(format!("Peer sent a bogus closing_signed - suggested fee of {} sat was not in their desired range of {} sat - {} sat", msg.fee_satoshis, min_fee_satoshis, max_fee_satoshis)));
+ }
+ if max_fee_satoshis < our_min_fee {
+ return Err(ChannelError::Warn(format!("Unable to come to consensus about closing feerate, remote's max fee ({} sat) was smaller than our min fee ({} sat)", max_fee_satoshis, our_min_fee)));
+ }
+ if min_fee_satoshis > our_max_fee {
+ return Err(ChannelError::Warn(format!("Unable to come to consensus about closing feerate, remote's min fee ({} sat) was greater than our max fee ({} sat)", min_fee_satoshis, our_max_fee)));
+ }
- let htlc_redeemscript = chan_utils::get_htlc_redeemscript(&htlc, self.context.opt_anchors(), &keys);
- let htlc_sighashtype = if self.context.opt_anchors() { EcdsaSighashType::SinglePlusAnyoneCanPay } else { EcdsaSighashType::All };
- let htlc_sighash = hash_to_message!(&sighash::SighashCache::new(&htlc_tx).segwit_signature_hash(0, &htlc_redeemscript, htlc.amount_msat / 1000, htlc_sighashtype).unwrap()[..]);
- log_trace!(logger, "Checking HTLC tx signature {} by key {} against tx {} (sighash {}) with redeemscript {} in channel {}.",
- log_bytes!(msg.htlc_signatures[idx].serialize_compact()[..]), log_bytes!(keys.countersignatory_htlc_key.serialize()),
- encode::serialize_hex(&htlc_tx), log_bytes!(htlc_sighash[..]), encode::serialize_hex(&htlc_redeemscript), log_bytes!(self.context.channel_id()));
- if let Err(_) = self.context.secp_ctx.verify_ecdsa(&htlc_sighash, &msg.htlc_signatures[idx], &keys.countersignatory_htlc_key) {
- return Err(ChannelError::Close("Invalid HTLC tx signature from peer".to_owned()));
+ if !self.context.is_outbound() {
+ // They have to pay, so pick the highest fee in the overlapping range.
+ // We should never set an upper bound aside from their full balance.
+ debug_assert_eq!(our_max_fee, self.context.channel_value_satoshis - (self.context.value_to_self_msat + 999) / 1000);
+ propose_fee!(cmp::min(max_fee_satoshis, our_max_fee));
+ } else {
+ if msg.fee_satoshis < our_min_fee || msg.fee_satoshis > our_max_fee {
+ return Err(ChannelError::Close(format!("Peer sent a bogus closing_signed - suggested fee of {} sat was not in our desired range of {} sat - {} sat after we informed them of our range.",
+ msg.fee_satoshis, our_min_fee, our_max_fee)));
}
- if !separate_nondust_htlc_sources {
- htlcs_and_sigs.push((htlc, Some(msg.htlc_signatures[idx]), source_opt.take()));
+ // The proposed fee is in our acceptable range, accept it and broadcast!
+ propose_fee!(msg.fee_satoshis);
+ }
+ } else {
+ // Old fee style negotiation. We don't bother to enforce whether they are complying
+ // with the "making progress" requirements; we just comply and hope for the best.
+ if let Some((last_fee, _)) = self.context.last_sent_closing_fee {
+ if msg.fee_satoshis > last_fee {
+ if msg.fee_satoshis < our_max_fee {
+ propose_fee!(msg.fee_satoshis);
+ } else if last_fee < our_max_fee {
+ propose_fee!(our_max_fee);
+ } else {
+ return Err(ChannelError::Close(format!("Unable to come to consensus about closing feerate, remote wants something ({} sat) higher than our max fee ({} sat)", msg.fee_satoshis, our_max_fee)));
+ }
+ } else {
+ if msg.fee_satoshis > our_min_fee {
+ propose_fee!(msg.fee_satoshis);
+ } else if last_fee > our_min_fee {
+ propose_fee!(our_min_fee);
+ } else {
+ return Err(ChannelError::Close(format!("Unable to come to consensus about closing feerate, remote wants something ({} sat) lower than our min fee ({} sat)", msg.fee_satoshis, our_min_fee)));
+ }
}
} else {
- htlcs_and_sigs.push((htlc, None, source_opt.take()));
- }
- if separate_nondust_htlc_sources {
- if let Some(source) = source_opt.take() {
- nondust_htlc_sources.push(source);
+ if msg.fee_satoshis < our_min_fee {
+ propose_fee!(our_min_fee);
+ } else if msg.fee_satoshis > our_max_fee {
+ propose_fee!(our_max_fee);
+ } else {
+ propose_fee!(msg.fee_satoshis);
}
}
- debug_assert!(source_opt.is_none(), "HTLCSource should have been put somewhere");
}
+ }
- let holder_commitment_tx = HolderCommitmentTransaction::new(
- commitment_stats.tx,
- msg.signature,
- msg.htlc_signatures.clone(),
- &self.context.get_holder_pubkeys().funding_pubkey,
- self.context.counterparty_funding_pubkey()
- );
-
- self.context.holder_signer.validate_holder_commitment(&holder_commitment_tx, commitment_stats.preimages)
- .map_err(|_| ChannelError::Close("Failed to validate our commitment".to_owned()))?;
-
- // Update state now that we've passed all the can-fail calls...
- let mut need_commitment = false;
- if let &mut Some((_, ref mut update_state)) = &mut self.context.pending_update_fee {
- if *update_state == FeeUpdateState::RemoteAnnounced {
- *update_state = FeeUpdateState::AwaitingRemoteRevokeToAnnounce;
- need_commitment = true;
- }
+ fn internal_htlc_satisfies_config(
+ &self, htlc: &msgs::UpdateAddHTLC, amt_to_forward: u64, outgoing_cltv_value: u32, config: &ChannelConfig,
+ ) -> Result<(), (&'static str, u16)> {
+ let fee = amt_to_forward.checked_mul(config.forwarding_fee_proportional_millionths as u64)
+ .and_then(|prop_fee| (prop_fee / 1000000).checked_add(config.forwarding_fee_base_msat as u64));
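+ // e.g. with forwarding_fee_base_msat = 1_000 and
+ // forwarding_fee_proportional_millionths = 100, forwarding amt_to_forward = 1_000_000
+ // msat requires a fee of 1_000_000 * 100 / 1_000_000 + 1_000 = 1_100 msat, so the
+ // inbound HTLC must carry at least 1_001_100 msat.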
+ if fee.is_none() || htlc.amount_msat < fee.unwrap() ||
+ (htlc.amount_msat - fee.unwrap()) < amt_to_forward {
+ return Err((
+ "Prior hop has deviated from specified fees parameters or origin node has obsolete ones",
+ 0x1000 | 12, // fee_insufficient
+ ));
}
-
- for htlc in self.context.pending_inbound_htlcs.iter_mut() {
- let new_forward = if let &InboundHTLCState::RemoteAnnounced(ref forward_info) = &htlc.state {
- Some(forward_info.clone())
- } else { None };
- if let Some(forward_info) = new_forward {
- log_trace!(logger, "Updating HTLC {} to AwaitingRemoteRevokeToAnnounce due to commitment_signed in channel {}.",
- log_bytes!(htlc.payment_hash.0), log_bytes!(self.context.channel_id));
- htlc.state = InboundHTLCState::AwaitingRemoteRevokeToAnnounce(forward_info);
- need_commitment = true;
- }
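+ // e.g. with cltv_expiry_delta = 40 and outgoing_cltv_value = 600_000, the inbound
+ // HTLC's cltv_expiry must be at least 600_040 blocks.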
+ if (htlc.cltv_expiry as u64) < outgoing_cltv_value as u64 + config.cltv_expiry_delta as u64 {
+ return Err((
+ "Forwarding node has tampered with the intended HTLC values or origin node has an obsolete cltv_expiry_delta",
+ 0x1000 | 13, // incorrect_cltv_expiry
+ ));
}
- let mut claimed_htlcs = Vec::new();
- for htlc in self.context.pending_outbound_htlcs.iter_mut() {
- if let &mut OutboundHTLCState::RemoteRemoved(ref mut outcome) = &mut htlc.state {
- log_trace!(logger, "Updating HTLC {} to AwaitingRemoteRevokeToRemove due to commitment_signed in channel {}.",
- log_bytes!(htlc.payment_hash.0), log_bytes!(self.context.channel_id));
- // Grab the preimage, if it exists, instead of cloning
- let mut reason = OutboundHTLCOutcome::Success(None);
- mem::swap(outcome, &mut reason);
- if let OutboundHTLCOutcome::Success(Some(preimage)) = reason {
- // If a user (a) receives an HTLC claim using LDK 0.0.104 or before, then (b)
- // upgrades to LDK 0.0.114 or later before the HTLC is fully resolved, we could
- // have a `Success(None)` reason. In this case we could forget some HTLC
- // claims, but such an upgrade is unlikely and including claimed HTLCs here
- // fixes a bug which the user was exposed to on 0.0.104 when they started the
- // claim anyway.
- claimed_htlcs.push((SentHTLCId::from_source(&htlc.source), preimage));
+ Ok(())
+ }
+
+ /// Determines whether the parameters of an incoming HTLC to be forwarded satisfy the channel's
+ /// [`ChannelConfig`]. This first looks at the channel's current [`ChannelConfig`], and if
+ /// unsuccessful, falls back to the previous one if one exists.
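+ /// Falling back to the previous config covers the window where a config update hasn't
+ /// propagated through gossip yet, so senders may still build routes against our old fees
+ /// and `cltv_expiry_delta`.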
+ pub fn htlc_satisfies_config(
+ &self, htlc: &msgs::UpdateAddHTLC, amt_to_forward: u64, outgoing_cltv_value: u32,
+ ) -> Result<(), (&'static str, u16)> {
+ self.internal_htlc_satisfies_config(&htlc, amt_to_forward, outgoing_cltv_value, &self.context.config())
+ .or_else(|err| {
+ if let Some(prev_config) = self.context.prev_config() {
+ self.internal_htlc_satisfies_config(htlc, amt_to_forward, outgoing_cltv_value, &prev_config)
+ } else {
+ Err(err)
}
- htlc.state = OutboundHTLCState::AwaitingRemoteRevokeToRemove(reason);
- need_commitment = true;
- }
- }
+ })
+ }
- self.context.latest_monitor_update_id += 1;
- let mut monitor_update = ChannelMonitorUpdate {
- update_id: self.context.latest_monitor_update_id,
- updates: vec![ChannelMonitorUpdateStep::LatestHolderCommitmentTXInfo {
- commitment_tx: holder_commitment_tx,
- htlc_outputs: htlcs_and_sigs,
- claimed_htlcs,
- nondust_htlc_sources,
- }]
- };
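+ // Commitment transaction numbers count down from INITIAL_COMMITMENT_NUMBER; the
+ // `cur_*_commitment_transaction_number` fields track the number the *next* commitment
+ // transaction will use, so the getters below add the offset back to report the current
+ // (or, for the last getter, the most recently revoked) one.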
+ pub fn get_cur_holder_commitment_transaction_number(&self) -> u64 {
+ self.context.cur_holder_commitment_transaction_number + 1
+ }
- self.context.cur_holder_commitment_transaction_number -= 1;
- // Note that if we need_commitment & !AwaitingRemoteRevoke we'll call
- // build_commitment_no_status_check() next which will reset this to RAAFirst.
- self.context.resend_order = RAACommitmentOrder::CommitmentFirst;
+ pub fn get_cur_counterparty_commitment_transaction_number(&self) -> u64 {
+ self.context.cur_counterparty_commitment_transaction_number + 1 - if self.context.channel_state.is_awaiting_remote_revoke() { 1 } else { 0 }
+ }
- if (self.context.channel_state & ChannelState::MonitorUpdateInProgress as u32) != 0 {
- // In case we initially failed monitor updating without requiring a response, we need
- // to make sure the RAA gets sent first.
- self.context.monitor_pending_revoke_and_ack = true;
- if need_commitment && (self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32)) == 0 {
- // If we were going to send a commitment_signed after the RAA, go ahead and do all
- // the corresponding HTLC status updates so that get_last_commitment_update
- // includes the right HTLCs.
- self.context.monitor_pending_commitment_signed = true;
- let mut additional_update = self.build_commitment_no_status_check(logger);
- // build_commitment_no_status_check may bump latest_monitor_id but we want them to be
- // strictly increasing by one, so decrement it here.
- self.context.latest_monitor_update_id = monitor_update.update_id;
- monitor_update.updates.append(&mut additional_update.updates);
- }
- log_debug!(logger, "Received valid commitment_signed from peer in channel {}, updated HTLC state but awaiting a monitor update resolution to reply.",
- log_bytes!(self.context.channel_id));
- return Ok(self.push_ret_blockable_mon_update(monitor_update));
- }
+ pub fn get_revoked_counterparty_commitment_transaction_number(&self) -> u64 {
+ self.context.cur_counterparty_commitment_transaction_number + 2
+ }
- let need_commitment_signed = if need_commitment && (self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32)) == 0 {
- // If we're AwaitingRemoteRevoke we can't send a new commitment here, but that's ok -
- // we'll send one right away when we get the revoke_and_ack when we
- // free_holding_cell_htlcs().
- let mut additional_update = self.build_commitment_no_status_check(logger);
- // build_commitment_no_status_check may bump latest_monitor_id but we want them to be
- // strictly increasing by one, so decrement it here.
- self.context.latest_monitor_update_id = monitor_update.update_id;
- monitor_update.updates.append(&mut additional_update.updates);
- true
- } else { false };
-
- log_debug!(logger, "Received valid commitment_signed from peer in channel {}, updating HTLC state and responding with{} a revoke_and_ack.",
- log_bytes!(self.context.channel_id()), if need_commitment_signed { " our own commitment_signed and" } else { "" });
- self.monitor_updating_paused(true, need_commitment_signed, false, Vec::new(), Vec::new(), Vec::new());
- return Ok(self.push_ret_blockable_mon_update(monitor_update));
- }
-
- /// Public version of the below, checking relevant preconditions first.
- /// If we're not in a state where freeing the holding cell makes sense, this is a no-op and
- /// returns `(None, Vec::new())`.
- pub fn maybe_free_holding_cell_htlcs<L: Deref>(&mut self, logger: &L) -> (Option<&ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>) where L::Target: Logger {
- if self.context.channel_state >= ChannelState::ChannelReady as u32 &&
- (self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32)) == 0 {
- self.free_holding_cell_htlcs(logger)
- } else { (None, Vec::new()) }
- }
-
- /// Frees any pending commitment updates in the holding cell, generating the relevant messages
- /// for our counterparty.
- fn free_holding_cell_htlcs<L: Deref>(&mut self, logger: &L) -> (Option<&ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>) where L::Target: Logger {
- assert_eq!(self.context.channel_state & ChannelState::MonitorUpdateInProgress as u32, 0);
- if self.context.holding_cell_htlc_updates.len() != 0 || self.context.holding_cell_update_fee.is_some() {
- log_trace!(logger, "Freeing holding cell with {} HTLC updates{} in channel {}", self.context.holding_cell_htlc_updates.len(),
- if self.context.holding_cell_update_fee.is_some() { " and a fee update" } else { "" }, log_bytes!(self.context.channel_id()));
-
- let mut monitor_update = ChannelMonitorUpdate {
- update_id: self.context.latest_monitor_update_id + 1, // We don't increment this yet!
- updates: Vec::new(),
- };
-
- let mut htlc_updates = Vec::new();
- mem::swap(&mut htlc_updates, &mut self.context.holding_cell_htlc_updates);
- let mut update_add_htlcs = Vec::with_capacity(htlc_updates.len());
- let mut update_fulfill_htlcs = Vec::with_capacity(htlc_updates.len());
- let mut update_fail_htlcs = Vec::with_capacity(htlc_updates.len());
- let mut htlcs_to_fail = Vec::new();
- for htlc_update in htlc_updates.drain(..) {
- // Note that this *can* fail, though it should be due to rather-rare conditions on
- // fee races with adding too many outputs which push our total payments just over
- // the limit. In case it's less rare than I anticipate, we may want to revisit
- // handling this case better and maybe fulfilling some of the HTLCs while attempting
- // to rebalance channels.
- match &htlc_update {
- &HTLCUpdateAwaitingACK::AddHTLC {amount_msat, cltv_expiry, ref payment_hash, ref source, ref onion_routing_packet, ..} => {
- match self.send_htlc(amount_msat, *payment_hash, cltv_expiry, source.clone(), onion_routing_packet.clone(), false, logger) {
- Ok(update_add_msg_option) => update_add_htlcs.push(update_add_msg_option.unwrap()),
- Err(e) => {
- match e {
- ChannelError::Ignore(ref msg) => {
- log_info!(logger, "Failed to send HTLC with payment_hash {} due to {} in channel {}",
- log_bytes!(payment_hash.0), msg, log_bytes!(self.context.channel_id()));
- // If we fail to send here, then this HTLC should
- // be failed backwards. Failing to send here
- // indicates that this HTLC may keep being put back
- // into the holding cell without ever being
- // successfully forwarded/failed/fulfilled, causing
- // our counterparty to eventually close on us.
- htlcs_to_fail.push((source.clone(), *payment_hash));
- },
- _ => {
- panic!("Got a non-IgnoreError action trying to send holding cell HTLC");
- },
- }
- }
- }
- },
- &HTLCUpdateAwaitingACK::ClaimHTLC { ref payment_preimage, htlc_id, .. } => {
- // If an HTLC claim was previously added to the holding cell (via
- // `get_update_fulfill_htlc`), then generating the claim message itself must
- // not fail - any in between attempts to claim the HTLC will have resulted
- // in it hitting the holding cell again and we cannot change the state of a
- // holding cell HTLC from fulfill to anything else.
- let (update_fulfill_msg_option, mut additional_monitor_update) =
- if let UpdateFulfillFetch::NewClaim { msg, monitor_update, .. } = self.get_update_fulfill_htlc(htlc_id, *payment_preimage, logger) {
- (msg, monitor_update)
- } else { unreachable!() };
- update_fulfill_htlcs.push(update_fulfill_msg_option.unwrap());
- monitor_update.updates.append(&mut additional_monitor_update.updates);
- },
- &HTLCUpdateAwaitingACK::FailHTLC { htlc_id, ref err_packet } => {
- match self.fail_htlc(htlc_id, err_packet.clone(), false, logger) {
- Ok(update_fail_msg_option) => {
- // If an HTLC failure was previously added to the holding cell (via
- // `queue_fail_htlc`) then generating the fail message itself must
- // not fail - we should never end up in a state where we double-fail
- // an HTLC or fail-then-claim an HTLC as it indicates we didn't wait
- // for a full revocation before failing.
- update_fail_htlcs.push(update_fail_msg_option.unwrap())
- },
- Err(e) => {
- if let ChannelError::Ignore(_) = e {}
- else {
- panic!("Got a non-IgnoreError action trying to fail holding cell HTLC");
- }
- }
- }
- },
- }
- }
- if update_add_htlcs.is_empty() && update_fulfill_htlcs.is_empty() && update_fail_htlcs.is_empty() && self.context.holding_cell_update_fee.is_none() {
- return (None, htlcs_to_fail);
- }
- let update_fee = if let Some(feerate) = self.context.holding_cell_update_fee.take() {
- self.send_update_fee(feerate, false, logger)
- } else {
- None
- };
-
- let mut additional_update = self.build_commitment_no_status_check(logger);
- // build_commitment_no_status_check and get_update_fulfill_htlc may bump latest_monitor_id
- // but we want them to be strictly increasing by one, so reset it here.
- self.context.latest_monitor_update_id = monitor_update.update_id;
- monitor_update.updates.append(&mut additional_update.updates);
-
- log_debug!(logger, "Freeing holding cell in channel {} resulted in {}{} HTLCs added, {} HTLCs fulfilled, and {} HTLCs failed.",
- log_bytes!(self.context.channel_id()), if update_fee.is_some() { "a fee update, " } else { "" },
- update_add_htlcs.len(), update_fulfill_htlcs.len(), update_fail_htlcs.len());
-
- self.monitor_updating_paused(false, true, false, Vec::new(), Vec::new(), Vec::new());
- (self.push_ret_blockable_mon_update(monitor_update), htlcs_to_fail)
- } else {
- (None, Vec::new())
- }
+ #[cfg(test)]
+ pub fn get_signer(&self) -> &ChannelSignerType<SP> {
+ &self.context.holder_signer
}
- /// Handles receiving a remote's revoke_and_ack. Note that we may return a new
- /// commitment_signed message here in case we had pending outbound HTLCs to add which were
- /// waiting on this revoke_and_ack. The generation of this new commitment_signed may also fail,
- /// generating an appropriate error *after* the channel state has been updated based on the
- /// revoke_and_ack message.
- pub fn revoke_and_ack<L: Deref>(&mut self, msg: &msgs::RevokeAndACK, logger: &L) -> Result<(Vec<(HTLCSource, PaymentHash)>, Option<&ChannelMonitorUpdate>), ChannelError>
- where L::Target: Logger,
- {
- if (self.context.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) {
- return Err(ChannelError::Close("Got revoke/ACK message when channel was not in an operational state".to_owned()));
- }
- if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
- return Err(ChannelError::Close("Peer sent revoke_and_ack when we needed a channel_reestablish".to_owned()));
- }
- if self.context.channel_state & BOTH_SIDES_SHUTDOWN_MASK == BOTH_SIDES_SHUTDOWN_MASK && self.context.last_sent_closing_fee.is_some() {
- return Err(ChannelError::Close("Peer sent revoke_and_ack after we'd started exchanging closing_signeds".to_owned()));
- }
-
- let secret = secp_check!(SecretKey::from_slice(&msg.per_commitment_secret), "Peer provided an invalid per_commitment_secret".to_owned());
-
- if let Some(counterparty_prev_commitment_point) = self.context.counterparty_prev_commitment_point {
- if PublicKey::from_secret_key(&self.context.secp_ctx, &secret) != counterparty_prev_commitment_point {
- return Err(ChannelError::Close("Got a revoke commitment secret which didn't correspond to their current pubkey".to_owned()));
- }
- }
-
- if self.context.channel_state & ChannelState::AwaitingRemoteRevoke as u32 == 0 {
- // Our counterparty seems to have burned their coins to us (by revoking a state when we
- // haven't given them a new commitment transaction to broadcast). We should probably
- // take advantage of this by updating our channel monitor, sending them an error, and
- // waiting for them to broadcast their latest (now-revoked claim). But, that would be a
- // lot of work, and there's some chance this is all a misunderstanding anyway.
- // We have to do *something*, though, since our signer may get mad at us for otherwise
- // jumping a remote commitment number, so best to just force-close and move on.
- return Err(ChannelError::Close("Received an unexpected revoke_and_ack".to_owned()));
- }
-
- #[cfg(any(test, fuzzing))]
- {
- *self.context.next_local_commitment_tx_fee_info_cached.lock().unwrap() = None;
- *self.context.next_remote_commitment_tx_fee_info_cached.lock().unwrap() = None;
- }
-
- self.context.holder_signer.validate_counterparty_revocation(
- self.context.cur_counterparty_commitment_transaction_number + 1,
- &secret
- ).map_err(|_| ChannelError::Close("Failed to validate revocation from peer".to_owned()))?;
-
- self.context.commitment_secrets.provide_secret(self.context.cur_counterparty_commitment_transaction_number + 1, msg.per_commitment_secret)
- .map_err(|_| ChannelError::Close("Previous secrets did not match new one".to_owned()))?;
- self.context.latest_monitor_update_id += 1;
- let mut monitor_update = ChannelMonitorUpdate {
- update_id: self.context.latest_monitor_update_id,
- updates: vec![ChannelMonitorUpdateStep::CommitmentSecret {
- idx: self.context.cur_counterparty_commitment_transaction_number + 1,
- secret: msg.per_commitment_secret,
- }],
- };
-
- // Update state now that we've passed all the can-fail calls...
- // (note that we may still fail to generate the new commitment_signed message, but that's
- // OK, we step the channel here and *then* if the new generation fails we can fail the
- // channel based on that, but stepping stuff here should be safe either way.)
- self.context.channel_state &= !(ChannelState::AwaitingRemoteRevoke as u32);
- self.context.sent_message_awaiting_response = None;
- self.context.counterparty_prev_commitment_point = self.context.counterparty_cur_commitment_point;
- self.context.counterparty_cur_commitment_point = Some(msg.next_per_commitment_point);
- self.context.cur_counterparty_commitment_transaction_number -= 1;
-
- if self.context.announcement_sigs_state == AnnouncementSigsState::Committed {
- self.context.announcement_sigs_state = AnnouncementSigsState::PeerReceived;
- }
-
- log_trace!(logger, "Updating HTLCs on receipt of RAA in channel {}...", log_bytes!(self.context.channel_id()));
- let mut to_forward_infos = Vec::new();
- let mut revoked_htlcs = Vec::new();
- let mut finalized_claimed_htlcs = Vec::new();
- let mut update_fail_htlcs = Vec::new();
- let mut update_fail_malformed_htlcs = Vec::new();
- let mut require_commitment = false;
- let mut value_to_self_msat_diff: i64 = 0;
-
- {
- // Take references explicitly so that we can hold multiple references to self.context.
- let pending_inbound_htlcs: &mut Vec<_> = &mut self.context.pending_inbound_htlcs;
- let pending_outbound_htlcs: &mut Vec<_> = &mut self.context.pending_outbound_htlcs;
-
- // We really shouldn't have two passes here, but retain gives a non-mutable ref (Rust bug)
- pending_inbound_htlcs.retain(|htlc| {
- if let &InboundHTLCState::LocalRemoved(ref reason) = &htlc.state {
- log_trace!(logger, " ...removing inbound LocalRemoved {}", log_bytes!(htlc.payment_hash.0));
- if let &InboundHTLCRemovalReason::Fulfill(_) = reason {
- value_to_self_msat_diff += htlc.amount_msat as i64;
- }
- false
- } else { true }
- });
- pending_outbound_htlcs.retain(|htlc| {
- if let &OutboundHTLCState::AwaitingRemovedRemoteRevoke(ref outcome) = &htlc.state {
- log_trace!(logger, " ...removing outbound AwaitingRemovedRemoteRevoke {}", log_bytes!(htlc.payment_hash.0));
- if let OutboundHTLCOutcome::Failure(reason) = outcome.clone() { // We really want take() here, but, again, non-mut ref :(
- revoked_htlcs.push((htlc.source.clone(), htlc.payment_hash, reason));
- } else {
- finalized_claimed_htlcs.push(htlc.source.clone());
- // They fulfilled, so we sent them money
- value_to_self_msat_diff -= htlc.amount_msat as i64;
- }
- false
- } else { true }
- });
- for htlc in pending_inbound_htlcs.iter_mut() {
- let swap = if let &InboundHTLCState::AwaitingRemoteRevokeToAnnounce(_) = &htlc.state {
- true
- } else if let &InboundHTLCState::AwaitingAnnouncedRemoteRevoke(_) = &htlc.state {
- true
- } else { false };
- if swap {
- let mut state = InboundHTLCState::Committed;
- mem::swap(&mut state, &mut htlc.state);
-
- if let InboundHTLCState::AwaitingRemoteRevokeToAnnounce(forward_info) = state {
- log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce {} to AwaitingAnnouncedRemoteRevoke", log_bytes!(htlc.payment_hash.0));
- htlc.state = InboundHTLCState::AwaitingAnnouncedRemoteRevoke(forward_info);
- require_commitment = true;
- } else if let InboundHTLCState::AwaitingAnnouncedRemoteRevoke(forward_info) = state {
- match forward_info {
- PendingHTLCStatus::Fail(fail_msg) => {
- log_trace!(logger, " ...promoting inbound AwaitingAnnouncedRemoteRevoke {} to LocalRemoved due to PendingHTLCStatus indicating failure", log_bytes!(htlc.payment_hash.0));
- require_commitment = true;
- match fail_msg {
- HTLCFailureMsg::Relay(msg) => {
- htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailRelay(msg.reason.clone()));
- update_fail_htlcs.push(msg)
- },
- HTLCFailureMsg::Malformed(msg) => {
- htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailMalformed((msg.sha256_of_onion, msg.failure_code)));
- update_fail_malformed_htlcs.push(msg)
- },
- }
- },
- PendingHTLCStatus::Forward(forward_info) => {
- log_trace!(logger, " ...promoting inbound AwaitingAnnouncedRemoteRevoke {} to Committed", log_bytes!(htlc.payment_hash.0));
- to_forward_infos.push((forward_info, htlc.htlc_id));
- htlc.state = InboundHTLCState::Committed;
- }
+ #[cfg(test)]
+ pub fn get_value_stat(&self) -> ChannelValueStat {
+ ChannelValueStat {
+ value_to_self_msat: self.context.value_to_self_msat,
+ channel_value_msat: self.context.channel_value_satoshis * 1000,
+ channel_reserve_msat: self.context.counterparty_selected_channel_reserve_satoshis.unwrap() * 1000,
+ pending_outbound_htlcs_amount_msat: self.context.pending_outbound_htlcs.iter().map(|ref h| h.amount_msat).sum::<u64>(),
+ pending_inbound_htlcs_amount_msat: self.context.pending_inbound_htlcs.iter().map(|ref h| h.amount_msat).sum::<u64>(),
+ holding_cell_outbound_amount_msat: {
+ let mut res = 0;
+ for h in self.context.holding_cell_htlc_updates.iter() {
+ match h {
+ &HTLCUpdateAwaitingACK::AddHTLC{amount_msat, .. } => {
+ res += amount_msat;
}
+ _ => {}
}
}
- }
- for htlc in pending_outbound_htlcs.iter_mut() {
- if let OutboundHTLCState::LocalAnnounced(_) = htlc.state {
- log_trace!(logger, " ...promoting outbound LocalAnnounced {} to Committed", log_bytes!(htlc.payment_hash.0));
- htlc.state = OutboundHTLCState::Committed;
- }
- if let &mut OutboundHTLCState::AwaitingRemoteRevokeToRemove(ref mut outcome) = &mut htlc.state {
- log_trace!(logger, " ...promoting outbound AwaitingRemoteRevokeToRemove {} to AwaitingRemovedRemoteRevoke", log_bytes!(htlc.payment_hash.0));
- // Grab the preimage, if it exists, instead of cloning
- let mut reason = OutboundHTLCOutcome::Success(None);
- mem::swap(outcome, &mut reason);
- htlc.state = OutboundHTLCState::AwaitingRemovedRemoteRevoke(reason);
- require_commitment = true;
- }
- }
- }
- self.context.value_to_self_msat = (self.context.value_to_self_msat as i64 + value_to_self_msat_diff) as u64;
-
- if let Some((feerate, update_state)) = self.context.pending_update_fee {
- match update_state {
- FeeUpdateState::Outbound => {
- debug_assert!(self.context.is_outbound());
- log_trace!(logger, " ...promoting outbound fee update {} to Committed", feerate);
- self.context.feerate_per_kw = feerate;
- self.context.pending_update_fee = None;
- },
- FeeUpdateState::RemoteAnnounced => { debug_assert!(!self.context.is_outbound()); },
- FeeUpdateState::AwaitingRemoteRevokeToAnnounce => {
- debug_assert!(!self.context.is_outbound());
- log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce fee update {} to Committed", feerate);
- require_commitment = true;
- self.context.feerate_per_kw = feerate;
- self.context.pending_update_fee = None;
- },
- }
- }
-
- if (self.context.channel_state & ChannelState::MonitorUpdateInProgress as u32) == ChannelState::MonitorUpdateInProgress as u32 {
- // We can't actually generate a new commitment transaction (incl by freeing holding
- // cells) while we can't update the monitor, so we just return what we have.
- if require_commitment {
- self.context.monitor_pending_commitment_signed = true;
- // When the monitor updating is restored we'll call get_last_commitment_update(),
- // which does not update state, but we're definitely now awaiting a remote revoke
- // before we can step forward any more, so set it here.
- let mut additional_update = self.build_commitment_no_status_check(logger);
- // build_commitment_no_status_check may bump latest_monitor_id but we want them to be
- // strictly increasing by one, so decrement it here.
- self.context.latest_monitor_update_id = monitor_update.update_id;
- monitor_update.updates.append(&mut additional_update.updates);
- }
- self.context.monitor_pending_forwards.append(&mut to_forward_infos);
- self.context.monitor_pending_failures.append(&mut revoked_htlcs);
- self.context.monitor_pending_finalized_fulfills.append(&mut finalized_claimed_htlcs);
- log_debug!(logger, "Received a valid revoke_and_ack for channel {} but awaiting a monitor update resolution to reply.", log_bytes!(self.context.channel_id()));
- return Ok((Vec::new(), self.push_ret_blockable_mon_update(monitor_update)));
+ res
+ },
+ counterparty_max_htlc_value_in_flight_msat: self.context.counterparty_max_htlc_value_in_flight_msat,
+ counterparty_dust_limit_msat: self.context.counterparty_dust_limit_satoshis * 1000,
}
+ }
- match self.free_holding_cell_htlcs(logger) {
- (Some(_), htlcs_to_fail) => {
- let mut additional_update = self.context.pending_monitor_updates.pop().unwrap().update;
- // free_holding_cell_htlcs may bump latest_monitor_id multiple times but we want them to be
- // strictly increasing by one, so decrement it here.
- self.context.latest_monitor_update_id = monitor_update.update_id;
- monitor_update.updates.append(&mut additional_update.updates);
-
- self.monitor_updating_paused(false, true, false, to_forward_infos, revoked_htlcs, finalized_claimed_htlcs);
- Ok((htlcs_to_fail, self.push_ret_blockable_mon_update(monitor_update)))
- },
- (None, htlcs_to_fail) => {
- if require_commitment {
- let mut additional_update = self.build_commitment_no_status_check(logger);
+ /// Returns true if this channel has been marked as awaiting a monitor update to move forward.
+ /// Allowed in any state (including after shutdown)
+ pub fn is_awaiting_monitor_update(&self) -> bool {
+ self.context.channel_state.is_monitor_update_in_progress()
+ }
- // build_commitment_no_status_check may bump latest_monitor_id but we want them to be
- // strictly increasing by one, so decrement it here.
- self.context.latest_monitor_update_id = monitor_update.update_id;
- monitor_update.updates.append(&mut additional_update.updates);
+ /// Gets the latest [`ChannelMonitorUpdate`] ID which has been released and is in-flight.
+ pub fn get_latest_unblocked_monitor_update_id(&self) -> u64 {
+ if self.context.blocked_monitor_updates.is_empty() { return self.context.get_latest_monitor_update_id(); }
+ self.context.blocked_monitor_updates[0].update.update_id - 1
+ }
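+
+	// For intuition on the `- 1` above: if `blocked_monitor_updates` holds updates with
+	// IDs `[5, 6]`, then ID 4 was the last update actually released to the persister, so
+	// that is what we report here. (Illustrative IDs only.)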
- log_debug!(logger, "Received a valid revoke_and_ack for channel {}. Responding with a commitment update with {} HTLCs failed.",
- log_bytes!(self.context.channel_id()), update_fail_htlcs.len() + update_fail_malformed_htlcs.len());
- self.monitor_updating_paused(false, true, false, to_forward_infos, revoked_htlcs, finalized_claimed_htlcs);
- Ok((htlcs_to_fail, self.push_ret_blockable_mon_update(monitor_update)))
- } else {
- log_debug!(logger, "Received a valid revoke_and_ack for channel {} with no reply necessary.", log_bytes!(self.context.channel_id()));
- self.monitor_updating_paused(false, false, false, to_forward_infos, revoked_htlcs, finalized_claimed_htlcs);
- Ok((htlcs_to_fail, self.push_ret_blockable_mon_update(monitor_update)))
- }
- }
+ /// Returns the next blocked monitor update, if one exists, and a bool which indicates a
+ /// further blocked monitor update exists after the next.
+ pub fn unblock_next_blocked_monitor_update(&mut self) -> Option<(ChannelMonitorUpdate, bool)> {
+ if self.context.blocked_monitor_updates.is_empty() { return None; }
+ Some((self.context.blocked_monitor_updates.remove(0).update,
+ !self.context.blocked_monitor_updates.is_empty()))
+ }
+
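+	// A minimal sketch of how a caller might drain this queue once the blocking
+	// dependency resolves (hypothetical `chan`, `chain_monitor` and `funding_txo`
+	// bindings; the real plumbing lives in `ChannelManager`):
+	//
+	//   while let Some((update, _more_blocked)) = chan.unblock_next_blocked_monitor_update() {
+	//       chain_monitor.update_channel(funding_txo, &update);
+	//   }
+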
+ /// Pushes a new monitor update into our monitor update queue, returning it if it should be
+ /// immediately given to the user for persisting or `None` if it should be held as blocked.
+ fn push_ret_blockable_mon_update(&mut self, update: ChannelMonitorUpdate)
+ -> Option<ChannelMonitorUpdate> {
+ let release_monitor = self.context.blocked_monitor_updates.is_empty();
+ if !release_monitor {
+ self.context.blocked_monitor_updates.push(PendingChannelMonitorUpdate {
+ update,
+ });
+ None
+ } else {
+ Some(update)
}
}
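+
+	// Design note: monitor updates must reach the ChannelMonitor in `update_id` order, so
+	// once one update is blocked, every later update queues behind it regardless of
+	// whether the later update itself has a blocking dependency.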
- /// Queues up an outbound update fee by placing it in the holding cell. You should call
- /// [`Self::maybe_free_holding_cell_htlcs`] in order to actually generate and send the
- /// commitment update.
- pub fn queue_update_fee<L: Deref>(&mut self, feerate_per_kw: u32, logger: &L) where L::Target: Logger {
- let msg_opt = self.send_update_fee(feerate_per_kw, true, logger);
- assert!(msg_opt.is_none(), "We forced holding cell?");
+ pub fn blocked_monitor_updates_pending(&self) -> usize {
+ self.context.blocked_monitor_updates.len()
}
- /// Adds a pending update to this channel. See the doc for send_htlc for
- /// further details on the optionness of the return value.
- /// If our balance is too low to cover the cost of the next commitment transaction at the
- /// new feerate, the update is cancelled.
- ///
- /// You MUST call [`Self::send_commitment_no_state_update`] prior to any other calls on this
- /// [`Channel`] if `force_holding_cell` is false.
- fn send_update_fee<L: Deref>(&mut self, feerate_per_kw: u32, mut force_holding_cell: bool, logger: &L) -> Option<msgs::UpdateFee> where L::Target: Logger {
- if !self.context.is_outbound() {
- panic!("Cannot send fee from inbound channel");
- }
- if !self.context.is_usable() {
- panic!("Cannot update fee until channel is fully established and we haven't started shutting down");
+ /// Returns true if the channel is awaiting the persistence of the initial ChannelMonitor.
+ /// If the channel is outbound, this implies we have not yet broadcasted the funding
+ /// transaction. If the channel is inbound, this implies simply that the channel has not
+ /// advanced state.
+ pub fn is_awaiting_initial_mon_persist(&self) -> bool {
+ if !self.is_awaiting_monitor_update() { return false; }
+ if matches!(
+ self.context.channel_state, ChannelState::AwaitingChannelReady(flags)
+ if (flags & !(AwaitingChannelReadyFlags::THEIR_CHANNEL_READY | FundedStateFlags::PEER_DISCONNECTED | FundedStateFlags::MONITOR_UPDATE_IN_PROGRESS | AwaitingChannelReadyFlags::WAITING_FOR_BATCH)).is_empty()
+ ) {
+			// If we're not a 0-conf channel, we'll be waiting on a monitor update with only
+ // AwaitingChannelReady set, though our peer could have sent their channel_ready.
+ debug_assert!(self.context.minimum_depth.unwrap_or(1) > 0);
+ return true;
}
- if !self.context.is_live() {
- panic!("Cannot update fee while peer is disconnected/we're awaiting a monitor update (ChannelManager should have caught this)");
+ if self.context.cur_holder_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 1 &&
+ self.context.cur_counterparty_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 1 {
+ // If we're a 0-conf channel, we'll move beyond AwaitingChannelReady immediately even while
+ // waiting for the initial monitor persistence. Thus, we check if our commitment
+ // transaction numbers have both been iterated only exactly once (for the
+ // funding_signed), and we're awaiting monitor update.
+ //
+ // If we got here, we shouldn't have yet broadcasted the funding transaction (as the
+ // only way to get an awaiting-monitor-update state during initial funding is if the
+ // initial monitor persistence is still pending).
+ //
+ // Because deciding we're awaiting initial broadcast spuriously could result in
+ // funds-loss (as we don't have a monitor, but have the funding transaction confirmed),
+ // we hard-assert here, even in production builds.
+ if self.context.is_outbound() { assert!(self.context.funding_transaction.is_some()); }
+ assert!(self.context.monitor_pending_channel_ready);
+ assert_eq!(self.context.latest_monitor_update_id, 0);
+ return true;
}
+ false
+ }
- // Before proposing a feerate update, check that we can actually afford the new fee.
- let inbound_stats = self.get_inbound_pending_htlc_stats(Some(feerate_per_kw));
- let outbound_stats = self.get_outbound_pending_htlc_stats(Some(feerate_per_kw));
- let keys = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number);
- let commitment_stats = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &keys, true, true, logger);
- let buffer_fee_msat = commit_tx_fee_sat(feerate_per_kw, commitment_stats.num_nondust_htlcs + outbound_stats.on_holder_tx_holding_cell_htlcs_count as usize + CONCURRENT_INBOUND_HTLC_FEE_BUFFER as usize, self.context.opt_anchors()) * 1000;
- let holder_balance_msat = commitment_stats.local_balance_msat - outbound_stats.holding_cell_msat;
- if holder_balance_msat < buffer_fee_msat + self.context.counterparty_selected_channel_reserve_satoshis.unwrap() * 1000 {
- //TODO: auto-close after a number of failures?
- log_debug!(logger, "Cannot afford to send new feerate at {}", feerate_per_kw);
+ /// Returns true if our channel_ready has been sent
+ pub fn is_our_channel_ready(&self) -> bool {
+ matches!(self.context.channel_state, ChannelState::AwaitingChannelReady(flags) if flags.is_set(AwaitingChannelReadyFlags::OUR_CHANNEL_READY)) ||
+ matches!(self.context.channel_state, ChannelState::ChannelReady(_))
+ }
+
+ /// Returns true if our peer has either initiated or agreed to shut down the channel.
+ pub fn received_shutdown(&self) -> bool {
+ self.context.channel_state.is_remote_shutdown_sent()
+ }
+
+ /// Returns true if we either initiated or agreed to shut down the channel.
+ pub fn sent_shutdown(&self) -> bool {
+ self.context.channel_state.is_local_shutdown_sent()
+ }
+
+ /// Returns true if this channel is fully shut down. True here implies that no further actions
+ /// may/will be taken on this channel, and thus this object should be freed. Any future changes
+ /// will be handled appropriately by the chain monitor.
+ pub fn is_shutdown(&self) -> bool {
+ matches!(self.context.channel_state, ChannelState::ShutdownComplete)
+ }
+
+ pub fn channel_update_status(&self) -> ChannelUpdateStatus {
+ self.context.channel_update_status
+ }
+
+ pub fn set_channel_update_status(&mut self, status: ChannelUpdateStatus) {
+ self.context.update_time_counter += 1;
+ self.context.channel_update_status = status;
+ }
+
+ fn check_get_channel_ready(&mut self, height: u32) -> Option<msgs::ChannelReady> {
+ // Called:
+ // * always when a new block/transactions are confirmed with the new height
+ // * when funding is signed with a height of 0
+ if self.context.funding_tx_confirmation_height == 0 && self.context.minimum_depth != Some(0) {
return None;
}
- // Note, we evaluate pending htlc "preemptive" trimmed-to-dust threshold at the proposed `feerate_per_kw`.
- let holder_tx_dust_exposure = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat;
- let counterparty_tx_dust_exposure = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat;
- if holder_tx_dust_exposure > self.context.get_max_dust_htlc_exposure_msat() {
- log_debug!(logger, "Cannot afford to send new feerate at {} without infringing max dust htlc exposure", feerate_per_kw);
+ let funding_tx_confirmations = height as i64 - self.context.funding_tx_confirmation_height as i64 + 1;
+ if funding_tx_confirmations <= 0 {
+ self.context.funding_tx_confirmation_height = 0;
+ }
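+		// (The `+ 1` counts the block containing the funding transaction as its first
+		// confirmation: confirmed at height H with the chain tip also at H gives exactly one.)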
+
+ if funding_tx_confirmations < self.context.minimum_depth.unwrap_or(0) as i64 {
return None;
}
- if counterparty_tx_dust_exposure > self.context.get_max_dust_htlc_exposure_msat() {
- log_debug!(logger, "Cannot afford to send new feerate at {} without infringing max dust htlc exposure", feerate_per_kw);
+
+ // If we're still pending the signature on a funding transaction, then we're not ready to send a
+ // channel_ready yet.
+ if self.context.signer_pending_funding {
return None;
}
- if (self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::MonitorUpdateInProgress as u32)) != 0 {
- force_holding_cell = true;
+ // Note that we don't include ChannelState::WaitingForBatch as we don't want to send
+ // channel_ready until the entire batch is ready.
+ let need_commitment_update = if matches!(self.context.channel_state, ChannelState::AwaitingChannelReady(f) if (f & !FundedStateFlags::ALL).is_empty()) {
+ self.context.channel_state.set_our_channel_ready();
+ true
+ } else if matches!(self.context.channel_state, ChannelState::AwaitingChannelReady(f) if f & !FundedStateFlags::ALL == AwaitingChannelReadyFlags::THEIR_CHANNEL_READY) {
+ self.context.channel_state = ChannelState::ChannelReady(self.context.channel_state.with_funded_state_flags_mask().into());
+ self.context.update_time_counter += 1;
+ true
+ } else if matches!(self.context.channel_state, ChannelState::AwaitingChannelReady(f) if f & !FundedStateFlags::ALL == AwaitingChannelReadyFlags::OUR_CHANNEL_READY) {
+ // We got a reorg but not enough to trigger a force close, just ignore.
+ false
+ } else {
+ if self.context.funding_tx_confirmation_height != 0 &&
+ self.context.channel_state < ChannelState::ChannelReady(ChannelReadyFlags::new())
+ {
+ // We should never see a funding transaction on-chain until we've received
+ // funding_signed (if we're an outbound channel), or seen funding_generated (if we're
+ // an inbound channel - before that we have no known funding TXID). The fuzzer,
+ // however, may do this and we shouldn't treat it as a bug.
+ #[cfg(not(fuzzing))]
+ panic!("Started confirming a channel in a state pre-AwaitingChannelReady: {}.\n\
+ Do NOT broadcast a funding transaction manually - let LDK do it for you!",
+ self.context.channel_state.to_u32());
+ }
+ // We got a reorg but not enough to trigger a force close, just ignore.
+ false
+ };
+
+ if need_commitment_update {
+ if !self.context.channel_state.is_monitor_update_in_progress() {
+ if !self.context.channel_state.is_peer_disconnected() {
+ let next_per_commitment_point =
+ self.context.holder_signer.as_ref().get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - 1, &self.context.secp_ctx);
+ return Some(msgs::ChannelReady {
+ channel_id: self.context.channel_id,
+ next_per_commitment_point,
+ short_channel_id_alias: Some(self.context.outbound_scid_alias),
+ });
+ }
+ } else {
+ self.context.monitor_pending_channel_ready = true;
+ }
}
+ None
+ }
- if force_holding_cell {
- self.context.holding_cell_update_fee = Some(feerate_per_kw);
- return None;
+	/// When a transaction is confirmed, we check whether it is or spends the funding transaction.
+	/// In the first case, we store the confirmation height and calculate the short channel id.
+ /// In the second, we simply return an Err indicating we need to be force-closed now.
+ pub fn transactions_confirmed<NS: Deref, L: Deref>(
+ &mut self, block_hash: &BlockHash, height: u32, txdata: &TransactionData,
+ chain_hash: ChainHash, node_signer: &NS, user_config: &UserConfig, logger: &L
+ ) -> Result<(Option<msgs::ChannelReady>, Option<msgs::AnnouncementSignatures>), ClosureReason>
+ where
+ NS::Target: NodeSigner,
+ L::Target: Logger
+ {
+ let mut msgs = (None, None);
+ if let Some(funding_txo) = self.context.get_funding_txo() {
+ for &(index_in_block, tx) in txdata.iter() {
+ // Check if the transaction is the expected funding transaction, and if it is,
+ // check that it pays the right amount to the right script.
+ if self.context.funding_tx_confirmation_height == 0 {
+ if tx.txid() == funding_txo.txid {
+ let txo_idx = funding_txo.index as usize;
+ if txo_idx >= tx.output.len() || tx.output[txo_idx].script_pubkey != self.context.get_funding_redeemscript().to_v0_p2wsh() ||
+ tx.output[txo_idx].value != self.context.channel_value_satoshis {
+ if self.context.is_outbound() {
+ // If we generated the funding transaction and it doesn't match what it
+ // should, the client is really broken and we should just panic and
+ // tell them off. That said, because hash collisions happen with high
+ // probability in fuzzing mode, if we're fuzzing we just close the
+ // channel and move on.
+ #[cfg(not(fuzzing))]
+ panic!("Client called ChannelManager::funding_transaction_generated with bogus transaction!");
+ }
+ self.context.update_time_counter += 1;
+ let err_reason = "funding tx had wrong script/value or output index";
+ return Err(ClosureReason::ProcessingError { err: err_reason.to_owned() });
+ } else {
+ if self.context.is_outbound() {
+ if !tx.is_coin_base() {
+ for input in tx.input.iter() {
+ if input.witness.is_empty() {
+ // We generated a malleable funding transaction, implying we've
+ // just exposed ourselves to funds loss to our counterparty.
+ #[cfg(not(fuzzing))]
+ panic!("Client called ChannelManager::funding_transaction_generated with bogus transaction!");
+ }
+ }
+ }
+ }
+ self.context.funding_tx_confirmation_height = height;
+ self.context.funding_tx_confirmed_in = Some(*block_hash);
+ self.context.short_channel_id = match scid_from_parts(height as u64, index_in_block as u64, txo_idx as u64) {
+ Ok(scid) => Some(scid),
+ Err(_) => panic!("Block was bogus - either height was > 16 million, had > 16 million transactions, or had > 65k outputs"),
+ }
+ }
+					// If this is a coinbase transaction and not a 0-conf channel,
+					// we should update our min_depth to 100 to handle coinbase maturity.
+ if tx.is_coin_base() &&
+ self.context.minimum_depth.unwrap_or(0) > 0 &&
+ self.context.minimum_depth.unwrap_or(0) < COINBASE_MATURITY {
+ self.context.minimum_depth = Some(COINBASE_MATURITY);
+ }
+ }
+ // If we allow 1-conf funding, we may need to check for channel_ready here and
+ // send it immediately instead of waiting for a best_block_updated call (which
+ // may have already happened for this block).
+ if let Some(channel_ready) = self.check_get_channel_ready(height) {
+ log_info!(logger, "Sending a channel_ready to our peer for channel {}", &self.context.channel_id);
+ let announcement_sigs = self.get_announcement_sigs(node_signer, chain_hash, user_config, height, logger);
+ msgs = (Some(channel_ready), announcement_sigs);
+ }
+ }
+ for inp in tx.input.iter() {
+ if inp.previous_output == funding_txo.into_bitcoin_outpoint() {
+ log_info!(logger, "Detected channel-closing tx {} spending {}:{}, closing channel {}", tx.txid(), inp.previous_output.txid, inp.previous_output.vout, &self.context.channel_id());
+ return Err(ClosureReason::CommitmentTxConfirmed);
+ }
+ }
+ }
}
+ Ok(msgs)
+ }
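+
+	// As background for the `scid_from_parts` call above: BOLT 7 short channel ids pack
+	// the confirmation height, index in block, and funding output index into 24, 24 and
+	// 16 bits respectively, hence the panic on out-of-range values. With illustrative
+	// values only:
+	//
+	//   let scid = scid_from_parts(800_000, 1_024, 1).unwrap();
+	//   assert_eq!(scid, (800_000u64 << 40) | (1_024u64 << 16) | 1);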
- debug_assert!(self.context.pending_update_fee.is_none());
- self.context.pending_update_fee = Some((feerate_per_kw, FeeUpdateState::Outbound));
+ /// When a new block is connected, we check the height of the block against outbound holding
+ /// cell HTLCs in case we need to give up on them prematurely and time them out. Everything
+ /// else (e.g. commitment transaction broadcasts, HTLC transaction broadcasting, etc) is
+ /// handled by the ChannelMonitor.
+ ///
+ /// If we return Err, the channel may have been closed, at which point the standard
+ /// requirements apply - no calls may be made except those explicitly stated to be allowed
+ /// post-shutdown.
+ ///
+ /// May return some HTLCs (and their payment_hash) which have timed out and should be failed
+ /// back.
+ pub fn best_block_updated<NS: Deref, L: Deref>(
+ &mut self, height: u32, highest_header_time: u32, chain_hash: ChainHash,
+ node_signer: &NS, user_config: &UserConfig, logger: &L
+ ) -> Result<(Option<msgs::ChannelReady>, Vec<(HTLCSource, PaymentHash)>, Option<msgs::AnnouncementSignatures>), ClosureReason>
+ where
+ NS::Target: NodeSigner,
+ L::Target: Logger
+ {
+ self.do_best_block_updated(height, highest_header_time, Some((chain_hash, node_signer, user_config)), logger)
+ }
+
+ fn do_best_block_updated<NS: Deref, L: Deref>(
+ &mut self, height: u32, highest_header_time: u32,
+ chain_node_signer: Option<(ChainHash, &NS, &UserConfig)>, logger: &L
+ ) -> Result<(Option<msgs::ChannelReady>, Vec<(HTLCSource, PaymentHash)>, Option<msgs::AnnouncementSignatures>), ClosureReason>
+ where
+ NS::Target: NodeSigner,
+ L::Target: Logger
+ {
+ let mut timed_out_htlcs = Vec::new();
+ // This mirrors the check in ChannelManager::decode_update_add_htlc_onion, refusing to
+ // forward an HTLC when our counterparty should almost certainly just fail it for expiring
+ // ~now.
+ let unforwarded_htlc_cltv_limit = height + LATENCY_GRACE_PERIOD_BLOCKS;
+ self.context.holding_cell_htlc_updates.retain(|htlc_update| {
+ match htlc_update {
+ &HTLCUpdateAwaitingACK::AddHTLC { ref payment_hash, ref source, ref cltv_expiry, .. } => {
+ if *cltv_expiry <= unforwarded_htlc_cltv_limit {
+ timed_out_htlcs.push((source.clone(), payment_hash.clone()));
+ false
+ } else { true }
+ },
+ _ => true
+ }
+ });
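+		// (With LATENCY_GRACE_PERIOD_BLOCKS at its current value of 3, a best block height
+		// of 100 gives unforwarded_htlc_cltv_limit = 103: any held HTLC with cltv_expiry at
+		// or below 103 is failed back here, as it can no longer safely be forwarded.)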
- Some(msgs::UpdateFee {
- channel_id: self.context.channel_id,
- feerate_per_kw,
- })
- }
+ self.context.update_time_counter = cmp::max(self.context.update_time_counter, highest_header_time);
- /// Removes any uncommitted inbound HTLCs and resets the state of uncommitted outbound HTLC
- /// updates, to be used on peer disconnection. After this, update_*_htlc messages need to be
- /// resent.
- /// No further message handling calls may be made until a channel_reestablish dance has
- /// completed.
- pub fn remove_uncommitted_htlcs_and_mark_paused<L: Deref>(&mut self, logger: &L) where L::Target: Logger {
- assert_eq!(self.context.channel_state & ChannelState::ShutdownComplete as u32, 0);
- if self.context.channel_state < ChannelState::FundingSent as u32 {
- self.context.channel_state = ChannelState::ShutdownComplete as u32;
- return;
+ if let Some(channel_ready) = self.check_get_channel_ready(height) {
+ let announcement_sigs = if let Some((chain_hash, node_signer, user_config)) = chain_node_signer {
+ self.get_announcement_sigs(node_signer, chain_hash, user_config, height, logger)
+ } else { None };
+ log_info!(logger, "Sending a channel_ready to our peer for channel {}", &self.context.channel_id);
+ return Ok((Some(channel_ready), timed_out_htlcs, announcement_sigs));
}
- if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == (ChannelState::PeerDisconnected as u32) {
- // While the below code should be idempotent, it's simpler to just return early, as
- // redundant disconnect events can fire, though they should be rare.
- return;
- }
+ if matches!(self.context.channel_state, ChannelState::ChannelReady(_)) ||
+ self.context.channel_state.is_our_channel_ready() {
+ let mut funding_tx_confirmations = height as i64 - self.context.funding_tx_confirmation_height as i64 + 1;
+ if self.context.funding_tx_confirmation_height == 0 {
+ // Note that check_get_channel_ready may reset funding_tx_confirmation_height to
+ // zero if it has been reorged out, however in either case, our state flags
+ // indicate we've already sent a channel_ready
+ funding_tx_confirmations = 0;
+ }
- if self.context.announcement_sigs_state == AnnouncementSigsState::MessageSent || self.context.announcement_sigs_state == AnnouncementSigsState::Committed {
- self.context.announcement_sigs_state = AnnouncementSigsState::NotSent;
+ // If we've sent channel_ready (or have both sent and received channel_ready), and
+ // the funding transaction has become unconfirmed,
+ // close the channel and hope we can get the latest state on chain (because presumably
+ // the funding transaction is at least still in the mempool of most nodes).
+ //
+ // Note that ideally we wouldn't force-close if we see *any* reorg on a 1-conf or
+ // 0-conf channel, but not doing so may lead to the
+ // `ChannelManager::short_to_chan_info` map being inconsistent, so we currently have
+ // to.
+ if funding_tx_confirmations == 0 && self.context.funding_tx_confirmed_in.is_some() {
+ let err_reason = format!("Funding transaction was un-confirmed. Locked at {} confs, now have {} confs.",
+ self.context.minimum_depth.unwrap(), funding_tx_confirmations);
+ return Err(ClosureReason::ProcessingError { err: err_reason });
+ }
+ } else if !self.context.is_outbound() && self.context.funding_tx_confirmed_in.is_none() &&
+ height >= self.context.channel_creation_height + FUNDING_CONF_DEADLINE_BLOCKS {
+ log_info!(logger, "Closing channel {} due to funding timeout", &self.context.channel_id);
+ // If funding_tx_confirmed_in is unset, the channel must not be active
+ assert!(self.context.channel_state <= ChannelState::ChannelReady(ChannelReadyFlags::new()));
+ assert!(!self.context.channel_state.is_our_channel_ready());
+ return Err(ClosureReason::FundingTimedOut);
}
- // Upon reconnect we have to start the closing_signed dance over, but shutdown messages
- // will be retransmitted.
- self.context.last_sent_closing_fee = None;
- self.context.pending_counterparty_closing_signed = None;
- self.context.closing_fee_limits = None;
+ let announcement_sigs = if let Some((chain_hash, node_signer, user_config)) = chain_node_signer {
+ self.get_announcement_sigs(node_signer, chain_hash, user_config, height, logger)
+ } else { None };
+ Ok((None, timed_out_htlcs, announcement_sigs))
+ }
- let mut inbound_drop_count = 0;
- self.context.pending_inbound_htlcs.retain(|htlc| {
- match htlc.state {
- InboundHTLCState::RemoteAnnounced(_) => {
- // They sent us an update_add_htlc but we never got the commitment_signed.
- // We'll tell them what commitment_signed we're expecting next and they'll drop
- // this HTLC accordingly
- inbound_drop_count += 1;
- false
- },
- InboundHTLCState::AwaitingRemoteRevokeToAnnounce(_)|InboundHTLCState::AwaitingAnnouncedRemoteRevoke(_) => {
- // We received a commitment_signed updating this HTLC and (at least hopefully)
- // sent a revoke_and_ack (which we can re-transmit) and have heard nothing
- // in response to it yet, so don't touch it.
- true
- },
- InboundHTLCState::Committed => true,
- InboundHTLCState::LocalRemoved(_) => {
- // We (hopefully) sent a commitment_signed updating this HTLC (which we can
- // re-transmit if needed) and they may have even sent a revoke_and_ack back
- // (that we missed). Keep this around for now and if they tell us they missed
- // the commitment_signed we can re-transmit the update then.
- true
+ /// Indicates the funding transaction is no longer confirmed in the main chain. This may
+ /// force-close the channel, but may also indicate a harmless reorganization of a block or two
+ /// before the channel has reached channel_ready and we can just wait for more blocks.
+ pub fn funding_transaction_unconfirmed<L: Deref>(&mut self, logger: &L) -> Result<(), ClosureReason> where L::Target: Logger {
+ if self.context.funding_tx_confirmation_height != 0 {
+ // We handle the funding disconnection by calling best_block_updated with a height one
+ // below where our funding was connected, implying a reorg back to conf_height - 1.
+ let reorg_height = self.context.funding_tx_confirmation_height - 1;
+			// We use the time field to bump the current time we set on channel updates if it's
+ // larger. If we don't know that time has moved forward, we can just set it to the last
+ // time we saw and it will be ignored.
+ let best_time = self.context.update_time_counter;
+			match self.do_best_block_updated(reorg_height, best_time, None::<(ChainHash, &&dyn NodeSigner, &UserConfig)>, logger) {
+ Ok((channel_ready, timed_out_htlcs, announcement_sigs)) => {
+ assert!(channel_ready.is_none(), "We can't generate a funding with 0 confirmations?");
+ assert!(timed_out_htlcs.is_empty(), "We can't have accepted HTLCs with a timeout before our funding confirmation?");
+ assert!(announcement_sigs.is_none(), "We can't generate an announcement_sigs with 0 confirmations?");
+ Ok(())
},
+ Err(e) => Err(e)
}
- });
- self.context.next_counterparty_htlc_id -= inbound_drop_count;
-
- if let Some((_, update_state)) = self.context.pending_update_fee {
- if update_state == FeeUpdateState::RemoteAnnounced {
- debug_assert!(!self.context.is_outbound());
- self.context.pending_update_fee = None;
- }
+ } else {
+ // We never learned about the funding confirmation anyway, just ignore
+ Ok(())
}
+ }
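+
+	// Note the trick above: with `reorg_height = funding_tx_confirmation_height - 1`,
+	// `do_best_block_updated` computes `reorg_height - funding_tx_confirmation_height + 1`,
+	// i.e. zero confirmations, which is exactly its "funding became unconfirmed" path.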
- for htlc in self.context.pending_outbound_htlcs.iter_mut() {
- if let OutboundHTLCState::RemoteRemoved(_) = htlc.state {
- // They sent us an update to remove this but haven't yet sent the corresponding
- // commitment_signed, we need to move it back to Committed and they can re-send
- // the update upon reconnection.
- htlc.state = OutboundHTLCState::Committed;
- }
+ // Methods to get unprompted messages to send to the remote end (or where we already returned
+ // something in the handler for the message that prompted this message):
+
+ /// Gets an UnsignedChannelAnnouncement for this channel. The channel must be publicly
+ /// announceable and available for use (have exchanged [`ChannelReady`] messages in both
+ /// directions). Should be used for both broadcasted announcements and in response to an
+ /// AnnouncementSignatures message from the remote peer.
+ ///
+ /// Will only fail if we're not in a state where channel_announcement may be sent (including
+ /// closing).
+ ///
+ /// This will only return ChannelError::Ignore upon failure.
+ ///
+ /// [`ChannelReady`]: crate::ln::msgs::ChannelReady
+ fn get_channel_announcement<NS: Deref>(
+ &self, node_signer: &NS, chain_hash: ChainHash, user_config: &UserConfig,
+ ) -> Result<msgs::UnsignedChannelAnnouncement, ChannelError> where NS::Target: NodeSigner {
+ if !self.context.config.announced_channel {
+ return Err(ChannelError::Ignore("Channel is not available for public announcements".to_owned()));
+ }
+ if !self.context.is_usable() {
+ return Err(ChannelError::Ignore("Cannot get a ChannelAnnouncement if the channel is not currently usable".to_owned()));
}
- self.context.sent_message_awaiting_response = None;
+ let short_channel_id = self.context.get_short_channel_id()
+ .ok_or(ChannelError::Ignore("Cannot get a ChannelAnnouncement if the channel has not been confirmed yet".to_owned()))?;
+ let node_id = NodeId::from_pubkey(&node_signer.get_node_id(Recipient::Node)
+ .map_err(|_| ChannelError::Ignore("Failed to retrieve own public key".to_owned()))?);
+ let counterparty_node_id = NodeId::from_pubkey(&self.context.get_counterparty_node_id());
+ let were_node_one = node_id.as_slice() < counterparty_node_id.as_slice();
- self.context.channel_state |= ChannelState::PeerDisconnected as u32;
- log_trace!(logger, "Peer disconnection resulted in {} remote-announced HTLC drops on channel {}", inbound_drop_count, log_bytes!(self.context.channel_id()));
- }
+ let msg = msgs::UnsignedChannelAnnouncement {
+ features: channelmanager::provided_channel_features(&user_config),
+ chain_hash,
+ short_channel_id,
+ node_id_1: if were_node_one { node_id } else { counterparty_node_id },
+ node_id_2: if were_node_one { counterparty_node_id } else { node_id },
+ bitcoin_key_1: NodeId::from_pubkey(if were_node_one { &self.context.get_holder_pubkeys().funding_pubkey } else { self.context.counterparty_funding_pubkey() }),
+ bitcoin_key_2: NodeId::from_pubkey(if were_node_one { self.context.counterparty_funding_pubkey() } else { &self.context.get_holder_pubkeys().funding_pubkey }),
+ excess_data: Vec::new(),
+ };
- /// Indicates that a ChannelMonitor update is in progress and has not yet been fully persisted.
- /// This must be called before we return the [`ChannelMonitorUpdate`] back to the
- /// [`ChannelManager`], which will call [`Self::monitor_updating_restored`] once the monitor
- /// update completes (potentially immediately).
- /// The messages which were generated with the monitor update must *not* have been sent to the
- /// remote end, and must instead have been dropped. They will be regenerated when
- /// [`Self::monitor_updating_restored`] is called.
- ///
- /// [`ChannelManager`]: super::channelmanager::ChannelManager
- /// [`chain::Watch`]: crate::chain::Watch
- /// [`ChannelMonitorUpdateStatus::InProgress`]: crate::chain::ChannelMonitorUpdateStatus::InProgress
- fn monitor_updating_paused(&mut self, resend_raa: bool, resend_commitment: bool,
- resend_channel_ready: bool, mut pending_forwards: Vec<(PendingHTLCInfo, u64)>,
- mut pending_fails: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>,
- mut pending_finalized_claimed_htlcs: Vec<HTLCSource>
- ) {
- self.context.monitor_pending_revoke_and_ack |= resend_raa;
- self.context.monitor_pending_commitment_signed |= resend_commitment;
- self.context.monitor_pending_channel_ready |= resend_channel_ready;
- self.context.monitor_pending_forwards.append(&mut pending_forwards);
- self.context.monitor_pending_failures.append(&mut pending_fails);
- self.context.monitor_pending_finalized_fulfills.append(&mut pending_finalized_claimed_htlcs);
- self.context.channel_state |= ChannelState::MonitorUpdateInProgress as u32;
+ Ok(msg)
}
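+
+	// Per BOLT 7, `node_id_1` is the lexicographically-lesser of the two node pubkeys,
+	// which is what the byte-wise `were_node_one` comparison above implements. With
+	// hypothetical keys, a key beginning 02aa.. sorts before one beginning 03bb.., so
+	// that node supplies node_id_1 and bitcoin_key_1 regardless of who funded the channel.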
- /// Indicates that the latest ChannelMonitor update has been committed by the client
- /// successfully and we should restore normal operation. Returns messages which should be sent
- /// to the remote side.
- pub fn monitor_updating_restored<L: Deref, NS: Deref>(
- &mut self, logger: &L, node_signer: &NS, genesis_block_hash: BlockHash,
- user_config: &UserConfig, best_block_height: u32
- ) -> MonitorRestoreUpdates
+ fn get_announcement_sigs<NS: Deref, L: Deref>(
+ &mut self, node_signer: &NS, chain_hash: ChainHash, user_config: &UserConfig,
+ best_block_height: u32, logger: &L
+ ) -> Option<msgs::AnnouncementSignatures>
where
- L::Target: Logger,
- NS::Target: NodeSigner
+ NS::Target: NodeSigner,
+ L::Target: Logger
{
- assert_eq!(self.context.channel_state & ChannelState::MonitorUpdateInProgress as u32, ChannelState::MonitorUpdateInProgress as u32);
- self.context.channel_state &= !(ChannelState::MonitorUpdateInProgress as u32);
- let mut found_blocked = false;
- self.context.pending_monitor_updates.retain(|upd| {
- if found_blocked { debug_assert!(upd.blocked, "No mons may be unblocked after a blocked one"); }
- if upd.blocked { found_blocked = true; }
- upd.blocked
- });
+ if self.context.funding_tx_confirmation_height == 0 || self.context.funding_tx_confirmation_height + 5 > best_block_height {
+ return None;
+ }
- // If we're past (or at) the FundingSent stage on an outbound channel, try to
- // (re-)broadcast the funding transaction as we may have declined to broadcast it when we
- // first received the funding_signed.
- let mut funding_broadcastable =
- if self.context.is_outbound() && self.context.channel_state & !MULTI_STATE_FLAGS >= ChannelState::FundingSent as u32 {
- self.context.funding_transaction.take()
- } else { None };
- // That said, if the funding transaction is already confirmed (ie we're active with a
- // minimum_depth over 0) don't bother re-broadcasting the confirmed funding tx.
- if self.context.channel_state & !MULTI_STATE_FLAGS >= ChannelState::ChannelReady as u32 && self.context.minimum_depth != Some(0) {
- funding_broadcastable = None;
+ if !self.context.is_usable() {
+ return None;
}
- // We will never broadcast the funding transaction when we're in MonitorUpdateInProgress
- // (and we assume the user never directly broadcasts the funding transaction and waits for
- // us to do it). Thus, we can only ever hit monitor_pending_channel_ready when we're
- // * an inbound channel that failed to persist the monitor on funding_created and we got
- // the funding transaction confirmed before the monitor was persisted, or
- // * a 0-conf channel and intended to send the channel_ready before any broadcast at all.
- let channel_ready = if self.context.monitor_pending_channel_ready {
- assert!(!self.context.is_outbound() || self.context.minimum_depth == Some(0),
- "Funding transaction broadcast by the local client before it should have - LDK didn't do it!");
- self.context.monitor_pending_channel_ready = false;
- let next_per_commitment_point = self.context.holder_signer.get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
- Some(msgs::ChannelReady {
- channel_id: self.context.channel_id(),
- next_per_commitment_point,
- short_channel_id_alias: Some(self.context.outbound_scid_alias),
- })
- } else { None };
+ if self.context.channel_state.is_peer_disconnected() {
+ log_trace!(logger, "Cannot create an announcement_signatures as our peer is disconnected");
+ return None;
+ }
+
+ if self.context.announcement_sigs_state != AnnouncementSigsState::NotSent {
+ return None;
+ }
- let announcement_sigs = self.get_announcement_sigs(node_signer, genesis_block_hash, user_config, best_block_height, logger);
+ log_trace!(logger, "Creating an announcement_signatures message for channel {}", &self.context.channel_id());
+ let announcement = match self.get_channel_announcement(node_signer, chain_hash, user_config) {
+ Ok(a) => a,
+ Err(e) => {
+ log_trace!(logger, "{:?}", e);
+ return None;
+ }
+ };
+ let our_node_sig = match node_signer.sign_gossip_message(msgs::UnsignedGossipMessage::ChannelAnnouncement(&announcement)) {
+ Err(_) => {
+ log_error!(logger, "Failed to generate node signature for channel_announcement. Channel will not be announced!");
+ return None;
+ },
+ Ok(v) => v
+ };
+ match &self.context.holder_signer {
+ ChannelSignerType::Ecdsa(ecdsa) => {
+ let our_bitcoin_sig = match ecdsa.sign_channel_announcement_with_funding_key(&announcement, &self.context.secp_ctx) {
+ Err(_) => {
+ log_error!(logger, "Signer rejected channel_announcement signing. Channel will not be announced!");
+ return None;
+ },
+ Ok(v) => v
+ };
+ let short_channel_id = match self.context.get_short_channel_id() {
+ Some(scid) => scid,
+ None => return None,
+ };
- let mut accepted_htlcs = Vec::new();
- mem::swap(&mut accepted_htlcs, &mut self.context.monitor_pending_forwards);
- let mut failed_htlcs = Vec::new();
- mem::swap(&mut failed_htlcs, &mut self.context.monitor_pending_failures);
- let mut finalized_claimed_htlcs = Vec::new();
- mem::swap(&mut finalized_claimed_htlcs, &mut self.context.monitor_pending_finalized_fulfills);
+ self.context.announcement_sigs_state = AnnouncementSigsState::MessageSent;
- if self.context.channel_state & (ChannelState::PeerDisconnected as u32) != 0 {
- self.context.monitor_pending_revoke_and_ack = false;
- self.context.monitor_pending_commitment_signed = false;
- return MonitorRestoreUpdates {
- raa: None, commitment_update: None, order: RAACommitmentOrder::RevokeAndACKFirst,
- accepted_htlcs, failed_htlcs, finalized_claimed_htlcs, funding_broadcastable, channel_ready, announcement_sigs
- };
+ Some(msgs::AnnouncementSignatures {
+ channel_id: self.context.channel_id(),
+ short_channel_id,
+ node_signature: our_node_sig,
+ bitcoin_signature: our_bitcoin_sig,
+ })
+ },
+ // TODO (taproot|arik)
+ #[cfg(taproot)]
+ _ => todo!()
}
+ }
- let raa = if self.context.monitor_pending_revoke_and_ack {
- Some(self.get_last_revoke_and_ack())
- } else { None };
- let commitment_update = if self.context.monitor_pending_commitment_signed {
- self.mark_awaiting_response();
- Some(self.get_last_commitment_update(logger))
- } else { None };
+ /// Signs the given channel announcement, returning a ChannelError::Ignore if no keys are
+ /// available.
+ fn sign_channel_announcement<NS: Deref>(
+ &self, node_signer: &NS, announcement: msgs::UnsignedChannelAnnouncement
+ ) -> Result<msgs::ChannelAnnouncement, ChannelError> where NS::Target: NodeSigner {
+ if let Some((their_node_sig, their_bitcoin_sig)) = self.context.announcement_sigs {
+ let our_node_key = NodeId::from_pubkey(&node_signer.get_node_id(Recipient::Node)
+ .map_err(|_| ChannelError::Ignore("Signer failed to retrieve own public key".to_owned()))?);
+ let were_node_one = announcement.node_id_1 == our_node_key;
- self.context.monitor_pending_revoke_and_ack = false;
- self.context.monitor_pending_commitment_signed = false;
- let order = self.context.resend_order.clone();
- log_debug!(logger, "Restored monitor updating in channel {} resulting in {}{} commitment update and {} RAA, with {} first",
- log_bytes!(self.context.channel_id()), if funding_broadcastable.is_some() { "a funding broadcastable, " } else { "" },
- if commitment_update.is_some() { "a" } else { "no" }, if raa.is_some() { "an" } else { "no" },
- match order { RAACommitmentOrder::CommitmentFirst => "commitment", RAACommitmentOrder::RevokeAndACKFirst => "RAA"});
- MonitorRestoreUpdates {
- raa, commitment_update, order, accepted_htlcs, failed_htlcs, finalized_claimed_htlcs, funding_broadcastable, channel_ready, announcement_sigs
+ let our_node_sig = node_signer.sign_gossip_message(msgs::UnsignedGossipMessage::ChannelAnnouncement(&announcement))
+ .map_err(|_| ChannelError::Ignore("Failed to generate node signature for channel_announcement".to_owned()))?;
+ match &self.context.holder_signer {
+ ChannelSignerType::Ecdsa(ecdsa) => {
+ let our_bitcoin_sig = ecdsa.sign_channel_announcement_with_funding_key(&announcement, &self.context.secp_ctx)
+ .map_err(|_| ChannelError::Ignore("Signer rejected channel_announcement".to_owned()))?;
+ Ok(msgs::ChannelAnnouncement {
+ node_signature_1: if were_node_one { our_node_sig } else { their_node_sig },
+ node_signature_2: if were_node_one { their_node_sig } else { our_node_sig },
+ bitcoin_signature_1: if were_node_one { our_bitcoin_sig } else { their_bitcoin_sig },
+ bitcoin_signature_2: if were_node_one { their_bitcoin_sig } else { our_bitcoin_sig },
+ contents: announcement,
+ })
+ },
+ // TODO (taproot|arik)
+ #[cfg(taproot)]
+ _ => todo!()
+ }
+ } else {
+ Err(ChannelError::Ignore("Attempted to sign channel announcement before we'd received announcement_signatures".to_string()))
}
}
- pub fn update_fee<F: Deref, L: Deref>(&mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, msg: &msgs::UpdateFee, logger: &L) -> Result<(), ChannelError>
- where F::Target: FeeEstimator, L::Target: Logger
- {
- if self.context.is_outbound() {
- return Err(ChannelError::Close("Non-funding remote tried to update channel fee".to_owned()));
+ /// Processes an incoming announcement_signatures message, providing a fully-signed
+ /// channel_announcement message which we can broadcast and storing our counterparty's
+ /// signatures for later reconstruction/rebroadcast of the channel_announcement.
+ pub fn announcement_signatures<NS: Deref>(
+ &mut self, node_signer: &NS, chain_hash: ChainHash, best_block_height: u32,
+ msg: &msgs::AnnouncementSignatures, user_config: &UserConfig
+ ) -> Result<msgs::ChannelAnnouncement, ChannelError> where NS::Target: NodeSigner {
+ let announcement = self.get_channel_announcement(node_signer, chain_hash, user_config)?;
+
+ let msghash = hash_to_message!(&Sha256d::hash(&announcement.encode()[..])[..]);
+
+ if self.context.secp_ctx.verify_ecdsa(&msghash, &msg.node_signature, &self.context.get_counterparty_node_id()).is_err() {
+ return Err(ChannelError::Close(format!(
+ "Bad announcement_signatures. Failed to verify node_signature. UnsignedChannelAnnouncement used for verification is {:?}. their_node_key is {:?}",
+ &announcement, self.context.get_counterparty_node_id())));
}
- if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
- return Err(ChannelError::Close("Peer sent update_fee when we needed a channel_reestablish".to_owned()));
+ if self.context.secp_ctx.verify_ecdsa(&msghash, &msg.bitcoin_signature, self.context.counterparty_funding_pubkey()).is_err() {
+ return Err(ChannelError::Close(format!(
+ "Bad announcement_signatures. Failed to verify bitcoin_signature. UnsignedChannelAnnouncement used for verification is {:?}. their_bitcoin_key is ({:?})",
+ &announcement, self.context.counterparty_funding_pubkey())));
}
- Channel::<Signer>::check_remote_fee(fee_estimator, msg.feerate_per_kw, Some(self.context.feerate_per_kw), logger)?;
- let feerate_over_dust_buffer = msg.feerate_per_kw > self.context.get_dust_buffer_feerate(None);
- self.context.pending_update_fee = Some((msg.feerate_per_kw, FeeUpdateState::RemoteAnnounced));
- self.context.update_time_counter += 1;
- // If the feerate has increased over the previous dust buffer (note that
- // `get_dust_buffer_feerate` considers the `pending_update_fee` status), check that we
- // won't be pushed over our dust exposure limit by the feerate increase.
- if feerate_over_dust_buffer {
- let inbound_stats = self.get_inbound_pending_htlc_stats(None);
- let outbound_stats = self.get_outbound_pending_htlc_stats(None);
- let holder_tx_dust_exposure = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat;
- let counterparty_tx_dust_exposure = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat;
- if holder_tx_dust_exposure > self.context.get_max_dust_htlc_exposure_msat() {
- return Err(ChannelError::Close(format!("Peer sent update_fee with a feerate ({}) which may over-expose us to dust-in-flight on our own transactions (totaling {} msat)",
- msg.feerate_per_kw, holder_tx_dust_exposure)));
- }
- if counterparty_tx_dust_exposure > self.context.get_max_dust_htlc_exposure_msat() {
- return Err(ChannelError::Close(format!("Peer sent update_fee with a feerate ({}) which may over-expose us to dust-in-flight on our counterparty's transactions (totaling {} msat)",
- msg.feerate_per_kw, counterparty_tx_dust_exposure)));
- }
+ self.context.announcement_sigs = Some((msg.node_signature, msg.bitcoin_signature));
+ if self.context.funding_tx_confirmation_height == 0 || self.context.funding_tx_confirmation_height + 5 > best_block_height {
+ return Err(ChannelError::Ignore(
+ "Got announcement_signatures prior to the required six confirmations - we may not have received a block yet that our peer has".to_owned()));
}
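+		// We deliberately stored the counterparty's signatures above *before* erroring: once
+		// the channel does reach six confirmations, `get_signed_channel_announcement` can
+		// build the announcement without a fresh announcement_signatures exchange.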
- Ok(())
- }
- fn get_last_revoke_and_ack(&self) -> msgs::RevokeAndACK {
- let next_per_commitment_point = self.context.holder_signer.get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
- let per_commitment_secret = self.context.holder_signer.release_commitment_secret(self.context.cur_holder_commitment_transaction_number + 2);
- msgs::RevokeAndACK {
- channel_id: self.context.channel_id,
- per_commitment_secret,
- next_per_commitment_point,
- #[cfg(taproot)]
- next_local_nonce: None,
- }
+ self.sign_channel_announcement(node_signer, announcement)
}
- fn get_last_commitment_update<L: Deref>(&self, logger: &L) -> msgs::CommitmentUpdate where L::Target: Logger {
- let mut update_add_htlcs = Vec::new();
- let mut update_fulfill_htlcs = Vec::new();
- let mut update_fail_htlcs = Vec::new();
- let mut update_fail_malformed_htlcs = Vec::new();
-
- for htlc in self.context.pending_outbound_htlcs.iter() {
- if let &OutboundHTLCState::LocalAnnounced(ref onion_packet) = &htlc.state {
- update_add_htlcs.push(msgs::UpdateAddHTLC {
- channel_id: self.context.channel_id(),
- htlc_id: htlc.htlc_id,
- amount_msat: htlc.amount_msat,
- payment_hash: htlc.payment_hash,
- cltv_expiry: htlc.cltv_expiry,
- onion_routing_packet: (**onion_packet).clone(),
- });
- }
+ /// Gets a signed channel_announcement for this channel, if we previously received an
+ /// announcement_signatures from our counterparty.
+ pub fn get_signed_channel_announcement<NS: Deref>(
+ &self, node_signer: &NS, chain_hash: ChainHash, best_block_height: u32, user_config: &UserConfig
+ ) -> Option<msgs::ChannelAnnouncement> where NS::Target: NodeSigner {
+ if self.context.funding_tx_confirmation_height == 0 || self.context.funding_tx_confirmation_height + 5 > best_block_height {
+ return None;
}
-
- for htlc in self.context.pending_inbound_htlcs.iter() {
- if let &InboundHTLCState::LocalRemoved(ref reason) = &htlc.state {
- match reason {
- &InboundHTLCRemovalReason::FailRelay(ref err_packet) => {
- update_fail_htlcs.push(msgs::UpdateFailHTLC {
- channel_id: self.context.channel_id(),
- htlc_id: htlc.htlc_id,
- reason: err_packet.clone()
- });
- },
- &InboundHTLCRemovalReason::FailMalformed((ref sha256_of_onion, ref failure_code)) => {
- update_fail_malformed_htlcs.push(msgs::UpdateFailMalformedHTLC {
- channel_id: self.context.channel_id(),
- htlc_id: htlc.htlc_id,
- sha256_of_onion: sha256_of_onion.clone(),
- failure_code: failure_code.clone(),
- });
- },
- &InboundHTLCRemovalReason::Fulfill(ref payment_preimage) => {
- update_fulfill_htlcs.push(msgs::UpdateFulfillHTLC {
- channel_id: self.context.channel_id(),
- htlc_id: htlc.htlc_id,
- payment_preimage: payment_preimage.clone(),
- });
- },
- }
- }
+ let announcement = match self.get_channel_announcement(node_signer, chain_hash, user_config) {
+ Ok(res) => res,
+ Err(_) => return None,
+ };
+ match self.sign_channel_announcement(node_signer, announcement) {
+ Ok(res) => Some(res),
+ Err(_) => None,
}
+ }
- let update_fee = if self.context.is_outbound() && self.context.pending_update_fee.is_some() {
- Some(msgs::UpdateFee {
- channel_id: self.context.channel_id(),
- feerate_per_kw: self.context.pending_update_fee.unwrap().0,
- })
- } else { None };
+	/// May panic if called on a channel that wasn't immediately-previously
+	/// `self.remove_uncommitted_htlcs_and_mark_paused()`'d.
+ pub fn get_channel_reestablish<L: Deref>(&mut self, logger: &L) -> msgs::ChannelReestablish where L::Target: Logger {
+ assert!(self.context.channel_state.is_peer_disconnected());
+ assert_ne!(self.context.cur_counterparty_commitment_transaction_number, INITIAL_COMMITMENT_NUMBER);
+ // Prior to static_remotekey, my_current_per_commitment_point was critical to claiming
+ // current to_remote balances. However, it no longer has any use, and thus is now simply
+ // set to a dummy (but valid, as required by the spec) public key.
+ // fuzzing mode marks a subset of pubkeys as invalid so that we can hit "invalid pubkey"
+		// branches, but we unwrap it below, so we arbitrarily select a dummy pubkey which is
+		// both valid and also passes fuzzing mode's arbitrary validity criteria:
+ let mut pk = [2; 33]; pk[1] = 0xff;
+ let dummy_pubkey = PublicKey::from_slice(&pk).unwrap();
+ let remote_last_secret = if self.context.cur_counterparty_commitment_transaction_number + 1 < INITIAL_COMMITMENT_NUMBER {
+ let remote_last_secret = self.context.commitment_secrets.get_secret(self.context.cur_counterparty_commitment_transaction_number + 2).unwrap();
+ log_trace!(logger, "Enough info to generate a Data Loss Protect with per_commitment_secret {} for channel {}", log_bytes!(remote_last_secret), &self.context.channel_id());
+ remote_last_secret
+ } else {
+ log_info!(logger, "Sending a data_loss_protect with no previous remote per_commitment_secret for channel {}", &self.context.channel_id());
+ [0;32]
+ };
+ self.mark_awaiting_response();
+ msgs::ChannelReestablish {
+ channel_id: self.context.channel_id(),
+ // The protocol has two different commitment number concepts - the "commitment
+ // transaction number", which starts from 0 and counts up, and the "revocation key
+ // index" which starts at INITIAL_COMMITMENT_NUMBER and counts down. We track
+ // commitment transaction numbers by the index which will be used to reveal the
+ // revocation key for that commitment transaction, which means we have to convert them
+ // to protocol-level commitment numbers here...
- log_trace!(logger, "Regenerated latest commitment update in channel {} with{} {} update_adds, {} update_fulfills, {} update_fails, and {} update_fail_malformeds",
- log_bytes!(self.context.channel_id()), if update_fee.is_some() { " update_fee," } else { "" },
- update_add_htlcs.len(), update_fulfill_htlcs.len(), update_fail_htlcs.len(), update_fail_malformed_htlcs.len());
- msgs::CommitmentUpdate {
- update_add_htlcs, update_fulfill_htlcs, update_fail_htlcs, update_fail_malformed_htlcs, update_fee,
- commitment_signed: self.send_commitment_no_state_update(logger).expect("It looks like we failed to re-generate a commitment_signed we had previously sent?").0,
+ // next_local_commitment_number is the next commitment_signed number we expect to
+ // receive (indicating if they need to resend one that we missed).
+ next_local_commitment_number: INITIAL_COMMITMENT_NUMBER - self.context.cur_holder_commitment_transaction_number,
+ // We have to set next_remote_commitment_number to the next revoke_and_ack we expect to
+ // receive, however we track it by the next commitment number for a remote transaction
+ // (which is one further, as they always revoke previous commitment transaction, not
+ // the one we send) so we have to decrement by 1. Note that if
+ // cur_counterparty_commitment_transaction_number is INITIAL_COMMITMENT_NUMBER we will have
+ // dropped this channel on disconnect as it hasn't yet reached AwaitingChannelReady so we can't
+ // overflow here.
+ next_remote_commitment_number: INITIAL_COMMITMENT_NUMBER - self.context.cur_counterparty_commitment_transaction_number - 1,
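+ // Illustrative arithmetic (hypothetical state, not from the spec): with
+ // INITIAL_COMMITMENT_NUMBER = (1 << 48) - 1 and both counters at
+ // INITIAL_COMMITMENT_NUMBER - 2, the formulas above give
+ // next_local_commitment_number = 2 and next_remote_commitment_number = 2 - 1 = 1.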
+ your_last_per_commitment_secret: remote_last_secret,
+ my_current_per_commitment_point: dummy_pubkey,
+ // TODO(dual_funding): If we've sent `commitment_signed` for an interactive transaction
+ // construction but have not received `tx_signatures` we MUST set `next_funding_txid` to the
+ // txid of that interactive transaction, else we MUST NOT set it.
+ next_funding_txid: None,
}
}
- /// May panic if some calls other than message-handling calls (which will all Err immediately)
- /// have been called between remove_uncommitted_htlcs_and_mark_paused and this call.
+
+ // Send stuff to our remote peers:
+
+ /// Queues up an outbound HTLC to send by placing it in the holding cell. You should call
+ /// [`Self::maybe_free_holding_cell_htlcs`] in order to actually generate and send the
+ /// commitment update.
///
- /// Some links printed in log lines are included here to check them during build (when run with
- /// `cargo doc --document-private-items`):
- /// [`super::channelmanager::ChannelManager::force_close_without_broadcasting_txn`] and
- /// [`super::channelmanager::ChannelManager::force_close_all_channels_without_broadcasting_txn`].
- pub fn channel_reestablish<L: Deref, NS: Deref>(
- &mut self, msg: &msgs::ChannelReestablish, logger: &L, node_signer: &NS,
- genesis_block_hash: BlockHash, user_config: &UserConfig, best_block: &BestBlock
- ) -> Result<ReestablishResponses, ChannelError>
- where
- L::Target: Logger,
- NS::Target: NodeSigner
+ /// `Err`s will only be [`ChannelError::Ignore`].
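+ ///
+ /// A minimal usage sketch (hypothetical caller; `chan`, `fee_est` and `logger` are
+ /// assumed to be in scope and the values are illustrative):
+ /// ```ignore
+ /// chan.queue_add_htlc(10_000, payment_hash, cltv_expiry, source, onion_packet,
+ ///     None, None, &fee_est, &logger)?;
+ /// // Later, actually generate and send the commitment update:
+ /// let _ = chan.maybe_free_holding_cell_htlcs(&fee_est, &logger);
+ /// ```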
+ pub fn queue_add_htlc<F: Deref, L: Deref>(
+ &mut self, amount_msat: u64, payment_hash: PaymentHash, cltv_expiry: u32, source: HTLCSource,
+ onion_routing_packet: msgs::OnionPacket, skimmed_fee_msat: Option<u64>,
+ blinding_point: Option<PublicKey>, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
+ ) -> Result<(), ChannelError>
+ where F::Target: FeeEstimator, L::Target: Logger
{
- if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == 0 {
- // While BOLT 2 doesn't indicate explicitly we should error this channel here, it
- // almost certainly indicates we are going to end up out-of-sync in some way, so we
- // just close here instead of trying to recover.
- return Err(ChannelError::Close("Peer sent a loose channel_reestablish not after reconnect".to_owned()));
- }
-
- if msg.next_local_commitment_number >= INITIAL_COMMITMENT_NUMBER || msg.next_remote_commitment_number >= INITIAL_COMMITMENT_NUMBER ||
- msg.next_local_commitment_number == 0 {
- return Err(ChannelError::Close("Peer sent a garbage channel_reestablish (usually an lnd node with lost state asking us to force-close for them)".to_owned()));
- }
+ self
+ .send_htlc(amount_msat, payment_hash, cltv_expiry, source, onion_routing_packet, true,
+ skimmed_fee_msat, blinding_point, fee_estimator, logger)
+ .map(|msg_opt| assert!(msg_opt.is_none(), "We forced holding cell?"))
+ .map_err(|err| {
+ if let ChannelError::Ignore(_) = err { /* fine */ }
+ else { debug_assert!(false, "Queueing cannot trigger channel failure"); }
+ err
+ })
+ }
- if msg.next_remote_commitment_number > 0 {
- let expected_point = self.context.holder_signer.get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - msg.next_remote_commitment_number + 1, &self.context.secp_ctx);
- let given_secret = SecretKey::from_slice(&msg.your_last_per_commitment_secret)
- .map_err(|_| ChannelError::Close("Peer sent a garbage channel_reestablish with unparseable secret key".to_owned()))?;
- if expected_point != PublicKey::from_secret_key(&self.context.secp_ctx, &given_secret) {
- return Err(ChannelError::Close("Peer sent a garbage channel_reestablish with secret key not matching the commitment height provided".to_owned()));
- }
- if msg.next_remote_commitment_number > INITIAL_COMMITMENT_NUMBER - self.context.cur_holder_commitment_transaction_number {
- macro_rules! log_and_panic {
- ($err_msg: expr) => {
- log_error!(logger, $err_msg, log_bytes!(self.context.channel_id), log_pubkey!(self.context.counterparty_node_id));
- panic!($err_msg, log_bytes!(self.context.channel_id), log_pubkey!(self.context.counterparty_node_id));
- }
- }
- log_and_panic!("We have fallen behind - we have received proof that if we broadcast our counterparty is going to claim all our funds.\n\
- This implies you have restarted with lost ChannelMonitor and ChannelManager state, the first of which is a violation of the LDK chain::Watch requirements.\n\
- More specifically, this means you have a bug in your implementation that can cause loss of funds, or you are running with an old backup, which is unsafe.\n\
- If you have restored from an old backup and wish to force-close channels and return to operation, you should start up, call\n\
- ChannelManager::force_close_without_broadcasting_txn on channel {} with counterparty {} or\n\
- ChannelManager::force_close_all_channels_without_broadcasting_txn, then reconnect to peer(s).\n\
- Note that due to a long-standing bug in lnd you may have to reach out to peers running lnd-based nodes to ask them to manually force-close channels\n\
- See https://github.com/lightningdevkit/rust-lightning/issues/1565 for more info.");
- }
+ /// Adds a pending outbound HTLC to this channel. Note that you probably want
+ /// [`Self::send_htlc_and_commit`] instead, as you'll usually want both messages at once.
+ ///
+ /// This returns an optional UpdateAddHTLC as we may be in a state where we cannot add HTLCs on
+ /// the wire:
+ /// * In cases where we're waiting on the remote peer to send us a revoke_and_ack, we
+ /// wouldn't be able to determine what they actually ACK'ed if we have two sets of updates
+ /// awaiting ACK.
+ /// * In cases where we're marked MonitorUpdateInProgress, we cannot commit to a new state as
+ /// we may not yet have sent the previous commitment update messages and will need to
+ /// regenerate them.
+ ///
+ /// You MUST call [`Self::send_commitment_no_state_update`] prior to calling any other methods
+ /// on this [`Channel`] if `force_holding_cell` is false.
+ ///
+ /// `Err`s will only be [`ChannelError::Ignore`].
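+ ///
+ /// A sketch of how a hypothetical caller interprets the return value:
+ /// ```ignore
+ /// match chan.send_htlc(amt_msat, payment_hash, cltv_expiry, source, onion_packet,
+ ///     false, None, None, &fee_est, &logger)? {
+ ///     Some(update_add) => { /* relay update_add, then send a commitment_signed */ },
+ ///     None => { /* the HTLC went into the holding cell; nothing hits the wire yet */ },
+ /// }
+ /// ```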
+ fn send_htlc<F: Deref, L: Deref>(
+ &mut self, amount_msat: u64, payment_hash: PaymentHash, cltv_expiry: u32, source: HTLCSource,
+ onion_routing_packet: msgs::OnionPacket, mut force_holding_cell: bool,
+ skimmed_fee_msat: Option<u64>, blinding_point: Option<PublicKey>,
+ fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
+ ) -> Result<Option<msgs::UpdateAddHTLC>, ChannelError>
+ where F::Target: FeeEstimator, L::Target: Logger
+ {
+ if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) ||
+ self.context.channel_state.is_local_shutdown_sent() ||
+ self.context.channel_state.is_remote_shutdown_sent()
+ {
+ return Err(ChannelError::Ignore("Cannot send HTLC until channel is fully established and we haven't started shutting down".to_owned()));
+ }
+ let channel_total_msat = self.context.channel_value_satoshis * 1000;
+ if amount_msat > channel_total_msat {
+ return Err(ChannelError::Ignore(format!("Cannot send amount {}, because it is more than the total value of the channel {}", amount_msat, channel_total_msat)));
}
- // Before we change the state of the channel, we check if the peer is sending a very old
- // commitment transaction number, if yes we send a warning message.
- let our_commitment_transaction = INITIAL_COMMITMENT_NUMBER - self.context.cur_holder_commitment_transaction_number - 1;
- if msg.next_remote_commitment_number + 1 < our_commitment_transaction {
- return Err(
- ChannelError::Warn(format!("Peer attempted to reestablish channel with a very old local commitment transaction: {} (received) vs {} (expected)", msg.next_remote_commitment_number, our_commitment_transaction))
- );
+ if amount_msat == 0 {
+ return Err(ChannelError::Ignore("Cannot send 0-msat HTLC".to_owned()));
}
- // Go ahead and unmark PeerDisconnected as various calls we may make check for it (and all
- // remaining cases either succeed or ErrorMessage-fail).
- self.context.channel_state &= !(ChannelState::PeerDisconnected as u32);
- self.context.sent_message_awaiting_response = None;
+ let available_balances = self.context.get_available_balances(fee_estimator);
+ if amount_msat < available_balances.next_outbound_htlc_minimum_msat {
+ return Err(ChannelError::Ignore(format!("Cannot send less than our next-HTLC minimum - {} msat",
+ available_balances.next_outbound_htlc_minimum_msat)));
+ }
- let shutdown_msg = if self.context.channel_state & (ChannelState::LocalShutdownSent as u32) != 0 {
- assert!(self.context.shutdown_scriptpubkey.is_some());
- Some(msgs::Shutdown {
- channel_id: self.context.channel_id,
- scriptpubkey: self.get_closing_scriptpubkey(),
- })
- } else { None };
+ if amount_msat > available_balances.next_outbound_htlc_limit_msat {
+ return Err(ChannelError::Ignore(format!("Cannot send more than our next-HTLC maximum - {} msat",
+ available_balances.next_outbound_htlc_limit_msat)));
+ }
- let announcement_sigs = self.get_announcement_sigs(node_signer, genesis_block_hash, user_config, best_block.height(), logger);
+ if self.context.channel_state.is_peer_disconnected() {
+ // Note that this should never really happen: receiving an incoming HTLC for relay
+ // while !is_live() will result in us rejecting the HTLC, and we won't allow the user
+ // to send directly into a !is_live() channel. However, if we disconnected while the
+ // previous hop was doing its commitment dance we may end up getting here after the
+ // forwarding delay. In any case, returning an IgnoreError will get ChannelManager to
+ // do the right thing and fail the HTLC backwards now.
+ return Err(ChannelError::Ignore("Cannot send an HTLC while disconnected from channel counterparty".to_owned()));
+ }
- if self.context.channel_state & (ChannelState::FundingSent as u32) == ChannelState::FundingSent as u32 {
- // If we're waiting on a monitor update, we shouldn't re-send any channel_ready's.
- if self.context.channel_state & (ChannelState::OurChannelReady as u32) == 0 ||
- self.context.channel_state & (ChannelState::MonitorUpdateInProgress as u32) != 0 {
- if msg.next_remote_commitment_number != 0 {
- return Err(ChannelError::Close("Peer claimed they saw a revoke_and_ack but we haven't sent channel_ready yet".to_owned()));
- }
- // Short circuit the whole handler as there is nothing we can resend them
- return Ok(ReestablishResponses {
- channel_ready: None,
- raa: None, commitment_update: None,
- order: RAACommitmentOrder::CommitmentFirst,
- shutdown_msg, announcement_sigs,
- });
- }
+ let need_holding_cell = self.context.channel_state.should_force_holding_cell();
+ log_debug!(logger, "Pushing new outbound HTLC with hash {} for {} msat {}",
+ payment_hash, amount_msat,
+ if force_holding_cell { "into holding cell" }
+ else if need_holding_cell { "into holding cell as we're awaiting an RAA or monitor" }
+ else { "to peer" });
- // We have OurChannelReady set!
- let next_per_commitment_point = self.context.holder_signer.get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
- return Ok(ReestablishResponses {
- channel_ready: Some(msgs::ChannelReady {
- channel_id: self.context.channel_id(),
- next_per_commitment_point,
- short_channel_id_alias: Some(self.context.outbound_scid_alias),
- }),
- raa: None, commitment_update: None,
- order: RAACommitmentOrder::CommitmentFirst,
- shutdown_msg, announcement_sigs,
+ if need_holding_cell {
+ force_holding_cell = true;
+ }
+
+ // Now update local state:
+ if force_holding_cell {
+ self.context.holding_cell_htlc_updates.push(HTLCUpdateAwaitingACK::AddHTLC {
+ amount_msat,
+ payment_hash,
+ cltv_expiry,
+ source,
+ onion_routing_packet,
+ skimmed_fee_msat,
+ blinding_point,
});
+ return Ok(None);
}
- let required_revoke = if msg.next_remote_commitment_number + 1 == INITIAL_COMMITMENT_NUMBER - self.context.cur_holder_commitment_transaction_number {
- // Remote isn't waiting on any RevokeAndACK from us!
- // Note that if we need to repeat our ChannelReady we'll do that in the next if block.
- None
- } else if msg.next_remote_commitment_number + 1 == (INITIAL_COMMITMENT_NUMBER - 1) - self.context.cur_holder_commitment_transaction_number {
- if self.context.channel_state & (ChannelState::MonitorUpdateInProgress as u32) != 0 {
- self.context.monitor_pending_revoke_and_ack = true;
- None
- } else {
- Some(self.get_last_revoke_and_ack())
- }
- } else {
- return Err(ChannelError::Close("Peer attempted to reestablish channel with a very old local commitment transaction".to_owned()));
- };
+ self.context.pending_outbound_htlcs.push(OutboundHTLCOutput {
+ htlc_id: self.context.next_holder_htlc_id,
+ amount_msat,
+ payment_hash: payment_hash.clone(),
+ cltv_expiry,
+ state: OutboundHTLCState::LocalAnnounced(Box::new(onion_routing_packet.clone())),
+ source,
+ blinding_point,
+ skimmed_fee_msat,
+ });
- // We increment cur_counterparty_commitment_transaction_number only upon receipt of
- // revoke_and_ack, not on sending commitment_signed, so we add one if have
- // AwaitingRemoteRevoke set, which indicates we sent a commitment_signed but haven't gotten
- // the corresponding revoke_and_ack back yet.
- let is_awaiting_remote_revoke = self.context.channel_state & ChannelState::AwaitingRemoteRevoke as u32 != 0;
- if is_awaiting_remote_revoke && !self.is_awaiting_monitor_update() {
- self.mark_awaiting_response();
- }
- let next_counterparty_commitment_number = INITIAL_COMMITMENT_NUMBER - self.context.cur_counterparty_commitment_transaction_number + if is_awaiting_remote_revoke { 1 } else { 0 };
+ let res = msgs::UpdateAddHTLC {
+ channel_id: self.context.channel_id,
+ htlc_id: self.context.next_holder_htlc_id,
+ amount_msat,
+ payment_hash,
+ cltv_expiry,
+ onion_routing_packet,
+ skimmed_fee_msat,
+ blinding_point,
+ };
+ self.context.next_holder_htlc_id += 1;
- let channel_ready = if msg.next_local_commitment_number == 1 && INITIAL_COMMITMENT_NUMBER - self.context.cur_holder_commitment_transaction_number == 1 {
- // We should never have to worry about MonitorUpdateInProgress resending ChannelReady
- let next_per_commitment_point = self.context.holder_signer.get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
- Some(msgs::ChannelReady {
- channel_id: self.context.channel_id(),
- next_per_commitment_point,
- short_channel_id_alias: Some(self.context.outbound_scid_alias),
- })
- } else { None };
+ Ok(Some(res))
+ }
- if msg.next_local_commitment_number == next_counterparty_commitment_number {
- if required_revoke.is_some() {
- log_debug!(logger, "Reconnected channel {} with only lost outbound RAA", log_bytes!(self.context.channel_id()));
- } else {
- log_debug!(logger, "Reconnected channel {} with no loss", log_bytes!(self.context.channel_id()));
+ fn build_commitment_no_status_check<L: Deref>(&mut self, logger: &L) -> ChannelMonitorUpdate where L::Target: Logger {
+ log_trace!(logger, "Updating HTLC state for a newly-sent commitment_signed...");
+ // We can upgrade the status of some HTLCs that are waiting on a commitment: even if we
+ // fail to generate the commitment, we are still at least at a position where upgrading
+ // their status is acceptable.
+ for htlc in self.context.pending_inbound_htlcs.iter_mut() {
+ let new_state = if let &InboundHTLCState::AwaitingRemoteRevokeToAnnounce(ref forward_info) = &htlc.state {
+ Some(InboundHTLCState::AwaitingAnnouncedRemoteRevoke(forward_info.clone()))
+ } else { None };
+ if let Some(state) = new_state {
+ log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce {} to AwaitingAnnouncedRemoteRevoke", &htlc.payment_hash);
+ htlc.state = state;
}
-
- Ok(ReestablishResponses {
- channel_ready, shutdown_msg, announcement_sigs,
- raa: required_revoke,
- commitment_update: None,
- order: self.context.resend_order.clone(),
- })
- } else if msg.next_local_commitment_number == next_counterparty_commitment_number - 1 {
- if required_revoke.is_some() {
- log_debug!(logger, "Reconnected channel {} with lost outbound RAA and lost remote commitment tx", log_bytes!(self.context.channel_id()));
- } else {
- log_debug!(logger, "Reconnected channel {} with only lost remote commitment tx", log_bytes!(self.context.channel_id()));
+ }
+ for htlc in self.context.pending_outbound_htlcs.iter_mut() {
+ if let &mut OutboundHTLCState::AwaitingRemoteRevokeToRemove(ref mut outcome) = &mut htlc.state {
+ log_trace!(logger, " ...promoting outbound AwaitingRemoteRevokeToRemove {} to AwaitingRemovedRemoteRevoke", &htlc.payment_hash);
+ // Grab the preimage, if it exists, instead of cloning
+ let mut reason = OutboundHTLCOutcome::Success(None);
+ mem::swap(outcome, &mut reason);
+ htlc.state = OutboundHTLCState::AwaitingRemovedRemoteRevoke(reason);
}
-
- if self.context.channel_state & (ChannelState::MonitorUpdateInProgress as u32) != 0 {
- self.context.monitor_pending_commitment_signed = true;
- Ok(ReestablishResponses {
- channel_ready, shutdown_msg, announcement_sigs,
- commitment_update: None, raa: None,
- order: self.context.resend_order.clone(),
- })
- } else {
- Ok(ReestablishResponses {
- channel_ready, shutdown_msg, announcement_sigs,
- raa: required_revoke,
- commitment_update: Some(self.get_last_commitment_update(logger)),
- order: self.context.resend_order.clone(),
- })
+ }
+ if let Some((feerate, update_state)) = self.context.pending_update_fee {
+ if update_state == FeeUpdateState::AwaitingRemoteRevokeToAnnounce {
+ debug_assert!(!self.context.is_outbound());
+ log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce fee update {} to Committed", feerate);
+ self.context.feerate_per_kw = feerate;
+ self.context.pending_update_fee = None;
}
- } else {
- Err(ChannelError::Close("Peer attempted to reestablish channel with a very old remote commitment transaction".to_owned()))
}
- }
-
- /// Calculates and returns our minimum and maximum closing transaction fee amounts, in whole
- /// satoshis. The amounts remain consistent unless a peer disconnects/reconnects or we restart,
- /// at which point they will be recalculated.
- fn calculate_closing_fee_limits<F: Deref>(&mut self, fee_estimator: &LowerBoundedFeeEstimator<F>)
- -> (u64, u64)
- where F::Target: FeeEstimator
- {
- if let Some((min, max)) = self.context.closing_fee_limits { return (min, max); }
-
- // Propose a range from our current Background feerate to our Normal feerate plus our
- // force_close_avoidance_max_fee_satoshis.
- // If we fail to come to consensus, we'll have to force-close.
- let mut proposed_feerate = fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::Background);
- let normal_feerate = fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::Normal);
- let mut proposed_max_feerate = if self.context.is_outbound() { normal_feerate } else { u32::max_value() };
+ self.context.resend_order = RAACommitmentOrder::RevokeAndACKFirst;
- // The spec requires that (when the channel does not have anchors) we only send absolute
- // channel fees no greater than the absolute channel fee on the current commitment
- // transaction. It's unclear *which* commitment transaction this refers to, and there isn't
- // very good reason to apply such a limit in any case. We don't bother doing so, risking
- // some force-closure by old nodes, but we wanted to close the channel anyway.
+ let (mut htlcs_ref, counterparty_commitment_tx) =
+ self.build_commitment_no_state_update(logger);
+ let counterparty_commitment_txid = counterparty_commitment_tx.trust().txid();
+ let htlcs: Vec<(HTLCOutputInCommitment, Option<Box<HTLCSource>>)> =
+ htlcs_ref.drain(..).map(|(htlc, htlc_source)| (htlc, htlc_source.map(|source_ref| Box::new(source_ref.clone())))).collect();
- if let Some(target_feerate) = self.context.target_closing_feerate_sats_per_kw {
- let min_feerate = if self.context.is_outbound() { target_feerate } else { cmp::min(self.context.feerate_per_kw, target_feerate) };
- proposed_feerate = cmp::max(proposed_feerate, min_feerate);
- proposed_max_feerate = cmp::max(proposed_max_feerate, min_feerate);
+ if self.context.announcement_sigs_state == AnnouncementSigsState::MessageSent {
+ self.context.announcement_sigs_state = AnnouncementSigsState::Committed;
}
- // Note that technically we could end up with a lower minimum fee if one sides' balance is
- // below our dust limit, causing the output to disappear. We don't bother handling this
- // case, however, as this should only happen if a channel is closed before any (material)
- // payments have been made on it. This may cause slight fee overpayment and/or failure to
- // come to consensus with our counterparty on appropriate fees, however it should be a
- // relatively rare case. We can revisit this later, though note that in order to determine
- // if the funders' output is dust we have to know the absolute fee we're going to use.
- let tx_weight = self.get_closing_transaction_weight(Some(&self.get_closing_scriptpubkey()), Some(self.context.counterparty_shutdown_scriptpubkey.as_ref().unwrap()));
- let proposed_total_fee_satoshis = proposed_feerate as u64 * tx_weight / 1000;
- let proposed_max_total_fee_satoshis = if self.context.is_outbound() {
- // We always add force_close_avoidance_max_fee_satoshis to our normal
- // feerate-calculated fee, but allow the max to be overridden if we're using a
- // target feerate-calculated fee.
- cmp::max(normal_feerate as u64 * tx_weight / 1000 + self.context.config.options.force_close_avoidance_max_fee_satoshis,
- proposed_max_feerate as u64 * tx_weight / 1000)
- } else {
- self.context.channel_value_satoshis - (self.context.value_to_self_msat + 999) / 1000
- };
-
- self.context.closing_fee_limits = Some((proposed_total_fee_satoshis, proposed_max_total_fee_satoshis));
- self.context.closing_fee_limits.clone().unwrap()
+ self.context.latest_monitor_update_id += 1;
+ let monitor_update = ChannelMonitorUpdate {
+ update_id: self.context.latest_monitor_update_id,
+ updates: vec![ChannelMonitorUpdateStep::LatestCounterpartyCommitmentTXInfo {
+ commitment_txid: counterparty_commitment_txid,
+ htlc_outputs: htlcs.clone(),
+ commitment_number: self.context.cur_counterparty_commitment_transaction_number,
+ their_per_commitment_point: self.context.counterparty_cur_commitment_point.unwrap(),
+ feerate_per_kw: Some(counterparty_commitment_tx.feerate_per_kw()),
+ to_broadcaster_value_sat: Some(counterparty_commitment_tx.to_broadcaster_value_sat()),
+ to_countersignatory_value_sat: Some(counterparty_commitment_tx.to_countersignatory_value_sat()),
+ }]
+ };
+ self.context.channel_state.set_awaiting_remote_revoke();
+ monitor_update
}
- /// Returns true if we're ready to commence the closing_signed negotiation phase. This is true
- /// after both sides have exchanged a `shutdown` message and all HTLCs have been drained. At
- /// this point if we're the funder we should send the initial closing_signed, and in any case
- /// shutdown should complete within a reasonable timeframe.
- fn closing_negotiation_ready(&self) -> bool {
- self.context.pending_inbound_htlcs.is_empty() && self.context.pending_outbound_htlcs.is_empty() &&
- self.context.channel_state &
- (BOTH_SIDES_SHUTDOWN_MASK | ChannelState::AwaitingRemoteRevoke as u32 |
- ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32)
- == BOTH_SIDES_SHUTDOWN_MASK &&
- self.context.pending_update_fee.is_none()
- }
+ fn build_commitment_no_state_update<L: Deref>(&self, logger: &L)
+ -> (Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)>, CommitmentTransaction)
+ where L::Target: Logger
+ {
+ let counterparty_keys = self.context.build_remote_transaction_keys();
+ let commitment_stats = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, true, logger);
+ let counterparty_commitment_tx = commitment_stats.tx;
- /// Checks if the closing_signed negotiation is making appropriate progress, possibly returning
- /// an Err if no progress is being made and the channel should be force-closed instead.
- /// Should be called on a one-minute timer.
- pub fn timer_check_closing_negotiation_progress(&mut self) -> Result<(), ChannelError> {
- if self.closing_negotiation_ready() {
- if self.context.closing_signed_in_flight {
- return Err(ChannelError::Close("closing_signed negotiation failed to finish within two timer ticks".to_owned()));
- } else {
- self.context.closing_signed_in_flight = true;
+ #[cfg(any(test, fuzzing))]
+ {
+ if !self.context.is_outbound() {
+ let projected_commit_tx_info = self.context.next_remote_commitment_tx_fee_info_cached.lock().unwrap().take();
+ *self.context.next_local_commitment_tx_fee_info_cached.lock().unwrap() = None;
+ if let Some(info) = projected_commit_tx_info {
+ let total_pending_htlcs = self.context.pending_inbound_htlcs.len() + self.context.pending_outbound_htlcs.len();
+ if info.total_pending_htlcs == total_pending_htlcs
+ && info.next_holder_htlc_id == self.context.next_holder_htlc_id
+ && info.next_counterparty_htlc_id == self.context.next_counterparty_htlc_id
+ && info.feerate == self.context.feerate_per_kw {
+ let actual_fee = commit_tx_fee_msat(self.context.feerate_per_kw, commitment_stats.num_nondust_htlcs, self.context.get_channel_type());
+ assert_eq!(actual_fee, info.fee);
+ }
+ }
}
}
- Ok(())
+
+ (commitment_stats.htlcs_included, counterparty_commitment_tx)
}
- pub fn maybe_propose_closing_signed<F: Deref, L: Deref>(
- &mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L)
- -> Result<(Option<msgs::ClosingSigned>, Option<Transaction>), ChannelError>
- where F::Target: FeeEstimator, L::Target: Logger
- {
- if self.context.last_sent_closing_fee.is_some() || !self.closing_negotiation_ready() {
- return Ok((None, None));
- }
+ /// Only fails in case of signer rejection. Used for channel_reestablish commitment_signed
+ /// generation when we shouldn't change HTLC/channel state.
+ fn send_commitment_no_state_update<L: Deref>(&self, logger: &L) -> Result<(msgs::CommitmentSigned, (Txid, Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)>)), ChannelError> where L::Target: Logger {
+ // Run the (test- and fuzzing-only) fee consistency checks in `build_commitment_no_state_update`
+ #[cfg(any(test, fuzzing))]
+ self.build_commitment_no_state_update(logger);
- if !self.context.is_outbound() {
- if let Some(msg) = &self.context.pending_counterparty_closing_signed.take() {
- return self.closing_signed(fee_estimator, &msg);
- }
- return Ok((None, None));
- }
+ let counterparty_keys = self.context.build_remote_transaction_keys();
+ let commitment_stats = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, true, logger);
+ let counterparty_commitment_txid = commitment_stats.tx.trust().txid();
- let (our_min_fee, our_max_fee) = self.calculate_closing_fee_limits(fee_estimator);
+ match &self.context.holder_signer {
+ ChannelSignerType::Ecdsa(ecdsa) => {
+ let (signature, htlc_signatures);
- assert!(self.context.shutdown_scriptpubkey.is_some());
- let (closing_tx, total_fee_satoshis) = self.build_closing_transaction(our_min_fee, false);
- log_trace!(logger, "Proposing initial closing_signed for our counterparty with a fee range of {}-{} sat (with initial proposal {} sats)",
- our_min_fee, our_max_fee, total_fee_satoshis);
+ {
+ let mut htlcs = Vec::with_capacity(commitment_stats.htlcs_included.len());
+ for &(ref htlc, _) in commitment_stats.htlcs_included.iter() {
+ htlcs.push(htlc);
+ }
- let sig = self.context.holder_signer
- .sign_closing_transaction(&closing_tx, &self.context.secp_ctx)
- .map_err(|()| ChannelError::Close("Failed to get signature for closing transaction.".to_owned()))?;
+ let res = ecdsa.sign_counterparty_commitment(
+ &commitment_stats.tx,
+ commitment_stats.inbound_htlc_preimages,
+ commitment_stats.outbound_htlc_preimages,
+ &self.context.secp_ctx,
+ ).map_err(|_| ChannelError::Ignore("Failed to get signatures for new commitment_signed".to_owned()))?;
+ signature = res.0;
+ htlc_signatures = res.1;
+
+ log_trace!(logger, "Signed remote commitment tx {} (txid {}) with redeemscript {} -> {} in channel {}",
+ encode::serialize_hex(&commitment_stats.tx.trust().built_transaction().transaction),
+ &counterparty_commitment_txid, encode::serialize_hex(&self.context.get_funding_redeemscript()),
+ log_bytes!(signature.serialize_compact()[..]), &self.context.channel_id());
+
+ for (ref htlc_sig, ref htlc) in htlc_signatures.iter().zip(htlcs) {
+ log_trace!(logger, "Signed remote HTLC tx {} with redeemscript {} with pubkey {} -> {} in channel {}",
+ encode::serialize_hex(&chan_utils::build_htlc_transaction(&counterparty_commitment_txid, commitment_stats.feerate_per_kw, self.context.get_holder_selected_contest_delay(), htlc, &self.context.channel_type, &counterparty_keys.broadcaster_delayed_payment_key, &counterparty_keys.revocation_key)),
+ encode::serialize_hex(&chan_utils::get_htlc_redeemscript(&htlc, &self.context.channel_type, &counterparty_keys)),
+ log_bytes!(counterparty_keys.broadcaster_htlc_key.to_public_key().serialize()),
+ log_bytes!(htlc_sig.serialize_compact()[..]), &self.context.channel_id());
+ }
+ }
- self.context.last_sent_closing_fee = Some((total_fee_satoshis, sig.clone()));
- Ok((Some(msgs::ClosingSigned {
- channel_id: self.context.channel_id,
- fee_satoshis: total_fee_satoshis,
- signature: sig,
- fee_range: Some(msgs::ClosingSignedFeeRange {
- min_fee_satoshis: our_min_fee,
- max_fee_satoshis: our_max_fee,
- }),
- }), None))
+ Ok((msgs::CommitmentSigned {
+ channel_id: self.context.channel_id,
+ signature,
+ htlc_signatures,
+ #[cfg(taproot)]
+ partial_signature_with_nonce: None,
+ }, (counterparty_commitment_txid, commitment_stats.htlcs_included)))
+ },
+ // TODO (taproot|arik)
+ #[cfg(taproot)]
+ _ => todo!()
+ }
}
- // Marks a channel as waiting for a response from the counterparty. If it's not received
- // [`DISCONNECT_PEER_AWAITING_RESPONSE_TICKS`] after sending our own to them, then we'll attempt
- // a reconnection.
- fn mark_awaiting_response(&mut self) {
- self.context.sent_message_awaiting_response = Some(0);
+ /// Adds a pending outbound HTLC to this channel, builds a new remote commitment
+ /// transaction, and generates the corresponding [`ChannelMonitorUpdate`] in one go.
+ ///
+ /// Shorthand for calling [`Self::send_htlc`] followed by a commitment update, see docs on
+ /// [`Self::send_htlc`] and [`Self::build_commitment_no_state_update`] for more info.
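+ ///
+ /// A minimal usage sketch (hypothetical values; per the usual monitor-update flow, any
+ /// returned update should be persisted before the commitment message is released):
+ /// ```ignore
+ /// if let Some(monitor_update) = chan.send_htlc_and_commit(10_000, payment_hash,
+ ///     cltv_expiry, source, onion_packet, None, &fee_est, &logger)?
+ /// {
+ ///     // Hand monitor_update to the chain::Watch implementation.
+ /// }
+ /// ```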
+ pub fn send_htlc_and_commit<F: Deref, L: Deref>(
+ &mut self, amount_msat: u64, payment_hash: PaymentHash, cltv_expiry: u32,
+ source: HTLCSource, onion_routing_packet: msgs::OnionPacket, skimmed_fee_msat: Option<u64>,
+ fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
+ ) -> Result<Option<ChannelMonitorUpdate>, ChannelError>
+ where F::Target: FeeEstimator, L::Target: Logger
+ {
+ let send_res = self.send_htlc(amount_msat, payment_hash, cltv_expiry, source,
+ onion_routing_packet, false, skimmed_fee_msat, None, fee_estimator, logger);
+ if let Err(e) = &send_res { if let ChannelError::Ignore(_) = e {} else { debug_assert!(false, "Sending cannot trigger channel failure"); } }
+ match send_res? {
+ Some(_) => {
+ let monitor_update = self.build_commitment_no_status_check(logger);
+ self.monitor_updating_paused(false, true, false, Vec::new(), Vec::new(), Vec::new());
+ Ok(self.push_ret_blockable_mon_update(monitor_update))
+ },
+ None => Ok(None)
+ }
}
- /// Determines whether we should disconnect the counterparty due to not receiving a response
- /// within our expected timeframe.
- ///
- /// This should be called on every [`super::channelmanager::ChannelManager::timer_tick_occurred`].
- pub fn should_disconnect_peer_awaiting_response(&mut self) -> bool {
- let ticks_elapsed = if let Some(ticks_elapsed) = self.context.sent_message_awaiting_response.as_mut() {
- ticks_elapsed
- } else {
- // Don't disconnect when we're not waiting on a response.
- return false;
- };
- *ticks_elapsed += 1;
- *ticks_elapsed >= DISCONNECT_PEER_AWAITING_RESPONSE_TICKS
+ /// Applies the `ChannelUpdate` and returns a boolean indicating whether a change actually
+ /// happened.
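+ ///
+ /// A sketch (hypothetical caller):
+ /// ```ignore
+ /// if chan.channel_update(&msg)? {
+ ///     // The cached CounterpartyForwardingInfo was replaced.
+ /// }
+ /// ```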
+ pub fn channel_update(&mut self, msg: &msgs::ChannelUpdate) -> Result<bool, ChannelError> {
+ let new_forwarding_info = Some(CounterpartyForwardingInfo {
+ fee_base_msat: msg.contents.fee_base_msat,
+ fee_proportional_millionths: msg.contents.fee_proportional_millionths,
+ cltv_expiry_delta: msg.contents.cltv_expiry_delta
+ });
+ let did_change = self.context.counterparty_forwarding_info != new_forwarding_info;
+ if did_change {
+ self.context.counterparty_forwarding_info = new_forwarding_info;
+ }
+
+ Ok(did_change)
}
- pub fn shutdown<SP: Deref>(
- &mut self, signer_provider: &SP, their_features: &InitFeatures, msg: &msgs::Shutdown
- ) -> Result<(Option<msgs::Shutdown>, Option<&ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>), ChannelError>
- where SP::Target: SignerProvider
+ /// Begins the shutdown process, getting a message for the remote peer and returning
+ /// all holding cell HTLCs so the corresponding payments can be failed back to their
+ /// sources.
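+ ///
+ /// A minimal sketch (hypothetical caller; the returned HTLCs must be failed back to
+ /// their sources by the caller):
+ /// ```ignore
+ /// let (shutdown_msg, monitor_update_opt, dropped_htlcs) =
+ ///     chan.get_shutdown(&signer_provider, &their_features, None, None)?;
+ /// // Send shutdown_msg to the peer; fail each HTLC in dropped_htlcs backwards.
+ /// ```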
+ pub fn get_shutdown(&mut self, signer_provider: &SP, their_features: &InitFeatures,
+ target_feerate_sats_per_kw: Option<u32>, override_shutdown_script: Option<ShutdownScript>)
+ -> Result<(msgs::Shutdown, Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>), APIError>
{
- if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
- return Err(ChannelError::Close("Peer sent shutdown when we needed a channel_reestablish".to_owned()));
+ for htlc in self.context.pending_outbound_htlcs.iter() {
+ if let OutboundHTLCState::LocalAnnounced(_) = htlc.state {
+ return Err(APIError::APIMisuseError{err: "Cannot begin shutdown with pending HTLCs. Process pending events first".to_owned()});
+ }
}
- if self.context.channel_state < ChannelState::FundingSent as u32 {
- // Spec says we should fail the connection, not the channel, but that's nonsense, there
- // are plenty of reasons you may want to fail a channel pre-funding, and spec says you
- // can do that via error message without getting a connection fail anyway...
- return Err(ChannelError::Close("Peer sent shutdown pre-funding generation".to_owned()));
+ if self.context.channel_state.is_local_shutdown_sent() {
+ return Err(APIError::APIMisuseError{err: "Shutdown already in progress".to_owned()});
}
- for htlc in self.context.pending_inbound_htlcs.iter() {
- if let InboundHTLCState::RemoteAnnounced(_) = htlc.state {
- return Err(ChannelError::Close("Got shutdown with remote pending HTLCs".to_owned()));
- }
+ else if self.context.channel_state.is_remote_shutdown_sent() {
+ return Err(APIError::ChannelUnavailable{err: "Shutdown already in progress by remote".to_owned()});
}
- assert_eq!(self.context.channel_state & ChannelState::ShutdownComplete as u32, 0);
-
- if !script::is_bolt2_compliant(&msg.scriptpubkey, their_features) {
- return Err(ChannelError::Warn(format!("Got a nonstandard scriptpubkey ({}) from remote peer", msg.scriptpubkey.to_bytes().to_hex())));
+ if self.context.shutdown_scriptpubkey.is_some() && override_shutdown_script.is_some() {
+ return Err(APIError::APIMisuseError{err: "Cannot override shutdown script for a channel with one already set".to_owned()});
}
-
- if self.context.counterparty_shutdown_scriptpubkey.is_some() {
- if Some(&msg.scriptpubkey) != self.context.counterparty_shutdown_scriptpubkey.as_ref() {
- return Err(ChannelError::Warn(format!("Got shutdown request with a scriptpubkey ({}) which did not match their previous scriptpubkey.", msg.scriptpubkey.to_bytes().to_hex())));
- }
- } else {
- self.context.counterparty_shutdown_scriptpubkey = Some(msg.scriptpubkey.clone());
+ assert!(!matches!(self.context.channel_state, ChannelState::ShutdownComplete));
+ if self.context.channel_state.is_peer_disconnected() || self.context.channel_state.is_monitor_update_in_progress() {
+ return Err(APIError::ChannelUnavailable{err: "Cannot begin shutdown while peer is disconnected or we're waiting on a monitor update, maybe force-close instead?".to_owned()});
}
- // If we have any LocalAnnounced updates we'll probably just get back an update_fail_htlc
- // immediately after the commitment dance, but we can send a Shutdown because we won't send
- // any further commitment updates after we set LocalShutdownSent.
- let send_shutdown = (self.context.channel_state & ChannelState::LocalShutdownSent as u32) != ChannelState::LocalShutdownSent as u32;
-
let update_shutdown_script = match self.context.shutdown_scriptpubkey {
Some(_) => false,
None => {
- assert!(send_shutdown);
- let shutdown_scriptpubkey = match signer_provider.get_shutdown_scriptpubkey() {
- Ok(scriptpubkey) => scriptpubkey,
- Err(_) => return Err(ChannelError::Close("Failed to get shutdown scriptpubkey".to_owned())),
+ // use override shutdown script if provided
+ let shutdown_scriptpubkey = match override_shutdown_script {
+ Some(script) => script,
+ None => {
+ // otherwise, use the shutdown scriptpubkey provided by the signer
+ match signer_provider.get_shutdown_scriptpubkey() {
+ Ok(scriptpubkey) => scriptpubkey,
+ Err(_) => return Err(APIError::ChannelUnavailable{err: "Failed to get shutdown scriptpubkey".to_owned()}),
+ }
+ },
};
if !shutdown_scriptpubkey.is_compatible(their_features) {
- return Err(ChannelError::Close(format!("Provided a scriptpubkey format not accepted by peer: {}", shutdown_scriptpubkey)));
+ return Err(APIError::IncompatibleShutdownScript { script: shutdown_scriptpubkey.clone() });
}
self.context.shutdown_scriptpubkey = Some(shutdown_scriptpubkey);
true
};
// From here on out, we may not fail!
-
- self.context.channel_state |= ChannelState::RemoteShutdownSent as u32;
+ self.context.target_closing_feerate_sats_per_kw = target_feerate_sats_per_kw;
+ self.context.channel_state.set_local_shutdown_sent();
self.context.update_time_counter += 1;
let monitor_update = if update_shutdown_script {
}],
};
self.monitor_updating_paused(false, false, false, Vec::new(), Vec::new(), Vec::new());
- if self.push_blockable_mon_update(monitor_update) {
- self.context.pending_monitor_updates.last().map(|upd| &upd.update)
- } else { None }
- } else { None };
- let shutdown = if send_shutdown {
- Some(msgs::Shutdown {
- channel_id: self.context.channel_id,
- scriptpubkey: self.get_closing_scriptpubkey(),
- })
+ self.push_ret_blockable_mon_update(monitor_update)
} else { None };
+ let shutdown = msgs::Shutdown {
+ channel_id: self.context.channel_id,
+ scriptpubkey: self.get_closing_scriptpubkey(),
+ };
- // We can't send our shutdown until we've committed all of our pending HTLCs, but the
- // remote side is unlikely to accept any new HTLCs, so we go ahead and "free" any holding
- // cell HTLCs and return them to fail the payment.
+ // Go ahead and drop holding cell updates as we'd rather fail payments than wait to send
+ // our shutdown until we've committed all of the pending changes.
self.context.holding_cell_update_fee = None;
let mut dropped_outbound_htlcs = Vec::with_capacity(self.context.holding_cell_htlc_updates.len());
self.context.holding_cell_htlc_updates.retain(|htlc_update| {
}
});
- self.context.channel_state |= ChannelState::LocalShutdownSent as u32;
- self.context.update_time_counter += 1;
+ debug_assert!(!self.is_shutdown() || monitor_update.is_none(),
+ "we can't both complete shutdown and return a monitor update");
Ok((shutdown, monitor_update, dropped_outbound_htlcs))
}
- fn build_signed_closing_transaction(&self, closing_tx: &ClosingTransaction, counterparty_sig: &Signature, sig: &Signature) -> Transaction {
- let mut tx = closing_tx.trust().built_transaction().clone();
-
- tx.input[0].witness.push(Vec::new()); // First is the multisig dummy
-
- let funding_key = self.context.get_holder_pubkeys().funding_pubkey.serialize();
- let counterparty_funding_key = self.context.counterparty_funding_pubkey().serialize();
- let mut holder_sig = sig.serialize_der().to_vec();
- holder_sig.push(EcdsaSighashType::All as u8);
- let mut cp_sig = counterparty_sig.serialize_der().to_vec();
- cp_sig.push(EcdsaSighashType::All as u8);
- if funding_key[..] < counterparty_funding_key[..] {
- tx.input[0].witness.push(holder_sig);
- tx.input[0].witness.push(cp_sig);
- } else {
- tx.input[0].witness.push(cp_sig);
- tx.input[0].witness.push(holder_sig);
- }
-
- tx.input[0].witness.push(self.context.get_funding_redeemscript().into_bytes());
- tx
+ pub fn inflight_htlc_sources(&self) -> impl Iterator<Item=(&HTLCSource, &PaymentHash)> {
+ self.context.holding_cell_htlc_updates.iter()
+ .flat_map(|htlc_update| {
+ match htlc_update {
+ HTLCUpdateAwaitingACK::AddHTLC { source, payment_hash, .. }
+ => Some((source, payment_hash)),
+ _ => None,
+ }
+ })
+ .chain(self.context.pending_outbound_htlcs.iter().map(|htlc| (&htlc.source, &htlc.payment_hash)))
}
+}
- pub fn closing_signed<F: Deref>(
- &mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, msg: &msgs::ClosingSigned)
- -> Result<(Option<msgs::ClosingSigned>, Option<Transaction>), ChannelError>
- where F::Target: FeeEstimator
+/// A not-yet-funded outbound (from holder) channel using V1 channel establishment.
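+///
+/// Typically constructed via [`OutboundV1Channel::new`]; a hypothetical construction
+/// sketch (all argument values are illustrative):
+/// ```ignore
+/// let chan = OutboundV1Channel::new(&fee_est, &entropy, &signer_provider, their_node_id,
+///     &their_features, 1_000_000, 0, user_id, &config, cur_height, scid_alias, None)?;
+/// ```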
+pub(super) struct OutboundV1Channel<SP: Deref> where SP::Target: SignerProvider {
+ pub context: ChannelContext<SP>,
+ pub unfunded_context: UnfundedChannelContext,
+}
+
+impl<SP: Deref> OutboundV1Channel<SP> where SP::Target: SignerProvider {
+ pub fn new<ES: Deref, F: Deref>(
+ fee_estimator: &LowerBoundedFeeEstimator<F>, entropy_source: &ES, signer_provider: &SP, counterparty_node_id: PublicKey, their_features: &InitFeatures,
+ channel_value_satoshis: u64, push_msat: u64, user_id: u128, config: &UserConfig, current_chain_height: u32,
+ outbound_scid_alias: u64, temporary_channel_id: Option<ChannelId>
+ ) -> Result<OutboundV1Channel<SP>, APIError>
+ where ES::Target: EntropySource,
+ F::Target: FeeEstimator
{
- if self.context.channel_state & BOTH_SIDES_SHUTDOWN_MASK != BOTH_SIDES_SHUTDOWN_MASK {
- return Err(ChannelError::Close("Remote end sent us a closing_signed before both sides provided a shutdown".to_owned()));
- }
- if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
- return Err(ChannelError::Close("Peer sent closing_signed when we needed a channel_reestablish".to_owned()));
+ let holder_selected_contest_delay = config.channel_handshake_config.our_to_self_delay;
+ let channel_keys_id = signer_provider.generate_channel_keys_id(false, channel_value_satoshis, user_id);
+ let holder_signer = signer_provider.derive_channel_signer(channel_value_satoshis, channel_keys_id);
+ let pubkeys = holder_signer.pubkeys().clone();
+
+ if !their_features.supports_wumbo() && channel_value_satoshis > MAX_FUNDING_SATOSHIS_NO_WUMBO {
+ return Err(APIError::APIMisuseError{err: format!("funding_value must not exceed {}, it was {}", MAX_FUNDING_SATOSHIS_NO_WUMBO, channel_value_satoshis)});
}
- if !self.context.pending_inbound_htlcs.is_empty() || !self.context.pending_outbound_htlcs.is_empty() {
- return Err(ChannelError::Close("Remote end sent us a closing_signed while there were still pending HTLCs".to_owned()));
+ if channel_value_satoshis >= TOTAL_BITCOIN_SUPPLY_SATOSHIS {
+ return Err(APIError::APIMisuseError{err: format!("funding_value must be smaller than the total bitcoin supply, it was {}", channel_value_satoshis)});
}
- if msg.fee_satoshis > TOTAL_BITCOIN_SUPPLY_SATOSHIS { // this is required to stop potential overflow in build_closing_transaction
- return Err(ChannelError::Close("Remote tried to send us a closing tx with > 21 million BTC fee".to_owned()));
+ let channel_value_msat = channel_value_satoshis * 1000;
+ if push_msat > channel_value_msat {
+ return Err(APIError::APIMisuseError { err: format!("Push value ({}) was larger than channel_value ({})", push_msat, channel_value_msat) });
}
-
- if self.context.is_outbound() && self.context.last_sent_closing_fee.is_none() {
- return Err(ChannelError::Close("Remote tried to send a closing_signed when we were supposed to propose the first one".to_owned()));
+ if holder_selected_contest_delay < BREAKDOWN_TIMEOUT {
+ return Err(APIError::APIMisuseError {err: format!("Configured with an unreasonable our_to_self_delay ({}) putting user funds at risks", holder_selected_contest_delay)});
}
-
- if self.context.channel_state & ChannelState::MonitorUpdateInProgress as u32 != 0 {
- self.context.pending_counterparty_closing_signed = Some(msg.clone());
- return Ok((None, None));
+ let holder_selected_channel_reserve_satoshis = get_holder_selected_channel_reserve_satoshis(channel_value_satoshis, config);
+ if holder_selected_channel_reserve_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
+ // Protocol-level safety check; this should never trigger because of
+ // `MIN_THEIR_CHAN_RESERVE_SATOSHIS`.
+ return Err(APIError::APIMisuseError { err: format!("Holder-selected channel reserve ({} sat) is below the implementation dust limit", holder_selected_channel_reserve_satoshis) });
}
- let funding_redeemscript = self.context.get_funding_redeemscript();
- let (mut closing_tx, used_total_fee) = self.build_closing_transaction(msg.fee_satoshis, false);
- if used_total_fee != msg.fee_satoshis {
- return Err(ChannelError::Close(format!("Remote sent us a closing_signed with a fee other than the value they can claim. Fee in message: {}. Actual closing tx fee: {}", msg.fee_satoshis, used_total_fee)));
- }
- let sighash = closing_tx.trust().get_sighash_all(&funding_redeemscript, self.context.channel_value_satoshis);
+ let channel_type = Self::get_initial_channel_type(&config, their_features);
+ debug_assert!(channel_type.is_subset(&channelmanager::provided_channel_type_features(&config)));
- match self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, &self.context.get_counterparty_pubkeys().funding_pubkey) {
- Ok(_) => {},
- Err(_e) => {
- // The remote end may have decided to revoke their output due to inconsistent dust
- // limits, so check for that case by re-checking the signature here.
- closing_tx = self.build_closing_transaction(msg.fee_satoshis, true).0;
- let sighash = closing_tx.trust().get_sighash_all(&funding_redeemscript, self.context.channel_value_satoshis);
- secp_check!(self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, self.context.counterparty_funding_pubkey()), "Invalid closing tx signature from peer".to_owned());
- },
+ let (commitment_conf_target, anchor_outputs_value_msat) = if channel_type.supports_anchors_zero_fee_htlc_tx() {
+ (ConfirmationTarget::AnchorChannelFee, ANCHOR_OUTPUT_VALUE_SATOSHI * 2 * 1000)
+ } else {
+ (ConfirmationTarget::NonAnchorChannelFee, 0)
};
+ let commitment_feerate = fee_estimator.bounded_sat_per_1000_weight(commitment_conf_target);
- for outp in closing_tx.trust().built_transaction().output.iter() {
- if !outp.script_pubkey.is_witness_program() && outp.value < MAX_STD_OUTPUT_DUST_LIMIT_SATOSHIS {
- return Err(ChannelError::Close("Remote sent us a closing_signed with a dust output. Always use segwit closing scripts!".to_owned()));
+ let value_to_self_msat = channel_value_satoshis * 1000 - push_msat;
+ let commitment_tx_fee = commit_tx_fee_msat(commitment_feerate, MIN_AFFORDABLE_HTLC_COUNT, &channel_type);
+ if value_to_self_msat.saturating_sub(anchor_outputs_value_msat) < commitment_tx_fee {
+ return Err(APIError::APIMisuseError{ err: format!("Funding amount ({}) can't even pay fee for initial commitment transaction fee of {}.", value_to_self_msat / 1000, commitment_tx_fee / 1000) });
+ }
+
+ let mut secp_ctx = Secp256k1::new();
+ secp_ctx.seeded_randomize(&entropy_source.get_secure_random_bytes());
+
+ let shutdown_scriptpubkey = if config.channel_handshake_config.commit_upfront_shutdown_pubkey {
+ match signer_provider.get_shutdown_scriptpubkey() {
+ Ok(scriptpubkey) => Some(scriptpubkey),
+ Err(_) => return Err(APIError::ChannelUnavailable { err: "Failed to get shutdown scriptpubkey".to_owned()}),
}
- }
+ } else { None };
- assert!(self.context.shutdown_scriptpubkey.is_some());
- if let Some((last_fee, sig)) = self.context.last_sent_closing_fee {
- if last_fee == msg.fee_satoshis {
- let tx = self.build_signed_closing_transaction(&mut closing_tx, &msg.signature, &sig);
- self.context.channel_state = ChannelState::ShutdownComplete as u32;
- self.context.update_time_counter += 1;
- return Ok((None, Some(tx)));
+ if let Some(shutdown_scriptpubkey) = &shutdown_scriptpubkey {
+ if !shutdown_scriptpubkey.is_compatible(&their_features) {
+ return Err(APIError::IncompatibleShutdownScript { script: shutdown_scriptpubkey.clone() });
}
}
- let (our_min_fee, our_max_fee) = self.calculate_closing_fee_limits(fee_estimator);
+ let destination_script = match signer_provider.get_destination_script(channel_keys_id) {
+ Ok(script) => script,
+ Err(_) => return Err(APIError::ChannelUnavailable { err: "Failed to get destination script".to_owned()}),
+ };
- macro_rules! propose_fee {
- ($new_fee: expr) => {
- let (closing_tx, used_fee) = if $new_fee == msg.fee_satoshis {
- (closing_tx, $new_fee)
- } else {
- self.build_closing_transaction($new_fee, false)
- };
+ let temporary_channel_id = temporary_channel_id.unwrap_or_else(|| ChannelId::temporary_from_entropy_source(entropy_source));
- let sig = self.context.holder_signer
- .sign_closing_transaction(&closing_tx, &self.context.secp_ctx)
- .map_err(|_| ChannelError::Close("External signer refused to sign closing transaction".to_owned()))?;
+ Ok(Self {
+ context: ChannelContext {
+ user_id,
- let signed_tx = if $new_fee == msg.fee_satoshis {
- self.context.channel_state = ChannelState::ShutdownComplete as u32;
- self.context.update_time_counter += 1;
- let tx = self.build_signed_closing_transaction(&closing_tx, &msg.signature, &sig);
- Some(tx)
- } else { None };
+ config: LegacyChannelConfig {
+ options: config.channel_config.clone(),
+ announced_channel: config.channel_handshake_config.announced_channel,
+ commit_upfront_shutdown_pubkey: config.channel_handshake_config.commit_upfront_shutdown_pubkey,
+ },
- self.context.last_sent_closing_fee = Some((used_fee, sig.clone()));
- return Ok((Some(msgs::ClosingSigned {
- channel_id: self.context.channel_id,
- fee_satoshis: used_fee,
- signature: sig,
- fee_range: Some(msgs::ClosingSignedFeeRange {
- min_fee_satoshis: our_min_fee,
- max_fee_satoshis: our_max_fee,
- }),
- }), signed_tx))
- }
- }
+ prev_config: None,
- if let Some(msgs::ClosingSignedFeeRange { min_fee_satoshis, max_fee_satoshis }) = msg.fee_range {
- if msg.fee_satoshis < min_fee_satoshis || msg.fee_satoshis > max_fee_satoshis {
- return Err(ChannelError::Close(format!("Peer sent a bogus closing_signed - suggested fee of {} sat was not in their desired range of {} sat - {} sat", msg.fee_satoshis, min_fee_satoshis, max_fee_satoshis)));
- }
- if max_fee_satoshis < our_min_fee {
- return Err(ChannelError::Warn(format!("Unable to come to consensus about closing feerate, remote's max fee ({} sat) was smaller than our min fee ({} sat)", max_fee_satoshis, our_min_fee)));
- }
- if min_fee_satoshis > our_max_fee {
- return Err(ChannelError::Warn(format!("Unable to come to consensus about closing feerate, remote's min fee ({} sat) was greater than our max fee ({} sat)", min_fee_satoshis, our_max_fee)));
- }
+ inbound_handshake_limits_override: Some(config.channel_handshake_limits.clone()),
- if !self.context.is_outbound() {
- // They have to pay, so pick the highest fee in the overlapping range.
- // We should never set an upper bound aside from their full balance
- debug_assert_eq!(our_max_fee, self.context.channel_value_satoshis - (self.context.value_to_self_msat + 999) / 1000);
- propose_fee!(cmp::min(max_fee_satoshis, our_max_fee));
- } else {
- if msg.fee_satoshis < our_min_fee || msg.fee_satoshis > our_max_fee {
- return Err(ChannelError::Close(format!("Peer sent a bogus closing_signed - suggested fee of {} sat was not in our desired range of {} sat - {} sat after we informed them of our range.",
- msg.fee_satoshis, our_min_fee, our_max_fee)));
- }
- // The proposed fee is in our acceptable range, accept it and broadcast!
- propose_fee!(msg.fee_satoshis);
- }
- } else {
- // Old fee style negotiation. We don't bother to enforce whether they are complying
- // with the "making progress" requirements, we just comply and hope for the best.
- if let Some((last_fee, _)) = self.context.last_sent_closing_fee {
- if msg.fee_satoshis > last_fee {
- if msg.fee_satoshis < our_max_fee {
- propose_fee!(msg.fee_satoshis);
- } else if last_fee < our_max_fee {
- propose_fee!(our_max_fee);
- } else {
- return Err(ChannelError::Close(format!("Unable to come to consensus about closing feerate, remote wants something ({} sat) higher than our max fee ({} sat)", msg.fee_satoshis, our_max_fee)));
- }
- } else {
- if msg.fee_satoshis > our_min_fee {
- propose_fee!(msg.fee_satoshis);
- } else if last_fee > our_min_fee {
- propose_fee!(our_min_fee);
- } else {
- return Err(ChannelError::Close(format!("Unable to come to consensus about closing feerate, remote wants something ({} sat) lower than our min fee ({} sat)", msg.fee_satoshis, our_min_fee)));
- }
- }
- } else {
- if msg.fee_satoshis < our_min_fee {
- propose_fee!(our_min_fee);
- } else if msg.fee_satoshis > our_max_fee {
- propose_fee!(our_max_fee);
- } else {
- propose_fee!(msg.fee_satoshis);
- }
- }
- }
- }
+ channel_id: temporary_channel_id,
+ temporary_channel_id: Some(temporary_channel_id),
+ channel_state: ChannelState::NegotiatingFunding(NegotiatingFundingFlags::OUR_INIT_SENT),
+ announcement_sigs_state: AnnouncementSigsState::NotSent,
+ secp_ctx,
+ channel_value_satoshis,
- fn internal_htlc_satisfies_config(
- &self, htlc: &msgs::UpdateAddHTLC, amt_to_forward: u64, outgoing_cltv_value: u32, config: &ChannelConfig,
- ) -> Result<(), (&'static str, u16)> {
- let fee = amt_to_forward.checked_mul(config.forwarding_fee_proportional_millionths as u64)
- .and_then(|prop_fee| (prop_fee / 1000000).checked_add(config.forwarding_fee_base_msat as u64));
- if fee.is_none() || htlc.amount_msat < fee.unwrap() ||
- (htlc.amount_msat - fee.unwrap()) < amt_to_forward {
- return Err((
- "Prior hop has deviated from specified fees parameters or origin node has obsolete ones",
- 0x1000 | 12, // fee_insufficient
- ));
- }
- if (htlc.cltv_expiry as u64) < outgoing_cltv_value as u64 + config.cltv_expiry_delta as u64 {
- return Err((
- "Forwarding node has tampered with the intended HTLC values or origin node has an obsolete cltv_expiry_delta",
- 0x1000 | 13, // incorrect_cltv_expiry
- ));
- }
- Ok(())
- }
+ latest_monitor_update_id: 0,
- /// Determines whether the parameters of an incoming HTLC to be forwarded satisfy the channel's
- /// [`ChannelConfig`]. This first looks at the channel's current [`ChannelConfig`], and if
- /// unsuccessful, falls back to the previous one if one exists.
- pub fn htlc_satisfies_config(
- &self, htlc: &msgs::UpdateAddHTLC, amt_to_forward: u64, outgoing_cltv_value: u32,
- ) -> Result<(), (&'static str, u16)> {
- self.internal_htlc_satisfies_config(&htlc, amt_to_forward, outgoing_cltv_value, &self.context.config())
- .or_else(|err| {
- if let Some(prev_config) = self.context.prev_config() {
- self.internal_htlc_satisfies_config(htlc, amt_to_forward, outgoing_cltv_value, &prev_config)
- } else {
- Err(err)
- }
- })
- }
+ holder_signer: ChannelSignerType::Ecdsa(holder_signer),
+ shutdown_scriptpubkey,
+ destination_script,
- pub fn get_cur_holder_commitment_transaction_number(&self) -> u64 {
- self.context.cur_holder_commitment_transaction_number + 1
- }
+ cur_holder_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
+ cur_counterparty_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
+ value_to_self_msat,
- pub fn get_cur_counterparty_commitment_transaction_number(&self) -> u64 {
- self.context.cur_counterparty_commitment_transaction_number + 1 - if self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32) != 0 { 1 } else { 0 }
- }
+ pending_inbound_htlcs: Vec::new(),
+ pending_outbound_htlcs: Vec::new(),
+ holding_cell_htlc_updates: Vec::new(),
+ pending_update_fee: None,
+ holding_cell_update_fee: None,
+ next_holder_htlc_id: 0,
+ next_counterparty_htlc_id: 0,
+ update_time_counter: 1,
- pub fn get_revoked_counterparty_commitment_transaction_number(&self) -> u64 {
- self.context.cur_counterparty_commitment_transaction_number + 2
- }
+ resend_order: RAACommitmentOrder::CommitmentFirst,
- #[cfg(test)]
- pub fn get_signer(&self) -> &Signer {
- &self.context.holder_signer
- }
+ monitor_pending_channel_ready: false,
+ monitor_pending_revoke_and_ack: false,
+ monitor_pending_commitment_signed: false,
+ monitor_pending_forwards: Vec::new(),
+ monitor_pending_failures: Vec::new(),
+ monitor_pending_finalized_fulfills: Vec::new(),
- #[cfg(test)]
- pub fn get_value_stat(&self) -> ChannelValueStat {
- ChannelValueStat {
- value_to_self_msat: self.context.value_to_self_msat,
- channel_value_msat: self.context.channel_value_satoshis * 1000,
- channel_reserve_msat: self.context.counterparty_selected_channel_reserve_satoshis.unwrap() * 1000,
- pending_outbound_htlcs_amount_msat: self.context.pending_outbound_htlcs.iter().map(|ref h| h.amount_msat).sum::<u64>(),
- pending_inbound_htlcs_amount_msat: self.context.pending_inbound_htlcs.iter().map(|ref h| h.amount_msat).sum::<u64>(),
- holding_cell_outbound_amount_msat: {
- let mut res = 0;
- for h in self.context.holding_cell_htlc_updates.iter() {
- match h {
- &HTLCUpdateAwaitingACK::AddHTLC{amount_msat, .. } => {
- res += amount_msat;
- }
- _ => {}
- }
- }
- res
- },
- counterparty_max_htlc_value_in_flight_msat: self.context.counterparty_max_htlc_value_in_flight_msat,
- counterparty_dust_limit_msat: self.context.counterparty_dust_limit_satoshis * 1000,
- }
- }
+ signer_pending_commitment_update: false,
+ signer_pending_funding: false,
- /// Returns true if this channel has been marked as awaiting a monitor update to move forward.
- /// Allowed in any state (including after shutdown)
- pub fn is_awaiting_monitor_update(&self) -> bool {
- (self.context.channel_state & ChannelState::MonitorUpdateInProgress as u32) != 0
- }
+ #[cfg(debug_assertions)]
+ holder_max_commitment_tx_output: Mutex::new((channel_value_satoshis * 1000 - push_msat, push_msat)),
+ #[cfg(debug_assertions)]
+ counterparty_max_commitment_tx_output: Mutex::new((channel_value_satoshis * 1000 - push_msat, push_msat)),
- pub fn get_latest_complete_monitor_update_id(&self) -> u64 {
- if self.context.pending_monitor_updates.is_empty() { return self.context.get_latest_monitor_update_id(); }
- self.context.pending_monitor_updates[0].update.update_id - 1
- }
+ last_sent_closing_fee: None,
+ pending_counterparty_closing_signed: None,
+ expecting_peer_commitment_signed: false,
+ closing_fee_limits: None,
+ target_closing_feerate_sats_per_kw: None,
- /// Returns the next blocked monitor update, if one exists, and a bool which indicates a
- /// further blocked monitor update exists after the next.
- pub fn unblock_next_blocked_monitor_update(&mut self) -> Option<(&ChannelMonitorUpdate, bool)> {
- for i in 0..self.context.pending_monitor_updates.len() {
- if self.context.pending_monitor_updates[i].blocked {
- self.context.pending_monitor_updates[i].blocked = false;
- return Some((&self.context.pending_monitor_updates[i].update,
- self.context.pending_monitor_updates.len() > i + 1));
- }
- }
- None
- }
+ funding_tx_confirmed_in: None,
+ funding_tx_confirmation_height: 0,
+ short_channel_id: None,
+ channel_creation_height: current_chain_height,
- /// Pushes a new monitor update into our monitor update queue, returning whether it should be
- /// immediately given to the user for persisting or if it should be held as blocked.
- fn push_blockable_mon_update(&mut self, update: ChannelMonitorUpdate) -> bool {
- let release_monitor = self.context.pending_monitor_updates.iter().all(|upd| !upd.blocked);
- self.context.pending_monitor_updates.push(PendingChannelMonitorUpdate {
- update, blocked: !release_monitor
- });
- release_monitor
- }
+ feerate_per_kw: commitment_feerate,
+ counterparty_dust_limit_satoshis: 0,
+ holder_dust_limit_satoshis: MIN_CHAN_DUST_LIMIT_SATOSHIS,
+ counterparty_max_htlc_value_in_flight_msat: 0,
+ holder_max_htlc_value_in_flight_msat: get_holder_max_htlc_value_in_flight_msat(channel_value_satoshis, &config.channel_handshake_config),
+ counterparty_selected_channel_reserve_satoshis: None, // Filled in in accept_channel
+ holder_selected_channel_reserve_satoshis,
+ counterparty_htlc_minimum_msat: 0,
+ holder_htlc_minimum_msat: if config.channel_handshake_config.our_htlc_minimum_msat == 0 { 1 } else { config.channel_handshake_config.our_htlc_minimum_msat },
+ counterparty_max_accepted_htlcs: 0,
+ holder_max_accepted_htlcs: cmp::min(config.channel_handshake_config.our_max_accepted_htlcs, MAX_HTLCS),
+ minimum_depth: None, // Filled in in accept_channel
- /// Pushes a new monitor update into our monitor update queue, returning a reference to it if
- /// it should be immediately given to the user for persisting or `None` if it should be held as
- /// blocked.
- fn push_ret_blockable_mon_update(&mut self, update: ChannelMonitorUpdate)
- -> Option<&ChannelMonitorUpdate> {
- let release_monitor = self.push_blockable_mon_update(update);
- if release_monitor { self.context.pending_monitor_updates.last().map(|upd| &upd.update) } else { None }
- }
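
The two removed helpers above implement a strict in-order release discipline. A minimal model, with a simplified stand-in for `PendingChannelMonitorUpdate`:

    struct PendingUpdate { update_id: u64, blocked: bool }

    // A new update may be handed to the persister immediately only if
    // nothing ahead of it in the queue is still blocked, so updates always
    // complete in order.
    fn push_update(queue: &mut Vec<PendingUpdate>, update_id: u64) -> bool {
        let release_now = queue.iter().all(|upd| !upd.blocked);
        queue.push(PendingUpdate { update_id, blocked: !release_now });
        release_now
    }
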
+ counterparty_forwarding_info: None,
+
+ channel_transaction_parameters: ChannelTransactionParameters {
+ holder_pubkeys: pubkeys,
+ holder_selected_contest_delay: config.channel_handshake_config.our_to_self_delay,
+ is_outbound_from_holder: true,
+ counterparty_parameters: None,
+ funding_outpoint: None,
+ channel_type_features: channel_type.clone()
+ },
+ funding_transaction: None,
+ is_batch_funding: None,
+
+ counterparty_cur_commitment_point: None,
+ counterparty_prev_commitment_point: None,
+ counterparty_node_id,
+
+ counterparty_shutdown_scriptpubkey: None,
+
+ commitment_secrets: CounterpartyCommitmentSecrets::new(),
+
+ channel_update_status: ChannelUpdateStatus::Enabled,
+ closing_signed_in_flight: false,
- pub fn no_monitor_updates_pending(&self) -> bool {
- self.context.pending_monitor_updates.is_empty()
- }
+ announcement_sigs: None,
- pub fn complete_all_mon_updates_through(&mut self, update_id: u64) {
- self.context.pending_monitor_updates.retain(|upd| {
- if upd.update.update_id <= update_id {
- assert!(!upd.blocked, "Completed update must have flown");
- false
- } else { true }
- });
- }
+ #[cfg(any(test, fuzzing))]
+ next_local_commitment_tx_fee_info_cached: Mutex::new(None),
+ #[cfg(any(test, fuzzing))]
+ next_remote_commitment_tx_fee_info_cached: Mutex::new(None),
- pub fn complete_one_mon_update(&mut self, update_id: u64) {
- self.context.pending_monitor_updates.retain(|upd| upd.update.update_id != update_id);
- }
+ workaround_lnd_bug_4006: None,
+ sent_message_awaiting_response: None,
- /// Returns an iterator over all unblocked monitor updates which have not yet completed.
- pub fn uncompleted_unblocked_mon_updates(&self) -> impl Iterator<Item=&ChannelMonitorUpdate> {
- self.context.pending_monitor_updates.iter()
- .filter_map(|upd| if upd.blocked { None } else { Some(&upd.update) })
- }
+ latest_inbound_scid_alias: None,
+ outbound_scid_alias,
- /// Returns true if the channel is awaiting the persistence of the initial ChannelMonitor.
- /// If the channel is outbound, this implies we have not yet broadcasted the funding
- /// transaction. If the channel is inbound, this implies simply that the channel has not
- /// advanced state.
- pub fn is_awaiting_initial_mon_persist(&self) -> bool {
- if !self.is_awaiting_monitor_update() { return false; }
- if self.context.channel_state &
- !(ChannelState::TheirChannelReady as u32 | ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32)
- == ChannelState::FundingSent as u32 {
- // If we're not a 0conf channel, we'll be waiting on a monitor update with only
- // FundingSent set, though our peer could have sent their channel_ready.
- debug_assert!(self.context.minimum_depth.unwrap_or(1) > 0);
- return true;
- }
- if self.context.cur_holder_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 1 &&
- self.context.cur_counterparty_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 1 {
- // If we're a 0-conf channel, we'll move beyond FundingSent immediately even while
- // waiting for the initial monitor persistence. Thus, we check if our commitment
- // transaction numbers have both been iterated only exactly once (for the
- // funding_signed), and we're awaiting monitor update.
- //
- // If we got here, we shouldn't have yet broadcasted the funding transaction (as the
- // only way to get an awaiting-monitor-update state during initial funding is if the
- // initial monitor persistence is still pending).
- //
- // Because deciding we're awaiting initial broadcast spuriously could result in
- // funds-loss (as we don't have a monitor, but have the funding transaction confirmed),
- // we hard-assert here, even in production builds.
- if self.context.is_outbound() { assert!(self.context.funding_transaction.is_some()); }
- assert!(self.context.monitor_pending_channel_ready);
- assert_eq!(self.context.latest_monitor_update_id, 0);
- return true;
- }
- false
- }
+ channel_pending_event_emitted: false,
+ channel_ready_event_emitted: false,
- /// Returns true if our channel_ready has been sent
- pub fn is_our_channel_ready(&self) -> bool {
- (self.context.channel_state & ChannelState::OurChannelReady as u32) != 0 || self.context.channel_state >= ChannelState::ChannelReady as u32
- }
+ #[cfg(any(test, fuzzing))]
+ historical_inbound_htlc_fulfills: HashSet::new(),
- /// Returns true if our peer has either initiated or agreed to shut down the channel.
- pub fn received_shutdown(&self) -> bool {
- (self.context.channel_state & ChannelState::RemoteShutdownSent as u32) != 0
- }
+ channel_type,
+ channel_keys_id,
- /// Returns true if we either initiated or agreed to shut down the channel.
- pub fn sent_shutdown(&self) -> bool {
- (self.context.channel_state & ChannelState::LocalShutdownSent as u32) != 0
+ blocked_monitor_updates: Vec::new(),
+ },
+ unfunded_context: UnfundedChannelContext { unfunded_channel_age_ticks: 0 }
+ })
}
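
The `value_to_self_msat` and debug-only `*_max_commitment_tx_output` fields initialized above all derive from one split. A sketch of that arithmetic (hypothetical helper; `push_msat` is assumed to have been validated against the channel value already):

    // The funder opens with the full channel value minus whatever it pushed
    // to the counterparty at open.
    fn initial_split_msat(channel_value_satoshis: u64, push_msat: u64) -> (u64, u64) {
        (channel_value_satoshis * 1000 - push_msat, push_msat)
    }
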
- /// Returns true if this channel is fully shut down. True here implies that no further actions
- /// may/will be taken on this channel, and thus this object should be freed. Any future changes
- /// will be handled appropriately by the chain monitor.
- pub fn is_shutdown(&self) -> bool {
- if (self.context.channel_state & ChannelState::ShutdownComplete as u32) == ChannelState::ShutdownComplete as u32 {
- assert!(self.context.channel_state == ChannelState::ShutdownComplete as u32);
- true
- } else { false }
- }
+ /// Only allowed after [`ChannelContext::channel_transaction_parameters`] is set.
+ fn get_funding_created_msg<L: Deref>(&mut self, logger: &L) -> Option<msgs::FundingCreated> where L::Target: Logger {
+ let counterparty_keys = self.context.build_remote_transaction_keys();
+ let counterparty_initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, false, logger).tx;
+ let signature = match &self.context.holder_signer {
+ // TODO (taproot|arik): move match into calling method for Taproot
+ ChannelSignerType::Ecdsa(ecdsa) => {
+ ecdsa.sign_counterparty_commitment(&counterparty_initial_commitment_tx, Vec::new(), Vec::new(), &self.context.secp_ctx)
+ .map(|(sig, _)| sig).ok()?
+ },
+ // TODO (taproot|arik)
+ #[cfg(taproot)]
+ _ => todo!()
+ };
- pub fn channel_update_status(&self) -> ChannelUpdateStatus {
- self.context.channel_update_status
- }
+ if self.context.signer_pending_funding {
+ log_trace!(logger, "Counterparty commitment signature ready for funding_created message: clearing signer_pending_funding");
+ self.context.signer_pending_funding = false;
+ }
- pub fn set_channel_update_status(&mut self, status: ChannelUpdateStatus) {
- self.context.update_time_counter += 1;
- self.context.channel_update_status = status;
+ Some(msgs::FundingCreated {
+ temporary_channel_id: self.context.temporary_channel_id.unwrap(),
+ funding_txid: self.context.channel_transaction_parameters.funding_outpoint.as_ref().unwrap().txid,
+ funding_output_index: self.context.channel_transaction_parameters.funding_outpoint.as_ref().unwrap().index,
+ signature,
+ #[cfg(taproot)]
+ partial_signature_with_nonce: None,
+ #[cfg(taproot)]
+ next_local_nonce: None,
+ })
}
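
Returning `Option` here is the asynchronous-signer contract: `None` means the signature is not ready yet, the caller records that via `signer_pending_funding`, and `signer_maybe_unblocked` retries later. Schematically (hypothetical helper, not LDK API):

    // Set the pending flag when the message could not be built, clear it
    // when it finally is.
    fn build_or_mark_pending<T>(built: Option<T>, signer_pending: &mut bool) -> Option<T> {
        *signer_pending = built.is_none();
        built
    }
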
- fn check_get_channel_ready(&mut self, height: u32) -> Option<msgs::ChannelReady> {
- // Called:
- // * always when a new block/transactions are confirmed with the new height
- // * when funding is signed with a height of 0
- if self.context.funding_tx_confirmation_height == 0 && self.context.minimum_depth != Some(0) {
- return None;
+ /// Updates channel state with knowledge of the funding transaction's txid/index, and generates
+ /// a funding_created message for the remote peer.
+ /// Panics if called at some time other than immediately after initial handshake, if called twice,
+ /// or if called on an inbound channel.
+ /// Note that channel_id changes during this call!
+ /// Do NOT broadcast the funding transaction until after a successful funding_signed call!
+ /// If an Err is returned, it is a ChannelError::Close.
+ pub fn get_funding_created<L: Deref>(&mut self, funding_transaction: Transaction, funding_txo: OutPoint, is_batch_funding: bool, logger: &L)
+ -> Result<Option<msgs::FundingCreated>, (Self, ChannelError)> where L::Target: Logger {
+ if !self.context.is_outbound() {
+ panic!("Tried to create outbound funding_created message on an inbound channel!");
}
-
- let funding_tx_confirmations = height as i64 - self.context.funding_tx_confirmation_height as i64 + 1;
- if funding_tx_confirmations <= 0 {
- self.context.funding_tx_confirmation_height = 0;
+ if !matches!(
+ self.context.channel_state, ChannelState::NegotiatingFunding(flags)
+ if flags == (NegotiatingFundingFlags::OUR_INIT_SENT | NegotiatingFundingFlags::THEIR_INIT_SENT)
+ ) {
+ panic!("Tried to get a funding_created messsage at a time other than immediately after initial handshake completion (or tried to get funding_created twice)");
}
-
- if funding_tx_confirmations < self.context.minimum_depth.unwrap_or(0) as i64 {
- return None;
+ if self.context.commitment_secrets.get_min_seen_secret() != (1 << 48) ||
+ self.context.cur_counterparty_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER ||
+ self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
+ panic!("Should not have advanced channel commitment tx numbers prior to funding_created");
}
- let non_shutdown_state = self.context.channel_state & (!MULTI_STATE_FLAGS);
- let need_commitment_update = if non_shutdown_state == ChannelState::FundingSent as u32 {
- self.context.channel_state |= ChannelState::OurChannelReady as u32;
- true
- } else if non_shutdown_state == (ChannelState::FundingSent as u32 | ChannelState::TheirChannelReady as u32) {
- self.context.channel_state = ChannelState::ChannelReady as u32 | (self.context.channel_state & MULTI_STATE_FLAGS);
- self.context.update_time_counter += 1;
- true
- } else if non_shutdown_state == (ChannelState::FundingSent as u32 | ChannelState::OurChannelReady as u32) {
- // We got a reorg but not enough to trigger a force close, just ignore.
- false
- } else {
- if self.context.funding_tx_confirmation_height != 0 && self.context.channel_state < ChannelState::ChannelReady as u32 {
- // We should never see a funding transaction on-chain until we've received
- // funding_signed (if we're an outbound channel), or seen funding_generated (if we're
- // an inbound channel - before that we have no known funding TXID). The fuzzer,
- // however, may do this and we shouldn't treat it as a bug.
- #[cfg(not(fuzzing))]
- panic!("Started confirming a channel in a state pre-FundingSent: {}.\n\
- Do NOT broadcast a funding transaction manually - let LDK do it for you!",
- self.context.channel_state);
- }
- // We got a reorg but not enough to trigger a force close, just ignore.
- false
- };
+ self.context.channel_transaction_parameters.funding_outpoint = Some(funding_txo);
+ self.context.holder_signer.as_mut().provide_channel_parameters(&self.context.channel_transaction_parameters);
- if need_commitment_update {
- if self.context.channel_state & (ChannelState::MonitorUpdateInProgress as u32) == 0 {
- if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == 0 {
- let next_per_commitment_point =
- self.context.holder_signer.get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - 1, &self.context.secp_ctx);
- return Some(msgs::ChannelReady {
- channel_id: self.context.channel_id,
- next_per_commitment_point,
- short_channel_id_alias: Some(self.context.outbound_scid_alias),
- });
- }
- } else {
- self.context.monitor_pending_channel_ready = true;
- }
- }
- None
- }
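
The depth test in the removed function above hinges on one convention, sketched here: the block containing the funding transaction counts as confirmation number one, so a reorg back below that height makes the count non-positive and resets the recorded height.

    fn funding_confirmations(tip_height: u32, funding_conf_height: u32) -> i64 {
        tip_height as i64 - funding_conf_height as i64 + 1
    }
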
+ // Now that we're past error-generating stuff, update our local state:
- /// When a transaction is confirmed, we check whether it is or spends the funding transaction
- /// In the first case, we store the confirmation height and calculating the short channel id.
- /// In the second, we simply return an Err indicating we need to be force-closed now.
- pub fn transactions_confirmed<NS: Deref, L: Deref>(
- &mut self, block_hash: &BlockHash, height: u32, txdata: &TransactionData,
- genesis_block_hash: BlockHash, node_signer: &NS, user_config: &UserConfig, logger: &L
- ) -> Result<(Option<msgs::ChannelReady>, Option<msgs::AnnouncementSignatures>), ClosureReason>
- where
- NS::Target: NodeSigner,
- L::Target: Logger
- {
- if let Some(funding_txo) = self.context.get_funding_txo() {
- for &(index_in_block, tx) in txdata.iter() {
- // Check if the transaction is the expected funding transaction, and if it is,
- // check that it pays the right amount to the right script.
- if self.context.funding_tx_confirmation_height == 0 {
- if tx.txid() == funding_txo.txid {
- let txo_idx = funding_txo.index as usize;
- if txo_idx >= tx.output.len() || tx.output[txo_idx].script_pubkey != self.context.get_funding_redeemscript().to_v0_p2wsh() ||
- tx.output[txo_idx].value != self.context.channel_value_satoshis {
- if self.context.is_outbound() {
- // If we generated the funding transaction and it doesn't match what it
- // should, the client is really broken and we should just panic and
- // tell them off. That said, because hash collisions happen with high
- // probability in fuzzing mode, if we're fuzzing we just close the
- // channel and move on.
- #[cfg(not(fuzzing))]
- panic!("Client called ChannelManager::funding_transaction_generated with bogus transaction!");
- }
- self.context.update_time_counter += 1;
- let err_reason = "funding tx had wrong script/value or output index";
- return Err(ClosureReason::ProcessingError { err: err_reason.to_owned() });
- } else {
- if self.context.is_outbound() {
- for input in tx.input.iter() {
- if input.witness.is_empty() {
- // We generated a malleable funding transaction, implying we've
- // just exposed ourselves to funds loss to our counterparty.
- #[cfg(not(fuzzing))]
- panic!("Client called ChannelManager::funding_transaction_generated with bogus transaction!");
- }
- }
- }
- self.context.funding_tx_confirmation_height = height;
- self.context.funding_tx_confirmed_in = Some(*block_hash);
- self.context.short_channel_id = match scid_from_parts(height as u64, index_in_block as u64, txo_idx as u64) {
- Ok(scid) => Some(scid),
- Err(_) => panic!("Block was bogus - either height was > 16 million, had > 16 million transactions, or had > 65k outputs"),
- }
- }
- }
- // If we allow 1-conf funding, we may need to check for channel_ready here and
- // send it immediately instead of waiting for a best_block_updated call (which
- // may have already happened for this block).
- if let Some(channel_ready) = self.check_get_channel_ready(height) {
- log_info!(logger, "Sending a channel_ready to our peer for channel {}", log_bytes!(self.context.channel_id));
- let announcement_sigs = self.get_announcement_sigs(node_signer, genesis_block_hash, user_config, height, logger);
- return Ok((Some(channel_ready), announcement_sigs));
- }
- }
- for inp in tx.input.iter() {
- if inp.previous_output == funding_txo.into_bitcoin_outpoint() {
- log_info!(logger, "Detected channel-closing tx {} spending {}:{}, closing channel {}", tx.txid(), inp.previous_output.txid, inp.previous_output.vout, log_bytes!(self.context.channel_id()));
- return Err(ClosureReason::CommitmentTxConfirmed);
- }
- }
- }
+ self.context.channel_state = ChannelState::FundingNegotiated;
+ self.context.channel_id = funding_txo.to_channel_id();
+
+ // If the funding transaction is a coinbase transaction, we need to set the minimum depth to 100.
+ // We can skip this if it is a zero-conf channel.
+ if funding_transaction.is_coin_base() &&
+ self.context.minimum_depth.unwrap_or(0) > 0 &&
+ self.context.minimum_depth.unwrap_or(0) < COINBASE_MATURITY {
+ self.context.minimum_depth = Some(COINBASE_MATURITY);
}
- Ok((None, None))
- }
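
`scid_from_parts` can only fail because short channel ids pack the funding position into fixed-width fields (24 bits of block height, 24 bits of transaction index, 16 bits of output index, per BOLT 7). A sketch of that packing:

    fn short_channel_id(block_height: u64, tx_index: u64, vout: u64) -> Option<u64> {
        if block_height >= (1 << 24) || tx_index >= (1 << 24) || vout >= (1 << 16) {
            return None; // the panic path in the code above
        }
        Some((block_height << 40) | (tx_index << 16) | vout)
    }
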
- /// When a new block is connected, we check the height of the block against outbound holding
- /// cell HTLCs in case we need to give up on them prematurely and time them out. Everything
- /// else (e.g. commitment transaction broadcasts, HTLC transaction broadcasting, etc) is
- /// handled by the ChannelMonitor.
- ///
- /// If we return Err, the channel may have been closed, at which point the standard
- /// requirements apply - no calls may be made except those explicitly stated to be allowed
- /// post-shutdown.
- ///
- /// May return some HTLCs (and their payment_hash) which have timed out and should be failed
- /// back.
- pub fn best_block_updated<NS: Deref, L: Deref>(
- &mut self, height: u32, highest_header_time: u32, genesis_block_hash: BlockHash,
- node_signer: &NS, user_config: &UserConfig, logger: &L
- ) -> Result<(Option<msgs::ChannelReady>, Vec<(HTLCSource, PaymentHash)>, Option<msgs::AnnouncementSignatures>), ClosureReason>
- where
- NS::Target: NodeSigner,
- L::Target: Logger
- {
- self.do_best_block_updated(height, highest_header_time, Some((genesis_block_hash, node_signer, user_config)), logger)
- }
+ self.context.funding_transaction = Some(funding_transaction);
+ self.context.is_batch_funding = Some(()).filter(|_| is_batch_funding);
- fn do_best_block_updated<NS: Deref, L: Deref>(
- &mut self, height: u32, highest_header_time: u32,
- genesis_node_signer: Option<(BlockHash, &NS, &UserConfig)>, logger: &L
- ) -> Result<(Option<msgs::ChannelReady>, Vec<(HTLCSource, PaymentHash)>, Option<msgs::AnnouncementSignatures>), ClosureReason>
- where
- NS::Target: NodeSigner,
- L::Target: Logger
- {
- let mut timed_out_htlcs = Vec::new();
- // This mirrors the check in ChannelManager::decode_update_add_htlc_onion, refusing to
- // forward an HTLC when our counterparty should almost certainly just fail it for expiring
- // ~now.
- let unforwarded_htlc_cltv_limit = height + LATENCY_GRACE_PERIOD_BLOCKS;
- self.context.holding_cell_htlc_updates.retain(|htlc_update| {
- match htlc_update {
- &HTLCUpdateAwaitingACK::AddHTLC { ref payment_hash, ref source, ref cltv_expiry, .. } => {
- if *cltv_expiry <= unforwarded_htlc_cltv_limit {
- timed_out_htlcs.push((source.clone(), payment_hash.clone()));
- false
- } else { true }
- },
- _ => true
+ let funding_created = self.get_funding_created_msg(logger);
+ if funding_created.is_none() {
+ if !self.context.signer_pending_funding {
+ log_trace!(logger, "funding_created awaiting signer; setting signer_pending_funding");
+ self.context.signer_pending_funding = true;
}
- });
+ }
- self.context.update_time_counter = cmp::max(self.context.update_time_counter, highest_header_time);
+ Ok(funding_created)
+ }
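
The coinbase bump above follows Bitcoin's maturity rule: coinbase outputs cannot be spent for 100 blocks. As a pure function over the minimum depth (ignoring the `is_coin_base()` gate, which the caller checks):

    fn coinbase_min_depth(minimum_depth: Option<u32>) -> Option<u32> {
        const COINBASE_MATURITY: u32 = 100;
        match minimum_depth {
            // 0conf (Some(0)) and unknown (None) depths are left untouched.
            Some(depth) if depth > 0 && depth < COINBASE_MATURITY => Some(COINBASE_MATURITY),
            other => other,
        }
    }
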
- if let Some(channel_ready) = self.check_get_channel_ready(height) {
- let announcement_sigs = if let Some((genesis_block_hash, node_signer, user_config)) = genesis_node_signer {
- self.get_announcement_sigs(node_signer, genesis_block_hash, user_config, height, logger)
- } else { None };
- log_info!(logger, "Sending a channel_ready to our peer for channel {}", log_bytes!(self.context.channel_id));
- return Ok((Some(channel_ready), timed_out_htlcs, announcement_sigs));
+ fn get_initial_channel_type(config: &UserConfig, their_features: &InitFeatures) -> ChannelTypeFeatures {
+ // The default channel type (i.e. the first one we try) depends on whether the channel is
+ // public - if it is, we just go with `only_static_remotekey` as it's the only option
+ // available. If it's private, we first try `scid_privacy` as it provides better privacy
+ // with no other changes, and fall back to `only_static_remotekey`.
+ let mut ret = ChannelTypeFeatures::only_static_remote_key();
+ if !config.channel_handshake_config.announced_channel &&
+ config.channel_handshake_config.negotiate_scid_privacy &&
+ their_features.supports_scid_privacy() {
+ ret.set_scid_privacy_required();
}
- let non_shutdown_state = self.context.channel_state & (!MULTI_STATE_FLAGS);
- if non_shutdown_state >= ChannelState::ChannelReady as u32 ||
- (non_shutdown_state & ChannelState::OurChannelReady as u32) == ChannelState::OurChannelReady as u32 {
- let mut funding_tx_confirmations = height as i64 - self.context.funding_tx_confirmation_height as i64 + 1;
- if self.context.funding_tx_confirmation_height == 0 {
- // Note that check_get_channel_ready may reset funding_tx_confirmation_height to
- // zero if it has been reorged out, however in either case, our state flags
- // indicate we've already sent a channel_ready
- funding_tx_confirmations = 0;
- }
-
- // If we've sent channel_ready (or have both sent and received channel_ready), and
- // the funding transaction has become unconfirmed,
- // close the channel and hope we can get the latest state on chain (because presumably
- // the funding transaction is at least still in the mempool of most nodes).
- //
- // Note that ideally we wouldn't force-close if we see *any* reorg on a 1-conf or
- // 0-conf channel, but not doing so may lead to the
- // `ChannelManager::short_to_chan_info` map being inconsistent, so we currently have
- // to.
- if funding_tx_confirmations == 0 && self.context.funding_tx_confirmed_in.is_some() {
- let err_reason = format!("Funding transaction was un-confirmed. Locked at {} confs, now have {} confs.",
- self.context.minimum_depth.unwrap(), funding_tx_confirmations);
- return Err(ClosureReason::ProcessingError { err: err_reason });
- }
- } else if !self.context.is_outbound() && self.context.funding_tx_confirmed_in.is_none() &&
- height >= self.context.channel_creation_height + FUNDING_CONF_DEADLINE_BLOCKS {
- log_info!(logger, "Closing channel {} due to funding timeout", log_bytes!(self.context.channel_id));
- // If funding_tx_confirmed_in is unset, the channel must not be active
- assert!(non_shutdown_state <= ChannelState::ChannelReady as u32);
- assert_eq!(non_shutdown_state & ChannelState::OurChannelReady as u32, 0);
- return Err(ClosureReason::FundingTimedOut);
+ // Optionally, if the user would like to negotiate the `anchors_zero_fee_htlc_tx` option, we
+ // set it now. If they don't understand it, we'll fall back to our default of
+ // `only_static_remotekey`.
+ if config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx &&
+ their_features.supports_anchors_zero_fee_htlc_tx() {
+ ret.set_anchors_zero_fee_htlc_tx_required();
}
- let announcement_sigs = if let Some((genesis_block_hash, node_signer, user_config)) = genesis_node_signer {
- self.get_announcement_sigs(node_signer, genesis_block_hash, user_config, height, logger)
- } else { None };
- Ok((None, timed_out_htlcs, announcement_sigs))
+ ret
}
- /// Indicates the funding transaction is no longer confirmed in the main chain. This may
- /// force-close the channel, but may also indicate a harmless reorganization of a block or two
- /// before the channel has reached channel_ready and we can just wait for more blocks.
- pub fn funding_transaction_unconfirmed<L: Deref>(&mut self, logger: &L) -> Result<(), ClosureReason> where L::Target: Logger {
- if self.context.funding_tx_confirmation_height != 0 {
- // We handle the funding disconnection by calling best_block_updated with a height one
- // below where our funding was connected, implying a reorg back to conf_height - 1.
- let reorg_height = self.context.funding_tx_confirmation_height - 1;
- // We use the time field to bump the current time we set on channel updates if its
- // larger. If we don't know that time has moved forward, we can just set it to the last
- // time we saw and it will be ignored.
- let best_time = self.context.update_time_counter;
- match self.do_best_block_updated(reorg_height, best_time, None::<(BlockHash, &&NodeSigner, &UserConfig)>, logger) {
- Ok((channel_ready, timed_out_htlcs, announcement_sigs)) => {
- assert!(channel_ready.is_none(), "We can't generate a funding with 0 confirmations?");
- assert!(timed_out_htlcs.is_empty(), "We can't have accepted HTLCs with a timeout before our funding confirmation?");
- assert!(announcement_sigs.is_none(), "We can't generate an announcement_sigs with 0 confirmations?");
- Ok(())
- },
- Err(e) => Err(e)
- }
+ /// If we receive an error message, it may only be a rejection of the channel type we tried,
+ /// not of our ability to open any channel at all. Thus, on error, we should first call this
+ /// and see if we get a new `OpenChannel` message, otherwise the channel is failed.
+ pub(crate) fn maybe_handle_error_without_close<F: Deref>(
+ &mut self, chain_hash: ChainHash, fee_estimator: &LowerBoundedFeeEstimator<F>
+ ) -> Result<msgs::OpenChannel, ()>
+ where
+ F::Target: FeeEstimator
+ {
+ if !self.context.is_outbound() ||
+ !matches!(
+ self.context.channel_state, ChannelState::NegotiatingFunding(flags)
+ if flags == NegotiatingFundingFlags::OUR_INIT_SENT
+ )
+ {
+ return Err(());
+ }
+ if self.context.channel_type == ChannelTypeFeatures::only_static_remote_key() {
+ // We've exhausted our options
+ return Err(());
+ }
+ // We support opening a few different types of channels. Try removing our additional
+ // features one by one until we've either arrived at our default or the counterparty has
+ // accepted one.
+ //
+ // Due to the order below, we may not negotiate `option_anchors_zero_fee_htlc_tx` if the
+ // counterparty doesn't support `option_scid_privacy`. Since `get_initial_channel_type`
+ // checks whether the counterparty supports every feature, this would only happen if the
+ // counterparty is advertising the feature, but rejecting channels proposing the feature for
+ // whatever reason.
+ if self.context.channel_type.supports_anchors_zero_fee_htlc_tx() {
+ self.context.channel_type.clear_anchors_zero_fee_htlc_tx();
+ self.context.feerate_per_kw = fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::NonAnchorChannelFee);
+ assert!(!self.context.channel_transaction_parameters.channel_type_features.supports_anchors_nonzero_fee_htlc_tx());
+ } else if self.context.channel_type.supports_scid_privacy() {
+ self.context.channel_type.clear_scid_privacy();
} else {
- // We never learned about the funding confirmation anyway, just ignore
- Ok(())
+ self.context.channel_type = ChannelTypeFeatures::only_static_remote_key();
}
+ self.context.channel_transaction_parameters.channel_type_features = self.context.channel_type.clone();
+ Ok(self.get_open_channel(chain_hash))
}
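
The retry ladder above, as data. This is deliberately simplified (real channel types are feature sets, not an enum), but it captures the order in which optional features are stripped:

    #[derive(Clone, Copy, PartialEq, Debug)]
    enum RetryType { AnchorsZeroFeeHtlcTx, ScidPrivacy, StaticRemoteKeyOnly }

    fn next_channel_type_to_try(current: RetryType) -> Option<RetryType> {
        match current {
            RetryType::AnchorsZeroFeeHtlcTx => Some(RetryType::ScidPrivacy),
            RetryType::ScidPrivacy => Some(RetryType::StaticRemoteKeyOnly),
            RetryType::StaticRemoteKeyOnly => None, // exhausted: fail the open
        }
    }
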
- // Methods to get unprompted messages to send to the remote end (or where we already returned
- // something in the handler for the message that prompted this message):
-
- pub fn get_open_channel(&self, chain_hash: BlockHash) -> msgs::OpenChannel {
+ pub fn get_open_channel(&self, chain_hash: ChainHash) -> msgs::OpenChannel {
if !self.context.is_outbound() {
panic!("Tried to open a channel for an inbound channel?");
}
- if self.context.channel_state != ChannelState::OurInitSent as u32 {
+ if self.context.have_received_message() {
panic!("Cannot generate an open_channel after we've moved forward");
}
panic!("Tried to send an open_channel for a channel that has already advanced");
}
- let first_per_commitment_point = self.context.holder_signer.get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
+ let first_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
let keys = self.context.get_holder_pubkeys();
msgs::OpenChannel {
to_self_delay: self.context.get_holder_selected_contest_delay(),
max_accepted_htlcs: self.context.holder_max_accepted_htlcs,
funding_pubkey: keys.funding_pubkey,
- revocation_basepoint: keys.revocation_basepoint,
+ revocation_basepoint: keys.revocation_basepoint.to_public_key(),
payment_point: keys.payment_point,
- delayed_payment_basepoint: keys.delayed_payment_basepoint,
- htlc_basepoint: keys.htlc_basepoint,
+ delayed_payment_basepoint: keys.delayed_payment_basepoint.to_public_key(),
+ htlc_basepoint: keys.htlc_basepoint.to_public_key(),
first_per_commitment_point,
channel_flags: if self.context.config.announced_channel {1} else {0},
shutdown_scriptpubkey: Some(match &self.context.shutdown_scriptpubkey {
}
}
- pub fn inbound_is_awaiting_accept(&self) -> bool {
- self.context.inbound_awaiting_accept
- }
+ // Message handlers
+ pub fn accept_channel(&mut self, msg: &msgs::AcceptChannel, default_limits: &ChannelHandshakeLimits, their_features: &InitFeatures) -> Result<(), ChannelError> {
+ let peer_limits = if let Some(ref limits) = self.context.inbound_handshake_limits_override { limits } else { default_limits };
- /// Sets this channel to accepting 0conf, must be done before `get_accept_channel`
- pub fn set_0conf(&mut self) {
- assert!(self.context.inbound_awaiting_accept);
- self.context.minimum_depth = Some(0);
- }
+ // Check sanity of message fields:
+ if !self.context.is_outbound() {
+ return Err(ChannelError::Close("Got an accept_channel message from an inbound peer".to_owned()));
+ }
+ if !matches!(self.context.channel_state, ChannelState::NegotiatingFunding(flags) if flags == NegotiatingFundingFlags::OUR_INIT_SENT) {
+ return Err(ChannelError::Close("Got an accept_channel message at a strange time".to_owned()));
+ }
+ if msg.dust_limit_satoshis > 21000000 * 100000000 {
+ return Err(ChannelError::Close(format!("Peer never wants payout outputs? dust_limit_satoshis was {}", msg.dust_limit_satoshis)));
+ }
+ if msg.channel_reserve_satoshis > self.context.channel_value_satoshis {
+ return Err(ChannelError::Close(format!("Bogus channel_reserve_satoshis ({}). Must not be greater than ({})", msg.channel_reserve_satoshis, self.context.channel_value_satoshis)));
+ }
+ if msg.dust_limit_satoshis > self.context.holder_selected_channel_reserve_satoshis {
+ return Err(ChannelError::Close(format!("Dust limit ({}) is bigger than our channel reserve ({})", msg.dust_limit_satoshis, self.context.holder_selected_channel_reserve_satoshis)));
+ }
+ if msg.channel_reserve_satoshis > self.context.channel_value_satoshis - self.context.holder_selected_channel_reserve_satoshis {
+ return Err(ChannelError::Close(format!("Bogus channel_reserve_satoshis ({}). Must not be greater than channel value minus our reserve ({})",
+ msg.channel_reserve_satoshis, self.context.channel_value_satoshis - self.context.holder_selected_channel_reserve_satoshis)));
+ }
+ let full_channel_value_msat = (self.context.channel_value_satoshis - msg.channel_reserve_satoshis) * 1000;
+ if msg.htlc_minimum_msat >= full_channel_value_msat {
+ return Err(ChannelError::Close(format!("Minimum htlc value ({}) is full channel value ({})", msg.htlc_minimum_msat, full_channel_value_msat)));
+ }
+ let max_delay_acceptable = u16::min(peer_limits.their_to_self_delay, MAX_LOCAL_BREAKDOWN_TIMEOUT);
+ if msg.to_self_delay > max_delay_acceptable {
+ return Err(ChannelError::Close(format!("They wanted our payments to be delayed by a needlessly long period. Upper limit: {}. Actual: {}", max_delay_acceptable, msg.to_self_delay)));
+ }
+ if msg.max_accepted_htlcs < 1 {
+ return Err(ChannelError::Close("0 max_accepted_htlcs makes for a useless channel".to_owned()));
+ }
+ if msg.max_accepted_htlcs > MAX_HTLCS {
+ return Err(ChannelError::Close(format!("max_accepted_htlcs was {}. It must not be larger than {}", msg.max_accepted_htlcs, MAX_HTLCS)));
+ }
- /// Marks an inbound channel as accepted and generates a [`msgs::AcceptChannel`] message which
- /// should be sent back to the counterparty node.
- ///
- /// [`msgs::AcceptChannel`]: crate::ln::msgs::AcceptChannel
- pub fn accept_inbound_channel(&mut self, user_id: u128) -> msgs::AcceptChannel {
- if self.context.is_outbound() {
- panic!("Tried to send accept_channel for an outbound channel?");
+ // Now check against optional parameters as set by config...
+ if msg.htlc_minimum_msat > peer_limits.max_htlc_minimum_msat {
+ return Err(ChannelError::Close(format!("htlc_minimum_msat ({}) is higher than the user specified limit ({})", msg.htlc_minimum_msat, peer_limits.max_htlc_minimum_msat)));
}
- if self.context.channel_state != (ChannelState::OurInitSent as u32) | (ChannelState::TheirInitSent as u32) {
- panic!("Tried to send accept_channel after channel had moved forward");
+ if msg.max_htlc_value_in_flight_msat < peer_limits.min_max_htlc_value_in_flight_msat {
+ return Err(ChannelError::Close(format!("max_htlc_value_in_flight_msat ({}) is less than the user specified limit ({})", msg.max_htlc_value_in_flight_msat, peer_limits.min_max_htlc_value_in_flight_msat)));
}
- if self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
- panic!("Tried to send an accept_channel for a channel that has already advanced");
+ if msg.channel_reserve_satoshis > peer_limits.max_channel_reserve_satoshis {
+ return Err(ChannelError::Close(format!("channel_reserve_satoshis ({}) is higher than the user specified limit ({})", msg.channel_reserve_satoshis, peer_limits.max_channel_reserve_satoshis)));
+ }
+ if msg.max_accepted_htlcs < peer_limits.min_max_accepted_htlcs {
+ return Err(ChannelError::Close(format!("max_accepted_htlcs ({}) is less than the user specified limit ({})", msg.max_accepted_htlcs, peer_limits.min_max_accepted_htlcs)));
+ }
+ if msg.dust_limit_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
+ return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is less than the implementation limit ({})", msg.dust_limit_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS)));
+ }
+ if msg.dust_limit_satoshis > MAX_CHAN_DUST_LIMIT_SATOSHIS {
+ return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is greater than the implementation limit ({})", msg.dust_limit_satoshis, MAX_CHAN_DUST_LIMIT_SATOSHIS)));
}
- if !self.context.inbound_awaiting_accept {
- panic!("The inbound channel has already been accepted");
+ if msg.minimum_depth > peer_limits.max_minimum_depth {
+ return Err(ChannelError::Close(format!("We consider the minimum depth to be unreasonably large. Expected minimum: ({}). Actual: ({})", peer_limits.max_minimum_depth, msg.minimum_depth)));
}
- self.context.user_id = user_id;
- self.context.inbound_awaiting_accept = false;
+ if let Some(ty) = &msg.channel_type {
+ if *ty != self.context.channel_type {
+ return Err(ChannelError::Close("Channel Type in accept_channel didn't match the one sent in open_channel.".to_owned()));
+ }
+ } else if their_features.supports_channel_type() {
+ // Assume they've accepted the channel type as they said they understand it.
+ } else {
+ let channel_type = ChannelTypeFeatures::from_init(&their_features);
+ if channel_type != ChannelTypeFeatures::only_static_remote_key() {
+ return Err(ChannelError::Close("Only static_remote_key is supported for non-negotiated channel types".to_owned()));
+ }
+ self.context.channel_type = channel_type.clone();
+ self.context.channel_transaction_parameters.channel_type_features = channel_type;
+ }
- self.generate_accept_channel_message()
- }
+ let counterparty_shutdown_scriptpubkey = if their_features.supports_upfront_shutdown_script() {
+ match &msg.shutdown_scriptpubkey {
+ &Some(ref script) => {
+ // Peer is signaling upfront_shutdown and has opted out with a 0-length script. We don't enforce anything
+ if script.len() == 0 {
+ None
+ } else {
+ if !script::is_bolt2_compliant(&script, their_features) {
+ return Err(ChannelError::Close(format!("Peer is signaling upfront_shutdown but has provided an unacceptable scriptpubkey format: {}", script)));
+ }
+ Some(script.clone())
+ }
+ },
+ // Peer is signaling upfront_shutdown but didn't opt out via the correct mechanism (a 0-length script). Peer looks buggy, so we fail the channel
+ &None => {
+ return Err(ChannelError::Close("Peer is signaling upfront_shutdown but we don't get any script. Use 0-length script to opt-out".to_owned()));
+ }
+ }
+ } else { None };
- /// This function is used to explicitly generate a [`msgs::AcceptChannel`] message for an
- /// inbound channel. If the intention is to accept an inbound channel, use
- /// [`Channel::accept_inbound_channel`] instead.
- ///
- /// [`msgs::AcceptChannel`]: crate::ln::msgs::AcceptChannel
- fn generate_accept_channel_message(&self) -> msgs::AcceptChannel {
- let first_per_commitment_point = self.context.holder_signer.get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
- let keys = self.context.get_holder_pubkeys();
+ self.context.counterparty_dust_limit_satoshis = msg.dust_limit_satoshis;
+ self.context.counterparty_max_htlc_value_in_flight_msat = cmp::min(msg.max_htlc_value_in_flight_msat, self.context.channel_value_satoshis * 1000);
+ self.context.counterparty_selected_channel_reserve_satoshis = Some(msg.channel_reserve_satoshis);
+ self.context.counterparty_htlc_minimum_msat = msg.htlc_minimum_msat;
+ self.context.counterparty_max_accepted_htlcs = msg.max_accepted_htlcs;
- msgs::AcceptChannel {
- temporary_channel_id: self.context.channel_id,
- dust_limit_satoshis: self.context.holder_dust_limit_satoshis,
- max_htlc_value_in_flight_msat: self.context.holder_max_htlc_value_in_flight_msat,
- channel_reserve_satoshis: self.context.holder_selected_channel_reserve_satoshis,
- htlc_minimum_msat: self.context.holder_htlc_minimum_msat,
- minimum_depth: self.context.minimum_depth.unwrap(),
- to_self_delay: self.context.get_holder_selected_contest_delay(),
- max_accepted_htlcs: self.context.holder_max_accepted_htlcs,
- funding_pubkey: keys.funding_pubkey,
- revocation_basepoint: keys.revocation_basepoint,
- payment_point: keys.payment_point,
- delayed_payment_basepoint: keys.delayed_payment_basepoint,
- htlc_basepoint: keys.htlc_basepoint,
- first_per_commitment_point,
- shutdown_scriptpubkey: Some(match &self.context.shutdown_scriptpubkey {
- Some(script) => script.clone().into_inner(),
- None => Builder::new().into_script(),
- }),
- channel_type: Some(self.context.channel_type.clone()),
- #[cfg(taproot)]
- next_local_nonce: None,
+ if peer_limits.trust_own_funding_0conf {
+ self.context.minimum_depth = Some(msg.minimum_depth);
+ } else {
+ self.context.minimum_depth = Some(cmp::max(1, msg.minimum_depth));
}
- }
- /// Enables the possibility for tests to extract a [`msgs::AcceptChannel`] message for an
- /// inbound channel without accepting it.
- ///
- /// [`msgs::AcceptChannel`]: crate::ln::msgs::AcceptChannel
- #[cfg(test)]
- pub fn get_accept_channel_message(&self) -> msgs::AcceptChannel {
- self.generate_accept_channel_message()
- }
+ let counterparty_pubkeys = ChannelPublicKeys {
+ funding_pubkey: msg.funding_pubkey,
+ revocation_basepoint: RevocationBasepoint::from(msg.revocation_basepoint),
+ payment_point: msg.payment_point,
+ delayed_payment_basepoint: DelayedPaymentBasepoint::from(msg.delayed_payment_basepoint),
+ htlc_basepoint: HtlcBasepoint::from(msg.htlc_basepoint)
+ };
- /// If an Err is returned, it is a ChannelError::Close (for get_outbound_funding_created)
- fn get_outbound_funding_created_signature<L: Deref>(&mut self, logger: &L) -> Result<Signature, ChannelError> where L::Target: Logger {
- let counterparty_keys = self.context.build_remote_transaction_keys();
- let counterparty_initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, false, logger).tx;
- Ok(self.context.holder_signer.sign_counterparty_commitment(&counterparty_initial_commitment_tx, Vec::new(), &self.context.secp_ctx)
- .map_err(|_| ChannelError::Close("Failed to get signatures for new commitment_signed".to_owned()))?.0)
+ self.context.channel_transaction_parameters.counterparty_parameters = Some(CounterpartyChannelTransactionParameters {
+ selected_contest_delay: msg.to_self_delay,
+ pubkeys: counterparty_pubkeys,
+ });
+
+ self.context.counterparty_cur_commitment_point = Some(msg.first_per_commitment_point);
+ self.context.counterparty_shutdown_scriptpubkey = counterparty_shutdown_scriptpubkey;
+
+ self.context.channel_state = ChannelState::NegotiatingFunding(
+ NegotiatingFundingFlags::OUR_INIT_SENT | NegotiatingFundingFlags::THEIR_INIT_SENT
+ );
+ self.context.inbound_handshake_limits_override = None; // We're done enforcing limits on our peer's handshake now.
+
+ Ok(())
}
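
The first three reserve/dust sanity checks above collapse into a few inequalities. A hypothetical helper, assuming our own reserve never exceeds the channel value:

    fn reserve_checks_ok(
        channel_value_sat: u64, our_reserve_sat: u64,
        their_reserve_sat: u64, their_dust_limit_sat: u64,
    ) -> bool {
        their_reserve_sat <= channel_value_sat
            && their_dust_limit_sat <= our_reserve_sat
            && their_reserve_sat <= channel_value_sat - our_reserve_sat
    }
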
- /// Updates channel state with knowledge of the funding transaction's txid/index, and generates
- /// a funding_created message for the remote peer.
- /// Panics if called at some time other than immediately after initial handshake, if called twice,
- /// or if called on an inbound channel.
- /// Note that channel_id changes during this call!
- /// Do NOT broadcast the funding transaction until after a successful funding_signed call!
- /// If an Err is returned, it is a ChannelError::Close.
- pub fn get_outbound_funding_created<L: Deref>(&mut self, funding_transaction: Transaction, funding_txo: OutPoint, logger: &L) -> Result<msgs::FundingCreated, ChannelError> where L::Target: Logger {
+ /// Handles a funding_signed message from the remote end.
+ /// If this call is successful, broadcast the funding transaction (and not before!)
+ pub fn funding_signed<L: Deref>(
+ mut self, msg: &msgs::FundingSigned, best_block: BestBlock, signer_provider: &SP, logger: &L
+ ) -> Result<(Channel<SP>, ChannelMonitor<<SP::Target as SignerProvider>::EcdsaSigner>), (OutboundV1Channel<SP>, ChannelError)>
+ where
+ L::Target: Logger
+ {
if !self.context.is_outbound() {
- panic!("Tried to create outbound funding_created message on an inbound channel!");
+ return Err((self, ChannelError::Close("Received funding_signed for an inbound channel?".to_owned())));
}
- if self.context.channel_state != (ChannelState::OurInitSent as u32 | ChannelState::TheirInitSent as u32) {
- panic!("Tried to get a funding_created messsage at a time other than immediately after initial handshake completion (or tried to get funding_created twice)");
+ if !matches!(self.context.channel_state, ChannelState::FundingNegotiated) {
+ return Err((self, ChannelError::Close("Received funding_signed in strange state!".to_owned())));
}
if self.context.commitment_secrets.get_min_seen_secret() != (1 << 48) ||
self.context.cur_counterparty_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER ||
panic!("Should not have advanced channel commitment tx numbers prior to funding_created");
}
- self.context.channel_transaction_parameters.funding_outpoint = Some(funding_txo);
- self.context.holder_signer.provide_channel_parameters(&self.context.channel_transaction_parameters);
-
- let signature = match self.get_outbound_funding_created_signature(logger) {
- Ok(res) => res,
- Err(e) => {
- log_error!(logger, "Got bad signatures: {:?}!", e);
- self.context.channel_transaction_parameters.funding_outpoint = None;
- return Err(e);
- }
- };
+ let funding_script = self.context.get_funding_redeemscript();
- let temporary_channel_id = self.context.channel_id;
+ let counterparty_keys = self.context.build_remote_transaction_keys();
+ let counterparty_initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, false, logger).tx;
+ let counterparty_trusted_tx = counterparty_initial_commitment_tx.trust();
+ let counterparty_initial_bitcoin_tx = counterparty_trusted_tx.built_transaction();
- // Now that we're past error-generating stuff, update our local state:
+ log_trace!(logger, "Initial counterparty tx for channel {} is: txid {} tx {}",
+ &self.context.channel_id(), counterparty_initial_bitcoin_tx.txid, encode::serialize_hex(&counterparty_initial_bitcoin_tx.transaction));
- self.context.channel_state = ChannelState::FundingCreated as u32;
- self.context.channel_id = funding_txo.to_channel_id();
- self.context.funding_transaction = Some(funding_transaction);
+ let holder_signer = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number);
+ let initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &holder_signer, true, false, logger).tx;
+ {
+ let trusted_tx = initial_commitment_tx.trust();
+ let initial_commitment_bitcoin_tx = trusted_tx.built_transaction();
+ let sighash = initial_commitment_bitcoin_tx.get_sighash_all(&funding_script, self.context.channel_value_satoshis);
+ // They sign our commitment transaction, allowing us to broadcast the tx if we wish.
+ if let Err(_) = self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, &self.context.get_counterparty_pubkeys().funding_pubkey) {
+ return Err((self, ChannelError::Close("Invalid funding_signed signature from peer".to_owned())));
+ }
+ }
- Ok(msgs::FundingCreated {
- temporary_channel_id,
- funding_txid: funding_txo.txid,
- funding_output_index: funding_txo.index,
- signature,
- #[cfg(taproot)]
- partial_signature_with_nonce: None,
- #[cfg(taproot)]
- next_local_nonce: None,
- })
- }
+ let holder_commitment_tx = HolderCommitmentTransaction::new(
+ initial_commitment_tx,
+ msg.signature,
+ Vec::new(),
+ &self.context.get_holder_pubkeys().funding_pubkey,
+ self.context.counterparty_funding_pubkey()
+ );
- /// Gets an UnsignedChannelAnnouncement for this channel. The channel must be publicly
- /// announceable and available for use (have exchanged ChannelReady messages in both
- /// directions). Should be used for both broadcasted announcements and in response to an
- /// AnnouncementSignatures message from the remote peer.
- ///
- /// Will only fail if we're not in a state where channel_announcement may be sent (including
- /// closing).
- ///
- /// This will only return ChannelError::Ignore upon failure.
- fn get_channel_announcement<NS: Deref>(
- &self, node_signer: &NS, chain_hash: BlockHash, user_config: &UserConfig,
- ) -> Result<msgs::UnsignedChannelAnnouncement, ChannelError> where NS::Target: NodeSigner {
- if !self.context.config.announced_channel {
- return Err(ChannelError::Ignore("Channel is not available for public announcements".to_owned()));
+ let validated =
+ self.context.holder_signer.as_ref().validate_holder_commitment(&holder_commitment_tx, Vec::new());
+ if validated.is_err() {
+ return Err((self, ChannelError::Close("Failed to validate our commitment".to_owned())));
}
- if !self.context.is_usable() {
- return Err(ChannelError::Ignore("Cannot get a ChannelAnnouncement if the channel is not currently usable".to_owned()));
+
+ let funding_redeemscript = self.context.get_funding_redeemscript();
+ let funding_txo = self.context.get_funding_txo().unwrap();
+ let funding_txo_script = funding_redeemscript.to_v0_p2wsh();
+ let obscure_factor = get_commitment_transaction_number_obscure_factor(&self.context.get_holder_pubkeys().payment_point, &self.context.get_counterparty_pubkeys().payment_point, self.context.is_outbound());
+ let shutdown_script = self.context.shutdown_scriptpubkey.clone().map(|script| script.into_inner());
+ let mut monitor_signer = signer_provider.derive_channel_signer(self.context.channel_value_satoshis, self.context.channel_keys_id);
+ monitor_signer.provide_channel_parameters(&self.context.channel_transaction_parameters);
+ let channel_monitor = ChannelMonitor::new(self.context.secp_ctx.clone(), monitor_signer,
+ shutdown_script, self.context.get_holder_selected_contest_delay(),
+ &self.context.destination_script, (funding_txo, funding_txo_script),
+ &self.context.channel_transaction_parameters,
+ funding_redeemscript.clone(), self.context.channel_value_satoshis,
+ obscure_factor,
+ holder_commitment_tx, best_block, self.context.counterparty_node_id);
+ channel_monitor.provide_initial_counterparty_commitment_tx(
+ counterparty_initial_bitcoin_tx.txid, Vec::new(),
+ self.context.cur_counterparty_commitment_transaction_number,
+ self.context.counterparty_cur_commitment_point.unwrap(),
+ counterparty_initial_commitment_tx.feerate_per_kw(),
+ counterparty_initial_commitment_tx.to_broadcaster_value_sat(),
+ counterparty_initial_commitment_tx.to_countersignatory_value_sat(), logger);
+
+ assert!(!self.context.channel_state.is_monitor_update_in_progress()); // We have not had any monitor(s) yet to fail an update!
+ if self.context.is_batch_funding() {
+ self.context.channel_state = ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::WAITING_FOR_BATCH);
+ } else {
+ self.context.channel_state = ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::new());
}
+ self.context.cur_holder_commitment_transaction_number -= 1;
+ self.context.cur_counterparty_commitment_transaction_number -= 1;
- let node_id = NodeId::from_pubkey(&node_signer.get_node_id(Recipient::Node)
- .map_err(|_| ChannelError::Ignore("Failed to retrieve own public key".to_owned()))?);
- let counterparty_node_id = NodeId::from_pubkey(&self.context.get_counterparty_node_id());
- let were_node_one = node_id.as_slice() < counterparty_node_id.as_slice();
+ log_info!(logger, "Received funding_signed from peer for channel {}", &self.context.channel_id());
- let msg = msgs::UnsignedChannelAnnouncement {
- features: channelmanager::provided_channel_features(&user_config),
- chain_hash,
- short_channel_id: self.context.get_short_channel_id().unwrap(),
- node_id_1: if were_node_one { node_id } else { counterparty_node_id },
- node_id_2: if were_node_one { counterparty_node_id } else { node_id },
- bitcoin_key_1: NodeId::from_pubkey(if were_node_one { &self.context.get_holder_pubkeys().funding_pubkey } else { self.context.counterparty_funding_pubkey() }),
- bitcoin_key_2: NodeId::from_pubkey(if were_node_one { self.context.counterparty_funding_pubkey() } else { &self.context.get_holder_pubkeys().funding_pubkey }),
- excess_data: Vec::new(),
- };
+ let mut channel = Channel { context: self.context };
- Ok(msg)
+ let need_channel_ready = channel.check_get_channel_ready(0).is_some();
+ channel.monitor_updating_paused(false, false, need_channel_ready, Vec::new(), Vec::new(), Vec::new());
+ Ok((channel, channel_monitor))
}
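
The decrements just before the monitor pause above reflect that commitment numbers count down: they are 48-bit values starting at 2^48 - 1, encoded obscured in each commitment transaction's locktime and sequence fields. A sketch:

    const INITIAL_COMMITMENT_NUMBER_SKETCH: u64 = (1 << 48) - 1;

    // After funding_signed, both counters move from 2^48 - 1 to 2^48 - 2.
    fn after_initial_exchange(cur_commitment_number: u64) -> u64 {
        debug_assert_eq!(cur_commitment_number, INITIAL_COMMITMENT_NUMBER_SKETCH);
        cur_commitment_number - 1
    }
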
- fn get_announcement_sigs<NS: Deref, L: Deref>(
- &mut self, node_signer: &NS, genesis_block_hash: BlockHash, user_config: &UserConfig,
- best_block_height: u32, logger: &L
- ) -> Option<msgs::AnnouncementSignatures>
- where
- NS::Target: NodeSigner,
- L::Target: Logger
+ /// Indicates that the signer may have some signatures for us, so we should retry if we're
+ /// blocked.
+ #[allow(unused)]
+ pub fn signer_maybe_unblocked<L: Deref>(&mut self, logger: &L) -> Option<msgs::FundingCreated> where L::Target: Logger {
+ if self.context.signer_pending_funding && self.context.is_outbound() {
+ log_trace!(logger, "Signer unblocked a funding_created");
+ self.get_funding_created_msg(logger)
+ } else { None }
+ }
+}
+
+/// A not-yet-funded inbound (from counterparty) channel using V1 channel establishment.
+pub(super) struct InboundV1Channel<SP: Deref> where SP::Target: SignerProvider {
+ pub context: ChannelContext<SP>,
+ pub unfunded_context: UnfundedChannelContext,
+}
+
+impl<SP: Deref> InboundV1Channel<SP> where SP::Target: SignerProvider {
+ /// Creates a new channel from a remote side's request for one.
+ /// Assumes chain_hash has already been checked and corresponds with what we expect!
+ pub fn new<ES: Deref, F: Deref, L: Deref>(
+ fee_estimator: &LowerBoundedFeeEstimator<F>, entropy_source: &ES, signer_provider: &SP,
+ counterparty_node_id: PublicKey, our_supported_features: &ChannelTypeFeatures,
+ their_features: &InitFeatures, msg: &msgs::OpenChannel, user_id: u128, config: &UserConfig,
+ current_chain_height: u32, logger: &L, is_0conf: bool,
+ ) -> Result<InboundV1Channel<SP>, ChannelError>
+ where ES::Target: EntropySource,
+ F::Target: FeeEstimator,
+ L::Target: Logger,
{
- if self.context.funding_tx_confirmation_height == 0 || self.context.funding_tx_confirmation_height + 5 > best_block_height {
- return None;
+ let logger = WithContext::from(logger, Some(counterparty_node_id), Some(msg.temporary_channel_id));
+ let announced_channel = (msg.channel_flags & 1) == 1;
+
+ // First check the channel type is known, failing before we do anything else if we don't
+ // support this channel type.
+ let channel_type = if let Some(channel_type) = &msg.channel_type {
+ if channel_type.supports_any_optional_bits() {
+ return Err(ChannelError::Close("Channel Type field contained optional bits - this is not allowed".to_owned()));
+ }
+
+ // We only support the channel types defined by the `ChannelManager` in
+ // `provided_channel_type_features`. The channel type must always support
+ // `static_remote_key`.
+ if !channel_type.requires_static_remote_key() {
+ return Err(ChannelError::Close("Channel Type was not understood - we require static remote key".to_owned()));
+ }
+ // Make sure we support all of the features behind the channel type.
+ if !channel_type.is_subset(our_supported_features) {
+ return Err(ChannelError::Close("Channel Type contains unsupported features".to_owned()));
+ }
+ if channel_type.requires_scid_privacy() && announced_channel {
+ return Err(ChannelError::Close("SCID Alias/Privacy Channel Type cannot be set on a public channel".to_owned()));
+ }
+ channel_type.clone()
+ } else {
+ let channel_type = ChannelTypeFeatures::from_init(&their_features);
+ if channel_type != ChannelTypeFeatures::only_static_remote_key() {
+ return Err(ChannelError::Close("Only static_remote_key is supported for non-negotiated channel types".to_owned()));
+ }
+ channel_type
+ };
+
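+ // Derive our channel keys and signer; since both are derived from `channel_keys_id`,
+ // the same signer can be re-derived deterministically after a restart.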
+ let channel_keys_id = signer_provider.generate_channel_keys_id(true, msg.funding_satoshis, user_id);
+ let holder_signer = signer_provider.derive_channel_signer(msg.funding_satoshis, channel_keys_id);
+ let pubkeys = holder_signer.pubkeys().clone();
+ let counterparty_pubkeys = ChannelPublicKeys {
+ funding_pubkey: msg.funding_pubkey,
+ revocation_basepoint: RevocationBasepoint::from(msg.revocation_basepoint),
+ payment_point: msg.payment_point,
+ delayed_payment_basepoint: DelayedPaymentBasepoint::from(msg.delayed_payment_basepoint),
+ htlc_basepoint: HtlcBasepoint::from(msg.htlc_basepoint)
+ };
+
+ if config.channel_handshake_config.our_to_self_delay < BREAKDOWN_TIMEOUT {
+ return Err(ChannelError::Close(format!("Configured with an unreasonable our_to_self_delay ({}) putting user funds at risks. It must be greater than {}", config.channel_handshake_config.our_to_self_delay, BREAKDOWN_TIMEOUT)));
}
- if !self.context.is_usable() {
- return None;
+ // Check sanity of message fields:
+ if msg.funding_satoshis > config.channel_handshake_limits.max_funding_satoshis {
+ return Err(ChannelError::Close(format!("Per our config, funding must be at most {}. It was {}", config.channel_handshake_limits.max_funding_satoshis, msg.funding_satoshis)));
}
+ if msg.funding_satoshis >= TOTAL_BITCOIN_SUPPLY_SATOSHIS {
+ return Err(ChannelError::Close(format!("Funding must be smaller than the total bitcoin supply. It was {}", msg.funding_satoshis)));
+ }
+ if msg.channel_reserve_satoshis > msg.funding_satoshis {
+ return Err(ChannelError::Close(format!("Bogus channel_reserve_satoshis ({}). Must be not greater than funding_satoshis: {}", msg.channel_reserve_satoshis, msg.funding_satoshis)));
+ }
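+ // Funds at or below the required reserve are never spendable, so the usable channel
+ // value is the funding amount minus the reserve.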
+ let full_channel_value_msat = (msg.funding_satoshis - msg.channel_reserve_satoshis) * 1000;
+ if msg.push_msat > full_channel_value_msat {
+ return Err(ChannelError::Close(format!("push_msat {} was larger than channel amount minus reserve ({})", msg.push_msat, full_channel_value_msat)));
+ }
+ if msg.dust_limit_satoshis > msg.funding_satoshis {
+ return Err(ChannelError::Close(format!("dust_limit_satoshis {} was larger than funding_satoshis {}. Peer never wants payout outputs?", msg.dust_limit_satoshis, msg.funding_satoshis)));
+ }
+ if msg.htlc_minimum_msat >= full_channel_value_msat {
+ return Err(ChannelError::Close(format!("Minimum htlc value ({}) was larger than full channel value ({})", msg.htlc_minimum_msat, full_channel_value_msat)));
+ }
+ Channel::<SP>::check_remote_fee(&channel_type, fee_estimator, msg.feerate_per_kw, None, &&logger)?;
- if self.context.channel_state & ChannelState::PeerDisconnected as u32 != 0 {
- log_trace!(logger, "Cannot create an announcement_signatures as our peer is disconnected");
- return None;
+ let max_counterparty_selected_contest_delay = u16::min(config.channel_handshake_limits.their_to_self_delay, MAX_LOCAL_BREAKDOWN_TIMEOUT);
+ if msg.to_self_delay > max_counterparty_selected_contest_delay {
+ return Err(ChannelError::Close(format!("They wanted our payments to be delayed by a needlessly long period. Upper limit: {}. Actual: {}", max_counterparty_selected_contest_delay, msg.to_self_delay)));
+ }
+ if msg.max_accepted_htlcs < 1 {
+ return Err(ChannelError::Close("0 max_accepted_htlcs makes for a useless channel".to_owned()));
+ }
+ if msg.max_accepted_htlcs > MAX_HTLCS {
+ return Err(ChannelError::Close(format!("max_accepted_htlcs was {}. It must not be larger than {}", msg.max_accepted_htlcs, MAX_HTLCS)));
}
- if self.context.announcement_sigs_state != AnnouncementSigsState::NotSent {
- return None;
+ // Now check against optional parameters as set by config...
+ if msg.funding_satoshis < config.channel_handshake_limits.min_funding_satoshis {
+ return Err(ChannelError::Close(format!("Funding satoshis ({}) is less than the user specified limit ({})", msg.funding_satoshis, config.channel_handshake_limits.min_funding_satoshis)));
+ }
+ if msg.htlc_minimum_msat > config.channel_handshake_limits.max_htlc_minimum_msat {
+ return Err(ChannelError::Close(format!("htlc_minimum_msat ({}) is higher than the user specified limit ({})", msg.htlc_minimum_msat, config.channel_handshake_limits.max_htlc_minimum_msat)));
+ }
+ if msg.max_htlc_value_in_flight_msat < config.channel_handshake_limits.min_max_htlc_value_in_flight_msat {
+ return Err(ChannelError::Close(format!("max_htlc_value_in_flight_msat ({}) is less than the user specified limit ({})", msg.max_htlc_value_in_flight_msat, config.channel_handshake_limits.min_max_htlc_value_in_flight_msat)));
+ }
+ if msg.channel_reserve_satoshis > config.channel_handshake_limits.max_channel_reserve_satoshis {
+ return Err(ChannelError::Close(format!("channel_reserve_satoshis ({}) is higher than the user specified limit ({})", msg.channel_reserve_satoshis, config.channel_handshake_limits.max_channel_reserve_satoshis)));
+ }
+ if msg.max_accepted_htlcs < config.channel_handshake_limits.min_max_accepted_htlcs {
+ return Err(ChannelError::Close(format!("max_accepted_htlcs ({}) is less than the user specified limit ({})", msg.max_accepted_htlcs, config.channel_handshake_limits.min_max_accepted_htlcs)));
+ }
+ if msg.dust_limit_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
+ return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is less than the implementation limit ({})", msg.dust_limit_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS)));
+ }
+ if msg.dust_limit_satoshis > MAX_CHAN_DUST_LIMIT_SATOSHIS {
+ return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is greater than the implementation limit ({})", msg.dust_limit_satoshis, MAX_CHAN_DUST_LIMIT_SATOSHIS)));
}
- log_trace!(logger, "Creating an announcement_signatures message for channel {}", log_bytes!(self.context.channel_id()));
- let announcement = match self.get_channel_announcement(node_signer, genesis_block_hash, user_config) {
- Ok(a) => a,
- Err(e) => {
- log_trace!(logger, "{:?}", e);
- return None;
- }
- };
- let our_node_sig = match node_signer.sign_gossip_message(msgs::UnsignedGossipMessage::ChannelAnnouncement(&announcement)) {
- Err(_) => {
- log_error!(logger, "Failed to generate node signature for channel_announcement. Channel will not be announced!");
- return None;
- },
- Ok(v) => v
- };
- let our_bitcoin_sig = match self.context.holder_signer.sign_channel_announcement_with_funding_key(&announcement, &self.context.secp_ctx) {
- Err(_) => {
- log_error!(logger, "Signer rejected channel_announcement signing. Channel will not be announced!");
- return None;
- },
- Ok(v) => v
- };
- self.context.announcement_sigs_state = AnnouncementSigsState::MessageSent;
+ // Convert things into internal flags and prep our state:
- Some(msgs::AnnouncementSignatures {
- channel_id: self.context.channel_id(),
- short_channel_id: self.context.get_short_channel_id().unwrap(),
- node_signature: our_node_sig,
- bitcoin_signature: our_bitcoin_sig,
- })
- }
+ if config.channel_handshake_limits.force_announced_channel_preference {
+ if config.channel_handshake_config.announced_channel != announced_channel {
+ return Err(ChannelError::Close("Peer tried to open channel but their announcement preference is different from ours".to_owned()));
+ }
+ }
- /// Signs the given channel announcement, returning a ChannelError::Ignore if no keys are
- /// available.
- fn sign_channel_announcement<NS: Deref>(
- &self, node_signer: &NS, announcement: msgs::UnsignedChannelAnnouncement
- ) -> Result<msgs::ChannelAnnouncement, ChannelError> where NS::Target: NodeSigner {
- if let Some((their_node_sig, their_bitcoin_sig)) = self.context.announcement_sigs {
- let our_node_key = NodeId::from_pubkey(&node_signer.get_node_id(Recipient::Node)
- .map_err(|_| ChannelError::Ignore("Signer failed to retrieve own public key".to_owned()))?);
- let were_node_one = announcement.node_id_1 == our_node_key;
+ let holder_selected_channel_reserve_satoshis = get_holder_selected_channel_reserve_satoshis(msg.funding_satoshis, config);
+ if holder_selected_channel_reserve_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
+ // Protocol-level safety check in place; this should never happen because of
+ // `MIN_THEIR_CHAN_RESERVE_SATOSHIS`
+ return Err(ChannelError::Close(format!("Suitable channel reserve not found. remote_channel_reserve was ({}). dust_limit_satoshis is ({}).", holder_selected_channel_reserve_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS)));
+ }
+ if holder_selected_channel_reserve_satoshis * 1000 >= full_channel_value_msat {
+ return Err(ChannelError::Close(format!("Suitable channel reserve not found. remote_channel_reserve was ({})msats. Channel value is ({} - {})msats.", holder_selected_channel_reserve_satoshis * 1000, full_channel_value_msat, msg.push_msat)));
+ }
+ if msg.channel_reserve_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
+ log_debug!(logger, "channel_reserve_satoshis ({}) is smaller than our dust limit ({}). We can broadcast stale states without any risk, implying this channel is very insecure for our counterparty.",
+ msg.channel_reserve_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS);
+ }
+ if holder_selected_channel_reserve_satoshis < msg.dust_limit_satoshis {
+ return Err(ChannelError::Close(format!("Dust limit ({}) too high for the channel reserve we require the remote to keep ({})", msg.dust_limit_satoshis, holder_selected_channel_reserve_satoshis)));
+ }
- let our_node_sig = node_signer.sign_gossip_message(msgs::UnsignedGossipMessage::ChannelAnnouncement(&announcement))
- .map_err(|_| ChannelError::Ignore("Failed to generate node signature for channel_announcement".to_owned()))?;
- let our_bitcoin_sig = self.context.holder_signer.sign_channel_announcement_with_funding_key(&announcement, &self.context.secp_ctx)
- .map_err(|_| ChannelError::Ignore("Signer rejected channel_announcement".to_owned()))?;
- Ok(msgs::ChannelAnnouncement {
- node_signature_1: if were_node_one { our_node_sig } else { their_node_sig },
- node_signature_2: if were_node_one { their_node_sig } else { our_node_sig },
- bitcoin_signature_1: if were_node_one { our_bitcoin_sig } else { their_bitcoin_sig },
- bitcoin_signature_2: if were_node_one { their_bitcoin_sig } else { our_bitcoin_sig },
- contents: announcement,
- })
+ // check if the funder's amount for the initial commitment tx is sufficient
+ // for full fee payment plus a few HTLCs to ensure the channel will be useful.
+ let anchor_outputs_value = if channel_type.supports_anchors_zero_fee_htlc_tx() {
+ ANCHOR_OUTPUT_VALUE_SATOSHI * 2
} else {
- Err(ChannelError::Ignore("Attempted to sign channel announcement before we'd received announcement_signatures".to_string()))
+ 0
+ };
+ let funders_amount_msat = msg.funding_satoshis * 1000 - msg.push_msat;
+ let commitment_tx_fee = commit_tx_fee_msat(msg.feerate_per_kw, MIN_AFFORDABLE_HTLC_COUNT, &channel_type) / 1000;
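+ // The fee is estimated assuming MIN_AFFORDABLE_HTLC_COUNT pending HTLCs, so the
+ // channel can actually carry a few payments once opened.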
+ if (funders_amount_msat / 1000).saturating_sub(anchor_outputs_value) < commitment_tx_fee {
+ return Err(ChannelError::Close(format!("Funding amount ({} sats) can't even pay fee for initial commitment transaction fee of {} sats.", (funders_amount_msat / 1000).saturating_sub(anchor_outputs_value), commitment_tx_fee)));
}
- }
-
- /// Processes an incoming announcement_signatures message, providing a fully-signed
- /// channel_announcement message which we can broadcast and storing our counterparty's
- /// signatures for later reconstruction/rebroadcast of the channel_announcement.
- pub fn announcement_signatures<NS: Deref>(
- &mut self, node_signer: &NS, chain_hash: BlockHash, best_block_height: u32,
- msg: &msgs::AnnouncementSignatures, user_config: &UserConfig
- ) -> Result<msgs::ChannelAnnouncement, ChannelError> where NS::Target: NodeSigner {
- let announcement = self.get_channel_announcement(node_signer, chain_hash, user_config)?;
-
- let msghash = hash_to_message!(&Sha256d::hash(&announcement.encode()[..])[..]);
- if self.context.secp_ctx.verify_ecdsa(&msghash, &msg.node_signature, &self.context.get_counterparty_node_id()).is_err() {
- return Err(ChannelError::Close(format!(
- "Bad announcement_signatures. Failed to verify node_signature. UnsignedChannelAnnouncement used for verification is {:?}. their_node_key is {:?}",
- &announcement, self.context.get_counterparty_node_id())));
- }
- if self.context.secp_ctx.verify_ecdsa(&msghash, &msg.bitcoin_signature, self.context.counterparty_funding_pubkey()).is_err() {
- return Err(ChannelError::Close(format!(
- "Bad announcement_signatures. Failed to verify bitcoin_signature. UnsignedChannelAnnouncement used for verification is {:?}. their_bitcoin_key is ({:?})",
- &announcement, self.context.counterparty_funding_pubkey())));
+ let to_remote_satoshis = funders_amount_msat / 1000 - commitment_tx_fee - anchor_outputs_value;
+ // While it's reasonable for us to not meet the channel reserve initially (if they don't
+ // want to push much to us), our counterparty should always have more than our reserve.
+ if to_remote_satoshis < holder_selected_channel_reserve_satoshis {
+ return Err(ChannelError::Close("Insufficient funding amount for initial reserve".to_owned()));
}
- self.context.announcement_sigs = Some((msg.node_signature, msg.bitcoin_signature));
- if self.context.funding_tx_confirmation_height == 0 || self.context.funding_tx_confirmation_height + 5 > best_block_height {
- return Err(ChannelError::Ignore(
- "Got announcement_signatures prior to the required six confirmations - we may not have received a block yet that our peer has".to_owned()));
- }
+ let counterparty_shutdown_scriptpubkey = if their_features.supports_upfront_shutdown_script() {
+ match &msg.shutdown_scriptpubkey {
+ &Some(ref script) => {
+ // Peer is signaling upfront_shutdown and has opted out with a 0-length script. We don't enforce anything
+ if script.is_empty() {
+ None
+ } else {
+ if !script::is_bolt2_compliant(&script, their_features) {
+ return Err(ChannelError::Close(format!("Peer is signaling upfront_shutdown but has provided an unacceptable scriptpubkey format: {}", script)))
+ }
+ Some(script.clone())
+ }
+ },
+ // Peer is signaling upfront shutdown but didn't opt out with the correct mechanism (a 0-length script). The peer looks buggy, so we fail the channel
+ &None => {
+ return Err(ChannelError::Close("Peer is signaling upfront_shutdown but we don't get any script. Use 0-length script to opt-out".to_owned()));
+ }
+ }
+ } else { None };
- self.sign_channel_announcement(node_signer, announcement)
- }
+ let shutdown_scriptpubkey = if config.channel_handshake_config.commit_upfront_shutdown_pubkey {
+ match signer_provider.get_shutdown_scriptpubkey() {
+ Ok(scriptpubkey) => Some(scriptpubkey),
+ Err(_) => return Err(ChannelError::Close("Failed to get upfront shutdown scriptpubkey".to_owned())),
+ }
+ } else { None };
- /// Gets a signed channel_announcement for this channel, if we previously received an
- /// announcement_signatures from our counterparty.
- pub fn get_signed_channel_announcement<NS: Deref>(
- &self, node_signer: &NS, chain_hash: BlockHash, best_block_height: u32, user_config: &UserConfig
- ) -> Option<msgs::ChannelAnnouncement> where NS::Target: NodeSigner {
- if self.context.funding_tx_confirmation_height == 0 || self.context.funding_tx_confirmation_height + 5 > best_block_height {
- return None;
+ if let Some(shutdown_scriptpubkey) = &shutdown_scriptpubkey {
+ if !shutdown_scriptpubkey.is_compatible(&their_features) {
+ return Err(ChannelError::Close(format!("Provided a scriptpubkey format not accepted by peer: {}", shutdown_scriptpubkey)));
+ }
}
- let announcement = match self.get_channel_announcement(node_signer, chain_hash, user_config) {
- Ok(res) => res,
- Err(_) => return None,
+
+ let destination_script = match signer_provider.get_destination_script(channel_keys_id) {
+ Ok(script) => script,
+ Err(_) => return Err(ChannelError::Close("Failed to get destination script".to_owned())),
};
- match self.sign_channel_announcement(node_signer, announcement) {
- Ok(res) => Some(res),
- Err(_) => None,
- }
- }
- /// May panic if called on a channel that wasn't immediately-previously
- /// self.remove_uncommitted_htlcs_and_mark_paused()'d
- pub fn get_channel_reestablish<L: Deref>(&mut self, logger: &L) -> msgs::ChannelReestablish where L::Target: Logger {
- assert_eq!(self.context.channel_state & ChannelState::PeerDisconnected as u32, ChannelState::PeerDisconnected as u32);
- assert_ne!(self.context.cur_counterparty_commitment_transaction_number, INITIAL_COMMITMENT_NUMBER);
- // Prior to static_remotekey, my_current_per_commitment_point was critical to claiming
- // current to_remote balances. However, it no longer has any use, and thus is now simply
- // set to a dummy (but valid, as required by the spec) public key.
- // fuzzing mode marks a subset of pubkeys as invalid so that we can hit "invalid pubkey"
- // branches, but we unwrap it below, so we arbitrarily select a dummy pubkey which is both
- // valid, and valid in fuzzing mode's arbitrary validity criteria:
- let mut pk = [2; 33]; pk[1] = 0xff;
- let dummy_pubkey = PublicKey::from_slice(&pk).unwrap();
- let remote_last_secret = if self.context.cur_counterparty_commitment_transaction_number + 1 < INITIAL_COMMITMENT_NUMBER {
- let remote_last_secret = self.context.commitment_secrets.get_secret(self.context.cur_counterparty_commitment_transaction_number + 2).unwrap();
- log_trace!(logger, "Enough info to generate a Data Loss Protect with per_commitment_secret {} for channel {}", log_bytes!(remote_last_secret), log_bytes!(self.context.channel_id()));
- remote_last_secret
+ let mut secp_ctx = Secp256k1::new();
+ secp_ctx.seeded_randomize(&entropy_source.get_secure_random_bytes());
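+ // Seeding above re-randomizes the secp context, hardening signing operations
+ // against side-channel attacks.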
+
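+ // 0conf channels are usable immediately, so no confirmations are required; otherwise
+ // require at least one confirmation even if the configured minimum_depth is zero.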
+ let minimum_depth = if is_0conf {
+ Some(0)
} else {
- log_info!(logger, "Sending a data_loss_protect with no previous remote per_commitment_secret for channel {}", log_bytes!(self.context.channel_id()));
- [0;32]
+ Some(cmp::max(config.channel_handshake_config.minimum_depth, 1))
};
- self.mark_awaiting_response();
- msgs::ChannelReestablish {
- channel_id: self.context.channel_id(),
- // The protocol has two different commitment number concepts - the "commitment
- // transaction number", which starts from 0 and counts up, and the "revocation key
- // index" which starts at INITIAL_COMMITMENT_NUMBER and counts down. We track
- // commitment transaction numbers by the index which will be used to reveal the
- // revocation key for that commitment transaction, which means we have to convert them
- // to protocol-level commitment numbers here...
- // next_local_commitment_number is the next commitment_signed number we expect to
- // receive (indicating if they need to resend one that we missed).
- next_local_commitment_number: INITIAL_COMMITMENT_NUMBER - self.context.cur_holder_commitment_transaction_number,
- // We have to set next_remote_commitment_number to the next revoke_and_ack we expect to
- // receive, however we track it by the next commitment number for a remote transaction
- // (which is one further, as they always revoke previous commitment transaction, not
- // the one we send) so we have to decrement by 1. Note that if
- // cur_counterparty_commitment_transaction_number is INITIAL_COMMITMENT_NUMBER we will have
- // dropped this channel on disconnect as it hasn't yet reached FundingSent so we can't
- // overflow here.
- next_remote_commitment_number: INITIAL_COMMITMENT_NUMBER - self.context.cur_counterparty_commitment_transaction_number - 1,
- your_last_per_commitment_secret: remote_last_secret,
- my_current_per_commitment_point: dummy_pubkey,
- // TODO(dual_funding): If we've sent `commtiment_signed` for an interactive transaction
- // construction but have not received `tx_signatures` we MUST set `next_funding_txid` to the
- // txid of that interactive transaction, else we MUST NOT set it.
- next_funding_txid: None,
- }
- }
+ let chan = Self {
+ context: ChannelContext {
+ user_id,
+ config: LegacyChannelConfig {
+ options: config.channel_config.clone(),
+ announced_channel,
+ commit_upfront_shutdown_pubkey: config.channel_handshake_config.commit_upfront_shutdown_pubkey,
+ },
- // Send stuff to our remote peers:
+ prev_config: None,
- /// Queues up an outbound HTLC to send by placing it in the holding cell. You should call
- /// [`Self::maybe_free_holding_cell_htlcs`] in order to actually generate and send the
- /// commitment update.
- ///
- /// `Err`s will only be [`ChannelError::Ignore`].
- pub fn queue_add_htlc<L: Deref>(&mut self, amount_msat: u64, payment_hash: PaymentHash, cltv_expiry: u32, source: HTLCSource,
- onion_routing_packet: msgs::OnionPacket, logger: &L)
- -> Result<(), ChannelError> where L::Target: Logger {
- self
- .send_htlc(amount_msat, payment_hash, cltv_expiry, source, onion_routing_packet, true, logger)
- .map(|msg_opt| assert!(msg_opt.is_none(), "We forced holding cell?"))
- .map_err(|err| {
- if let ChannelError::Ignore(_) = err { /* fine */ }
- else { debug_assert!(false, "Queueing cannot trigger channel failure"); }
- err
- })
- }
+ inbound_handshake_limits_override: None,
- /// Adds a pending outbound HTLC to this channel, note that you probably want
- /// [`Self::send_htlc_and_commit`] instead cause you'll want both messages at once.
- ///
- /// This returns an optional UpdateAddHTLC as we may be in a state where we cannot add HTLCs on
- /// the wire:
- /// * In cases where we're waiting on the remote peer to send us a revoke_and_ack, we
- /// wouldn't be able to determine what they actually ACK'ed if we have two sets of updates
- /// awaiting ACK.
- /// * In cases where we're marked MonitorUpdateInProgress, we cannot commit to a new state as
- /// we may not yet have sent the previous commitment update messages and will need to
- /// regenerate them.
- ///
- /// You MUST call [`Self::send_commitment_no_state_update`] prior to calling any other methods
- /// on this [`Channel`] if `force_holding_cell` is false.
- ///
- /// `Err`s will only be [`ChannelError::Ignore`].
- fn send_htlc<L: Deref>(&mut self, amount_msat: u64, payment_hash: PaymentHash, cltv_expiry: u32, source: HTLCSource,
- onion_routing_packet: msgs::OnionPacket, mut force_holding_cell: bool, logger: &L)
- -> Result<Option<msgs::UpdateAddHTLC>, ChannelError> where L::Target: Logger {
- if (self.context.channel_state & (ChannelState::ChannelReady as u32 | BOTH_SIDES_SHUTDOWN_MASK)) != (ChannelState::ChannelReady as u32) {
- return Err(ChannelError::Ignore("Cannot send HTLC until channel is fully established and we haven't started shutting down".to_owned()));
- }
- let channel_total_msat = self.context.channel_value_satoshis * 1000;
- if amount_msat > channel_total_msat {
- return Err(ChannelError::Ignore(format!("Cannot send amount {}, because it is more than the total value of the channel {}", amount_msat, channel_total_msat)));
- }
+ temporary_channel_id: Some(msg.temporary_channel_id),
+ channel_id: msg.temporary_channel_id,
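+ // Inbound channels start with both flags set: we've received the counterparty's
+ // opening message and are prepared to respond with our own.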
+ channel_state: ChannelState::NegotiatingFunding(
+ NegotiatingFundingFlags::OUR_INIT_SENT | NegotiatingFundingFlags::THEIR_INIT_SENT
+ ),
+ announcement_sigs_state: AnnouncementSigsState::NotSent,
+ secp_ctx,
- if amount_msat == 0 {
- return Err(ChannelError::Ignore("Cannot send 0-msat HTLC".to_owned()));
- }
+ latest_monitor_update_id: 0,
- let available_balances = self.get_available_balances();
- if amount_msat < available_balances.next_outbound_htlc_minimum_msat {
- return Err(ChannelError::Ignore(format!("Cannot send less than our next-HTLC minimum - {} msat",
- available_balances.next_outbound_htlc_minimum_msat)));
- }
+ holder_signer: ChannelSignerType::Ecdsa(holder_signer),
+ shutdown_scriptpubkey,
+ destination_script,
- if amount_msat > available_balances.next_outbound_htlc_limit_msat {
- return Err(ChannelError::Ignore(format!("Cannot send more than our next-HTLC maximum - {} msat",
- available_balances.next_outbound_htlc_limit_msat)));
- }
+ cur_holder_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
+ cur_counterparty_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
+ value_to_self_msat: msg.push_msat,
- if (self.context.channel_state & (ChannelState::PeerDisconnected as u32)) != 0 {
- // Note that this should never really happen, if we're !is_live() on receipt of an
- // incoming HTLC for relay will result in us rejecting the HTLC and we won't allow
- // the user to send directly into a !is_live() channel. However, if we
- // disconnected during the time the previous hop was doing the commitment dance we may
- // end up getting here after the forwarding delay. In any case, returning an
- // IgnoreError will get ChannelManager to do the right thing and fail backwards now.
- return Err(ChannelError::Ignore("Cannot send an HTLC while disconnected from channel counterparty".to_owned()));
- }
+ pending_inbound_htlcs: Vec::new(),
+ pending_outbound_htlcs: Vec::new(),
+ holding_cell_htlc_updates: Vec::new(),
+ pending_update_fee: None,
+ holding_cell_update_fee: None,
+ next_holder_htlc_id: 0,
+ next_counterparty_htlc_id: 0,
+ update_time_counter: 1,
- let need_holding_cell = (self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::MonitorUpdateInProgress as u32)) != 0;
- log_debug!(logger, "Pushing new outbound HTLC for {} msat {}", amount_msat,
- if force_holding_cell { "into holding cell" }
- else if need_holding_cell { "into holding cell as we're awaiting an RAA or monitor" }
- else { "to peer" });
+ resend_order: RAACommitmentOrder::CommitmentFirst,
- if need_holding_cell {
- force_holding_cell = true;
- }
+ monitor_pending_channel_ready: false,
+ monitor_pending_revoke_and_ack: false,
+ monitor_pending_commitment_signed: false,
+ monitor_pending_forwards: Vec::new(),
+ monitor_pending_failures: Vec::new(),
+ monitor_pending_finalized_fulfills: Vec::new(),
- // Now update local state:
- if force_holding_cell {
- self.context.holding_cell_htlc_updates.push(HTLCUpdateAwaitingACK::AddHTLC {
- amount_msat,
- payment_hash,
- cltv_expiry,
- source,
- onion_routing_packet,
- });
- return Ok(None);
- }
+ signer_pending_commitment_update: false,
+ signer_pending_funding: false,
- self.context.pending_outbound_htlcs.push(OutboundHTLCOutput {
- htlc_id: self.context.next_holder_htlc_id,
- amount_msat,
- payment_hash: payment_hash.clone(),
- cltv_expiry,
- state: OutboundHTLCState::LocalAnnounced(Box::new(onion_routing_packet.clone())),
- source,
- });
+ #[cfg(debug_assertions)]
+ holder_max_commitment_tx_output: Mutex::new((msg.push_msat, msg.funding_satoshis * 1000 - msg.push_msat)),
+ #[cfg(debug_assertions)]
+ counterparty_max_commitment_tx_output: Mutex::new((msg.push_msat, msg.funding_satoshis * 1000 - msg.push_msat)),
- let res = msgs::UpdateAddHTLC {
- channel_id: self.context.channel_id,
- htlc_id: self.context.next_holder_htlc_id,
- amount_msat,
- payment_hash,
- cltv_expiry,
- onion_routing_packet,
- };
- self.context.next_holder_htlc_id += 1;
+ last_sent_closing_fee: None,
+ pending_counterparty_closing_signed: None,
+ expecting_peer_commitment_signed: false,
+ closing_fee_limits: None,
+ target_closing_feerate_sats_per_kw: None,
- Ok(Some(res))
- }
+ funding_tx_confirmed_in: None,
+ funding_tx_confirmation_height: 0,
+ short_channel_id: None,
+ channel_creation_height: current_chain_height,
- fn build_commitment_no_status_check<L: Deref>(&mut self, logger: &L) -> ChannelMonitorUpdate where L::Target: Logger {
- log_trace!(logger, "Updating HTLC state for a newly-sent commitment_signed...");
- // We can upgrade the status of some HTLCs that are waiting on a commitment, even if we
- // fail to generate this, we still are at least at a position where upgrading their status
- // is acceptable.
- for htlc in self.context.pending_inbound_htlcs.iter_mut() {
- let new_state = if let &InboundHTLCState::AwaitingRemoteRevokeToAnnounce(ref forward_info) = &htlc.state {
- Some(InboundHTLCState::AwaitingAnnouncedRemoteRevoke(forward_info.clone()))
- } else { None };
- if let Some(state) = new_state {
- log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce {} to AwaitingAnnouncedRemoteRevoke", log_bytes!(htlc.payment_hash.0));
- htlc.state = state;
- }
- }
- for htlc in self.context.pending_outbound_htlcs.iter_mut() {
- if let &mut OutboundHTLCState::AwaitingRemoteRevokeToRemove(ref mut outcome) = &mut htlc.state {
- log_trace!(logger, " ...promoting outbound AwaitingRemoteRevokeToRemove {} to AwaitingRemovedRemoteRevoke", log_bytes!(htlc.payment_hash.0));
- // Grab the preimage, if it exists, instead of cloning
- let mut reason = OutboundHTLCOutcome::Success(None);
- mem::swap(outcome, &mut reason);
- htlc.state = OutboundHTLCState::AwaitingRemovedRemoteRevoke(reason);
- }
- }
- if let Some((feerate, update_state)) = self.context.pending_update_fee {
- if update_state == FeeUpdateState::AwaitingRemoteRevokeToAnnounce {
- debug_assert!(!self.context.is_outbound());
- log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce fee update {} to Committed", feerate);
- self.context.feerate_per_kw = feerate;
- self.context.pending_update_fee = None;
- }
- }
- self.context.resend_order = RAACommitmentOrder::RevokeAndACKFirst;
+ feerate_per_kw: msg.feerate_per_kw,
+ channel_value_satoshis: msg.funding_satoshis,
+ counterparty_dust_limit_satoshis: msg.dust_limit_satoshis,
+ holder_dust_limit_satoshis: MIN_CHAN_DUST_LIMIT_SATOSHIS,
+ counterparty_max_htlc_value_in_flight_msat: cmp::min(msg.max_htlc_value_in_flight_msat, msg.funding_satoshis * 1000),
+ holder_max_htlc_value_in_flight_msat: get_holder_max_htlc_value_in_flight_msat(msg.funding_satoshis, &config.channel_handshake_config),
+ counterparty_selected_channel_reserve_satoshis: Some(msg.channel_reserve_satoshis),
+ holder_selected_channel_reserve_satoshis,
+ counterparty_htlc_minimum_msat: msg.htlc_minimum_msat,
+ holder_htlc_minimum_msat: if config.channel_handshake_config.our_htlc_minimum_msat == 0 { 1 } else { config.channel_handshake_config.our_htlc_minimum_msat },
+ counterparty_max_accepted_htlcs: msg.max_accepted_htlcs,
+ holder_max_accepted_htlcs: cmp::min(config.channel_handshake_config.our_max_accepted_htlcs, MAX_HTLCS),
+ minimum_depth,
- let (counterparty_commitment_txid, mut htlcs_ref) = self.build_commitment_no_state_update(logger);
- let htlcs: Vec<(HTLCOutputInCommitment, Option<Box<HTLCSource>>)> =
- htlcs_ref.drain(..).map(|(htlc, htlc_source)| (htlc, htlc_source.map(|source_ref| Box::new(source_ref.clone())))).collect();
+ counterparty_forwarding_info: None,
- if self.context.announcement_sigs_state == AnnouncementSigsState::MessageSent {
- self.context.announcement_sigs_state = AnnouncementSigsState::Committed;
- }
+ channel_transaction_parameters: ChannelTransactionParameters {
+ holder_pubkeys: pubkeys,
+ holder_selected_contest_delay: config.channel_handshake_config.our_to_self_delay,
+ is_outbound_from_holder: false,
+ counterparty_parameters: Some(CounterpartyChannelTransactionParameters {
+ selected_contest_delay: msg.to_self_delay,
+ pubkeys: counterparty_pubkeys,
+ }),
+ funding_outpoint: None,
+ channel_type_features: channel_type.clone()
+ },
+ funding_transaction: None,
+ is_batch_funding: None,
- self.context.latest_monitor_update_id += 1;
- let monitor_update = ChannelMonitorUpdate {
- update_id: self.context.latest_monitor_update_id,
- updates: vec![ChannelMonitorUpdateStep::LatestCounterpartyCommitmentTXInfo {
- commitment_txid: counterparty_commitment_txid,
- htlc_outputs: htlcs.clone(),
- commitment_number: self.context.cur_counterparty_commitment_transaction_number,
- their_per_commitment_point: self.context.counterparty_cur_commitment_point.unwrap()
- }]
- };
- self.context.channel_state |= ChannelState::AwaitingRemoteRevoke as u32;
- monitor_update
- }
+ counterparty_cur_commitment_point: Some(msg.first_per_commitment_point),
+ counterparty_prev_commitment_point: None,
+ counterparty_node_id,
- fn build_commitment_no_state_update<L: Deref>(&self, logger: &L) -> (Txid, Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)>) where L::Target: Logger {
- let counterparty_keys = self.context.build_remote_transaction_keys();
- let commitment_stats = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, true, logger);
- let counterparty_commitment_txid = commitment_stats.tx.trust().txid();
+ counterparty_shutdown_scriptpubkey,
- #[cfg(any(test, fuzzing))]
- {
- if !self.context.is_outbound() {
- let projected_commit_tx_info = self.context.next_remote_commitment_tx_fee_info_cached.lock().unwrap().take();
- *self.context.next_local_commitment_tx_fee_info_cached.lock().unwrap() = None;
- if let Some(info) = projected_commit_tx_info {
- let total_pending_htlcs = self.context.pending_inbound_htlcs.len() + self.context.pending_outbound_htlcs.len();
- if info.total_pending_htlcs == total_pending_htlcs
- && info.next_holder_htlc_id == self.context.next_holder_htlc_id
- && info.next_counterparty_htlc_id == self.context.next_counterparty_htlc_id
- && info.feerate == self.context.feerate_per_kw {
- let actual_fee = Self::commit_tx_fee_msat(self.context.feerate_per_kw, commitment_stats.num_nondust_htlcs, self.context.opt_anchors());
- assert_eq!(actual_fee, info.fee);
- }
- }
- }
- }
+ commitment_secrets: CounterpartyCommitmentSecrets::new(),
- (counterparty_commitment_txid, commitment_stats.htlcs_included)
- }
+ channel_update_status: ChannelUpdateStatus::Enabled,
+ closing_signed_in_flight: false,
- /// Only fails in case of signer rejection. Used for channel_reestablish commitment_signed
- /// generation when we shouldn't change HTLC/channel state.
- fn send_commitment_no_state_update<L: Deref>(&self, logger: &L) -> Result<(msgs::CommitmentSigned, (Txid, Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)>)), ChannelError> where L::Target: Logger {
- // Get the fee tests from `build_commitment_no_state_update`
- #[cfg(any(test, fuzzing))]
- self.build_commitment_no_state_update(logger);
+ announcement_sigs: None,
- let counterparty_keys = self.context.build_remote_transaction_keys();
- let commitment_stats = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, true, logger);
- let counterparty_commitment_txid = commitment_stats.tx.trust().txid();
- let (signature, htlc_signatures);
+ #[cfg(any(test, fuzzing))]
+ next_local_commitment_tx_fee_info_cached: Mutex::new(None),
+ #[cfg(any(test, fuzzing))]
+ next_remote_commitment_tx_fee_info_cached: Mutex::new(None),
- {
- let mut htlcs = Vec::with_capacity(commitment_stats.htlcs_included.len());
- for &(ref htlc, _) in commitment_stats.htlcs_included.iter() {
- htlcs.push(htlc);
- }
+ workaround_lnd_bug_4006: None,
+ sent_message_awaiting_response: None,
- let res = self.context.holder_signer.sign_counterparty_commitment(&commitment_stats.tx, commitment_stats.preimages, &self.context.secp_ctx)
- .map_err(|_| ChannelError::Close("Failed to get signatures for new commitment_signed".to_owned()))?;
- signature = res.0;
- htlc_signatures = res.1;
+ latest_inbound_scid_alias: None,
+ outbound_scid_alias: 0,
- log_trace!(logger, "Signed remote commitment tx {} (txid {}) with redeemscript {} -> {} in channel {}",
- encode::serialize_hex(&commitment_stats.tx.trust().built_transaction().transaction),
- &counterparty_commitment_txid, encode::serialize_hex(&self.context.get_funding_redeemscript()),
- log_bytes!(signature.serialize_compact()[..]), log_bytes!(self.context.channel_id()));
+ channel_pending_event_emitted: false,
+ channel_ready_event_emitted: false,
- for (ref htlc_sig, ref htlc) in htlc_signatures.iter().zip(htlcs) {
- log_trace!(logger, "Signed remote HTLC tx {} with redeemscript {} with pubkey {} -> {} in channel {}",
- encode::serialize_hex(&chan_utils::build_htlc_transaction(&counterparty_commitment_txid, commitment_stats.feerate_per_kw, self.context.get_holder_selected_contest_delay(), htlc, self.context.opt_anchors(), false, &counterparty_keys.broadcaster_delayed_payment_key, &counterparty_keys.revocation_key)),
- encode::serialize_hex(&chan_utils::get_htlc_redeemscript(&htlc, self.context.opt_anchors(), &counterparty_keys)),
- log_bytes!(counterparty_keys.broadcaster_htlc_key.serialize()),
- log_bytes!(htlc_sig.serialize_compact()[..]), log_bytes!(self.context.channel_id()));
- }
- }
+ #[cfg(any(test, fuzzing))]
+ historical_inbound_htlc_fulfills: HashSet::new(),
- Ok((msgs::CommitmentSigned {
- channel_id: self.context.channel_id,
- signature,
- htlc_signatures,
- #[cfg(taproot)]
- partial_signature_with_nonce: None,
- }, (counterparty_commitment_txid, commitment_stats.htlcs_included)))
- }
+ channel_type,
+ channel_keys_id,
- /// Adds a pending outbound HTLC to this channel, and builds a new remote commitment
- /// transaction and generates the corresponding [`ChannelMonitorUpdate`] in one go.
- ///
- /// Shorthand for calling [`Self::send_htlc`] followed by a commitment update, see docs on
- /// [`Self::send_htlc`] and [`Self::build_commitment_no_state_update`] for more info.
- pub fn send_htlc_and_commit<L: Deref>(&mut self, amount_msat: u64, payment_hash: PaymentHash, cltv_expiry: u32, source: HTLCSource, onion_routing_packet: msgs::OnionPacket, logger: &L) -> Result<Option<&ChannelMonitorUpdate>, ChannelError> where L::Target: Logger {
- let send_res = self.send_htlc(amount_msat, payment_hash, cltv_expiry, source, onion_routing_packet, false, logger);
- if let Err(e) = &send_res { if let ChannelError::Ignore(_) = e {} else { debug_assert!(false, "Sending cannot trigger channel failure"); } }
- match send_res? {
- Some(_) => {
- let monitor_update = self.build_commitment_no_status_check(logger);
- self.monitor_updating_paused(false, true, false, Vec::new(), Vec::new(), Vec::new());
- Ok(self.push_ret_blockable_mon_update(monitor_update))
+ blocked_monitor_updates: Vec::new(),
},
- None => Ok(None)
- }
- }
-
- pub fn channel_update(&mut self, msg: &msgs::ChannelUpdate) -> Result<(), ChannelError> {
- if msg.contents.htlc_minimum_msat >= self.context.channel_value_satoshis * 1000 {
- return Err(ChannelError::Close("Minimum htlc value is greater than channel value".to_string()));
- }
- self.context.counterparty_forwarding_info = Some(CounterpartyForwardingInfo {
- fee_base_msat: msg.contents.fee_base_msat,
- fee_proportional_millionths: msg.contents.fee_proportional_millionths,
- cltv_expiry_delta: msg.contents.cltv_expiry_delta
- });
+ unfunded_context: UnfundedChannelContext { unfunded_channel_age_ticks: 0 }
+ };
- Ok(())
+ Ok(chan)
}
- /// Begins the shutdown process, getting a message for the remote peer and returning all
- /// holding cell HTLCs for payment failure.
+ /// Marks an inbound channel as accepted and generates a [`msgs::AcceptChannel`] message which
+ /// should be sent back to the counterparty node.
///
- /// May jump to the channel being fully shutdown (see [`Self::is_shutdown`]) in which case no
- /// [`ChannelMonitorUpdate`] will be returned).
- pub fn get_shutdown<SP: Deref>(&mut self, signer_provider: &SP, their_features: &InitFeatures,
- target_feerate_sats_per_kw: Option<u32>, override_shutdown_script: Option<ShutdownScript>)
- -> Result<(msgs::Shutdown, Option<&ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>), APIError>
- where SP::Target: SignerProvider {
- for htlc in self.context.pending_outbound_htlcs.iter() {
- if let OutboundHTLCState::LocalAnnounced(_) = htlc.state {
- return Err(APIError::APIMisuseError{err: "Cannot begin shutdown with pending HTLCs. Process pending events first".to_owned()});
- }
- }
- if self.context.channel_state & BOTH_SIDES_SHUTDOWN_MASK != 0 {
- if (self.context.channel_state & ChannelState::LocalShutdownSent as u32) == ChannelState::LocalShutdownSent as u32 {
- return Err(APIError::APIMisuseError{err: "Shutdown already in progress".to_owned()});
- }
- else if (self.context.channel_state & ChannelState::RemoteShutdownSent as u32) == ChannelState::RemoteShutdownSent as u32 {
- return Err(APIError::ChannelUnavailable{err: "Shutdown already in progress by remote".to_owned()});
- }
+ /// [`msgs::AcceptChannel`]: crate::ln::msgs::AcceptChannel
+ pub fn accept_inbound_channel(&mut self) -> msgs::AcceptChannel {
+ if self.context.is_outbound() {
+ panic!("Tried to send accept_channel for an outbound channel?");
}
- if self.context.shutdown_scriptpubkey.is_some() && override_shutdown_script.is_some() {
- return Err(APIError::APIMisuseError{err: "Cannot override shutdown script for a channel with one already set".to_owned()});
+ if !matches!(
+ self.context.channel_state, ChannelState::NegotiatingFunding(flags)
+ if flags == (NegotiatingFundingFlags::OUR_INIT_SENT | NegotiatingFundingFlags::THEIR_INIT_SENT)
+ ) {
+ panic!("Tried to send accept_channel after channel had moved forward");
}
- assert_eq!(self.context.channel_state & ChannelState::ShutdownComplete as u32, 0);
- if self.context.channel_state & (ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32) != 0 {
- return Err(APIError::ChannelUnavailable{err: "Cannot begin shutdown while peer is disconnected or we're waiting on a monitor update, maybe force-close instead?".to_owned()});
+ if self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
+ panic!("Tried to send an accept_channel for a channel that has already advanced");
}
- // If we haven't funded the channel yet, we don't need to bother ensuring the shutdown
- // script is set, we just force-close and call it a day.
- let mut chan_closed = false;
- if self.context.channel_state < ChannelState::FundingSent as u32 {
- chan_closed = true;
+ self.generate_accept_channel_message()
+ }
+
+ /// This function is used to explicitly generate a [`msgs::AcceptChannel`] message for an
+ /// inbound channel. If the intention is to accept an inbound channel, use
+ /// [`InboundV1Channel::accept_inbound_channel`] instead.
+ ///
+ /// [`msgs::AcceptChannel`]: crate::ln::msgs::AcceptChannel
+ fn generate_accept_channel_message(&self) -> msgs::AcceptChannel {
+ let first_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
+ let keys = self.context.get_holder_pubkeys();
+
+ msgs::AcceptChannel {
+ temporary_channel_id: self.context.channel_id,
+ dust_limit_satoshis: self.context.holder_dust_limit_satoshis,
+ max_htlc_value_in_flight_msat: self.context.holder_max_htlc_value_in_flight_msat,
+ channel_reserve_satoshis: self.context.holder_selected_channel_reserve_satoshis,
+ htlc_minimum_msat: self.context.holder_htlc_minimum_msat,
+ minimum_depth: self.context.minimum_depth.unwrap(),
+ to_self_delay: self.context.get_holder_selected_contest_delay(),
+ max_accepted_htlcs: self.context.holder_max_accepted_htlcs,
+ funding_pubkey: keys.funding_pubkey,
+ revocation_basepoint: keys.revocation_basepoint.to_public_key(),
+ payment_point: keys.payment_point,
+ delayed_payment_basepoint: keys.delayed_payment_basepoint.to_public_key(),
+ htlc_basepoint: keys.htlc_basepoint.to_public_key(),
+ first_per_commitment_point,
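+ // If we have no upfront shutdown script, send an empty one, which signals opt-out.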
+ shutdown_scriptpubkey: Some(match &self.context.shutdown_scriptpubkey {
+ Some(script) => script.clone().into_inner(),
+ None => Builder::new().into_script(),
+ }),
+ channel_type: Some(self.context.channel_type.clone()),
+ #[cfg(taproot)]
+ next_local_nonce: None,
}
+ }
- let update_shutdown_script = match self.context.shutdown_scriptpubkey {
- Some(_) => false,
- None if !chan_closed => {
- // use override shutdown script if provided
- let shutdown_scriptpubkey = match override_shutdown_script {
- Some(script) => script,
- None => {
- // otherwise, use the shutdown scriptpubkey provided by the signer
- match signer_provider.get_shutdown_scriptpubkey() {
- Ok(scriptpubkey) => scriptpubkey,
- Err(_) => return Err(APIError::ChannelUnavailable{err: "Failed to get shutdown scriptpubkey".to_owned()}),
- }
- },
- };
- if !shutdown_scriptpubkey.is_compatible(their_features) {
- return Err(APIError::IncompatibleShutdownScript { script: shutdown_scriptpubkey.clone() });
- }
- self.context.shutdown_scriptpubkey = Some(shutdown_scriptpubkey);
- true
+ /// Allows tests to extract a [`msgs::AcceptChannel`] message for an inbound channel
+ /// without accepting it.
+ ///
+ /// [`msgs::AcceptChannel`]: crate::ln::msgs::AcceptChannel
+ #[cfg(test)]
+ pub fn get_accept_channel_message(&self) -> msgs::AcceptChannel {
+ self.generate_accept_channel_message()
+ }
+
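+ /// Checks the counterparty's signature on our initial holder commitment transaction
+ /// and, if valid, returns that commitment transaction.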
+ fn check_funding_created_signature<L: Deref>(&mut self, sig: &Signature, logger: &L) -> Result<CommitmentTransaction, ChannelError> where L::Target: Logger {
+ let funding_script = self.context.get_funding_redeemscript();
+
+ let keys = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number);
+ let initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &keys, true, false, logger).tx;
+ let trusted_tx = initial_commitment_tx.trust();
+ let initial_commitment_bitcoin_tx = trusted_tx.built_transaction();
+ let sighash = initial_commitment_bitcoin_tx.get_sighash_all(&funding_script, self.context.channel_value_satoshis);
+ // They sign the holder commitment transaction...
+ log_trace!(logger, "Checking funding_created tx signature {} by key {} against tx {} (sighash {}) with redeemscript {} for channel {}.",
+ log_bytes!(sig.serialize_compact()[..]), log_bytes!(self.context.counterparty_funding_pubkey().serialize()),
+ encode::serialize_hex(&initial_commitment_bitcoin_tx.transaction), log_bytes!(sighash[..]),
+ encode::serialize_hex(&funding_script), &self.context.channel_id());
+ secp_check!(self.context.secp_ctx.verify_ecdsa(&sighash, &sig, self.context.counterparty_funding_pubkey()), "Invalid funding_created signature from peer".to_owned());
+
+ Ok(initial_commitment_tx)
+ }
+
+ pub fn funding_created<L: Deref>(
+ mut self, msg: &msgs::FundingCreated, best_block: BestBlock, signer_provider: &SP, logger: &L
+ ) -> Result<(Channel<SP>, Option<msgs::FundingSigned>, ChannelMonitor<<SP::Target as SignerProvider>::EcdsaSigner>), (Self, ChannelError)>
+ where
+ L::Target: Logger
+ {
+ if self.context.is_outbound() {
+ return Err((self, ChannelError::Close("Received funding_created for an outbound channel?".to_owned())));
+ }
+ if !matches!(
+ self.context.channel_state, ChannelState::NegotiatingFunding(flags)
+ if flags == (NegotiatingFundingFlags::OUR_INIT_SENT | NegotiatingFundingFlags::THEIR_INIT_SENT)
+ ) {
+ // BOLT 2 says that if we disconnect before we send funding_signed we SHOULD NOT
+ // remember the channel, so it's safe to just send an error_message here and drop the
+ // channel.
+ return Err((self, ChannelError::Close("Received funding_created after we got the channel!".to_owned())));
+ }
+ if self.context.commitment_secrets.get_min_seen_secret() != (1 << 48) ||
+ self.context.cur_counterparty_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER ||
+ self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
+ panic!("Should not have advanced channel commitment tx numbers prior to funding_created");
+ }
+
+ let funding_txo = OutPoint { txid: msg.funding_txid, index: msg.funding_output_index };
+ self.context.channel_transaction_parameters.funding_outpoint = Some(funding_txo);
+ // This is an externally observable change before we finish all our checks. In particular,
+ // check_funding_created_signature may fail.
+ self.context.holder_signer.as_mut().provide_channel_parameters(&self.context.channel_transaction_parameters);
+
+ let initial_commitment_tx = match self.check_funding_created_signature(&msg.signature, logger) {
+ Ok(res) => res,
+ Err(ChannelError::Close(e)) => {
+ self.context.channel_transaction_parameters.funding_outpoint = None;
+ return Err((self, ChannelError::Close(e)));
},
- None => false,
+ Err(e) => {
+ // The only error we know how to handle is ChannelError::Close, so we fall over here
+ // to make sure we don't continue with an inconsistent state.
+ panic!("unexpected error type from check_funding_created_signature {:?}", e);
+ }
};
- // From here on out, we may not fail!
- self.context.target_closing_feerate_sats_per_kw = target_feerate_sats_per_kw;
- if self.context.channel_state < ChannelState::FundingSent as u32 {
- self.context.channel_state = ChannelState::ShutdownComplete as u32;
- } else {
- self.context.channel_state |= ChannelState::LocalShutdownSent as u32;
- }
- self.context.update_time_counter += 1;
+ let holder_commitment_tx = HolderCommitmentTransaction::new(
+ initial_commitment_tx,
+ msg.signature,
+ Vec::new(),
+ &self.context.get_holder_pubkeys().funding_pubkey,
+ self.context.counterparty_funding_pubkey()
+ );
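+ // Give the signer a chance to validate the counterparty-signed holder commitment
+ // before we make any state changes based on it.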
- let monitor_update = if update_shutdown_script {
- self.context.latest_monitor_update_id += 1;
- let monitor_update = ChannelMonitorUpdate {
- update_id: self.context.latest_monitor_update_id,
- updates: vec![ChannelMonitorUpdateStep::ShutdownScript {
- scriptpubkey: self.get_closing_scriptpubkey(),
- }],
- };
- self.monitor_updating_paused(false, false, false, Vec::new(), Vec::new(), Vec::new());
- if self.push_blockable_mon_update(monitor_update) {
- self.context.pending_monitor_updates.last().map(|upd| &upd.update)
- } else { None }
- } else { None };
- let shutdown = msgs::Shutdown {
- channel_id: self.context.channel_id,
- scriptpubkey: self.get_closing_scriptpubkey(),
- };
+ if let Err(_) = self.context.holder_signer.as_ref().validate_holder_commitment(&holder_commitment_tx, Vec::new()) {
+ return Err((self, ChannelError::Close("Failed to validate our commitment".to_owned())));
+ }
- // Go ahead and drop holding cell updates as we'd rather fail payments than wait to send
- // our shutdown until we've committed all of the pending changes.
- self.context.holding_cell_update_fee = None;
- let mut dropped_outbound_htlcs = Vec::with_capacity(self.context.holding_cell_htlc_updates.len());
- self.context.holding_cell_htlc_updates.retain(|htlc_update| {
- match htlc_update {
- &HTLCUpdateAwaitingACK::AddHTLC { ref payment_hash, ref source, .. } => {
- dropped_outbound_htlcs.push((source.clone(), payment_hash.clone()));
- false
- },
- _ => true
- }
- });
+ // Now that we're past error-generating stuff, update our local state:
- debug_assert!(!self.is_shutdown() || monitor_update.is_none(),
- "we can't both complete shutdown and return a monitor update");
+ self.context.channel_state = ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::new());
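+ // Now that the funding outpoint is known, replace the temporary channel ID with the
+ // definitive one derived from the funding outpoint.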
+ self.context.channel_id = funding_txo.to_channel_id();
+ self.context.cur_counterparty_commitment_transaction_number -= 1;
+ self.context.cur_holder_commitment_transaction_number -= 1;
- Ok((shutdown, monitor_update, dropped_outbound_htlcs))
- }
+ let (counterparty_initial_commitment_tx, funding_signed) = self.context.get_funding_signed_msg(logger);
- /// Gets the latest commitment transaction and any dependent transactions for relay (forcing
- /// shutdown of this channel - no more calls into this Channel may be made afterwards except
- /// those explicitly stated to be allowed after shutdown completes, eg some simple getters).
- /// Also returns the list of payment_hashes for channels which we can safely fail backwards
- /// immediately (others we will have to allow to time out).
- pub fn force_shutdown(&mut self, should_broadcast: bool) -> ShutdownResult {
- // Note that we MUST only generate a monitor update that indicates force-closure - we're
- // called during initialization prior to the chain_monitor in the encompassing ChannelManager
- // being fully configured in some cases. Thus, its likely any monitor events we generate will
- // be delayed in being processed! See the docs for `ChannelManagerReadArgs` for more.
- assert!(self.context.channel_state != ChannelState::ShutdownComplete as u32);
+ let funding_redeemscript = self.context.get_funding_redeemscript();
+ let funding_txo_script = funding_redeemscript.to_v0_p2wsh();
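+ // Per BOLT 3, commitment numbers are XORed with this obscure factor before being
+ // encoded in a commitment transaction's locktime and sequence fields.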
+ let obscure_factor = get_commitment_transaction_number_obscure_factor(&self.context.get_holder_pubkeys().payment_point, &self.context.get_counterparty_pubkeys().payment_point, self.context.is_outbound());
+ let shutdown_script = self.context.shutdown_scriptpubkey.clone().map(|script| script.into_inner());
+ let mut monitor_signer = signer_provider.derive_channel_signer(self.context.channel_value_satoshis, self.context.channel_keys_id);
+ monitor_signer.provide_channel_parameters(&self.context.channel_transaction_parameters);
+ let channel_monitor = ChannelMonitor::new(self.context.secp_ctx.clone(), monitor_signer,
+ shutdown_script, self.context.get_holder_selected_contest_delay(),
+ &self.context.destination_script, (funding_txo, funding_txo_script.clone()),
+ &self.context.channel_transaction_parameters,
+ funding_redeemscript.clone(), self.context.channel_value_satoshis,
+ obscure_factor,
+ holder_commitment_tx, best_block, self.context.counterparty_node_id);
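+ // The counterparty commitment number was already decremented above, so add one back
+ // to reference the initial commitment transaction.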
+ channel_monitor.provide_initial_counterparty_commitment_tx(
+ counterparty_initial_commitment_tx.trust().txid(), Vec::new(),
+ self.context.cur_counterparty_commitment_transaction_number + 1,
+ self.context.counterparty_cur_commitment_point.unwrap(), self.context.feerate_per_kw,
+ counterparty_initial_commitment_tx.to_broadcaster_value_sat(),
+ counterparty_initial_commitment_tx.to_countersignatory_value_sat(), logger);
- // We go ahead and "free" any holding cell HTLCs or HTLCs we haven't yet committed to and
- // return them to fail the payment.
- let mut dropped_outbound_htlcs = Vec::with_capacity(self.context.holding_cell_htlc_updates.len());
- let counterparty_node_id = self.context.get_counterparty_node_id();
- for htlc_update in self.context.holding_cell_htlc_updates.drain(..) {
- match htlc_update {
- HTLCUpdateAwaitingACK::AddHTLC { source, payment_hash, .. } => {
- dropped_outbound_htlcs.push((source, payment_hash, counterparty_node_id, self.context.channel_id));
- },
- _ => {}
- }
- }
- let monitor_update = if let Some(funding_txo) = self.context.get_funding_txo() {
- // If we haven't yet exchanged funding signatures (ie channel_state < FundingSent),
- // returning a channel monitor update here would imply a channel monitor update before
- // we even registered the channel monitor to begin with, which is invalid.
- // Thus, if we aren't actually at a point where we could conceivably broadcast the
- // funding transaction, don't return a funding txo (which prevents providing the
- // monitor update to the user, even if we return one).
- // See test_duplicate_chan_id and test_pre_lockin_no_chan_closed_update for more.
- if self.context.channel_state & (ChannelState::FundingSent as u32 | ChannelState::ChannelReady as u32 | ChannelState::ShutdownComplete as u32) != 0 {
- self.context.latest_monitor_update_id = CLOSED_CHANNEL_UPDATE_ID;
- Some((self.context.get_counterparty_node_id(), funding_txo, ChannelMonitorUpdate {
- update_id: self.context.latest_monitor_update_id,
- updates: vec![ChannelMonitorUpdateStep::ChannelForceClosed { should_broadcast }],
- }))
- } else { None }
- } else { None };
+ log_info!(logger, "{} funding_signed for peer for channel {}",
+ if funding_signed.is_some() { "Generated" } else { "Waiting for signature on" }, &self.context.channel_id());
- self.context.channel_state = ChannelState::ShutdownComplete as u32;
- self.context.update_time_counter += 1;
- (monitor_update, dropped_outbound_htlcs)
- }
+ // Promote the channel to a full-fledged one now that we have updated the state and have a
+ // `ChannelMonitor`.
+ let mut channel = Channel {
+ context: self.context,
+ };
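+ // Only a 0conf channel can have `channel_ready` available at this point; queue it via
+ // `monitor_updating_paused` so it is only sent once the new monitor has been persisted.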
+ let need_channel_ready = channel.check_get_channel_ready(0).is_some();
+ channel.monitor_updating_paused(false, false, need_channel_ready, Vec::new(), Vec::new(), Vec::new());
- pub fn inflight_htlc_sources(&self) -> impl Iterator<Item=(&HTLCSource, &PaymentHash)> {
- self.context.holding_cell_htlc_updates.iter()
- .flat_map(|htlc_update| {
- match htlc_update {
- HTLCUpdateAwaitingACK::AddHTLC { source, payment_hash, .. }
- => Some((source, payment_hash)),
- _ => None,
- }
- })
- .chain(self.context.pending_outbound_htlcs.iter().map(|htlc| (&htlc.source, &htlc.payment_hash)))
+ Ok((channel, funding_signed, channel_monitor))
}
}
const SERIALIZATION_VERSION: u8 = 3;
-const MIN_SERIALIZATION_VERSION: u8 = 2;
+const MIN_SERIALIZATION_VERSION: u8 = 3;
impl_writeable_tlv_based_enum!(InboundHTLCRemovalReason,;
(0, FailRelay),
}
}
-impl<Signer: WriteableEcdsaChannelSigner> Writeable for Channel<Signer> {
+impl<SP: Deref> Writeable for Channel<SP> where SP::Target: SignerProvider {
fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
// Note that we write out as if remove_uncommitted_htlcs_and_mark_paused had just been
// called.
writer.write_all(&[0; 8])?;
self.context.channel_id.write(writer)?;
- (self.context.channel_state | ChannelState::PeerDisconnected as u32).write(writer)?;
+ {
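+ // Only the funded states (`AwaitingChannelReady`/`ChannelReady`) carry a
+ // peer-disconnected flag; earlier states have no flags to set.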
+ let mut channel_state = self.context.channel_state;
+ if matches!(channel_state, ChannelState::AwaitingChannelReady(_)|ChannelState::ChannelReady(_)) {
+ channel_state.set_peer_disconnected();
+ }
+ channel_state.to_u32().write(writer)?;
+ }
self.context.channel_value_satoshis.write(writer)?;
self.context.latest_monitor_update_id.write(writer)?;
- let mut key_data = VecWriter(Vec::new());
- self.context.holder_signer.write(&mut key_data)?;
- assert!(key_data.0.len() < core::usize::MAX);
- assert!(key_data.0.len() < core::u32::MAX as usize);
- (key_data.0.len() as u32).write(writer)?;
- writer.write_all(&key_data.0[..])?;
-
// Write out the old serialization for shutdown_pubkey for backwards compatibility, if
// deserialized from that format.
match self.context.shutdown_scriptpubkey.as_ref().and_then(|script| script.as_legacy_pubkey()) {
}
let mut preimages: Vec<&Option<PaymentPreimage>> = vec![];
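+ // Collect skimmed fees and blinding points into Vecs mirroring the HTLC lists; they are
+ // written as odd-numbered TLVs below so older versions can safely ignore them.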
+ let mut pending_outbound_skimmed_fees: Vec<Option<u64>> = Vec::new();
+ let mut pending_outbound_blinding_points: Vec<Option<PublicKey>> = Vec::new();
(self.context.pending_outbound_htlcs.len() as u64).write(writer)?;
for htlc in self.context.pending_outbound_htlcs.iter() {
reason.write(writer)?;
}
}
+ pending_outbound_skimmed_fees.push(htlc.skimmed_fee_msat);
+ pending_outbound_blinding_points.push(htlc.blinding_point);
}
+ let mut holding_cell_skimmed_fees: Vec<Option<u64>> = Vec::new();
+ let mut holding_cell_blinding_points: Vec<Option<PublicKey>> = Vec::new();
(self.context.holding_cell_htlc_updates.len() as u64).write(writer)?;
for update in self.context.holding_cell_htlc_updates.iter() {
match update {
- &HTLCUpdateAwaitingACK::AddHTLC { ref amount_msat, ref cltv_expiry, ref payment_hash, ref source, ref onion_routing_packet } => {
+ &HTLCUpdateAwaitingACK::AddHTLC {
+ ref amount_msat, ref cltv_expiry, ref payment_hash, ref source, ref onion_routing_packet,
+ blinding_point, skimmed_fee_msat,
+ } => {
0u8.write(writer)?;
amount_msat.write(writer)?;
cltv_expiry.write(writer)?;
payment_hash.write(writer)?;
source.write(writer)?;
onion_routing_packet.write(writer)?;
+
+ holding_cell_skimmed_fees.push(skimmed_fee_msat);
+ holding_cell_blinding_points.push(blinding_point);
},
&HTLCUpdateAwaitingACK::ClaimHTLC { ref payment_preimage, ref htlc_id } => {
1u8.write(writer)?;
(5, self.context.config, required),
(6, serialized_holder_htlc_max_in_flight, option),
(7, self.context.shutdown_scriptpubkey, option),
+ (8, self.context.blocked_monitor_updates, optional_vec),
(9, self.context.target_closing_feerate_sats_per_kw, option),
- (11, self.context.monitor_pending_finalized_fulfills, vec_type),
+ (11, self.context.monitor_pending_finalized_fulfills, required_vec),
(13, self.context.channel_creation_height, required),
- (15, preimages, vec_type),
+ (15, preimages, required_vec),
(17, self.context.announcement_sigs_state, required),
(19, self.context.latest_inbound_scid_alias, option),
(21, self.context.outbound_scid_alias, required),
(28, holder_max_accepted_htlcs, option),
(29, self.context.temporary_channel_id, option),
(31, channel_pending_event_emitted, option),
- (33, self.context.pending_monitor_updates, vec_type),
+ (35, pending_outbound_skimmed_fees, optional_vec),
+ (37, holding_cell_skimmed_fees, optional_vec),
+ (38, self.context.is_batch_funding, option),
+ (39, pending_outbound_blinding_points, optional_vec),
+ (41, holding_cell_blinding_points, optional_vec),
});
Ok(())
}
const MAX_ALLOC_SIZE: usize = 64*1024;
-impl<'a, 'b, 'c, ES: Deref, SP: Deref> ReadableArgs<(&'a ES, &'b SP, u32, &'c ChannelTypeFeatures)> for Channel<<SP::Target as SignerProvider>::Signer>
+impl<'a, 'b, 'c, ES: Deref, SP: Deref> ReadableArgs<(&'a ES, &'b SP, u32, &'c ChannelTypeFeatures)> for Channel<SP>
where
ES::Target: EntropySource,
SP::Target: SignerProvider
}
let channel_id = Readable::read(reader)?;
- let channel_state = Readable::read(reader)?;
+ let channel_state = ChannelState::from_u32(Readable::read(reader)?).map_err(|_| DecodeError::InvalidValue)?;
let channel_value_satoshis = Readable::read(reader)?;
let latest_monitor_update_id = Readable::read(reader)?;
},
_ => return Err(DecodeError::InvalidValue),
},
+ skimmed_fee_msat: None,
+ blinding_point: None,
});
}
payment_hash: Readable::read(reader)?,
source: Readable::read(reader)?,
onion_routing_packet: Readable::read(reader)?,
+ skimmed_fee_msat: None,
+ blinding_point: None,
},
1 => HTLCUpdateAwaitingACK::ClaimHTLC {
payment_preimage: Readable::read(reader)?,
_ => return Err(DecodeError::InvalidValue),
};
- let channel_parameters: ChannelTransactionParameters = Readable::read(reader)?;
- let funding_transaction = Readable::read(reader)?;
+ let mut channel_parameters: ChannelTransactionParameters = Readable::read(reader)?;
+ let funding_transaction: Option<Transaction> = Readable::read(reader)?;
let counterparty_cur_commitment_point = Readable::read(reader)?;
let mut user_id_high_opt: Option<u64> = None;
let mut channel_keys_id: Option<[u8; 32]> = None;
- let mut temporary_channel_id: Option<[u8; 32]> = None;
+ let mut temporary_channel_id: Option<ChannelId> = None;
let mut holder_max_accepted_htlcs: Option<u16> = None;
- let mut pending_monitor_updates = Some(Vec::new());
+ let mut blocked_monitor_updates = Some(Vec::new());
+
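+ // Sidecar TLVs added by newer versions; these stay `None` when reading data written by
+ // older code.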
+ let mut pending_outbound_skimmed_fees_opt: Option<Vec<Option<u64>>> = None;
+ let mut holding_cell_skimmed_fees_opt: Option<Vec<Option<u64>>> = None;
+
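+ // `Option<()>` acts as a presence flag: `Some(())` iff the channel was funded as part
+ // of a batch.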
+ let mut is_batch_funding: Option<()> = None;
+
+ let mut pending_outbound_blinding_points_opt: Option<Vec<Option<PublicKey>>> = None;
+ let mut holding_cell_blinding_points_opt: Option<Vec<Option<PublicKey>>> = None;
read_tlv_fields!(reader, {
(0, announcement_sigs, option),
(5, config, option), // Note that if none is provided we will *not* overwrite the existing one.
(6, holder_max_htlc_value_in_flight_msat, option),
(7, shutdown_scriptpubkey, option),
+ (8, blocked_monitor_updates, optional_vec),
(9, target_closing_feerate_sats_per_kw, option),
- (11, monitor_pending_finalized_fulfills, vec_type),
+ (11, monitor_pending_finalized_fulfills, optional_vec),
(13, channel_creation_height, option),
- (15, preimages_opt, vec_type),
+ (15, preimages_opt, optional_vec),
(17, announcement_sigs_state, option),
(19, latest_inbound_scid_alias, option),
(21, outbound_scid_alias, option),
(28, holder_max_accepted_htlcs, option),
(29, temporary_channel_id, option),
(31, channel_pending_event_emitted, option),
- (33, pending_monitor_updates, vec_type),
+ (35, pending_outbound_skimmed_fees_opt, optional_vec),
+ (37, holding_cell_skimmed_fees_opt, optional_vec),
+ (38, is_batch_funding, option),
+ (39, pending_outbound_blinding_points_opt, optional_vec),
+ (41, holding_cell_blinding_points_opt, optional_vec),
});
let (channel_keys_id, holder_signer) = if let Some(channel_keys_id) = channel_keys_id {
let mut holder_signer = signer_provider.derive_channel_signer(channel_value_satoshis, channel_keys_id);
// If we've gotten to the funding stage of the channel, populate the signer with its
// required channel parameters.
- let non_shutdown_state = channel_state & (!MULTI_STATE_FLAGS);
- if non_shutdown_state >= (ChannelState::FundingCreated as u32) {
+ if channel_state >= ChannelState::FundingNegotiated {
holder_signer.provide_channel_parameters(&channel_parameters);
}
(channel_keys_id, holder_signer)
return Err(DecodeError::UnknownRequiredFeature);
}
+ // ChannelTransactionParameters may have had an empty features set upon deserialization.
+ // To account for that, we're proactively setting/overriding the field here.
+ channel_parameters.channel_type_features = chan_features.clone();
+
let mut secp_ctx = Secp256k1::new();
secp_ctx.seeded_randomize(&entropy_source.get_secure_random_bytes());
let holder_max_accepted_htlcs = holder_max_accepted_htlcs.unwrap_or(DEFAULT_MAX_HTLCS);
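+ // Re-attach the sidecar TLV data: one entry per pending outbound HTLC and one per
+ // holding-cell `AddHTLC`. A leftover or missing entry means the data is corrupt.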
+ if let Some(skimmed_fees) = pending_outbound_skimmed_fees_opt {
+ let mut iter = skimmed_fees.into_iter();
+ for htlc in pending_outbound_htlcs.iter_mut() {
+ htlc.skimmed_fee_msat = iter.next().ok_or(DecodeError::InvalidValue)?;
+ }
+ // We expect all skimmed fees to be consumed above
+ if iter.next().is_some() { return Err(DecodeError::InvalidValue) }
+ }
+ if let Some(skimmed_fees) = holding_cell_skimmed_fees_opt {
+ let mut iter = skimmed_fees.into_iter();
+ for htlc in holding_cell_htlc_updates.iter_mut() {
+ if let HTLCUpdateAwaitingACK::AddHTLC { ref mut skimmed_fee_msat, .. } = htlc {
+ *skimmed_fee_msat = iter.next().ok_or(DecodeError::InvalidValue)?;
+ }
+ }
+ // We expect all skimmed fees to be consumed above
+ if iter.next().is_some() { return Err(DecodeError::InvalidValue) }
+ }
+ if let Some(blinding_pts) = pending_outbound_blinding_points_opt {
+ let mut iter = blinding_pts.into_iter();
+ for htlc in pending_outbound_htlcs.iter_mut() {
+ htlc.blinding_point = iter.next().ok_or(DecodeError::InvalidValue)?;
+ }
+ // We expect all blinding points to be consumed above
+ if iter.next().is_some() { return Err(DecodeError::InvalidValue) }
+ }
+ if let Some(blinding_pts) = holding_cell_blinding_points_opt {
+ let mut iter = blinding_pts.into_iter();
+ for htlc in holding_cell_htlc_updates.iter_mut() {
+ if let HTLCUpdateAwaitingACK::AddHTLC { ref mut blinding_point, .. } = htlc {
+ *blinding_point = iter.next().ok_or(DecodeError::InvalidValue)?;
+ }
+ }
+ // We expect all blinding points to be consumed above
+ if iter.next().is_some() { return Err(DecodeError::InvalidValue) }
+ }
+
Ok(Channel {
context: ChannelContext {
user_id,
latest_monitor_update_id,
- holder_signer,
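+ // Only ECDSA signers are reconstructed on read; taproot signers are not serialized yet.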
+ holder_signer: ChannelSignerType::Ecdsa(holder_signer),
shutdown_scriptpubkey,
destination_script,
monitor_pending_failures,
monitor_pending_finalized_fulfills: monitor_pending_finalized_fulfills.unwrap(),
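+ // Async-signer flags always start unset on reload; any signatures still outstanding
+ // will simply be requested again.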
+ signer_pending_commitment_update: false,
+ signer_pending_funding: false,
+
pending_update_fee,
holding_cell_update_fee,
next_holder_htlc_id,
last_sent_closing_fee: None,
pending_counterparty_closing_signed: None,
+ expecting_peer_commitment_signed: false,
closing_fee_limits: None,
target_closing_feerate_sats_per_kw,
- inbound_awaiting_accept: false,
-
funding_tx_confirmed_in,
funding_tx_confirmation_height,
short_channel_id,
channel_transaction_parameters: channel_parameters,
funding_transaction,
+ is_batch_funding,
counterparty_cur_commitment_point,
counterparty_prev_commitment_point,
channel_type: channel_type.unwrap(),
channel_keys_id,
- pending_monitor_updates: pending_monitor_updates.unwrap(),
+ blocked_monitor_updates: blocked_monitor_updates.unwrap(),
}
})
}
#[cfg(test)]
mod tests {
use std::cmp;
- use bitcoin::blockdata::script::{Script, Builder};
+ use bitcoin::blockdata::constants::ChainHash;
+ use bitcoin::blockdata::script::{ScriptBuf, Builder};
use bitcoin::blockdata::transaction::{Transaction, TxOut};
- use bitcoin::blockdata::constants::genesis_block;
use bitcoin::blockdata::opcodes;
use bitcoin::network::constants::Network;
- use hex;
- use crate::ln::PaymentHash;
+ use crate::ln::{PaymentHash, PaymentPreimage};
+ use crate::ln::channel_keys::{RevocationKey, RevocationBasepoint};
use crate::ln::channelmanager::{self, HTLCSource, PaymentId};
- #[cfg(anchors)]
use crate::ln::channel::InitFeatures;
- use crate::ln::channel::{Channel, InboundHTLCOutput, OutboundHTLCOutput, InboundHTLCState, OutboundHTLCState, HTLCCandidate, HTLCInitiator};
+ use crate::ln::channel::{AwaitingChannelReadyFlags, Channel, ChannelState, InboundHTLCOutput, OutboundV1Channel, InboundV1Channel, OutboundHTLCOutput, InboundHTLCState, OutboundHTLCState, HTLCCandidate, HTLCInitiator, HTLCUpdateAwaitingACK, commit_tx_fee_msat};
use crate::ln::channel::{MAX_FUNDING_SATOSHIS_NO_WUMBO, TOTAL_BITCOIN_SUPPLY_SATOSHIS, MIN_THEIR_CHAN_RESERVE_SATOSHIS};
- use crate::ln::features::ChannelTypeFeatures;
+ use crate::ln::features::{ChannelFeatures, ChannelTypeFeatures, NodeFeatures};
+ use crate::ln::msgs;
use crate::ln::msgs::{ChannelUpdate, DecodeError, UnsignedChannelUpdate, MAX_VALUE_MSAT};
use crate::ln::script::ShutdownScript;
- use crate::ln::chan_utils;
- use crate::ln::chan_utils::{htlc_success_tx_weight, htlc_timeout_tx_weight};
+ use crate::ln::chan_utils::{self, htlc_success_tx_weight, htlc_timeout_tx_weight};
use crate::chain::BestBlock;
use crate::chain::chaininterface::{FeeEstimator, LowerBoundedFeeEstimator, ConfirmationTarget};
use crate::sign::{ChannelSigner, InMemorySigner, EntropySource, SignerProvider};
use crate::chain::transaction::OutPoint;
- use crate::routing::router::Path;
+ use crate::routing::router::{Path, RouteHop};
use crate::util::config::UserConfig;
- use crate::util::enforcing_trait_impls::EnforcingSigner;
use crate::util::errors::APIError;
+ use crate::util::ser::{ReadableArgs, Writeable};
use crate::util::test_utils;
- use crate::util::test_utils::OnGetShutdownScriptpubkey;
+ use crate::util::test_utils::{OnGetShutdownScriptpubkey, TestKeysInterface};
use bitcoin::secp256k1::{Secp256k1, ecdsa::Signature};
use bitcoin::secp256k1::ffi::Signature as FFISignature;
use bitcoin::secp256k1::{SecretKey,PublicKey};
use bitcoin::hashes::sha256::Hash as Sha256;
use bitcoin::hashes::Hash;
+ use bitcoin::hashes::hex::FromHex;
use bitcoin::hash_types::WPubkeyHash;
- use bitcoin::PackedLockTime;
- use bitcoin::util::address::WitnessVersion;
+ use bitcoin::blockdata::locktime::absolute::LockTime;
+ use bitcoin::address::{WitnessProgram, WitnessVersion};
use crate::prelude::*;
struct TestFeeEstimator {
"MAX_FUNDING_SATOSHIS_NO_WUMBO is greater than all satoshis in existence");
}
- #[test]
- fn test_no_fee_check_overflow() {
- // Previously, calling `check_remote_fee` with a fee of 0xffffffff would overflow in
- // arithmetic, causing a panic with debug assertions enabled.
- let fee_est = TestFeeEstimator { fee_est: 42 };
- let bounded_fee_estimator = LowerBoundedFeeEstimator::new(&fee_est);
- assert!(Channel::<InMemorySigner>::check_remote_fee(&bounded_fee_estimator,
- u32::max_value(), None, &&test_utils::TestLogger::new()).is_err());
- }
-
struct Keys {
signer: InMemorySigner,
}
}
impl SignerProvider for Keys {
- type Signer = InMemorySigner;
+ type EcdsaSigner = InMemorySigner;
+ #[cfg(taproot)]
+ type TaprootSigner = InMemorySigner;
fn generate_channel_keys_id(&self, _inbound: bool, _channel_value_satoshis: u64, _user_channel_id: u128) -> [u8; 32] {
self.signer.channel_keys_id()
}
- fn derive_channel_signer(&self, _channel_value_satoshis: u64, _channel_keys_id: [u8; 32]) -> Self::Signer {
+ fn derive_channel_signer(&self, _channel_value_satoshis: u64, _channel_keys_id: [u8; 32]) -> Self::EcdsaSigner {
self.signer.clone()
}
- fn read_chan_signer(&self, _data: &[u8]) -> Result<Self::Signer, DecodeError> { panic!(); }
+ fn read_chan_signer(&self, _data: &[u8]) -> Result<Self::EcdsaSigner, DecodeError> { panic!(); }
- fn get_destination_script(&self) -> Result<Script, ()> {
+ fn get_destination_script(&self, _channel_keys_id: [u8; 32]) -> Result<ScriptBuf, ()> {
let secp_ctx = Secp256k1::signing_only();
- let channel_monitor_claim_key = SecretKey::from_slice(&hex::decode("0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap()[..]).unwrap();
+ let channel_monitor_claim_key = SecretKey::from_slice(&<Vec<u8>>::from_hex("0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap()[..]).unwrap();
let channel_monitor_claim_key_hash = WPubkeyHash::hash(&PublicKey::from_secret_key(&secp_ctx, &channel_monitor_claim_key).serialize());
- Ok(Builder::new().push_opcode(opcodes::all::OP_PUSHBYTES_0).push_slice(&channel_monitor_claim_key_hash[..]).into_script())
+ Ok(Builder::new().push_opcode(opcodes::all::OP_PUSHBYTES_0).push_slice(channel_monitor_claim_key_hash).into_script())
}
fn get_shutdown_scriptpubkey(&self) -> Result<ShutdownScript, ()> {
let secp_ctx = Secp256k1::signing_only();
- let channel_close_key = SecretKey::from_slice(&hex::decode("0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap()[..]).unwrap();
+ let channel_close_key = SecretKey::from_slice(&<Vec<u8>>::from_hex("0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap()[..]).unwrap();
Ok(ShutdownScript::new_p2wpkh_from_pubkey(PublicKey::from_secret_key(&secp_ctx, &channel_close_key)))
}
}
- #[cfg(not(feature = "grind_signatures"))]
+ #[cfg(all(feature = "_test_vectors", not(feature = "grind_signatures")))]
fn public_from_secret_hex(secp_ctx: &Secp256k1<bitcoin::secp256k1::All>, hex: &str) -> PublicKey {
- PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&hex::decode(hex).unwrap()[..]).unwrap())
+ PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&<Vec<u8>>::from_hex(hex).unwrap()[..]).unwrap())
}
#[test]
fn upfront_shutdown_script_incompatibility() {
let features = channelmanager::provided_init_features(&UserConfig::default()).clear_shutdown_anysegwit();
- let non_v0_segwit_shutdown_script =
- ShutdownScript::new_witness_program(WitnessVersion::V16, &[0, 40]).unwrap();
+ let non_v0_segwit_shutdown_script = ShutdownScript::new_witness_program(
+ &WitnessProgram::new(WitnessVersion::V16, &[0, 40]).unwrap(),
+ ).unwrap();
let seed = [42; 32];
let network = Network::Testnet;
let secp_ctx = Secp256k1::new();
let node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
let config = UserConfig::default();
- match Channel::<EnforcingSigner>::new_outbound(&LowerBoundedFeeEstimator::new(&TestFeeEstimator { fee_est: 253 }), &&keys_provider, &&keys_provider, node_id, &features, 10000000, 100000, 42, &config, 0, 42) {
+ match OutboundV1Channel::<&TestKeysInterface>::new(&LowerBoundedFeeEstimator::new(&TestFeeEstimator { fee_est: 253 }), &&keys_provider, &&keys_provider, node_id, &features, 10000000, 100000, 42, &config, 0, 42, None) {
Err(APIError::IncompatibleShutdownScript { script }) => {
assert_eq!(script.into_inner(), non_v0_segwit_shutdown_script.into_inner());
},
let node_a_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
let config = UserConfig::default();
- let node_a_chan = Channel::<EnforcingSigner>::new_outbound(&bounded_fee_estimator, &&keys_provider, &&keys_provider, node_a_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42).unwrap();
+ let node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&bounded_fee_estimator, &&keys_provider, &&keys_provider, node_a_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None).unwrap();
// Now change the fee so we can check that the fee in the open_channel message is the
// same as the old fee.
fee_est.fee_est = 500;
- let open_channel_msg = node_a_chan.get_open_channel(genesis_block(network).header.block_hash());
+ let open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network));
assert_eq!(open_channel_msg.feerate_per_kw, original_fee);
}
let network = Network::Testnet;
let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
let logger = test_utils::TestLogger::new();
+ let best_block = BestBlock::from_network(network);
// Go through the flow of opening a channel between two nodes, making sure
// they have different dust limits.
// Create Node A's channel pointing to Node B's pubkey
let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
let config = UserConfig::default();
- let mut node_a_chan = Channel::<EnforcingSigner>::new_outbound(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42).unwrap();
+ let mut node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None).unwrap();
// Create Node B's channel by receiving Node A's open_channel message
// Make sure A's dust limit is as we expect.
- let open_channel_msg = node_a_chan.get_open_channel(genesis_block(network).header.block_hash());
+ let open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network));
let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
- let mut node_b_chan = Channel::<EnforcingSigner>::new_from_req(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config), &open_channel_msg, 7, &config, 0, &&logger, 42).unwrap();
+ let mut node_b_chan = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config), &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false).unwrap();
// Node B --> Node A: accept channel, explicitly setting B's dust limit.
- let mut accept_channel_msg = node_b_chan.accept_inbound_channel(0);
+ let mut accept_channel_msg = node_b_chan.accept_inbound_channel();
accept_channel_msg.dust_limit_satoshis = 546;
node_a_chan.accept_channel(&accept_channel_msg, &config.channel_handshake_limits, &channelmanager::provided_init_features(&config)).unwrap();
node_a_chan.context.holder_dust_limit_satoshis = 1560;
+ // Node A --> Node B: funding created
+ let output_script = node_a_chan.context.get_funding_redeemscript();
+ let tx = Transaction { version: 1, lock_time: LockTime::ZERO, input: Vec::new(), output: vec![TxOut {
+ value: 10000000, script_pubkey: output_script.clone(),
+ }]};
+ let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 };
+ let funding_created_msg = node_a_chan.get_funding_created(tx.clone(), funding_outpoint, false, &&logger).map_err(|_| ()).unwrap();
+ let (_, funding_signed_msg, _) = node_b_chan.funding_created(&funding_created_msg.unwrap(), best_block, &&keys_provider, &&logger).map_err(|_| ()).unwrap();
+
+ // Node B --> Node A: funding signed
+ let res = node_a_chan.funding_signed(&funding_signed_msg.unwrap(), best_block, &&keys_provider, &&logger);
+ let (mut node_a_chan, _) = if let Ok(res) = res { res } else { panic!(); };
+
// Put some inbound and outbound HTLCs in A's channel.
let htlc_amount_msat = 11_092_000; // put an amount below A's effective dust limit but above B's.
node_a_chan.context.pending_inbound_htlcs.push(InboundHTLCOutput {
htlc_id: 0,
amount_msat: htlc_amount_msat,
- payment_hash: PaymentHash(Sha256::hash(&[42; 32]).into_inner()),
+ payment_hash: PaymentHash(Sha256::hash(&[42; 32]).to_byte_array()),
cltv_expiry: 300000000,
state: InboundHTLCState::Committed,
});
node_a_chan.context.pending_outbound_htlcs.push(OutboundHTLCOutput {
htlc_id: 1,
amount_msat: htlc_amount_msat, // put an amount below A's dust amount but above B's.
- payment_hash: PaymentHash(Sha256::hash(&[43; 32]).into_inner()),
+ payment_hash: PaymentHash(Sha256::hash(&[43; 32]).to_byte_array()),
cltv_expiry: 200000000,
state: OutboundHTLCState::Committed,
source: HTLCSource::OutboundRoute {
path: Path { hops: Vec::new(), blinded_tail: None },
- session_priv: SecretKey::from_slice(&hex::decode("0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap()[..]).unwrap(),
+ session_priv: SecretKey::from_slice(&<Vec<u8>>::from_hex("0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap()[..]).unwrap(),
first_hop_htlc_msat: 548,
payment_id: PaymentId([42; 32]),
- }
+ },
+ skimmed_fee_msat: None,
+ blinding_point: None,
});
// Make sure when Node A calculates their local commitment transaction, none of the HTLCs pass
// the dust limit check.
let htlc_candidate = HTLCCandidate::new(htlc_amount_msat, HTLCInitiator::LocalOffered);
- let local_commit_tx_fee = node_a_chan.next_local_commit_tx_fee_msat(htlc_candidate, None);
- let local_commit_fee_0_htlcs = Channel::<EnforcingSigner>::commit_tx_fee_msat(node_a_chan.context.feerate_per_kw, 0, node_a_chan.context.opt_anchors());
+ let local_commit_tx_fee = node_a_chan.context.next_local_commit_tx_fee_msat(htlc_candidate, None);
+ let local_commit_fee_0_htlcs = commit_tx_fee_msat(node_a_chan.context.feerate_per_kw, 0, node_a_chan.context.get_channel_type());
assert_eq!(local_commit_tx_fee, local_commit_fee_0_htlcs);
// Finally, make sure that when Node A calculates the remote's commitment transaction fees, all
// of the HTLCs are seen to be above the dust limit.
node_a_chan.context.channel_transaction_parameters.is_outbound_from_holder = false;
- let remote_commit_fee_3_htlcs = Channel::<EnforcingSigner>::commit_tx_fee_msat(node_a_chan.context.feerate_per_kw, 3, node_a_chan.context.opt_anchors());
+ let remote_commit_fee_3_htlcs = commit_tx_fee_msat(node_a_chan.context.feerate_per_kw, 3, node_a_chan.context.get_channel_type());
let htlc_candidate = HTLCCandidate::new(htlc_amount_msat, HTLCInitiator::LocalOffered);
- let remote_commit_tx_fee = node_a_chan.next_remote_commit_tx_fee_msat(htlc_candidate, None);
+ let remote_commit_tx_fee = node_a_chan.context.next_remote_commit_tx_fee_msat(htlc_candidate, None);
assert_eq!(remote_commit_tx_fee, remote_commit_fee_3_htlcs);
}
let node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
let config = UserConfig::default();
- let mut chan = Channel::<EnforcingSigner>::new_outbound(&fee_est, &&keys_provider, &&keys_provider, node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42).unwrap();
+ let mut chan = OutboundV1Channel::<&TestKeysInterface>::new(&fee_est, &&keys_provider, &&keys_provider, node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None).unwrap();
- let commitment_tx_fee_0_htlcs = Channel::<EnforcingSigner>::commit_tx_fee_msat(chan.context.feerate_per_kw, 0, chan.context.opt_anchors());
- let commitment_tx_fee_1_htlc = Channel::<EnforcingSigner>::commit_tx_fee_msat(chan.context.feerate_per_kw, 1, chan.context.opt_anchors());
+ let commitment_tx_fee_0_htlcs = commit_tx_fee_msat(chan.context.feerate_per_kw, 0, chan.context.get_channel_type());
+ let commitment_tx_fee_1_htlc = commit_tx_fee_msat(chan.context.feerate_per_kw, 1, chan.context.get_channel_type());
// If HTLC_SUCCESS_TX_WEIGHT and HTLC_TIMEOUT_TX_WEIGHT were swapped: then this HTLC would be
// counted as dust when it shouldn't be.
- let htlc_amt_above_timeout = ((253 * htlc_timeout_tx_weight(chan.context.opt_anchors()) / 1000) + chan.context.holder_dust_limit_satoshis + 1) * 1000;
+ let htlc_amt_above_timeout = ((253 * htlc_timeout_tx_weight(chan.context.get_channel_type()) / 1000) + chan.context.holder_dust_limit_satoshis + 1) * 1000;
let htlc_candidate = HTLCCandidate::new(htlc_amt_above_timeout, HTLCInitiator::LocalOffered);
- let commitment_tx_fee = chan.next_local_commit_tx_fee_msat(htlc_candidate, None);
+ let commitment_tx_fee = chan.context.next_local_commit_tx_fee_msat(htlc_candidate, None);
assert_eq!(commitment_tx_fee, commitment_tx_fee_1_htlc);
// If swapped: this HTLC would be counted as non-dust when it shouldn't be.
- let dust_htlc_amt_below_success = ((253 * htlc_success_tx_weight(chan.context.opt_anchors()) / 1000) + chan.context.holder_dust_limit_satoshis - 1) * 1000;
+ let dust_htlc_amt_below_success = ((253 * htlc_success_tx_weight(chan.context.get_channel_type()) / 1000) + chan.context.holder_dust_limit_satoshis - 1) * 1000;
let htlc_candidate = HTLCCandidate::new(dust_htlc_amt_below_success, HTLCInitiator::RemoteOffered);
- let commitment_tx_fee = chan.next_local_commit_tx_fee_msat(htlc_candidate, None);
+ let commitment_tx_fee = chan.context.next_local_commit_tx_fee_msat(htlc_candidate, None);
assert_eq!(commitment_tx_fee, commitment_tx_fee_0_htlcs);
chan.context.channel_transaction_parameters.is_outbound_from_holder = false;
// If swapped: this HTLC would be counted as non-dust when it shouldn't be.
- let dust_htlc_amt_above_timeout = ((253 * htlc_timeout_tx_weight(chan.context.opt_anchors()) / 1000) + chan.context.counterparty_dust_limit_satoshis + 1) * 1000;
+ let dust_htlc_amt_above_timeout = ((253 * htlc_timeout_tx_weight(chan.context.get_channel_type()) / 1000) + chan.context.counterparty_dust_limit_satoshis + 1) * 1000;
let htlc_candidate = HTLCCandidate::new(dust_htlc_amt_above_timeout, HTLCInitiator::LocalOffered);
- let commitment_tx_fee = chan.next_remote_commit_tx_fee_msat(htlc_candidate, None);
+ let commitment_tx_fee = chan.context.next_remote_commit_tx_fee_msat(htlc_candidate, None);
assert_eq!(commitment_tx_fee, commitment_tx_fee_0_htlcs);
// If swapped: this HTLC would be counted as dust when it shouldn't be.
- let htlc_amt_below_success = ((253 * htlc_success_tx_weight(chan.context.opt_anchors()) / 1000) + chan.context.counterparty_dust_limit_satoshis - 1) * 1000;
+ let htlc_amt_below_success = ((253 * htlc_success_tx_weight(chan.context.get_channel_type()) / 1000) + chan.context.counterparty_dust_limit_satoshis - 1) * 1000;
let htlc_candidate = HTLCCandidate::new(htlc_amt_below_success, HTLCInitiator::RemoteOffered);
- let commitment_tx_fee = chan.next_remote_commit_tx_fee_msat(htlc_candidate, None);
+ let commitment_tx_fee = chan.context.next_remote_commit_tx_fee_msat(htlc_candidate, None);
assert_eq!(commitment_tx_fee, commitment_tx_fee_1_htlc);
}
let seed = [42; 32];
let network = Network::Testnet;
let best_block = BestBlock::from_network(network);
- let chain_hash = best_block.block_hash();
+ let chain_hash = ChainHash::using_genesis_block(network);
let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
// Go through the flow of opening a channel between two nodes.
// Create Node A's channel pointing to Node B's pubkey
let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
let config = UserConfig::default();
- let mut node_a_chan = Channel::<EnforcingSigner>::new_outbound(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42).unwrap();
+ let mut node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None).unwrap();
// Create Node B's channel by receiving Node A's open_channel message
let open_channel_msg = node_a_chan.get_open_channel(chain_hash);
let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
- let mut node_b_chan = Channel::<EnforcingSigner>::new_from_req(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config), &open_channel_msg, 7, &config, 0, &&logger, 42).unwrap();
+ let mut node_b_chan = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config), &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false).unwrap();
// Node B --> Node A: accept channel
- let accept_channel_msg = node_b_chan.accept_inbound_channel(0);
+ let accept_channel_msg = node_b_chan.accept_inbound_channel();
node_a_chan.accept_channel(&accept_channel_msg, &config.channel_handshake_limits, &channelmanager::provided_init_features(&config)).unwrap();
// Node A --> Node B: funding created
let output_script = node_a_chan.context.get_funding_redeemscript();
- let tx = Transaction { version: 1, lock_time: PackedLockTime::ZERO, input: Vec::new(), output: vec![TxOut {
+ let tx = Transaction { version: 1, lock_time: LockTime::ZERO, input: Vec::new(), output: vec![TxOut {
value: 10000000, script_pubkey: output_script.clone(),
}]};
let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 };
- let funding_created_msg = node_a_chan.get_outbound_funding_created(tx.clone(), funding_outpoint, &&logger).unwrap();
- let (funding_signed_msg, _) = node_b_chan.funding_created(&funding_created_msg, best_block, &&keys_provider, &&logger).unwrap();
+ let funding_created_msg = node_a_chan.get_funding_created(tx.clone(), funding_outpoint, false, &&logger).map_err(|_| ()).unwrap();
+ let (mut node_b_chan, funding_signed_msg, _) = node_b_chan.funding_created(&funding_created_msg.unwrap(), best_block, &&keys_provider, &&logger).map_err(|_| ()).unwrap();
// Node B --> Node A: funding signed
- let _ = node_a_chan.funding_signed(&funding_signed_msg, best_block, &&keys_provider, &&logger);
+ let res = node_a_chan.funding_signed(&funding_signed_msg.unwrap(), best_block, &&keys_provider, &&logger);
+ let (mut node_a_chan, _) = if let Ok(res) = res { res } else { panic!(); };
// Now disconnect the two nodes and check that the commitment point in
// Node B's channel_reestablish message is sane.
- node_b_chan.remove_uncommitted_htlcs_and_mark_paused(&&logger);
+ assert!(node_b_chan.remove_uncommitted_htlcs_and_mark_paused(&&logger).is_ok());
let msg = node_b_chan.get_channel_reestablish(&&logger);
assert_eq!(msg.next_local_commitment_number, 1); // now called next_commitment_number
assert_eq!(msg.next_remote_commitment_number, 0); // now called next_revocation_number
// Check that the commitment point in Node A's channel_reestablish message
// is sane.
- node_a_chan.remove_uncommitted_htlcs_and_mark_paused(&&logger);
+ assert!(node_a_chan.remove_uncommitted_htlcs_and_mark_paused(&&logger).is_ok());
let msg = node_a_chan.get_channel_reestablish(&&logger);
assert_eq!(msg.next_local_commitment_number, 1); // now called next_commitment_number
assert_eq!(msg.next_remote_commitment_number, 0); // now called next_revocation_number
let mut config_101_percent = UserConfig::default();
config_101_percent.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 101;
- // Test that `new_outbound` creates a channel with the correct value for
+ // Test that `OutboundV1Channel::new` creates a channel with the correct value for
// `holder_max_htlc_value_in_flight_msat`, when configured with a valid percentage value,
// which is set to the lower bound + 1 (2%) of the `channel_value`.
- let chan_1 = Channel::<EnforcingSigner>::new_outbound(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_2_percent), 10000000, 100000, 42, &config_2_percent, 0, 42).unwrap();
+ let chan_1 = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_2_percent), 10000000, 100000, 42, &config_2_percent, 0, 42, None).unwrap();
let chan_1_value_msat = chan_1.context.channel_value_satoshis * 1000;
assert_eq!(chan_1.context.holder_max_htlc_value_in_flight_msat, (chan_1_value_msat as f64 * 0.02) as u64);
// Test with the upper bound - 1 of valid values (99%).
- let chan_2 = Channel::<EnforcingSigner>::new_outbound(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_99_percent), 10000000, 100000, 42, &config_99_percent, 0, 42).unwrap();
+ let chan_2 = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_99_percent), 10000000, 100000, 42, &config_99_percent, 0, 42, None).unwrap();
let chan_2_value_msat = chan_2.context.channel_value_satoshis * 1000;
assert_eq!(chan_2.context.holder_max_htlc_value_in_flight_msat, (chan_2_value_msat as f64 * 0.99) as u64);
- let chan_1_open_channel_msg = chan_1.get_open_channel(genesis_block(network).header.block_hash());
+ let chan_1_open_channel_msg = chan_1.get_open_channel(ChainHash::using_genesis_block(network));
- // Test that `new_from_req` creates a channel with the correct value for
+ // Test that `InboundV1Channel::new` creates a channel with the correct value for
// `holder_max_htlc_value_in_flight_msat`, when configured with a valid percentage value,
// which is set to the lower bound + 1 (2%) of the `channel_value`.
- let chan_3 = Channel::<EnforcingSigner>::new_from_req(&feeest, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&config_2_percent), &channelmanager::provided_init_features(&config_2_percent), &chan_1_open_channel_msg, 7, &config_2_percent, 0, &&logger, 42).unwrap();
+ let chan_3 = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&config_2_percent), &channelmanager::provided_init_features(&config_2_percent), &chan_1_open_channel_msg, 7, &config_2_percent, 0, &&logger, /*is_0conf=*/false).unwrap();
let chan_3_value_msat = chan_3.context.channel_value_satoshis * 1000;
assert_eq!(chan_3.context.holder_max_htlc_value_in_flight_msat, (chan_3_value_msat as f64 * 0.02) as u64);
// Test with the upper bound - 1 of valid values (99%).
- let chan_4 = Channel::<EnforcingSigner>::new_from_req(&feeest, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&config_99_percent), &channelmanager::provided_init_features(&config_99_percent), &chan_1_open_channel_msg, 7, &config_99_percent, 0, &&logger, 42).unwrap();
+ let chan_4 = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&config_99_percent), &channelmanager::provided_init_features(&config_99_percent), &chan_1_open_channel_msg, 7, &config_99_percent, 0, &&logger, /*is_0conf=*/false).unwrap();
let chan_4_value_msat = chan_4.context.channel_value_satoshis * 1000;
assert_eq!(chan_4.context.holder_max_htlc_value_in_flight_msat, (chan_4_value_msat as f64 * 0.99) as u64);
- // Test that `new_outbound` uses the lower bound of the configurable percentage values (1%)
+ // Test that `OutboundV1Channel::new` uses the lower bound of the configurable percentage values (1%)
// if `max_inbound_htlc_value_in_flight_percent_of_channel` is set to a value less than 1.
- let chan_5 = Channel::<EnforcingSigner>::new_outbound(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_0_percent), 10000000, 100000, 42, &config_0_percent, 0, 42).unwrap();
+ let chan_5 = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_0_percent), 10000000, 100000, 42, &config_0_percent, 0, 42, None).unwrap();
let chan_5_value_msat = chan_5.context.channel_value_satoshis * 1000;
assert_eq!(chan_5.context.holder_max_htlc_value_in_flight_msat, (chan_5_value_msat as f64 * 0.01) as u64);
- // Test that `new_outbound` uses the upper bound of the configurable percentage values
+ // Test that `OutboundV1Channel::new` uses the upper bound of the configurable percentage values
// (100%) if `max_inbound_htlc_value_in_flight_percent_of_channel` is set to a larger value
// than 100.
- let chan_6 = Channel::<EnforcingSigner>::new_outbound(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_101_percent), 10000000, 100000, 42, &config_101_percent, 0, 42).unwrap();
+ let chan_6 = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_101_percent), 10000000, 100000, 42, &config_101_percent, 0, 42, None).unwrap();
let chan_6_value_msat = chan_6.context.channel_value_satoshis * 1000;
assert_eq!(chan_6.context.holder_max_htlc_value_in_flight_msat, chan_6_value_msat);
- // Test that `new_from_req` uses the lower bound of the configurable percentage values (1%)
+ // Test that `InboundV1Channel::new` uses the lower bound of the configurable percentage values (1%)
// if `max_inbound_htlc_value_in_flight_percent_of_channel` is set to a value less than 1.
- let chan_7 = Channel::<EnforcingSigner>::new_from_req(&feeest, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&config_0_percent), &channelmanager::provided_init_features(&config_0_percent), &chan_1_open_channel_msg, 7, &config_0_percent, 0, &&logger, 42).unwrap();
+ let chan_7 = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&config_0_percent), &channelmanager::provided_init_features(&config_0_percent), &chan_1_open_channel_msg, 7, &config_0_percent, 0, &&logger, /*is_0conf=*/false).unwrap();
let chan_7_value_msat = chan_7.context.channel_value_satoshis * 1000;
assert_eq!(chan_7.context.holder_max_htlc_value_in_flight_msat, (chan_7_value_msat as f64 * 0.01) as u64);
- // Test that `new_from_req` uses the upper bound of the configurable percentage values
+ // Test that `InboundV1Channel::new` uses the upper bound of the configurable percentage values
// (100%) if `max_inbound_htlc_value_in_flight_percent_of_channel` is set to a larger value
// than 100.
- let chan_8 = Channel::<EnforcingSigner>::new_from_req(&feeest, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&config_101_percent), &channelmanager::provided_init_features(&config_101_percent), &chan_1_open_channel_msg, 7, &config_101_percent, 0, &&logger, 42).unwrap();
+ let chan_8 = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&config_101_percent), &channelmanager::provided_init_features(&config_101_percent), &chan_1_open_channel_msg, 7, &config_101_percent, 0, &&logger, /*is_0conf=*/false).unwrap();
let chan_8_value_msat = chan_8.context.channel_value_satoshis * 1000;
assert_eq!(chan_8.context.holder_max_htlc_value_in_flight_msat, chan_8_value_msat);
}
#[test]
fn test_configured_holder_selected_channel_reserve_satoshis() {
- // Test that `new_outbound` and `new_from_req` create a channel with the correct
+ // Test that `OutboundV1Channel::new` and `InboundV1Channel::new` create a channel with the correct
// channel reserves, when `their_channel_reserve_proportional_millionths` is configured.
test_self_and_counterparty_channel_reserve(10_000_000, 0.02, 0.02);
let mut outbound_node_config = UserConfig::default();
outbound_node_config.channel_handshake_config.their_channel_reserve_proportional_millionths = (outbound_selected_channel_reserve_perc * 1_000_000.0) as u32;
- let chan = Channel::<EnforcingSigner>::new_outbound(&&fee_est, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&outbound_node_config), channel_value_satoshis, 100_000, 42, &outbound_node_config, 0, 42).unwrap();
+ let chan = OutboundV1Channel::<&TestKeysInterface>::new(&&fee_est, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&outbound_node_config), channel_value_satoshis, 100_000, 42, &outbound_node_config, 0, 42, None).unwrap();
let expected_outbound_selected_chan_reserve = cmp::max(MIN_THEIR_CHAN_RESERVE_SATOSHIS, (chan.context.channel_value_satoshis as f64 * outbound_selected_channel_reserve_perc) as u64);
assert_eq!(chan.context.holder_selected_channel_reserve_satoshis, expected_outbound_selected_chan_reserve);
- let chan_open_channel_msg = chan.get_open_channel(genesis_block(network).header.block_hash());
+ let chan_open_channel_msg = chan.get_open_channel(ChainHash::using_genesis_block(network));
let mut inbound_node_config = UserConfig::default();
inbound_node_config.channel_handshake_config.their_channel_reserve_proportional_millionths = (inbound_selected_channel_reserve_perc * 1_000_000.0) as u32;
if outbound_selected_channel_reserve_perc + inbound_selected_channel_reserve_perc < 1.0 {
- let chan_inbound_node = Channel::<EnforcingSigner>::new_from_req(&&fee_est, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&inbound_node_config), &channelmanager::provided_init_features(&outbound_node_config), &chan_open_channel_msg, 7, &inbound_node_config, 0, &&logger, 42).unwrap();
+ let chan_inbound_node = InboundV1Channel::<&TestKeysInterface>::new(&&fee_est, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&inbound_node_config), &channelmanager::provided_init_features(&outbound_node_config), &chan_open_channel_msg, 7, &inbound_node_config, 0, &&logger, /*is_0conf=*/false).unwrap();
let expected_inbound_selected_chan_reserve = cmp::max(MIN_THEIR_CHAN_RESERVE_SATOSHIS, (chan.context.channel_value_satoshis as f64 * inbound_selected_channel_reserve_perc) as u64);
assert_eq!(chan_inbound_node.context.counterparty_selected_channel_reserve_satoshis.unwrap(), expected_outbound_selected_chan_reserve);
} else {
// Channel Negotiations failed
- let result = Channel::<EnforcingSigner>::new_from_req(&&fee_est, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&inbound_node_config), &channelmanager::provided_init_features(&outbound_node_config), &chan_open_channel_msg, 7, &inbound_node_config, 0, &&logger, 42);
+ let result = InboundV1Channel::<&TestKeysInterface>::new(&&fee_est, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&inbound_node_config), &channelmanager::provided_init_features(&outbound_node_config), &chan_open_channel_msg, 7, &inbound_node_config, 0, &&logger, /*is_0conf=*/false);
assert!(result.is_err());
}
}
#[test]
fn channel_update() {
let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
+ let logger = test_utils::TestLogger::new();
let secp_ctx = Secp256k1::new();
let seed = [42; 32];
let network = Network::Testnet;
- let chain_hash = genesis_block(network).header.block_hash();
+ let best_block = BestBlock::from_network(network);
+ let chain_hash = ChainHash::using_genesis_block(network);
let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
- // Create a channel.
+ // Create Node A's channel pointing to Node B's pubkey
let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
let config = UserConfig::default();
- let mut node_a_chan = Channel::<EnforcingSigner>::new_outbound(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42).unwrap();
- assert!(node_a_chan.context.counterparty_forwarding_info.is_none());
- assert_eq!(node_a_chan.context.holder_htlc_minimum_msat, 1); // the default
- assert!(node_a_chan.context.counterparty_forwarding_info().is_none());
+ let mut node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None).unwrap();
+
+ // Create Node B's channel by receiving Node A's open_channel message
+ let open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network));
+ let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
+ let mut node_b_chan = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config), &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false).unwrap();
+
+ // Node B --> Node A: accept channel, explicitly setting B's dust limit.
+ let mut accept_channel_msg = node_b_chan.accept_inbound_channel();
+ accept_channel_msg.dust_limit_satoshis = 546;
+ node_a_chan.accept_channel(&accept_channel_msg, &config.channel_handshake_limits, &channelmanager::provided_init_features(&config)).unwrap();
+ node_a_chan.context.holder_dust_limit_satoshis = 1560;
+
+ // Node A --> Node B: funding created
+ let output_script = node_a_chan.context.get_funding_redeemscript();
+ let tx = Transaction { version: 1, lock_time: LockTime::ZERO, input: Vec::new(), output: vec![TxOut {
+ value: 10000000, script_pubkey: output_script.clone(),
+ }]};
+ let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 };
+ let funding_created_msg = node_a_chan.get_funding_created(tx.clone(), funding_outpoint, false, &&logger).map_err(|_| ()).unwrap();
+ let (_, funding_signed_msg, _) = node_b_chan.funding_created(&funding_created_msg.unwrap(), best_block, &&keys_provider, &&logger).map_err(|_| ()).unwrap();
+
+ // Node B --> Node A: funding signed
+ let res = node_a_chan.funding_signed(&funding_signed_msg.unwrap(), best_block, &&keys_provider, &&logger);
+ let (mut node_a_chan, _) = if let Ok(res) = res { res } else { panic!(); };
// Make sure that receiving a channel update will update the Channel as expected.
let update = ChannelUpdate {
},
signature: Signature::from(unsafe { FFISignature::new() })
};
- node_a_chan.channel_update(&update).unwrap();
+ assert!(node_a_chan.channel_update(&update).unwrap());
// The counterparty can send an update with a higher minimum HTLC, but that shouldn't
// change our official htlc_minimum_msat.
},
None => panic!("expected counterparty forwarding info to be Some")
}
+
+ assert!(!node_a_chan.channel_update(&update).unwrap());
+ }
+
+ #[test]
+ fn blinding_point_skimmed_fee_ser() {
+ // Ensure that channel blinding points and skimmed fees are (de)serialized properly.
+ let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
+ let secp_ctx = Secp256k1::new();
+ let seed = [42; 32];
+ let network = Network::Testnet;
+ let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
+
+ let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
+ let config = UserConfig::default();
+ let features = channelmanager::provided_init_features(&config);
+ let outbound_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &features, 10000000, 100000, 42, &config, 0, 42, None).unwrap();
+ let mut chan = Channel { context: outbound_chan.context };
+
+ let dummy_htlc_source = HTLCSource::OutboundRoute {
+ path: Path {
+ hops: vec![RouteHop {
+ pubkey: test_utils::pubkey(2), channel_features: ChannelFeatures::empty(),
+ node_features: NodeFeatures::empty(), short_channel_id: 0, fee_msat: 0,
+ cltv_expiry_delta: 0, maybe_announced_channel: false,
+ }],
+ blinded_tail: None
+ },
+ session_priv: test_utils::privkey(42),
+ first_hop_htlc_msat: 0,
+ payment_id: PaymentId([42; 32]),
+ };
+ let dummy_outbound_output = OutboundHTLCOutput {
+ htlc_id: 0,
+ amount_msat: 0,
+ payment_hash: PaymentHash([43; 32]),
+ cltv_expiry: 0,
+ state: OutboundHTLCState::Committed,
+ source: dummy_htlc_source.clone(),
+ skimmed_fee_msat: None,
+ blinding_point: None,
+ };
+ let mut pending_outbound_htlcs = vec![dummy_outbound_output.clone(); 10];
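+ // Stagger which HTLCs carry a blinding point and/or skimmed fee so the round-trip
+ // exercises both `Some` and `None` entries.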
+ for (idx, htlc) in pending_outbound_htlcs.iter_mut().enumerate() {
+ if idx % 2 == 0 {
+ htlc.blinding_point = Some(test_utils::pubkey(42 + idx as u8));
+ }
+ if idx % 3 == 0 {
+ htlc.skimmed_fee_msat = Some(1);
+ }
+ }
+ chan.context.pending_outbound_htlcs = pending_outbound_htlcs.clone();
+
+ let dummy_holding_cell_add_htlc = HTLCUpdateAwaitingACK::AddHTLC {
+ amount_msat: 0,
+ cltv_expiry: 0,
+ payment_hash: PaymentHash([43; 32]),
+ source: dummy_htlc_source.clone(),
+ onion_routing_packet: msgs::OnionPacket {
+ version: 0,
+ public_key: Ok(test_utils::pubkey(1)),
+ hop_data: [0; 20*65],
+ hmac: [0; 32]
+ },
+ skimmed_fee_msat: None,
+ blinding_point: None,
+ };
+ let dummy_holding_cell_claim_htlc = HTLCUpdateAwaitingACK::ClaimHTLC {
+ payment_preimage: PaymentPreimage([42; 32]),
+ htlc_id: 0,
+ };
+ let mut holding_cell_htlc_updates = Vec::with_capacity(10);
+ for i in 0..10 {
+ if i % 3 == 0 {
+ holding_cell_htlc_updates.push(dummy_holding_cell_add_htlc.clone());
+ } else if i % 3 == 1 {
+ holding_cell_htlc_updates.push(dummy_holding_cell_claim_htlc.clone());
+ } else {
+ let mut dummy_add = dummy_holding_cell_add_htlc.clone();
+ if let HTLCUpdateAwaitingACK::AddHTLC {
+ ref mut blinding_point, ref mut skimmed_fee_msat, ..
+ } = &mut dummy_add {
+ *blinding_point = Some(test_utils::pubkey(42 + i));
+ *skimmed_fee_msat = Some(42);
+ } else { panic!() }
+ holding_cell_htlc_updates.push(dummy_add);
+ }
+ }
+ chan.context.holding_cell_htlc_updates = holding_cell_htlc_updates.clone();
+
+ // Encode and decode the channel and ensure that the HTLCs within are the same.
+ let encoded_chan = chan.encode();
+ let mut s = crate::io::Cursor::new(&encoded_chan);
+ let mut reader = crate::util::ser::FixedLengthReader::new(&mut s, encoded_chan.len() as u64);
+ let features = channelmanager::provided_channel_type_features(&config);
+ let decoded_chan = Channel::read(&mut reader, (&&keys_provider, &&keys_provider, 0, &features)).unwrap();
+ assert_eq!(decoded_chan.context.pending_outbound_htlcs, pending_outbound_htlcs);
+ assert_eq!(decoded_chan.context.holding_cell_htlc_updates, holding_cell_htlc_updates);
}
#[cfg(feature = "_test_vectors")]
#[test]
fn outbound_commitment_test() {
- use bitcoin::util::sighash;
+ use bitcoin::sighash;
use bitcoin::consensus::encode::serialize;
- use bitcoin::blockdata::transaction::EcdsaSighashType;
+ use bitcoin::sighash::EcdsaSighashType;
use bitcoin::hashes::hex::FromHex;
use bitcoin::hash_types::Txid;
use bitcoin::secp256k1::Message;
- use crate::sign::EcdsaChannelSigner;
+ use crate::sign::{ChannelDerivationParameters, HTLCDescriptor, ecdsa::EcdsaChannelSigner};
use crate::ln::PaymentPreimage;
use crate::ln::channel::{HTLCOutputInCommitment, TxCreationKeys};
+ use crate::ln::channel_keys::{DelayedPaymentBasepoint, HtlcBasepoint};
use crate::ln::chan_utils::{ChannelPublicKeys, HolderCommitmentTransaction, CounterpartyChannelTransactionParameters};
use crate::util::logger::Logger;
use crate::sync::Arc;
+ use core::str::FromStr;
+ use hex::DisplayHex;
// Test vectors from BOLT 3 Appendices C and F (anchors):
let feeest = TestFeeEstimator{fee_est: 15000};
let mut signer = InMemorySigner::new(
&secp_ctx,
- SecretKey::from_slice(&hex::decode("30ff4956bbdd3222d44cc5e8a1261dab1e07957bdac5ae88fe3261ef321f3749").unwrap()[..]).unwrap(),
- SecretKey::from_slice(&hex::decode("0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap()[..]).unwrap(),
- SecretKey::from_slice(&hex::decode("1111111111111111111111111111111111111111111111111111111111111111").unwrap()[..]).unwrap(),
- SecretKey::from_slice(&hex::decode("3333333333333333333333333333333333333333333333333333333333333333").unwrap()[..]).unwrap(),
- SecretKey::from_slice(&hex::decode("1111111111111111111111111111111111111111111111111111111111111111").unwrap()[..]).unwrap(),
+ SecretKey::from_slice(&<Vec<u8>>::from_hex("30ff4956bbdd3222d44cc5e8a1261dab1e07957bdac5ae88fe3261ef321f3749").unwrap()[..]).unwrap(),
+ SecretKey::from_slice(&<Vec<u8>>::from_hex("0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap()[..]).unwrap(),
+ SecretKey::from_slice(&<Vec<u8>>::from_hex("1111111111111111111111111111111111111111111111111111111111111111").unwrap()[..]).unwrap(),
+ SecretKey::from_slice(&<Vec<u8>>::from_hex("3333333333333333333333333333333333333333333333333333333333333333").unwrap()[..]).unwrap(),
+ SecretKey::from_slice(&<Vec<u8>>::from_hex("1111111111111111111111111111111111111111111111111111111111111111").unwrap()[..]).unwrap(),
// These aren't set in the test vectors:
[0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff],
);
assert_eq!(signer.pubkeys().funding_pubkey.serialize()[..],
- hex::decode("023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb").unwrap()[..]);
+ <Vec<u8>>::from_hex("023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb").unwrap()[..]);
let keys_provider = Keys { signer: signer.clone() };
let counterparty_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
let mut config = UserConfig::default();
config.channel_handshake_config.announced_channel = false;
- let mut chan = Channel::<InMemorySigner>::new_outbound(&LowerBoundedFeeEstimator::new(&feeest), &&keys_provider, &&keys_provider, counterparty_node_id, &channelmanager::provided_init_features(&config), 10_000_000, 0, 42, &config, 0, 42).unwrap(); // Nothing uses their network key in this test
+ let mut chan = OutboundV1Channel::<&Keys>::new(&LowerBoundedFeeEstimator::new(&feeest), &&keys_provider, &&keys_provider, counterparty_node_id, &channelmanager::provided_init_features(&config), 10_000_000, 0, 42, &config, 0, 42, None).unwrap(); // Nothing uses their network key in this test
chan.context.holder_dust_limit_satoshis = 546;
chan.context.counterparty_selected_channel_reserve_satoshis = Some(0); // Filled in in accept_channel
- let funding_info = OutPoint{ txid: Txid::from_hex("8984484a580b825b9972d7adb15050b3ab624ccd731946b3eeddb92f4e7ef6be").unwrap(), index: 0 };
+ let funding_info = OutPoint{ txid: Txid::from_str("8984484a580b825b9972d7adb15050b3ab624ccd731946b3eeddb92f4e7ef6be").unwrap(), index: 0 };
let counterparty_pubkeys = ChannelPublicKeys {
funding_pubkey: public_from_secret_hex(&secp_ctx, "1552dfba4f6cf29a62a0af13c8d6981d36d0ef8d61ba10fb0fe90da7634d7e13"),
- revocation_basepoint: PublicKey::from_slice(&hex::decode("02466d7fcae563e5cb09a0d1870bb580344804617879a14949cf22285f1bae3f27").unwrap()[..]).unwrap(),
+ revocation_basepoint: RevocationBasepoint::from(PublicKey::from_slice(&<Vec<u8>>::from_hex("02466d7fcae563e5cb09a0d1870bb580344804617879a14949cf22285f1bae3f27").unwrap()[..]).unwrap()),
payment_point: public_from_secret_hex(&secp_ctx, "4444444444444444444444444444444444444444444444444444444444444444"),
- delayed_payment_basepoint: public_from_secret_hex(&secp_ctx, "1552dfba4f6cf29a62a0af13c8d6981d36d0ef8d61ba10fb0fe90da7634d7e13"),
- htlc_basepoint: public_from_secret_hex(&secp_ctx, "4444444444444444444444444444444444444444444444444444444444444444")
+ delayed_payment_basepoint: DelayedPaymentBasepoint::from(public_from_secret_hex(&secp_ctx, "1552dfba4f6cf29a62a0af13c8d6981d36d0ef8d61ba10fb0fe90da7634d7e13")),
+ htlc_basepoint: HtlcBasepoint::from(public_from_secret_hex(&secp_ctx, "4444444444444444444444444444444444444444444444444444444444444444"))
};
chan.context.channel_transaction_parameters.counterparty_parameters = Some(
CounterpartyChannelTransactionParameters {
signer.provide_channel_parameters(&chan.context.channel_transaction_parameters);
assert_eq!(counterparty_pubkeys.payment_point.serialize()[..],
- hex::decode("032c0b7cf95324a07d05398b240174dc0c2be444d96b159aa6c7f7b1e668680991").unwrap()[..]);
+ <Vec<u8>>::from_hex("032c0b7cf95324a07d05398b240174dc0c2be444d96b159aa6c7f7b1e668680991").unwrap()[..]);
assert_eq!(counterparty_pubkeys.funding_pubkey.serialize()[..],
- hex::decode("030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c1").unwrap()[..]);
+ <Vec<u8>>::from_hex("030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c1").unwrap()[..]);
- assert_eq!(counterparty_pubkeys.htlc_basepoint.serialize()[..],
- hex::decode("032c0b7cf95324a07d05398b240174dc0c2be444d96b159aa6c7f7b1e668680991").unwrap()[..]);
+ assert_eq!(counterparty_pubkeys.htlc_basepoint.to_public_key().serialize()[..],
+ <Vec<u8>>::from_hex("032c0b7cf95324a07d05398b240174dc0c2be444d96b159aa6c7f7b1e668680991").unwrap()[..]);
// We can't just use build_holder_transaction_keys here as the per_commitment_secret is not
// derived from a commitment_seed, so instead we copy it here and call
// build_commitment_transaction.
- let delayed_payment_base = &chan.context.holder_signer.pubkeys().delayed_payment_basepoint;
- let per_commitment_secret = SecretKey::from_slice(&hex::decode("1f1e1d1c1b1a191817161514131211100f0e0d0c0b0a09080706050403020100").unwrap()[..]).unwrap();
+ let delayed_payment_base = &chan.context.holder_signer.as_ref().pubkeys().delayed_payment_basepoint;
+ let per_commitment_secret = SecretKey::from_slice(&<Vec<u8>>::from_hex("1f1e1d1c1b1a191817161514131211100f0e0d0c0b0a09080706050403020100").unwrap()[..]).unwrap();
let per_commitment_point = PublicKey::from_secret_key(&secp_ctx, &per_commitment_secret);
- let htlc_basepoint = &chan.context.holder_signer.pubkeys().htlc_basepoint;
+ let htlc_basepoint = &chan.context.holder_signer.as_ref().pubkeys().htlc_basepoint;
let keys = TxCreationKeys::derive_new(&secp_ctx, &per_commitment_point, delayed_payment_base, htlc_basepoint, &counterparty_pubkeys.revocation_basepoint, &counterparty_pubkeys.htlc_basepoint);
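+		// Both macros delegate to test_commitment_common!, differing only in the
+		// ChannelTypeFeatures they install on the channel before building the commitment.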
macro_rules! test_commitment {
( $counterparty_sig_hex: expr, $sig_hex: expr, $tx_hex: expr, $($remain:tt)* ) => {
- chan.context.channel_transaction_parameters.opt_anchors = None;
- test_commitment_common!($counterparty_sig_hex, $sig_hex, $tx_hex, false, $($remain)*);
+ chan.context.channel_transaction_parameters.channel_type_features = ChannelTypeFeatures::only_static_remote_key();
+ test_commitment_common!($counterparty_sig_hex, $sig_hex, $tx_hex, &ChannelTypeFeatures::only_static_remote_key(), $($remain)*);
};
}
macro_rules! test_commitment_with_anchors {
( $counterparty_sig_hex: expr, $sig_hex: expr, $tx_hex: expr, $($remain:tt)* ) => {
- chan.context.channel_transaction_parameters.opt_anchors = Some(());
- test_commitment_common!($counterparty_sig_hex, $sig_hex, $tx_hex, true, $($remain)*);
+ chan.context.channel_transaction_parameters.channel_type_features = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
+ test_commitment_common!($counterparty_sig_hex, $sig_hex, $tx_hex, &ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies(), $($remain)*);
};
}
let trusted_tx = commitment_tx.trust();
let unsigned_tx = trusted_tx.built_transaction();
let redeemscript = chan.context.get_funding_redeemscript();
- let counterparty_signature = Signature::from_der(&hex::decode($counterparty_sig_hex).unwrap()[..]).unwrap();
+ let counterparty_signature = Signature::from_der(&<Vec<u8>>::from_hex($counterparty_sig_hex).unwrap()[..]).unwrap();
let sighash = unsigned_tx.get_sighash_all(&redeemscript, chan.context.channel_value_satoshis);
- log_trace!(logger, "unsigned_tx = {}", hex::encode(serialize(&unsigned_tx.transaction)));
+ log_trace!(logger, "unsigned_tx = {}", serialize(&unsigned_tx.transaction).as_hex());
assert!(secp_ctx.verify_ecdsa(&sighash, &counterparty_signature, chan.context.counterparty_funding_pubkey()).is_ok(), "verify counterparty commitment sig");
let mut per_htlc: Vec<(HTLCOutputInCommitment, Option<Signature>)> = Vec::new();
let mut counterparty_htlc_sigs = Vec::new();
counterparty_htlc_sigs.clear(); // Don't warn about excess mut for no-HTLC calls
$({
- let remote_signature = Signature::from_der(&hex::decode($counterparty_htlc_sig_hex).unwrap()[..]).unwrap();
+ let remote_signature = Signature::from_der(&<Vec<u8>>::from_hex($counterparty_htlc_sig_hex).unwrap()[..]).unwrap();
per_htlc.push((htlcs[$htlc_idx].clone(), Some(remote_signature)));
counterparty_htlc_sigs.push(remote_signature);
})*
commitment_tx.clone(),
counterparty_signature,
counterparty_htlc_sigs,
- &chan.context.holder_signer.pubkeys().funding_pubkey,
+ &chan.context.holder_signer.as_ref().pubkeys().funding_pubkey,
chan.context.counterparty_funding_pubkey()
);
- let (holder_sig, htlc_sigs) = signer.sign_holder_commitment_and_htlcs(&holder_commitment_tx, &secp_ctx).unwrap();
- assert_eq!(Signature::from_der(&hex::decode($sig_hex).unwrap()[..]).unwrap(), holder_sig, "holder_sig");
+ let holder_sig = signer.sign_holder_commitment(&holder_commitment_tx, &secp_ctx).unwrap();
+ assert_eq!(Signature::from_der(&<Vec<u8>>::from_hex($sig_hex).unwrap()[..]).unwrap(), holder_sig, "holder_sig");
let funding_redeemscript = chan.context.get_funding_redeemscript();
let tx = holder_commitment_tx.add_holder_sig(&funding_redeemscript, holder_sig);
- assert_eq!(serialize(&tx)[..], hex::decode($tx_hex).unwrap()[..], "tx");
+ assert_eq!(serialize(&tx)[..], <Vec<u8>>::from_hex($tx_hex).unwrap()[..], "tx");
-			// ((htlc, counterparty_sig), (index, holder_sig))
+			// Holder HTLC signatures are now produced on demand by the signer, so only the
+			// counterparty's signatures are iterated here, in commitment order.
- let mut htlc_sig_iter = holder_commitment_tx.htlcs().iter().zip(&holder_commitment_tx.counterparty_htlc_sigs).zip(htlc_sigs.iter().enumerate());
+ let mut htlc_counterparty_sig_iter = holder_commitment_tx.counterparty_htlc_sigs.iter();
$({
log_trace!(logger, "verifying htlc {}", $htlc_idx);
- let remote_signature = Signature::from_der(&hex::decode($counterparty_htlc_sig_hex).unwrap()[..]).unwrap();
+ let remote_signature = Signature::from_der(&<Vec<u8>>::from_hex($counterparty_htlc_sig_hex).unwrap()[..]).unwrap();
let ref htlc = htlcs[$htlc_idx];
- let htlc_tx = chan_utils::build_htlc_transaction(&unsigned_tx.txid, chan.context.feerate_per_kw,
+ let mut htlc_tx = chan_utils::build_htlc_transaction(&unsigned_tx.txid, chan.context.feerate_per_kw,
chan.context.get_counterparty_selected_contest_delay().unwrap(),
- &htlc, $opt_anchors, false, &keys.broadcaster_delayed_payment_key, &keys.revocation_key);
+ &htlc, $opt_anchors, &keys.broadcaster_delayed_payment_key, &keys.revocation_key);
let htlc_redeemscript = chan_utils::get_htlc_redeemscript(&htlc, $opt_anchors, &keys);
- let htlc_sighashtype = if $opt_anchors { EcdsaSighashType::SinglePlusAnyoneCanPay } else { EcdsaSighashType::All };
+ let htlc_sighashtype = if $opt_anchors.supports_anchors_zero_fee_htlc_tx() { EcdsaSighashType::SinglePlusAnyoneCanPay } else { EcdsaSighashType::All };
let htlc_sighash = Message::from_slice(&sighash::SighashCache::new(&htlc_tx).segwit_signature_hash(0, &htlc_redeemscript, htlc.amount_msat / 1000, htlc_sighashtype).unwrap()[..]).unwrap();
- assert!(secp_ctx.verify_ecdsa(&htlc_sighash, &remote_signature, &keys.countersignatory_htlc_key).is_ok(), "verify counterparty htlc sig");
+ assert!(secp_ctx.verify_ecdsa(&htlc_sighash, &remote_signature, &keys.countersignatory_htlc_key.to_public_key()).is_ok(), "verify counterparty htlc sig");
let mut preimage: Option<PaymentPreimage> = None;
if !htlc.offered {
for i in 0..5 {
- let out = PaymentHash(Sha256::hash(&[i; 32]).into_inner());
+ let out = PaymentHash(Sha256::hash(&[i; 32]).to_byte_array());
if out == htlc.payment_hash {
preimage = Some(PaymentPreimage([i; 32]));
}
assert!(preimage.is_some());
}
- let htlc_sig = htlc_sig_iter.next().unwrap();
- let num_anchors = if $opt_anchors { 2 } else { 0 };
- assert_eq!((htlc_sig.0).0.transaction_output_index, Some($htlc_idx + num_anchors), "output index");
-
- let signature = Signature::from_der(&hex::decode($htlc_sig_hex).unwrap()[..]).unwrap();
- assert_eq!(signature, *(htlc_sig.1).1, "htlc sig");
- let index = (htlc_sig.1).0;
- let channel_parameters = chan.context.channel_transaction_parameters.as_holder_broadcastable();
+ let htlc_counterparty_sig = htlc_counterparty_sig_iter.next().unwrap();
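+				// The holder's HTLC signature is requested via an HTLCDescriptor, which bundles
+				// the channel derivation parameters and per-commitment data the signer needs to
+				// re-derive its keys.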
+ let htlc_holder_sig = signer.sign_holder_htlc_transaction(&htlc_tx, 0, &HTLCDescriptor {
+ channel_derivation_parameters: ChannelDerivationParameters {
+ value_satoshis: chan.context.channel_value_satoshis,
+ keys_id: chan.context.channel_keys_id,
+ transaction_parameters: chan.context.channel_transaction_parameters.clone(),
+ },
+ commitment_txid: trusted_tx.txid(),
+ per_commitment_number: trusted_tx.commitment_number(),
+ per_commitment_point: trusted_tx.per_commitment_point(),
+ feerate_per_kw: trusted_tx.feerate_per_kw(),
+ htlc: htlc.clone(),
+ preimage: preimage.clone(),
+ counterparty_sig: *htlc_counterparty_sig,
+ }, &secp_ctx).unwrap();
+ let num_anchors = if $opt_anchors.supports_anchors_zero_fee_htlc_tx() { 2 } else { 0 };
+ assert_eq!(htlc.transaction_output_index, Some($htlc_idx + num_anchors), "output index");
+
+ let signature = Signature::from_der(&<Vec<u8>>::from_hex($htlc_sig_hex).unwrap()[..]).unwrap();
+ assert_eq!(signature, htlc_holder_sig, "htlc sig");
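+				// Rebuild the HTLC input witness from the counterparty and holder signatures and
+				// compare the serialized transaction against the BOLT 3 vector.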
let trusted_tx = holder_commitment_tx.trust();
- log_trace!(logger, "htlc_tx = {}", hex::encode(serialize(&trusted_tx.get_signed_htlc_tx(&channel_parameters, index, &(htlc_sig.0).1, (htlc_sig.1).1, &preimage))));
- assert_eq!(serialize(&trusted_tx.get_signed_htlc_tx(&channel_parameters, index, &(htlc_sig.0).1, (htlc_sig.1).1, &preimage))[..],
- hex::decode($htlc_tx_hex).unwrap()[..], "htlc tx");
+ htlc_tx.input[0].witness = trusted_tx.build_htlc_input_witness($htlc_idx, htlc_counterparty_sig, &htlc_holder_sig, &preimage);
+ log_trace!(logger, "htlc_tx = {}", serialize(&htlc_tx).as_hex());
+ assert_eq!(serialize(&htlc_tx)[..], <Vec<u8>>::from_hex($htlc_tx_hex).unwrap()[..], "htlc tx");
})*
- assert!(htlc_sig_iter.next().is_none());
+ assert!(htlc_counterparty_sig_iter.next().is_none());
} }
}
payment_hash: PaymentHash([0; 32]),
state: InboundHTLCState::Committed,
};
- out.payment_hash.0 = Sha256::hash(&hex::decode("0000000000000000000000000000000000000000000000000000000000000000").unwrap()).into_inner();
+ out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0000000000000000000000000000000000000000000000000000000000000000").unwrap()).to_byte_array();
out
});
chan.context.pending_inbound_htlcs.push({
payment_hash: PaymentHash([0; 32]),
state: InboundHTLCState::Committed,
};
- out.payment_hash.0 = Sha256::hash(&hex::decode("0101010101010101010101010101010101010101010101010101010101010101").unwrap()).into_inner();
+ out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0101010101010101010101010101010101010101010101010101010101010101").unwrap()).to_byte_array();
out
});
chan.context.pending_outbound_htlcs.push({
payment_hash: PaymentHash([0; 32]),
state: OutboundHTLCState::Committed,
source: HTLCSource::dummy(),
+ skimmed_fee_msat: None,
+ blinding_point: None,
};
- out.payment_hash.0 = Sha256::hash(&hex::decode("0202020202020202020202020202020202020202020202020202020202020202").unwrap()).into_inner();
+ out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0202020202020202020202020202020202020202020202020202020202020202").unwrap()).to_byte_array();
out
});
chan.context.pending_outbound_htlcs.push({
payment_hash: PaymentHash([0; 32]),
state: OutboundHTLCState::Committed,
source: HTLCSource::dummy(),
+ skimmed_fee_msat: None,
+ blinding_point: None,
};
- out.payment_hash.0 = Sha256::hash(&hex::decode("0303030303030303030303030303030303030303030303030303030303030303").unwrap()).into_inner();
+ out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0303030303030303030303030303030303030303030303030303030303030303").unwrap()).to_byte_array();
out
});
chan.context.pending_inbound_htlcs.push({
payment_hash: PaymentHash([0; 32]),
state: InboundHTLCState::Committed,
};
- out.payment_hash.0 = Sha256::hash(&hex::decode("0404040404040404040404040404040404040404040404040404040404040404").unwrap()).into_inner();
+ out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0404040404040404040404040404040404040404040404040404040404040404").unwrap()).to_byte_array();
out
});
chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
chan.context.feerate_per_kw = 2185;
chan.context.holder_dust_limit_satoshis = 2001;
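+		// The anchor vectors flip the channel type back and forth; stash the original
+		// `static_remote_key` type so the non-anchor vectors below can restore it.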
+ let cached_channel_type = chan.context.channel_type;
+ chan.context.channel_type = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
test_commitment_with_anchors!("3044022040f63a16148cf35c8d3d41827f5ae7f7c3746885bb64d4d1b895892a83812b3e02202fcf95c2bf02c466163b3fa3ced6a24926fbb4035095a96842ef516e86ba54c0",
"3045022100cd8479cfe1edb1e5a1d487391e0451a469c7171e51e680183f19eb4321f20e9b02204eab7d5a6384b1b08e03baa6e4d9748dfd2b5ab2bae7e39604a0d0055bbffdd5",
chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
chan.context.feerate_per_kw = 3702;
chan.context.holder_dust_limit_satoshis = 546;
+ chan.context.channel_type = cached_channel_type.clone();
test_commitment!("304502210092a587aeb777f869e7ff0d7898ea619ee26a3dacd1f3672b945eea600be431100220077ee9eae3528d15251f2a52b607b189820e57a6ccfac8d1af502b132ee40169",
"3045022100e5efb73c32d32da2d79702299b6317de6fb24a60476e3855926d78484dd1b3c802203557cb66a42c944ef06e00bcc4da35a5bcb2f185aab0f8e403e519e1d66aaf75",
chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
chan.context.feerate_per_kw = 3687;
chan.context.holder_dust_limit_satoshis = 3001;
+ chan.context.channel_type = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
test_commitment_with_anchors!("3045022100ad6c71569856b2d7ff42e838b4abe74a713426b37f22fa667a195a4c88908c6902202b37272b02a42dc6d9f4f82cab3eaf84ac882d9ed762859e1e75455c2c228377",
"3045022100c970799bcb33f43179eb43b3378a0a61991cf2923f69b36ef12548c3df0e6d500220413dc27d2e39ee583093adfcb7799be680141738babb31cc7b0669a777a31f5d",
chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
chan.context.feerate_per_kw = 4914;
chan.context.holder_dust_limit_satoshis = 546;
+ chan.context.channel_type = cached_channel_type.clone();
test_commitment!("3045022100b4b16d5f8cc9fc4c1aff48831e832a0d8990e133978a66e302c133550954a44d022073573ce127e2200d316f6b612803a5c0c97b8d20e1e44dbe2ac0dd2fb8c95244",
"3045022100d72638bc6308b88bb6d45861aae83e5b9ff6e10986546e13bce769c70036e2620220320be7c6d66d22f30b9fcd52af66531505b1310ca3b848c19285b38d8a1a8c19",
chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
chan.context.feerate_per_kw = 4894;
chan.context.holder_dust_limit_satoshis = 4001;
+ chan.context.channel_type = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
test_commitment_with_anchors!("3045022100e784a66b1588575801e237d35e510fd92a81ae3a4a2a1b90c031ad803d07b3f3022021bc5f16501f167607d63b681442da193eb0a76b4b7fd25c2ed4f8b28fd35b95",
"30450221009f16ac85d232e4eddb3fcd750a68ebf0b58e3356eaada45d3513ede7e817bf4c02207c2b043b4e5f971261975406cb955219fa56bffe5d834a833694b5abc1ce4cfd",
chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
chan.context.feerate_per_kw = 9651180;
chan.context.holder_dust_limit_satoshis = 546;
+ chan.context.channel_type = cached_channel_type.clone();
test_commitment!("304402200a8544eba1d216f5c5e530597665fa9bec56943c0f66d98fc3d028df52d84f7002201e45fa5c6bc3a506cc2553e7d1c0043a9811313fc39c954692c0d47cfce2bbd3",
"3045022100e11b638c05c650c2f63a421d36ef8756c5ce82f2184278643520311cdf50aa200220259565fb9c8e4a87ccaf17f27a3b9ca4f20625754a0920d9c6c239d8156a11de",
chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
chan.context.feerate_per_kw = 6216010;
chan.context.holder_dust_limit_satoshis = 4001;
+ chan.context.channel_type = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
test_commitment_with_anchors!("30450221008fd5dbff02e4b59020d4cd23a3c30d3e287065fda75a0a09b402980adf68ccda022001e0b8b620cd915ddff11f1de32addf23d81d51b90e6841b2cb8dcaf3faa5ecf",
"30450221009ad80792e3038fe6968d12ff23e6888a565c3ddd065037f357445f01675d63f3022018384915e5f1f4ae157e15debf4f49b61c8d9d2b073c7d6f97c4a68caa3ed4c1",
chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
chan.context.feerate_per_kw = 9651936;
chan.context.holder_dust_limit_satoshis = 546;
+ chan.context.channel_type = cached_channel_type;
test_commitment!("304402202ade0142008309eb376736575ad58d03e5b115499709c6db0b46e36ff394b492022037b63d78d66404d6504d4c4ac13be346f3d1802928a6d3ad95a6a944227161a2",
"304402207e8d51e0c570a5868a78414f4e0cbfaed1106b171b9581542c30718ee4eb95ba02203af84194c97adf98898c9afe2f2ed4a7f8dba05a2dfab28ac9d9c604aa49a379",
payment_hash: PaymentHash([0; 32]),
state: InboundHTLCState::Committed,
};
- out.payment_hash.0 = Sha256::hash(&hex::decode("0101010101010101010101010101010101010101010101010101010101010101").unwrap()).into_inner();
+ out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0101010101010101010101010101010101010101010101010101010101010101").unwrap()).to_byte_array();
out
});
chan.context.pending_outbound_htlcs.clear();
payment_hash: PaymentHash([0; 32]),
state: OutboundHTLCState::Committed,
source: HTLCSource::dummy(),
+ skimmed_fee_msat: None,
+ blinding_point: None,
};
- out.payment_hash.0 = Sha256::hash(&hex::decode("0505050505050505050505050505050505050505050505050505050505050505").unwrap()).into_inner();
+ out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0505050505050505050505050505050505050505050505050505050505050505").unwrap()).to_byte_array();
out
});
chan.context.pending_outbound_htlcs.push({
payment_hash: PaymentHash([0; 32]),
state: OutboundHTLCState::Committed,
source: HTLCSource::dummy(),
+ skimmed_fee_msat: None,
+ blinding_point: None,
};
- out.payment_hash.0 = Sha256::hash(&hex::decode("0505050505050505050505050505050505050505050505050505050505050505").unwrap()).into_inner();
+ out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0505050505050505050505050505050505050505050505050505050505050505").unwrap()).to_byte_array();
out
});
"020000000001014bdccf28653066a2c554cafeffdfe1e678e64a69b056684deb0c4fba909423ec02000000000000000001e1120000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004730440220471c9f3ad92e49b13b7b8059f43ecf8f7887b0dccbb9fdb54bfe23d62a8ae332022024bd22fae0740e86a44228c35330da9526fd7306dffb2b9dc362d5e78abef7cc0147304402207157f452f2506d73c315192311893800cfb3cc235cc1185b1cfcc136b55230db022014be242dbc6c5da141fec4034e7f387f74d6ff1899453d72ba957467540e1ecb01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9142002cc93ebefbb1b73f0af055dcc27a0b504ad7688ac6868fa010000" }
} );
+ chan.context.channel_type = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
test_commitment_with_anchors!("3044022027b38dfb654c34032ffb70bb43022981652fce923cbbe3cbe7394e2ade8b34230220584195b78da6e25c2e8da6b4308d9db25b65b64975db9266163ef592abb7c725",
"3045022100b4014970d9d7962853f3f85196144671d7d5d87426250f0a5fdaf9a55292e92502205360910c9abb397467e19dbd63d081deb4a3240903114c98cec0a23591b79b76",
"02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80074a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994d007000000000000220020fe0598d74fee2205cc3672e6e6647706b4f3099713b4661b62482c3addd04a5e881300000000000022002018e40f9072c44350f134bdc887bab4d9bdfc8aa468a25616c80e21757ba5dac7881300000000000022002018e40f9072c44350f134bdc887bab4d9bdfc8aa468a25616c80e21757ba5dac7c0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994aad9c6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100b4014970d9d7962853f3f85196144671d7d5d87426250f0a5fdaf9a55292e92502205360910c9abb397467e19dbd63d081deb4a3240903114c98cec0a23591b79b7601473044022027b38dfb654c34032ffb70bb43022981652fce923cbbe3cbe7394e2ade8b34230220584195b78da6e25c2e8da6b4308d9db25b65b64975db9266163ef592abb7c72501475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
// Test vectors from BOLT 3 Appendix D:
let mut seed = [0; 32];
- seed[0..32].clone_from_slice(&hex::decode("0000000000000000000000000000000000000000000000000000000000000000").unwrap());
+ seed[0..32].clone_from_slice(&<Vec<u8>>::from_hex("0000000000000000000000000000000000000000000000000000000000000000").unwrap());
assert_eq!(chan_utils::build_commitment_secret(&seed, 281474976710655),
- hex::decode("02a40c85b6f28da08dfdbe0926c53fab2de6d28c10301f8f7c4073d5e42e3148").unwrap()[..]);
+ <Vec<u8>>::from_hex("02a40c85b6f28da08dfdbe0926c53fab2de6d28c10301f8f7c4073d5e42e3148").unwrap()[..]);
- seed[0..32].clone_from_slice(&hex::decode("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF").unwrap());
+ seed[0..32].clone_from_slice(&<Vec<u8>>::from_hex("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF").unwrap());
assert_eq!(chan_utils::build_commitment_secret(&seed, 281474976710655),
- hex::decode("7cc854b54e3e0dcdb010d7a3fee464a9687be6e8db3be6854c475621e007a5dc").unwrap()[..]);
+ <Vec<u8>>::from_hex("7cc854b54e3e0dcdb010d7a3fee464a9687be6e8db3be6854c475621e007a5dc").unwrap()[..]);
assert_eq!(chan_utils::build_commitment_secret(&seed, 0xaaaaaaaaaaa),
- hex::decode("56f4008fb007ca9acf0e15b054d5c9fd12ee06cea347914ddbaed70d1c13a528").unwrap()[..]);
+ <Vec<u8>>::from_hex("56f4008fb007ca9acf0e15b054d5c9fd12ee06cea347914ddbaed70d1c13a528").unwrap()[..]);
assert_eq!(chan_utils::build_commitment_secret(&seed, 0x555555555555),
- hex::decode("9015daaeb06dba4ccc05b91b2f73bd54405f2be9f217fbacd3c5ac2e62327d31").unwrap()[..]);
+ <Vec<u8>>::from_hex("9015daaeb06dba4ccc05b91b2f73bd54405f2be9f217fbacd3c5ac2e62327d31").unwrap()[..]);
- seed[0..32].clone_from_slice(&hex::decode("0101010101010101010101010101010101010101010101010101010101010101").unwrap());
+ seed[0..32].clone_from_slice(&<Vec<u8>>::from_hex("0101010101010101010101010101010101010101010101010101010101010101").unwrap());
assert_eq!(chan_utils::build_commitment_secret(&seed, 1),
- hex::decode("915c75942a26bb3a433a8ce2cb0427c29ec6c1775cfc78328b57f6ba7bfeaa9c").unwrap()[..]);
+ <Vec<u8>>::from_hex("915c75942a26bb3a433a8ce2cb0427c29ec6c1775cfc78328b57f6ba7bfeaa9c").unwrap()[..]);
}
#[test]
// Test vectors from BOLT 3 Appendix E:
let secp_ctx = Secp256k1::new();
- let base_secret = SecretKey::from_slice(&hex::decode("000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f").unwrap()[..]).unwrap();
- let per_commitment_secret = SecretKey::from_slice(&hex::decode("1f1e1d1c1b1a191817161514131211100f0e0d0c0b0a09080706050403020100").unwrap()[..]).unwrap();
+ let base_secret = SecretKey::from_slice(&<Vec<u8>>::from_hex("000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f").unwrap()[..]).unwrap();
+ let per_commitment_secret = SecretKey::from_slice(&<Vec<u8>>::from_hex("1f1e1d1c1b1a191817161514131211100f0e0d0c0b0a09080706050403020100").unwrap()[..]).unwrap();
let base_point = PublicKey::from_secret_key(&secp_ctx, &base_secret);
- assert_eq!(base_point.serialize()[..], hex::decode("036d6caac248af96f6afa7f904f550253a0f3ef3f5aa2fe6838a95b216691468e2").unwrap()[..]);
+ assert_eq!(base_point.serialize()[..], <Vec<u8>>::from_hex("036d6caac248af96f6afa7f904f550253a0f3ef3f5aa2fe6838a95b216691468e2").unwrap()[..]);
let per_commitment_point = PublicKey::from_secret_key(&secp_ctx, &per_commitment_secret);
- assert_eq!(per_commitment_point.serialize()[..], hex::decode("025f7117a78150fe2ef97db7cfc83bd57b2e2c0d0dd25eaf467a4a1c2a45ce1486").unwrap()[..]);
-
- assert_eq!(chan_utils::derive_public_key(&secp_ctx, &per_commitment_point, &base_point).serialize()[..],
- hex::decode("0235f2dbfaa89b57ec7b055afe29849ef7ddfeb1cefdb9ebdc43f5494984db29e5").unwrap()[..]);
+ assert_eq!(per_commitment_point.serialize()[..], <Vec<u8>>::from_hex("025f7117a78150fe2ef97db7cfc83bd57b2e2c0d0dd25eaf467a4a1c2a45ce1486").unwrap()[..]);
assert_eq!(chan_utils::derive_private_key(&secp_ctx, &per_commitment_point, &base_secret),
- SecretKey::from_slice(&hex::decode("cbced912d3b21bf196a766651e436aff192362621ce317704ea2f75d87e7be0f").unwrap()[..]).unwrap());
+ SecretKey::from_slice(&<Vec<u8>>::from_hex("cbced912d3b21bf196a766651e436aff192362621ce317704ea2f75d87e7be0f").unwrap()[..]).unwrap());
- assert_eq!(chan_utils::derive_public_revocation_key(&secp_ctx, &per_commitment_point, &base_point).serialize()[..],
- hex::decode("02916e326636d19c33f13e8c0c3a03dd157f332f3e99c317c141dd865eb01f8ff0").unwrap()[..]);
+ assert_eq!(RevocationKey::from_basepoint(&secp_ctx, &RevocationBasepoint::from(base_point), &per_commitment_point).to_public_key().serialize()[..],
+ <Vec<u8>>::from_hex("02916e326636d19c33f13e8c0c3a03dd157f332f3e99c317c141dd865eb01f8ff0").unwrap()[..]);
assert_eq!(chan_utils::derive_private_revocation_key(&secp_ctx, &per_commitment_secret, &base_secret),
- SecretKey::from_slice(&hex::decode("d09ffff62ddb2297ab000cc85bcb4283fdeb6aa052affbc9dddcf33b61078110").unwrap()[..]).unwrap());
+ SecretKey::from_slice(&<Vec<u8>>::from_hex("d09ffff62ddb2297ab000cc85bcb4283fdeb6aa052affbc9dddcf33b61078110").unwrap()[..]).unwrap());
}
#[test]
let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
let config = UserConfig::default();
- let node_a_chan = Channel::<EnforcingSigner>::new_outbound(&feeest, &&keys_provider, &&keys_provider,
- node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42).unwrap();
+ let node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider,
+ node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None).unwrap();
let mut channel_type_features = ChannelTypeFeatures::only_static_remote_key();
channel_type_features.set_zero_conf_required();
- let mut open_channel_msg = node_a_chan.get_open_channel(genesis_block(network).header.block_hash());
+ let mut open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network));
open_channel_msg.channel_type = Some(channel_type_features);
let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
- let res = Channel::<EnforcingSigner>::new_from_req(&feeest, &&keys_provider, &&keys_provider,
+ let res = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider,
node_b_node_id, &channelmanager::provided_channel_type_features(&config),
- &channelmanager::provided_init_features(&config), &open_channel_msg, 7, &config, 0, &&logger, 42);
+ &channelmanager::provided_init_features(&config), &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false);
assert!(res.is_ok());
}
- #[cfg(anchors)]
#[test]
fn test_supports_anchors_zero_htlc_tx_fee() {
		// Tests that if both sides support and negotiate `anchors_zero_fee_htlc_tx`, it is the
		// resulting `channel_type`.
// It is not enough for just the initiator to signal `option_anchors_zero_fee_htlc_tx`, both
// need to signal it.
- let channel_a = Channel::<EnforcingSigner>::new_outbound(
+ let channel_a = OutboundV1Channel::<&TestKeysInterface>::new(
&fee_estimator, &&keys_provider, &&keys_provider, node_id_b,
&channelmanager::provided_init_features(&UserConfig::default()), 10000000, 100000, 42,
- &config, 0, 42
+ &config, 0, 42, None
).unwrap();
assert!(!channel_a.context.channel_type.supports_anchors_zero_fee_htlc_tx());
expected_channel_type.set_static_remote_key_required();
expected_channel_type.set_anchors_zero_fee_htlc_tx_required();
- let channel_a = Channel::<EnforcingSigner>::new_outbound(
+ let channel_a = OutboundV1Channel::<&TestKeysInterface>::new(
&fee_estimator, &&keys_provider, &&keys_provider, node_id_b,
- &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42
+ &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42,
+ None
).unwrap();
- let open_channel_msg = channel_a.get_open_channel(genesis_block(network).header.block_hash());
- let channel_b = Channel::<EnforcingSigner>::new_from_req(
+ let open_channel_msg = channel_a.get_open_channel(ChainHash::using_genesis_block(network));
+ let channel_b = InboundV1Channel::<&TestKeysInterface>::new(
&fee_estimator, &&keys_provider, &&keys_provider, node_id_a,
&channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config),
- &open_channel_msg, 7, &config, 0, &&logger, 42
+ &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false
).unwrap();
assert_eq!(channel_a.context.channel_type, expected_channel_type);
assert_eq!(channel_b.context.channel_type, expected_channel_type);
}
- #[cfg(anchors)]
#[test]
fn test_rejects_implicit_simple_anchors() {
		// Tests that if `option_anchors` is being negotiated implicitly through the intersection of
		// each side's `InitFeatures`, it is rejected.
let raw_init_features = static_remote_key_required | simple_anchors_required;
let init_features_with_simple_anchors = InitFeatures::from_le_bytes(raw_init_features.to_le_bytes().to_vec());
- let channel_a = Channel::<EnforcingSigner>::new_outbound(
+ let channel_a = OutboundV1Channel::<&TestKeysInterface>::new(
&fee_estimator, &&keys_provider, &&keys_provider, node_id_b,
- &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42
+ &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42,
+ None
).unwrap();
// Set `channel_type` to `None` to force the implicit feature negotiation.
- let mut open_channel_msg = channel_a.get_open_channel(genesis_block(network).header.block_hash());
+ let mut open_channel_msg = channel_a.get_open_channel(ChainHash::using_genesis_block(network));
open_channel_msg.channel_type = None;
// Since A supports both `static_remote_key` and `option_anchors`, but B only accepts
// `static_remote_key`, it will fail the channel.
- let channel_b = Channel::<EnforcingSigner>::new_from_req(
+ let channel_b = InboundV1Channel::<&TestKeysInterface>::new(
&fee_estimator, &&keys_provider, &&keys_provider, node_id_a,
&channelmanager::provided_channel_type_features(&config), &init_features_with_simple_anchors,
- &open_channel_msg, 7, &config, 0, &&logger, 42
+ &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false
);
assert!(channel_b.is_err());
}
- #[cfg(anchors)]
#[test]
fn test_rejects_simple_anchors_channel_type() {
		// Tests that if `option_anchors` is being negotiated through the `channel_type` feature,
		// it is rejected.
let simple_anchors_raw_features = static_remote_key_required | simple_anchors_required;
let simple_anchors_init = InitFeatures::from_le_bytes(simple_anchors_raw_features.to_le_bytes().to_vec());
let simple_anchors_channel_type = ChannelTypeFeatures::from_le_bytes(simple_anchors_raw_features.to_le_bytes().to_vec());
- assert!(simple_anchors_init.requires_unknown_bits());
- assert!(simple_anchors_channel_type.requires_unknown_bits());
+ assert!(!simple_anchors_init.requires_unknown_bits());
+ assert!(!simple_anchors_channel_type.requires_unknown_bits());
// First, we'll try to open a channel between A and B where A requests a channel type for
// the original `option_anchors` feature (non zero fee htlc tx). This should be rejected by
// B as it's not supported by LDK.
- let channel_a = Channel::<EnforcingSigner>::new_outbound(
+ let channel_a = OutboundV1Channel::<&TestKeysInterface>::new(
&fee_estimator, &&keys_provider, &&keys_provider, node_id_b,
- &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42
+ &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42,
+ None
).unwrap();
- let mut open_channel_msg = channel_a.get_open_channel(genesis_block(network).header.block_hash());
+ let mut open_channel_msg = channel_a.get_open_channel(ChainHash::using_genesis_block(network));
open_channel_msg.channel_type = Some(simple_anchors_channel_type.clone());
- let res = Channel::<EnforcingSigner>::new_from_req(
+ let res = InboundV1Channel::<&TestKeysInterface>::new(
&fee_estimator, &&keys_provider, &&keys_provider, node_id_a,
&channelmanager::provided_channel_type_features(&config), &simple_anchors_init,
- &open_channel_msg, 7, &config, 0, &&logger, 42
+ &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false
);
assert!(res.is_err());
// `anchors_zero_fee_htlc_tx`. B is malicious and tries to downgrade the channel type to the
// original `option_anchors` feature, which should be rejected by A as it's not supported by
// LDK.
- let mut channel_a = Channel::<EnforcingSigner>::new_outbound(
+ let mut channel_a = OutboundV1Channel::<&TestKeysInterface>::new(
&fee_estimator, &&keys_provider, &&keys_provider, node_id_b, &simple_anchors_init,
- 10000000, 100000, 42, &config, 0, 42
+ 10000000, 100000, 42, &config, 0, 42, None
).unwrap();
- let open_channel_msg = channel_a.get_open_channel(genesis_block(network).header.block_hash());
+ let open_channel_msg = channel_a.get_open_channel(ChainHash::using_genesis_block(network));
- let channel_b = Channel::<EnforcingSigner>::new_from_req(
+ let channel_b = InboundV1Channel::<&TestKeysInterface>::new(
&fee_estimator, &&keys_provider, &&keys_provider, node_id_a,
&channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config),
- &open_channel_msg, 7, &config, 0, &&logger, 42
+ &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false
).unwrap();
let mut accept_channel_msg = channel_b.get_accept_channel_message();
);
assert!(res.is_err());
}
+
+ #[test]
+ fn test_waiting_for_batch() {
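+		// A channel funded as part of a batch must withhold channel_ready (and the funding
+		// broadcast) until ChannelManager marks the whole batch ready via set_batch_ready.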
+ let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
+ let logger = test_utils::TestLogger::new();
+ let secp_ctx = Secp256k1::new();
+ let seed = [42; 32];
+ let network = Network::Testnet;
+ let best_block = BestBlock::from_network(network);
+ let chain_hash = ChainHash::using_genesis_block(network);
+ let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
+
+ let mut config = UserConfig::default();
+		// Enable trust_own_funding_0conf to verify that, even then, we don't send
+		// channel_ready for a channel in a batch before all channels in the batch are ready.
+ config.channel_handshake_limits.trust_own_funding_0conf = true;
+
+ // Create a channel from node a to node b that will be part of batch funding.
+ let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
+ let mut node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(
+ &feeest,
+ &&keys_provider,
+ &&keys_provider,
+ node_b_node_id,
+ &channelmanager::provided_init_features(&config),
+ 10000000,
+ 100000,
+ 42,
+ &config,
+ 0,
+ 42,
+ None
+ ).unwrap();
+
+ let open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network));
+ let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
+ let mut node_b_chan = InboundV1Channel::<&TestKeysInterface>::new(
+ &feeest,
+ &&keys_provider,
+ &&keys_provider,
+ node_b_node_id,
+ &channelmanager::provided_channel_type_features(&config),
+ &channelmanager::provided_init_features(&config),
+ &open_channel_msg,
+ 7,
+ &config,
+ 0,
+ &&logger,
+ true, // Allow node b to send a 0conf channel_ready.
+ ).unwrap();
+
+ let accept_channel_msg = node_b_chan.accept_inbound_channel();
+ node_a_chan.accept_channel(
+ &accept_channel_msg,
+ &config.channel_handshake_limits,
+ &channelmanager::provided_init_features(&config),
+ ).unwrap();
+
+ // Fund the channel with a batch funding transaction.
+ let output_script = node_a_chan.context.get_funding_redeemscript();
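+		// Output 0 funds this channel; the extra output stands in for another channel
+		// sharing the same batch funding transaction.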
+ let tx = Transaction {
+ version: 1,
+ lock_time: LockTime::ZERO,
+ input: Vec::new(),
+ output: vec![
+ TxOut {
+ value: 10000000, script_pubkey: output_script.clone(),
+ },
+ TxOut {
+ value: 10000000, script_pubkey: Builder::new().into_script(),
+ },
+ ]};
+ let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 };
+ let funding_created_msg = node_a_chan.get_funding_created(
+			tx.clone(), funding_outpoint, /*is_batch_funding=*/ true, &&logger,
+ ).map_err(|_| ()).unwrap();
+ let (mut node_b_chan, funding_signed_msg, _) = node_b_chan.funding_created(
+ &funding_created_msg.unwrap(),
+ best_block,
+ &&keys_provider,
+ &&logger,
+ ).map_err(|_| ()).unwrap();
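+		// Node B trusts its own 0conf funding (`true` above), so restoring its monitor
+		// immediately yields the channel_ready we deliver to node A below.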
+ let node_b_updates = node_b_chan.monitor_updating_restored(
+ &&logger,
+ &&keys_provider,
+ chain_hash,
+ &config,
+ 0,
+ );
+
+		// Receive funding_signed; the channel is configured to withhold channel_ready and
+		// the funding broadcast until the whole batch is ready.
+ let res = node_a_chan.funding_signed(
+ &funding_signed_msg.unwrap(), best_block, &&keys_provider, &&logger,
+ );
+ let (mut node_a_chan, _) = if let Ok(res) = res { res } else { panic!(); };
+ let node_a_updates = node_a_chan.monitor_updating_restored(
+ &&logger,
+ &&keys_provider,
+ chain_hash,
+ &config,
+ 0,
+ );
+		// Our channel_ready shouldn't be sent yet, even with trust_own_funding_0conf set,
+		// because the shared funding transaction must not be broadcast until every channel
+		// in the batch is ready.
+ assert!(node_a_updates.channel_ready.is_none());
+ assert!(node_a_updates.funding_broadcastable.is_none());
+ assert_eq!(node_a_chan.context.channel_state, ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::WAITING_FOR_BATCH));
+
+ // It is possible to receive a 0conf channel_ready from the remote node.
+ node_a_chan.channel_ready(
+ &node_b_updates.channel_ready.unwrap(),
+ &&keys_provider,
+ chain_hash,
+ &config,
+ &best_block,
+ &&logger,
+ ).unwrap();
+ assert_eq!(
+ node_a_chan.context.channel_state,
+ ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::WAITING_FOR_BATCH | AwaitingChannelReadyFlags::THEIR_CHANNEL_READY)
+ );
+
+		// The WAITING_FOR_BATCH flag is cleared only when ChannelManager calls set_batch_ready.
+ node_a_chan.set_batch_ready();
+ assert_eq!(node_a_chan.context.channel_state, ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::THEIR_CHANNEL_READY));
+ assert!(node_a_chan.check_get_channel_ready(0).is_some());
+ }
}