diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index 469eddd6..46e39ffd 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -7,6 +7,7 @@ // You may not use this file except in accordance with one or both of these // licenses. +use bitcoin::amount::Amount; use bitcoin::blockdata::constants::ChainHash; use bitcoin::blockdata::script::{Script, ScriptBuf, Builder}; use bitcoin::blockdata::transaction::Transaction; @@ -24,12 +25,13 @@ use bitcoin::secp256k1::{PublicKey,SecretKey}; use bitcoin::secp256k1::{Secp256k1,ecdsa::Signature}; use bitcoin::secp256k1; -use crate::ln::{ChannelId, PaymentPreimage, PaymentHash}; +use crate::ln::types::{ChannelId, PaymentPreimage, PaymentHash}; use crate::ln::features::{ChannelTypeFeatures, InitFeatures}; use crate::ln::msgs; use crate::ln::msgs::DecodeError; use crate::ln::script::{self, ShutdownScript}; -use crate::ln::channelmanager::{self, CounterpartyForwardingInfo, PendingHTLCStatus, HTLCSource, SentHTLCId, HTLCFailureMsg, PendingHTLCInfo, RAACommitmentOrder, BREAKDOWN_TIMEOUT, MIN_CLTV_EXPIRY_DELTA, MAX_LOCAL_BREAKDOWN_TIMEOUT, ChannelShutdownState}; +use crate::ln::channel_state::{ChannelShutdownState, CounterpartyForwardingInfo, InboundHTLCDetails, InboundHTLCStateDetails, OutboundHTLCDetails, OutboundHTLCStateDetails}; +use crate::ln::channelmanager::{self, PendingHTLCStatus, HTLCSource, SentHTLCId, HTLCFailureMsg, PendingHTLCInfo, RAACommitmentOrder, BREAKDOWN_TIMEOUT, MIN_CLTV_EXPIRY_DELTA, MAX_LOCAL_BREAKDOWN_TIMEOUT}; use crate::ln::chan_utils::{CounterpartyCommitmentSecrets, TxCreationKeys, HTLCOutputInCommitment, htlc_success_tx_weight, htlc_timeout_tx_weight, make_funding_redeemscript, ChannelPublicKeys, CommitmentTransaction, HolderCommitmentTransaction, ChannelTransactionParameters, CounterpartyChannelTransactionParameters, MAX_HTLCS, get_commitment_transaction_number_obscure_factor, ClosingTransaction}; use crate::ln::chan_utils; use crate::ln::onion_utils::HTLCFailReason; @@ -37,11 +39,12 @@ use crate::chain::BestBlock; use crate::chain::chaininterface::{FeeEstimator, ConfirmationTarget, LowerBoundedFeeEstimator}; use crate::chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, ChannelMonitorUpdateStep, LATENCY_GRACE_PERIOD_BLOCKS, CLOSED_CHANNEL_UPDATE_ID}; use crate::chain::transaction::{OutPoint, TransactionData}; -use crate::sign::{EcdsaChannelSigner, WriteableEcdsaChannelSigner, EntropySource, ChannelSigner, SignerProvider, NodeSigner, Recipient}; +use crate::sign::ecdsa::EcdsaChannelSigner; +use crate::sign::{EntropySource, ChannelSigner, SignerProvider, NodeSigner, Recipient}; use crate::events::ClosureReason; use crate::routing::gossip::NodeId; use crate::util::ser::{Readable, ReadableArgs, Writeable, Writer}; -use crate::util::logger::Logger; +use crate::util::logger::{Logger, Record, WithContext}; use crate::util::errors::APIError; use crate::util::config::{UserConfig, ChannelConfig, LegacyChannelConfig, ChannelHandshakeConfig, ChannelHandshakeLimits, MaxDustHTLCExposure}; use crate::util::scid_utils::scid_from_parts; @@ -49,7 +52,6 @@ use crate::util::scid_utils::scid_from_parts; use crate::io; use crate::prelude::*; use core::{cmp,mem,fmt}; -use core::convert::TryInto; use core::ops::Deref;
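// Illustrative sketch (not part of the diff): the logger imports above now pull in
// `Record` and `WithContext` because this patch routes channel logs through wrappers
// (see `WithChannelContext` later in the patch) that stamp `peer_id`, `channel_id`,
// and `payment_hash` onto each `Record` before it reaches the user-supplied `Logger`.
// A minimal consumer, written from an application's point of view, might look like
// the following; the `level` and `args` field names are assumptions based on the
// upstream `Record` type, not something this diff defines.
use lightning::util::logger::{Logger, Record};

struct StdoutLogger;

impl Logger for StdoutLogger {
	fn log(&self, record: Record) {
		// `channel_id` is `None` for records logged without channel context.
		let chan = record.channel_id
			.map(|id| id.to_string())
			.unwrap_or_else(|| "-".to_owned());
		println!("{} [chan {}] {}", record.level, chan, record.args);
	}
}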
#[cfg(any(test, fuzzing, debug_assertions))] use crate::sync::Mutex; @@ -103,10 +105,38 @@ enum InboundHTLCRemovalReason { Fulfill(PaymentPreimage), } +/// Represents the resolution status of an inbound HTLC. +#[derive(Clone)] +enum InboundHTLCResolution { + /// Resolved implies the action we must take with the inbound HTLC has already been determined, + /// i.e., we already know whether it must be failed back or forwarded. + // + // TODO: Once this variant is removed, we should also clean up + // [`MonitorRestoreUpdates::accepted_htlcs`] as the path will be unreachable. + Resolved { + pending_htlc_status: PendingHTLCStatus, + }, + /// Pending implies we will attempt to resolve the inbound HTLC once it has been fully committed + /// to by both sides of the channel, i.e., once a `revoke_and_ack` has been processed by both + /// nodes for the state update in which it was proposed. + Pending { + update_add_htlc: msgs::UpdateAddHTLC, + }, +} + +impl_writeable_tlv_based_enum!(InboundHTLCResolution, + (0, Resolved) => { + (0, pending_htlc_status, required), + }, + (2, Pending) => { + (0, update_add_htlc, required), + }, +); + enum InboundHTLCState { /// Offered by remote, to be included in next local commitment tx. I.e., the remote sent an /// update_add_htlc message for this HTLC. - RemoteAnnounced(PendingHTLCStatus), + RemoteAnnounced(InboundHTLCResolution), /// Included in a received commitment_signed message (implying we've /// revoke_and_ack'd it), but the remote hasn't yet revoked their previous /// state (see the example below). We have not yet included this HTLC in a @@ -136,13 +166,13 @@ enum InboundHTLCState { /// Implies AwaitingRemoteRevoke. /// /// [BOLT #2]: https://github.com/lightning/bolts/blob/master/02-peer-protocol.md - AwaitingRemoteRevokeToAnnounce(PendingHTLCStatus), + AwaitingRemoteRevokeToAnnounce(InboundHTLCResolution), /// Included in a received commitment_signed message (implying we've revoke_and_ack'd it). /// We have also included this HTLC in our latest commitment_signed and are now just waiting /// on the remote's revoke_and_ack to make this HTLC an irrevocable part of the state of the /// channel (before it can then get forwarded and/or removed). /// Implies AwaitingRemoteRevoke. - AwaitingAnnouncedRemoteRevoke(PendingHTLCStatus), + AwaitingAnnouncedRemoteRevoke(InboundHTLCResolution), Committed, /// Removed by us and a new commitment_signed was sent (if we were AwaitingRemoteRevoke when we /// created it we would have put it in the holding cell instead). 
When they next revoke_and_ack @@ -157,6 +187,26 @@ enum InboundHTLCState { LocalRemoved(InboundHTLCRemovalReason), } +impl From<&InboundHTLCState> for Option<InboundHTLCStateDetails> { + fn from(state: &InboundHTLCState) -> Option<InboundHTLCStateDetails> { + match state { + InboundHTLCState::RemoteAnnounced(_) => None, + InboundHTLCState::AwaitingRemoteRevokeToAnnounce(_) => + Some(InboundHTLCStateDetails::AwaitingRemoteRevokeToAdd), + InboundHTLCState::AwaitingAnnouncedRemoteRevoke(_) => + Some(InboundHTLCStateDetails::AwaitingRemoteRevokeToAdd), + InboundHTLCState::Committed => + Some(InboundHTLCStateDetails::Committed), + InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailRelay(_)) => + Some(InboundHTLCStateDetails::AwaitingRemoteRevokeToRemoveFail), + InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailMalformed(_)) => + Some(InboundHTLCStateDetails::AwaitingRemoteRevokeToRemoveFail), + InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::Fulfill(_)) => + Some(InboundHTLCStateDetails::AwaitingRemoteRevokeToRemoveFulfill), + } + } +} + struct InboundHTLCOutput { htlc_id: u64, amount_msat: u64, @@ -165,6 +215,7 @@ struct InboundHTLCOutput { state: InboundHTLCState, } +#[cfg_attr(test, derive(Clone, Debug, PartialEq))] enum OutboundHTLCState { /// Added by us and included in a commitment_signed (if we were AwaitingRemoteRevoke when we /// created it we would have put it in the holding cell instead). When they next revoke_and_ack @@ -197,7 +248,31 @@ enum OutboundHTLCState { AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome), } +impl From<&OutboundHTLCState> for OutboundHTLCStateDetails { + fn from(state: &OutboundHTLCState) -> OutboundHTLCStateDetails { + match state { + OutboundHTLCState::LocalAnnounced(_) => + OutboundHTLCStateDetails::AwaitingRemoteRevokeToAdd, + OutboundHTLCState::Committed => + OutboundHTLCStateDetails::Committed, + // RemoteRemoved states are ignored as the state is transient and the remote has not committed to + // the state yet. + OutboundHTLCState::RemoteRemoved(_) => + OutboundHTLCStateDetails::Committed, + OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(_)) => + OutboundHTLCStateDetails::AwaitingRemoteRevokeToRemoveSuccess, + OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Failure(_)) => + OutboundHTLCStateDetails::AwaitingRemoteRevokeToRemoveFailure, + OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(_)) => + OutboundHTLCStateDetails::AwaitingRemoteRevokeToRemoveSuccess, + OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Failure(_)) => + OutboundHTLCStateDetails::AwaitingRemoteRevokeToRemoveFailure, + } + } +} + #[derive(Clone)] +#[cfg_attr(test, derive(Debug, PartialEq))] enum OutboundHTLCOutcome { /// LDK version 0.0.105+ will always fill in the preimage here. Success(Option<PaymentPreimage>), @@ -222,6 +297,7 @@ impl<'a> Into<Option<&'a HTLCFailReason>> for &'a OutboundHTLCOutcome { } +#[cfg_attr(test, derive(Clone, Debug, PartialEq))] struct OutboundHTLCOutput { htlc_id: u64, amount_msat: u64, @@ -229,10 +305,12 @@ struct OutboundHTLCOutput { payment_hash: PaymentHash, state: OutboundHTLCState, source: HTLCSource, + blinding_point: Option<PublicKey>, skimmed_fee_msat: Option<u64>, } /// See AwaitingRemoteRevoke ChannelState for more info +#[cfg_attr(test, derive(Clone, Debug, PartialEq))] enum HTLCUpdateAwaitingACK { AddHTLC { // TODO: Time out if we're getting close to cltv_expiry // always outbound amount_msat: u64, cltv_expiry: u32, payment_hash: PaymentHash, source: HTLCSource, onion_routing_packet: msgs::OnionPacket, // The extra fee we're skimming off the top of this HTLC.
skimmed_fee_msat: Option<u64>, + blinding_point: Option<PublicKey>, }, ClaimHTLC { payment_preimage: PaymentPreimage, @@ -252,78 +331,325 @@ enum HTLCUpdateAwaitingACK { htlc_id: u64, err_packet: msgs::OnionErrorPacket, }, + FailMalformedHTLC { + htlc_id: u64, + failure_code: u16, + sha256_of_onion: [u8; 32], + }, +} + +macro_rules! define_state_flags { + ($flag_type_doc: expr, $flag_type: ident, [$(($flag_doc: expr, $flag: ident, $value: expr, $get: ident, $set: ident, $clear: ident)),+], $extra_flags: expr) => { + #[doc = $flag_type_doc] + #[derive(Copy, Clone, Debug, PartialEq, PartialOrd, Eq)] + struct $flag_type(u32); + + impl $flag_type { + $( + #[doc = $flag_doc] + const $flag: $flag_type = $flag_type($value); + )* + + /// All flags that apply to the specified [`ChannelState`] variant. + #[allow(unused)] + const ALL: $flag_type = Self($(Self::$flag.0 | )* $extra_flags); + + #[allow(unused)] + fn new() -> Self { Self(0) } + + #[allow(unused)] + fn from_u32(flags: u32) -> Result<Self, ()> { + if flags & !Self::ALL.0 != 0 { + Err(()) + } else { + Ok($flag_type(flags)) + } + } + + #[allow(unused)] + fn is_empty(&self) -> bool { self.0 == 0 } + #[allow(unused)] + fn is_set(&self, flag: Self) -> bool { *self & flag == flag } + #[allow(unused)] + fn set(&mut self, flag: Self) { *self |= flag } + #[allow(unused)] + fn clear(&mut self, flag: Self) -> Self { self.0 &= !flag.0; *self } + } + + $( + define_state_flags!($flag_type, Self::$flag, $get, $set, $clear); + )* + + impl core::ops::BitOr for $flag_type { + type Output = Self; + fn bitor(self, rhs: Self) -> Self::Output { Self(self.0 | rhs.0) } + } + impl core::ops::BitOrAssign for $flag_type { + fn bitor_assign(&mut self, rhs: Self) { self.0 |= rhs.0; } + } + impl core::ops::BitAnd for $flag_type { + type Output = Self; + fn bitand(self, rhs: Self) -> Self::Output { Self(self.0 & rhs.0) } + } + impl core::ops::BitAndAssign for $flag_type { + fn bitand_assign(&mut self, rhs: Self) { self.0 &= rhs.0; } + } + }; + ($flag_type_doc: expr, $flag_type: ident, $flags: tt) => { + define_state_flags!($flag_type_doc, $flag_type, $flags, 0); + }; + ($flag_type: ident, $flag: expr, $get: ident, $set: ident, $clear: ident) => { + impl $flag_type { + #[allow(unused)] + fn $get(&self) -> bool { self.is_set($flag_type::new() | $flag) } + #[allow(unused)] + fn $set(&mut self) { self.set($flag_type::new() | $flag) } + #[allow(unused)] + fn $clear(&mut self) -> Self { self.clear($flag_type::new() | $flag) } + } + }; + ($flag_type_doc: expr, FUNDED_STATE, $flag_type: ident, $flags: tt) => { + define_state_flags!($flag_type_doc, $flag_type, $flags, FundedStateFlags::ALL.0); + + define_state_flags!($flag_type, FundedStateFlags::PEER_DISCONNECTED, + is_peer_disconnected, set_peer_disconnected, clear_peer_disconnected); + define_state_flags!($flag_type, FundedStateFlags::MONITOR_UPDATE_IN_PROGRESS, + is_monitor_update_in_progress, set_monitor_update_in_progress, clear_monitor_update_in_progress); + define_state_flags!($flag_type, FundedStateFlags::REMOTE_SHUTDOWN_SENT, + is_remote_shutdown_sent, set_remote_shutdown_sent, clear_remote_shutdown_sent); + define_state_flags!($flag_type, FundedStateFlags::LOCAL_SHUTDOWN_SENT, + is_local_shutdown_sent, set_local_shutdown_sent, clear_local_shutdown_sent); + + impl core::ops::BitOr<FundedStateFlags> for $flag_type { + type Output = Self; + fn bitor(self, rhs: FundedStateFlags) -> Self::Output { Self(self.0 | rhs.0) } + } + impl core::ops::BitOrAssign<FundedStateFlags> for $flag_type { + fn bitor_assign(&mut self, rhs: FundedStateFlags) { self.0 |= rhs.0; } + } + impl
core::ops::BitAnd<FundedStateFlags> for $flag_type { + type Output = Self; + fn bitand(self, rhs: FundedStateFlags) -> Self::Output { Self(self.0 & rhs.0) } + } + impl core::ops::BitAndAssign<FundedStateFlags> for $flag_type { + fn bitand_assign(&mut self, rhs: FundedStateFlags) { self.0 &= rhs.0; } + } + impl PartialEq<FundedStateFlags> for $flag_type { + fn eq(&self, other: &FundedStateFlags) -> bool { self.0 == other.0 } + } + impl From<FundedStateFlags> for $flag_type { + fn from(flags: FundedStateFlags) -> Self { Self(flags.0) } + } + }; +} + +/// We declare all the states/flags here together to help determine which bits are still available +/// to choose. +mod state_flags { + pub const OUR_INIT_SENT: u32 = 1 << 0; + pub const THEIR_INIT_SENT: u32 = 1 << 1; + pub const FUNDING_NEGOTIATED: u32 = 1 << 2; + pub const AWAITING_CHANNEL_READY: u32 = 1 << 3; + pub const THEIR_CHANNEL_READY: u32 = 1 << 4; + pub const OUR_CHANNEL_READY: u32 = 1 << 5; + pub const CHANNEL_READY: u32 = 1 << 6; + pub const PEER_DISCONNECTED: u32 = 1 << 7; + pub const MONITOR_UPDATE_IN_PROGRESS: u32 = 1 << 8; + pub const AWAITING_REMOTE_REVOKE: u32 = 1 << 9; + pub const REMOTE_SHUTDOWN_SENT: u32 = 1 << 10; + pub const LOCAL_SHUTDOWN_SENT: u32 = 1 << 11; + pub const SHUTDOWN_COMPLETE: u32 = 1 << 12; + pub const WAITING_FOR_BATCH: u32 = 1 << 13; } -/// There are a few "states" and then a number of flags which can be applied: -/// We first move through init with `OurInitSent` -> `TheirInitSent` -> `FundingCreated` -> `FundingSent`. -/// `TheirChannelReady` and `OurChannelReady` then get set on `FundingSent`, and when both are set we -/// move on to `ChannelReady`. -/// Note that `PeerDisconnected` can be set on both `ChannelReady` and `FundingSent`. -/// `ChannelReady` can then get all remaining flags set on it, until we finish shutdown, then we -/// move on to `ShutdownComplete`, at which point most calls into this channel are disallowed. +define_state_flags!( + "Flags that apply to all [`ChannelState`] variants in which the channel is funded.", + FundedStateFlags, [ + ("Indicates the remote side is considered \"disconnected\" and no updates are allowed \ + until after we've done a `channel_reestablish` dance.", PEER_DISCONNECTED, state_flags::PEER_DISCONNECTED, + is_peer_disconnected, set_peer_disconnected, clear_peer_disconnected), + ("Indicates the user has told us a `ChannelMonitor` update is pending async persistence \ + somewhere and we should pause sending any outbound messages until they've managed to \ + complete it.", MONITOR_UPDATE_IN_PROGRESS, state_flags::MONITOR_UPDATE_IN_PROGRESS, + is_monitor_update_in_progress, set_monitor_update_in_progress, clear_monitor_update_in_progress), + ("Indicates we received a `shutdown` message from the remote end. If set, they may not add \ + any new HTLCs to the channel, and we are expected to respond with our own `shutdown` \ + message when possible.", REMOTE_SHUTDOWN_SENT, state_flags::REMOTE_SHUTDOWN_SENT, + is_remote_shutdown_sent, set_remote_shutdown_sent, clear_remote_shutdown_sent), + ("Indicates we sent a `shutdown` message.
At this point, we may not add any new HTLCs to \ + the channel.", LOCAL_SHUTDOWN_SENT, state_flags::LOCAL_SHUTDOWN_SENT, + is_local_shutdown_sent, set_local_shutdown_sent, clear_local_shutdown_sent) + ] +); + +define_state_flags!( + "Flags that only apply to [`ChannelState::NegotiatingFunding`].", + NegotiatingFundingFlags, [ + ("Indicates we have (or are prepared to) send our `open_channel`/`accept_channel` message.", + OUR_INIT_SENT, state_flags::OUR_INIT_SENT, is_our_init_sent, set_our_init_sent, clear_our_init_sent), + ("Indicates we have received their `open_channel`/`accept_channel` message.", + THEIR_INIT_SENT, state_flags::THEIR_INIT_SENT, is_their_init_sent, set_their_init_sent, clear_their_init_sent) + ] +); + +define_state_flags!( + "Flags that only apply to [`ChannelState::AwaitingChannelReady`].", + FUNDED_STATE, AwaitingChannelReadyFlags, [ + ("Indicates they sent us a `channel_ready` message. Once both `THEIR_CHANNEL_READY` and \ + `OUR_CHANNEL_READY` are set, our state moves on to `ChannelReady`.", + THEIR_CHANNEL_READY, state_flags::THEIR_CHANNEL_READY, + is_their_channel_ready, set_their_channel_ready, clear_their_channel_ready), + ("Indicates we sent them a `channel_ready` message. Once both `THEIR_CHANNEL_READY` and \ + `OUR_CHANNEL_READY` are set, our state moves on to `ChannelReady`.", + OUR_CHANNEL_READY, state_flags::OUR_CHANNEL_READY, + is_our_channel_ready, set_our_channel_ready, clear_our_channel_ready), + ("Indicates the channel was funded in a batch and the broadcast of the funding transaction \ + is being held until all channels in the batch have received `funding_signed` and have \ + their monitors persisted.", WAITING_FOR_BATCH, state_flags::WAITING_FOR_BATCH, + is_waiting_for_batch, set_waiting_for_batch, clear_waiting_for_batch) + ] +); + +define_state_flags!( + "Flags that only apply to [`ChannelState::ChannelReady`].", + FUNDED_STATE, ChannelReadyFlags, [ + ("Indicates that we have sent a `commitment_signed` but are awaiting the responding \ + `revoke_and_ack` message. During this period, we can't generate new `commitment_signed` \ + messages as we'd be unable to determine which HTLCs they included in their `revoke_and_ack` \ + implicit ACK, so instead we have to hold them away temporarily to be sent later.", + AWAITING_REMOTE_REVOKE, state_flags::AWAITING_REMOTE_REVOKE, + is_awaiting_remote_revoke, set_awaiting_remote_revoke, clear_awaiting_remote_revoke) + ] +); + +// Note that the order of this enum is implicitly defined by where each variant is placed. Take this +// into account when introducing new states and update `test_channel_state_order` accordingly. +#[derive(Copy, Clone, Debug, PartialEq, PartialOrd, Eq)] enum ChannelState { - /// Implies we have (or are prepared to) send our open_channel/accept_channel message - OurInitSent = 1 << 0, - /// Implies we have received their `open_channel`/`accept_channel` message - TheirInitSent = 1 << 1, - /// We have sent `funding_created` and are awaiting a `funding_signed` to advance to `FundingSent`. - /// Note that this is nonsense for an inbound channel as we immediately generate `funding_signed` - /// upon receipt of `funding_created`, so simply skip this state. - FundingCreated = 4, - /// Set when we have received/sent `funding_created` and `funding_signed` and are thus now waiting - /// on the funding transaction to confirm. The `ChannelReady` flags are set to indicate when we - /// and our counterparty consider the funding transaction confirmed. 
- FundingSent = 8, - /// Flag which can be set on `FundingSent` to indicate they sent us a `channel_ready` message. - /// Once both `TheirChannelReady` and `OurChannelReady` are set, state moves on to `ChannelReady`. - TheirChannelReady = 1 << 4, - /// Flag which can be set on `FundingSent` to indicate we sent them a `channel_ready` message. - /// Once both `TheirChannelReady` and `OurChannelReady` are set, state moves on to `ChannelReady`. - OurChannelReady = 1 << 5, - ChannelReady = 64, - /// Flag which is set on `ChannelReady` and `FundingSent` indicating remote side is considered - /// "disconnected" and no updates are allowed until after we've done a `channel_reestablish` - /// dance. - PeerDisconnected = 1 << 7, - /// Flag which is set on `ChannelReady`, FundingCreated, and `FundingSent` indicating the user has - /// told us a `ChannelMonitor` update is pending async persistence somewhere and we should pause - /// sending any outbound messages until they've managed to finish. - MonitorUpdateInProgress = 1 << 8, - /// Flag which implies that we have sent a commitment_signed but are awaiting the responding - /// revoke_and_ack message. During this time period, we can't generate new commitment_signed - /// messages as then we will be unable to determine which HTLCs they included in their - /// revoke_and_ack implicit ACK, so instead we have to hold them away temporarily to be sent - /// later. - /// Flag is set on `ChannelReady`. - AwaitingRemoteRevoke = 1 << 9, - /// Flag which is set on `ChannelReady` or `FundingSent` after receiving a shutdown message from - /// the remote end. If set, they may not add any new HTLCs to the channel, and we are expected - /// to respond with our own shutdown message when possible. - RemoteShutdownSent = 1 << 10, - /// Flag which is set on `ChannelReady` or `FundingSent` after sending a shutdown message. At this - /// point, we may not add any new HTLCs to the channel. - LocalShutdownSent = 1 << 11, - /// We've successfully negotiated a closing_signed dance. At this point ChannelManager is about - /// to drop us, but we store this anyway. - ShutdownComplete = 4096, - /// Flag which is set on `FundingSent` to indicate this channel is funded in a batch and the - /// broadcasting of the funding transaction is being held until all channels in the batch - /// have received funding_signed and have their monitors persisted. - WaitingForBatch = 1 << 13, + /// We are negotiating the parameters required for the channel prior to funding it. + NegotiatingFunding(NegotiatingFundingFlags), + /// We have sent `funding_created` and are awaiting a `funding_signed` to advance to + /// `AwaitingChannelReady`. Note that this is nonsense for an inbound channel as we immediately generate + /// `funding_signed` upon receipt of `funding_created`, so simply skip this state. + FundingNegotiated, + /// We've received/sent `funding_created` and `funding_signed` and are thus now waiting on the + /// funding transaction to confirm. + AwaitingChannelReady(AwaitingChannelReadyFlags), + /// Both we and our counterparty consider the funding transaction confirmed and the channel is + /// now operational. + ChannelReady(ChannelReadyFlags), + /// We've successfully negotiated a `closing_signed` dance. At this point, the `ChannelManager` + /// is about to drop us, but we store this anyway. + ShutdownComplete, +} + +macro_rules! 
impl_state_flag { + ($get: ident, $set: ident, $clear: ident, [$($state: ident),+]) => { + #[allow(unused)] + fn $get(&self) -> bool { + match self { + $( + ChannelState::$state(flags) => flags.$get(), + )* + _ => false, + } + } + #[allow(unused)] + fn $set(&mut self) { + match self { + $( + ChannelState::$state(flags) => flags.$set(), + )* + _ => debug_assert!(false, "Attempted to set flag on unexpected ChannelState"), + } + } + #[allow(unused)] + fn $clear(&mut self) { + match self { + $( + ChannelState::$state(flags) => { let _ = flags.$clear(); }, + )* + _ => debug_assert!(false, "Attempted to clear flag on unexpected ChannelState"), + } + } + }; + ($get: ident, $set: ident, $clear: ident, FUNDED_STATES) => { + impl_state_flag!($get, $set, $clear, [AwaitingChannelReady, ChannelReady]); + }; + ($get: ident, $set: ident, $clear: ident, $state: ident) => { + impl_state_flag!($get, $set, $clear, [$state]); + }; +} + +impl ChannelState { + fn from_u32(state: u32) -> Result<Self, ()> { + match state { + state_flags::FUNDING_NEGOTIATED => Ok(ChannelState::FundingNegotiated), + state_flags::SHUTDOWN_COMPLETE => Ok(ChannelState::ShutdownComplete), + val => { + if val & state_flags::AWAITING_CHANNEL_READY == state_flags::AWAITING_CHANNEL_READY { + AwaitingChannelReadyFlags::from_u32(val & !state_flags::AWAITING_CHANNEL_READY) + .map(|flags| ChannelState::AwaitingChannelReady(flags)) + } else if val & state_flags::CHANNEL_READY == state_flags::CHANNEL_READY { + ChannelReadyFlags::from_u32(val & !state_flags::CHANNEL_READY) + .map(|flags| ChannelState::ChannelReady(flags)) + } else if let Ok(flags) = NegotiatingFundingFlags::from_u32(val) { + Ok(ChannelState::NegotiatingFunding(flags)) + } else { + Err(()) + } + }, + } + } + + fn to_u32(&self) -> u32 { + match self { + ChannelState::NegotiatingFunding(flags) => flags.0, + ChannelState::FundingNegotiated => state_flags::FUNDING_NEGOTIATED, + ChannelState::AwaitingChannelReady(flags) => state_flags::AWAITING_CHANNEL_READY | flags.0, + ChannelState::ChannelReady(flags) => state_flags::CHANNEL_READY | flags.0, + ChannelState::ShutdownComplete => state_flags::SHUTDOWN_COMPLETE, + } + } + + fn is_pre_funded_state(&self) -> bool { + matches!(self, ChannelState::NegotiatingFunding(_)|ChannelState::FundingNegotiated) + } + + fn is_both_sides_shutdown(&self) -> bool { + self.is_local_shutdown_sent() && self.is_remote_shutdown_sent() + } + + fn with_funded_state_flags_mask(&self) -> FundedStateFlags { + match self { + ChannelState::AwaitingChannelReady(flags) => FundedStateFlags((*flags & FundedStateFlags::ALL).0), + ChannelState::ChannelReady(flags) => FundedStateFlags((*flags & FundedStateFlags::ALL).0), + _ => FundedStateFlags::new(), + } + } + + fn can_generate_new_commitment(&self) -> bool { + match self { + ChannelState::ChannelReady(flags) => + !flags.is_set(ChannelReadyFlags::AWAITING_REMOTE_REVOKE) && + !flags.is_set(FundedStateFlags::MONITOR_UPDATE_IN_PROGRESS.into()) && + !flags.is_set(FundedStateFlags::PEER_DISCONNECTED.into()), + _ => { + debug_assert!(false, "Can only generate new commitment within ChannelReady"); + false + }, + } + } + + impl_state_flag!(is_peer_disconnected, set_peer_disconnected, clear_peer_disconnected, FUNDED_STATES); + impl_state_flag!(is_monitor_update_in_progress, set_monitor_update_in_progress, clear_monitor_update_in_progress, FUNDED_STATES); + impl_state_flag!(is_local_shutdown_sent, set_local_shutdown_sent, clear_local_shutdown_sent, FUNDED_STATES); + impl_state_flag!(is_remote_shutdown_sent, set_remote_shutdown_sent,
clear_remote_shutdown_sent, FUNDED_STATES); + impl_state_flag!(is_our_channel_ready, set_our_channel_ready, clear_our_channel_ready, AwaitingChannelReady); + impl_state_flag!(is_their_channel_ready, set_their_channel_ready, clear_their_channel_ready, AwaitingChannelReady); + impl_state_flag!(is_waiting_for_batch, set_waiting_for_batch, clear_waiting_for_batch, AwaitingChannelReady); + impl_state_flag!(is_awaiting_remote_revoke, set_awaiting_remote_revoke, clear_awaiting_remote_revoke, ChannelReady); } -const BOTH_SIDES_SHUTDOWN_MASK: u32 = - ChannelState::LocalShutdownSent as u32 | - ChannelState::RemoteShutdownSent as u32; -const MULTI_STATE_FLAGS: u32 = - BOTH_SIDES_SHUTDOWN_MASK | - ChannelState::PeerDisconnected as u32 | - ChannelState::MonitorUpdateInProgress as u32; -const STATE_FLAGS: u32 = - MULTI_STATE_FLAGS | - ChannelState::TheirChannelReady as u32 | - ChannelState::OurChannelReady as u32 | - ChannelState::AwaitingRemoteRevoke as u32 | - ChannelState::WaitingForBatch as u32; pub const INITIAL_COMMITMENT_NUMBER: u64 = (1 << 48) - 1; @@ -384,7 +710,7 @@ pub const MIN_THEIR_CHAN_RESERVE_SATOSHIS: u64 = 1000; pub(super) enum ChannelError { Ignore(String), Warn(String), - Close(String), + Close((String, ClosureReason)), } impl fmt::Debug for ChannelError { @@ -392,7 +718,7 @@ impl fmt::Debug for ChannelError { match self { &ChannelError::Ignore(ref e) => write!(f, "Ignore : {}", e), &ChannelError::Warn(ref e) => write!(f, "Warn : {}", e), - &ChannelError::Close(ref e) => write!(f, "Close : {}", e), + &ChannelError::Close((ref e, _)) => write!(f, "Close : {}", e), } } } @@ -402,7 +728,43 @@ impl fmt::Display for ChannelError { match self { &ChannelError::Ignore(ref e) => write!(f, "{}", e), &ChannelError::Warn(ref e) => write!(f, "{}", e), - &ChannelError::Close(ref e) => write!(f, "{}", e), + &ChannelError::Close((ref e, _)) => write!(f, "{}", e), + } + } +} + +impl ChannelError { + pub(super) fn close(err: String) -> Self { + ChannelError::Close((err.clone(), ClosureReason::ProcessingError { err })) + } +} + +pub(super) struct WithChannelContext<'a, L: Deref> where L::Target: Logger { + pub logger: &'a L, + pub peer_id: Option<PublicKey>, + pub channel_id: Option<ChannelId>, + pub payment_hash: Option<PaymentHash>, +} + +impl<'a, L: Deref> Logger for WithChannelContext<'a, L> where L::Target: Logger { + fn log(&self, mut record: Record) { + record.peer_id = self.peer_id; + record.channel_id = self.channel_id; + record.payment_hash = self.payment_hash; + self.logger.log(record) + } +} + +impl<'a, 'b, L: Deref> WithChannelContext<'a, L> +where L::Target: Logger { + pub(super) fn from<S: Deref>(logger: &'a L, context: &'b ChannelContext<S>, payment_hash: Option<PaymentHash>) -> Self + where S::Target: SignerProvider + { + WithChannelContext { + logger, + peer_id: Some(context.counterparty_node_id), + channel_id: Some(context.channel_id), + payment_hash } } } @@ -411,7 +773,7 @@ macro_rules! secp_check { ($res: expr, $err: expr) => { match $res { Ok(thing) => thing, - Err(_) => return Err(ChannelError::Close($err)), + Err(_) => return Err(ChannelError::close($err)), } }; } @@ -459,14 +821,16 @@ enum HTLCInitiator { RemoteOffered, } -/// An enum gathering stats on pending HTLCs, either inbound or outbound side. +/// Current counts of various HTLCs, useful for calculating current balances available exactly.
struct HTLCStats { - pending_htlcs: u32, - pending_htlcs_value_msat: u64, + pending_inbound_htlcs: usize, + pending_outbound_htlcs: usize, + pending_inbound_htlcs_value_msat: u64, + pending_outbound_htlcs_value_msat: u64, on_counterparty_tx_dust_exposure_msat: u64, on_holder_tx_dust_exposure_msat: u64, - holding_cell_msat: u64, - on_holder_tx_holding_cell_htlcs_count: u32, // dust HTLCs *non*-included + outbound_holding_cell_msat: u64, + on_holder_tx_outbound_holding_cell_htlcs_count: u32, // dust HTLCs *non*-included } /// An enum gathering stats on commitment transaction, either local or remote. @@ -476,9 +840,10 @@ struct CommitmentStats<'a> { total_fee_sat: u64, // the total fee included in the transaction num_nondust_htlcs: usize, // the number of HTLC outputs (dust HTLCs *non*-included) htlcs_included: Vec<(HTLCOutputInCommitment, Option<&'a HTLCSource>)>, // the list of HTLCs (dust HTLCs *included*) which were not ignored when building the transaction - local_balance_msat: u64, // local balance before fees but considering dust limits - remote_balance_msat: u64, // remote balance before fees but considering dust limits - preimages: Vec<PaymentPreimage>, // preimages for successful offered HTLCs since last commitment + local_balance_msat: u64, // local balance before fees *not* considering dust limits + remote_balance_msat: u64, // remote balance before fees *not* considering dust limits + outbound_htlc_preimages: Vec<PaymentPreimage>, // preimages for successful offered HTLCs since last commitment + inbound_htlc_preimages: Vec<PaymentPreimage>, // preimages for successful received HTLCs since last commitment } /// Used when calculating whether we or the remote can afford an additional HTLC. @@ -531,6 +896,7 @@ pub(super) struct MonitorRestoreUpdates { pub accepted_htlcs: Vec<(PendingHTLCInfo, u64)>, pub failed_htlcs: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>, pub finalized_claimed_htlcs: Vec<HTLCSource>, + pub pending_update_adds: Vec<msgs::UpdateAddHTLC>, pub funding_broadcastable: Option<Transaction>, pub channel_ready: Option<msgs::ChannelReady>, pub announcement_sigs: Option<msgs::AnnouncementSignatures>, @@ -540,9 +906,10 @@ #[allow(unused)] pub(super) struct SignerResumeUpdates { pub commitment_update: Option<msgs::CommitmentUpdate>, + pub revoke_and_ack: Option<msgs::RevokeAndACK>, pub funding_signed: Option<msgs::FundingSigned>, - pub funding_created: Option<msgs::FundingCreated>, pub channel_ready: Option<msgs::ChannelReady>, + pub order: RAACommitmentOrder, } /// The return value of `channel_reestablish` @@ -558,13 +925,125 @@ pub(super) struct ReestablishResponses { /// The result of a shutdown that should be handled. #[must_use] pub(crate) struct ShutdownResult { + pub(crate) closure_reason: ClosureReason, /// A channel monitor update to apply. - pub(crate) monitor_update: Option<(PublicKey, OutPoint, ChannelMonitorUpdate)>, + pub(crate) monitor_update: Option<(PublicKey, OutPoint, ChannelId, ChannelMonitorUpdate)>, /// A list of dropped outbound HTLCs that can safely be failed backwards immediately. pub(crate) dropped_outbound_htlcs: Vec<(HTLCSource, PaymentHash, PublicKey, ChannelId)>, /// An unbroadcasted batch funding transaction id. The closure of this channel should be /// propagated to the remainder of the batch. pub(crate) unbroadcasted_batch_funding_txid: Option<Txid>, + pub(crate) channel_id: ChannelId, + pub(crate) user_channel_id: u128, + pub(crate) channel_capacity_satoshis: u64, + pub(crate) counterparty_node_id: PublicKey, + pub(crate) unbroadcasted_funding_tx: Option<Transaction>, + pub(crate) channel_funding_txo: Option<OutPoint>, +} + +/// Tracks the transaction number, along with current and next commitment points.
+/// This consolidates the logic to advance our commitment number and request new +/// commitment points from our signer. +#[derive(Debug, Copy, Clone)] +enum HolderCommitmentPoint { + // TODO: add a variant for before our first commitment point is retrieved + /// We've advanced our commitment number and are waiting on the next commitment point. + /// Until the `get_per_commitment_point` signer method becomes async, this variant + /// will not be used. + PendingNext { transaction_number: u64, current: PublicKey }, + /// Our current commitment point is ready, we've cached our next point, + /// and we are not pending a new one. + Available { transaction_number: u64, current: PublicKey, next: PublicKey }, +} + +impl HolderCommitmentPoint { + pub fn new<SP: Deref>(signer: &ChannelSignerType<SP>, secp_ctx: &Secp256k1<secp256k1::All>) -> Self + where SP::Target: SignerProvider + { + HolderCommitmentPoint::Available { + transaction_number: INITIAL_COMMITMENT_NUMBER, + // TODO(async_signing): remove this expect with the Uninitialized variant + current: signer.as_ref().get_per_commitment_point(INITIAL_COMMITMENT_NUMBER, secp_ctx) + .expect("Signer must be able to provide initial commitment point"), + // TODO(async_signing): remove this expect with the Uninitialized variant + next: signer.as_ref().get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - 1, secp_ctx) + .expect("Signer must be able to provide second commitment point"), + } + } + + pub fn is_available(&self) -> bool { + if let HolderCommitmentPoint::Available { .. } = self { true } else { false } + } + + pub fn transaction_number(&self) -> u64 { + match self { + HolderCommitmentPoint::PendingNext { transaction_number, .. } => *transaction_number, + HolderCommitmentPoint::Available { transaction_number, .. } => *transaction_number, + } + } + + pub fn current_point(&self) -> PublicKey { + match self { + HolderCommitmentPoint::PendingNext { current, .. } => *current, + HolderCommitmentPoint::Available { current, .. } => *current, + } + } + + pub fn next_point(&self) -> Option<PublicKey> { + match self { + HolderCommitmentPoint::PendingNext { .. } => None, + HolderCommitmentPoint::Available { next, .. } => Some(*next), + } + } + + /// If we are pending the next commitment point, this method tries asking the signer again, + /// and transitions to the next state if successful. + /// + /// This method is used for the following transitions: + /// - `PendingNext` -> `Available` + pub fn try_resolve_pending<SP: Deref, L: Deref>(&mut self, signer: &ChannelSignerType<SP>, secp_ctx: &Secp256k1<secp256k1::All>, logger: &L) + where SP::Target: SignerProvider, L::Target: Logger + { + if let HolderCommitmentPoint::PendingNext { transaction_number, current } = self { + if let Ok(next) = signer.as_ref().get_per_commitment_point(*transaction_number - 1, secp_ctx) { + log_trace!(logger, "Retrieved next per-commitment point {}", *transaction_number - 1); + *self = HolderCommitmentPoint::Available { transaction_number: *transaction_number, current: *current, next }; + } else { + log_trace!(logger, "Next per-commitment point {} is pending", transaction_number); + } + } + } + + /// If we are not pending the next commitment point, this method advances the commitment number + /// and requests the next commitment point from the signer. Returns `Ok` if we were at + /// `Available` and were able to advance our commitment number (even if we are still pending + /// the next commitment point).
+ /// + /// If our signer is not ready to provide the next commitment point, we will + /// only advance to `PendingNext`, and should be tried again later in `signer_unblocked` + /// via `try_resolve_pending`. + /// + /// If our signer is ready to provide the next commitment point, we will advance all the + /// way to `Available`. + /// + /// This method is used for the following transitions: + /// - `Available` -> `PendingNext` + /// - `Available` -> `PendingNext` -> `Available` (in one fell swoop) + pub fn advance<SP: Deref, L: Deref>( + &mut self, signer: &ChannelSignerType<SP>, secp_ctx: &Secp256k1<secp256k1::All>, logger: &L + ) -> Result<(), ()> + where SP::Target: SignerProvider, L::Target: Logger + { + if let HolderCommitmentPoint::Available { transaction_number, next, .. } = self { + *self = HolderCommitmentPoint::PendingNext { + transaction_number: *transaction_number - 1, + current: *next, + }; + self.try_resolve_pending(signer, secp_ctx, logger); + return Ok(()); + } + Err(()) + } } /// If the majority of the channels funds are to the fundee and the initiator holds only just @@ -642,18 +1121,26 @@ impl_writeable_tlv_based!(PendingChannelMonitorUpdate, { pub(super) enum ChannelPhase<SP: Deref> where SP::Target: SignerProvider { UnfundedOutboundV1(OutboundV1Channel<SP>), UnfundedInboundV1(InboundV1Channel<SP>), + #[cfg(any(dual_funding, splicing))] + UnfundedOutboundV2(OutboundV2Channel<SP>), + #[cfg(any(dual_funding, splicing))] + UnfundedInboundV2(InboundV2Channel<SP>), Funded(Channel<SP>), } impl<'a, SP: Deref> ChannelPhase<SP> where SP::Target: SignerProvider, - <SP::Target as SignerProvider>::Signer: ChannelSigner, + <SP::Target as SignerProvider>::EcdsaSigner: ChannelSigner, { pub fn context(&'a self) -> &'a ChannelContext<SP> { match self { ChannelPhase::Funded(chan) => &chan.context, ChannelPhase::UnfundedOutboundV1(chan) => &chan.context, ChannelPhase::UnfundedInboundV1(chan) => &chan.context, + #[cfg(any(dual_funding, splicing))] + ChannelPhase::UnfundedOutboundV2(chan) => &chan.context, + #[cfg(any(dual_funding, splicing))] + ChannelPhase::UnfundedInboundV2(chan) => &chan.context, } } @@ -662,6 +1149,10 @@ impl<'a, SP: Deref> ChannelPhase<SP> where ChannelPhase::Funded(ref mut chan) => &mut chan.context, ChannelPhase::UnfundedOutboundV1(ref mut chan) => &mut chan.context, ChannelPhase::UnfundedInboundV1(ref mut chan) => &mut chan.context, + #[cfg(any(dual_funding, splicing))] + ChannelPhase::UnfundedOutboundV2(ref mut chan) => &mut chan.context, + #[cfg(any(dual_funding, splicing))] + ChannelPhase::UnfundedInboundV2(ref mut chan) => &mut chan.context, } } } @@ -706,7 +1197,7 @@ pub(super) struct ChannelContext<SP: Deref> where SP::Target: SignerProvider { /// The temporary channel ID used during channel setup. Value kept even after transitioning to a final channel ID. /// Will be `None` for channels created prior to 0.0.115. temporary_channel_id: Option<ChannelId>, - channel_state: u32, + channel_state: ChannelState, // When we reach max(6 blocks, minimum_depth), we need to send an AnnouncementSigs message to // our peer. However, we want to make sure they received it, or else rebroadcast it when we @@ -725,7 +1216,7 @@ pub(super) struct ChannelContext<SP: Deref> where SP::Target: SignerProvider { latest_monitor_update_id: u64, - holder_signer: ChannelSignerType<<SP::Target as SignerProvider>::Signer>, + holder_signer: ChannelSignerType<SP>, shutdown_scriptpubkey: Option<ShutdownScript>, destination_script: ScriptBuf, @@ -733,7 +1224,7 @@ pub(super) struct ChannelContext<SP: Deref> where SP::Target: SignerProvider { // generation start at 0 and count up...this simplifies some parts of implementation at the // cost of others, but should really just be changed.
- cur_holder_commitment_transaction_number: u64, + holder_commitment_point: HolderCommitmentPoint, cur_counterparty_commitment_transaction_number: u64, value_to_self_msat: u64, // Excluding all pending_htlcs, fees, and anchor outputs pending_inbound_htlcs: Vec<InboundHTLCOutput>, @@ -760,7 +1251,16 @@ pub(super) struct ChannelContext<SP: Deref> where SP::Target: SignerProvider { monitor_pending_forwards: Vec<(PendingHTLCInfo, u64)>, monitor_pending_failures: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>, monitor_pending_finalized_fulfills: Vec<HTLCSource>, + monitor_pending_update_adds: Vec<msgs::UpdateAddHTLC>, + /// If we went to send a revoke_and_ack but our signer was unable to give us a signature, + /// we should retry at some point in the future when the signer indicates it may have a + /// signature for us. + /// + /// This may also be used to make sure we send a `revoke_and_ack` after a `commitment_signed` + /// if we need to maintain ordering of messages, but are pending the signer on a previous + /// message. + signer_pending_revoke_and_ack: bool, /// If we went to send a commitment update (ie some messages then [`msgs::CommitmentSigned`]) /// but our signer (initially) refused to give us a signature, we should retry at some point in /// the future when the signer indicates it may have a signature for us. @@ -838,7 +1338,7 @@ pub(super) struct ChannelContext<SP: Deref> where SP::Target: SignerProvider { /// Either the height at which this channel was created or the height at which it was last /// serialized if it was serialized by versions prior to 0.0.103. /// We use this to close if funding is never broadcasted. - channel_creation_height: u32, + pub(super) channel_creation_height: u32, counterparty_dust_limit_satoshis: u64, @@ -968,9 +1468,15 @@ pub(super) struct ChannelContext<SP: Deref> where SP::Target: SignerProvider { // We track whether we already emitted a `ChannelReady` event. channel_ready_event_emitted: bool, + /// Some if we initiated to shut down the channel. + local_initiated_shutdown: Option<()>, + /// The unique identifier used to re-derive the private key material for the channel through /// [`SignerProvider::derive_channel_signer`]. + #[cfg(not(test))] channel_keys_id: [u8; 32], + #[cfg(test)] + pub channel_keys_id: [u8; 32], /// If we can't release a [`ChannelMonitorUpdate`] until some external action completes, we /// store it here and only release it to the `ChannelManager` once it asks for it.
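// Illustrative sketch (not part of the diff): a miniature model of the
// `HolderCommitmentPoint` state machine introduced above. `advance` always steps
// to `PendingNext` and immediately tries to resolve back to `Available`; if the
// (possibly asynchronous) signer cannot produce the next point yet, the value
// parks in `PendingNext` until `try_resolve_pending` is retried, e.g. once the
// signer is unblocked. `Point` and `StubSigner` are hypothetical stand-ins for
// `PublicKey` and `ChannelSignerType`.
#[derive(Clone, Copy, Debug, PartialEq)]
struct Point(u64);

struct StubSigner { ready: bool }

impl StubSigner {
	// Stand-in for `get_per_commitment_point`, which may not be ready yet.
	fn get_per_commitment_point(&self, idx: u64) -> Result<Point, ()> {
		if self.ready { Ok(Point(idx)) } else { Err(()) }
	}
}

#[derive(Debug)]
enum HolderPoint {
	PendingNext { transaction_number: u64, current: Point },
	Available { transaction_number: u64, current: Point, next: Point },
}

impl HolderPoint {
	fn try_resolve_pending(&mut self, signer: &StubSigner) {
		if let HolderPoint::PendingNext { transaction_number, current } = *self {
			if let Ok(next) = signer.get_per_commitment_point(transaction_number - 1) {
				*self = HolderPoint::Available { transaction_number, current, next };
			}
		}
	}

	fn advance(&mut self, signer: &StubSigner) -> Result<(), ()> {
		if let HolderPoint::Available { transaction_number, next, .. } = *self {
			// The cached `next` becomes `current`; fetching a new `next` may pend.
			*self = HolderPoint::PendingNext { transaction_number: transaction_number - 1, current: next };
			self.try_resolve_pending(signer);
			return Ok(());
		}
		// Advancing while still pending the next point is an error, as in the real code.
		Err(())
	}
}

fn main() {
	let mut signer = StubSigner { ready: false };
	let mut point = HolderPoint::Available { transaction_number: 10, current: Point(10), next: Point(9) };
	// Signer not ready: the commitment number advances but we park in `PendingNext`.
	point.advance(&signer).unwrap();
	assert!(matches!(point, HolderPoint::PendingNext { transaction_number: 9, .. }));
	// Later, once the signer can respond (cf. `signer_unblocked`), we resolve.
	signer.ready = true;
	point.try_resolve_pending(&signer);
	assert!(matches!(point, HolderPoint::Available { transaction_number: 9, .. }));
}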
@@ -978,100 +1484,667 @@ } impl<SP: Deref> ChannelContext<SP> where SP::Target: SignerProvider { - /// Allowed in any state (including after shutdown) - pub fn get_update_time_counter(&self) -> u32 { - self.update_time_counter - } - - pub fn get_latest_monitor_update_id(&self) -> u64 { - self.latest_monitor_update_id - } - - pub fn should_announce(&self) -> bool { - self.config.announced_channel - } - - pub fn is_outbound(&self) -> bool { - self.channel_transaction_parameters.is_outbound_from_holder - } + fn new_for_inbound_channel<'a, ES: Deref, F: Deref, L: Deref>( + fee_estimator: &'a LowerBoundedFeeEstimator<F>, + entropy_source: &'a ES, + signer_provider: &'a SP, + counterparty_node_id: PublicKey, + their_features: &'a InitFeatures, + user_id: u128, + config: &'a UserConfig, + current_chain_height: u32, + logger: &'a L, + is_0conf: bool, + our_funding_satoshis: u64, + counterparty_pubkeys: ChannelPublicKeys, + channel_type: ChannelTypeFeatures, + holder_selected_channel_reserve_satoshis: u64, + msg_channel_reserve_satoshis: u64, + msg_push_msat: u64, + open_channel_fields: msgs::CommonOpenChannelFields, + ) -> Result<ChannelContext<SP>, ChannelError> + where + ES::Target: EntropySource, + F::Target: FeeEstimator, + L::Target: Logger, + SP::Target: SignerProvider, + { + let logger = WithContext::from(logger, Some(counterparty_node_id), Some(open_channel_fields.temporary_channel_id), None); + let announced_channel = if (open_channel_fields.channel_flags & 1) == 1 { true } else { false }; - /// Gets the fee we'd want to charge for adding an HTLC output to this Channel - /// Allowed in any state (including after shutdown) - pub fn get_outbound_forwarding_fee_base_msat(&self) -> u32 { - self.config.options.forwarding_fee_base_msat - } + let channel_value_satoshis = our_funding_satoshis.saturating_add(open_channel_fields.funding_satoshis); - /// Returns true if we've ever received a message from the remote end for this Channel - pub fn have_received_message(&self) -> bool { - self.channel_state & !STATE_FLAGS > (ChannelState::OurInitSent as u32) - } + let channel_keys_id = signer_provider.generate_channel_keys_id(true, channel_value_satoshis, user_id); + let holder_signer = signer_provider.derive_channel_signer(channel_value_satoshis, channel_keys_id); + let pubkeys = holder_signer.pubkeys().clone(); - /// Returns true if this channel is fully established and not known to be closing. - /// Allowed in any state (including after shutdown) - pub fn is_usable(&self) -> bool { - let mask = ChannelState::ChannelReady as u32 | BOTH_SIDES_SHUTDOWN_MASK; - (self.channel_state & mask) == (ChannelState::ChannelReady as u32) && !self.monitor_pending_channel_ready - } + if config.channel_handshake_config.our_to_self_delay < BREAKDOWN_TIMEOUT { + return Err(ChannelError::close(format!("Configured with an unreasonable our_to_self_delay ({}) putting user funds at risks. It must be greater than {}", config.channel_handshake_config.our_to_self_delay, BREAKDOWN_TIMEOUT))); + } - /// shutdown state returns the state of the channel in its various stages of shutdown - pub fn shutdown_state(&self) -> ChannelShutdownState { - if self.channel_state & (ChannelState::ShutdownComplete as u32) != 0 { - return ChannelShutdownState::ShutdownComplete; + // Check sanity of message fields: + if channel_value_satoshis > config.channel_handshake_limits.max_funding_satoshis { + return Err(ChannelError::close(format!( + "Per our config, funding must be at most {}. It was {}. Peer contribution: {}.
Our contribution: {}", + config.channel_handshake_limits.max_funding_satoshis, channel_value_satoshis, + open_channel_fields.funding_satoshis, our_funding_satoshis))); + } + if channel_value_satoshis >= TOTAL_BITCOIN_SUPPLY_SATOSHIS { + return Err(ChannelError::close(format!("Funding must be smaller than the total bitcoin supply. It was {}", channel_value_satoshis))); } - if self.channel_state & (ChannelState::LocalShutdownSent as u32) != 0 && self.channel_state & (ChannelState::RemoteShutdownSent as u32) == 0 { - return ChannelShutdownState::ShutdownInitiated; + if msg_channel_reserve_satoshis > channel_value_satoshis { + return Err(ChannelError::close(format!("Bogus channel_reserve_satoshis ({}). Must be no greater than channel_value_satoshis: {}", msg_channel_reserve_satoshis, channel_value_satoshis))); } - if (self.channel_state & BOTH_SIDES_SHUTDOWN_MASK != 0) && !self.closing_negotiation_ready() { - return ChannelShutdownState::ResolvingHTLCs; + let full_channel_value_msat = (channel_value_satoshis - msg_channel_reserve_satoshis) * 1000; + if msg_push_msat > full_channel_value_msat { + return Err(ChannelError::close(format!("push_msat {} was larger than channel amount minus reserve ({})", msg_push_msat, full_channel_value_msat))); } - if (self.channel_state & BOTH_SIDES_SHUTDOWN_MASK != 0) && self.closing_negotiation_ready() { - return ChannelShutdownState::NegotiatingClosingFee; + if open_channel_fields.dust_limit_satoshis > channel_value_satoshis { + return Err(ChannelError::close(format!("dust_limit_satoshis {} was larger than channel_value_satoshis {}. Peer never wants payout outputs?", open_channel_fields.dust_limit_satoshis, channel_value_satoshis))); } - return ChannelShutdownState::NotShuttingDown; - } + if open_channel_fields.htlc_minimum_msat >= full_channel_value_msat { + return Err(ChannelError::close(format!("Minimum htlc value ({}) was larger than full channel value ({})", open_channel_fields.htlc_minimum_msat, full_channel_value_msat))); + } + Channel::<SP>::check_remote_fee(&channel_type, fee_estimator, open_channel_fields.commitment_feerate_sat_per_1000_weight, None, &&logger)?; - fn closing_negotiation_ready(&self) -> bool { - self.pending_inbound_htlcs.is_empty() && - self.pending_outbound_htlcs.is_empty() && - self.pending_update_fee.is_none() && - self.channel_state & - (BOTH_SIDES_SHUTDOWN_MASK | - ChannelState::AwaitingRemoteRevoke as u32 | - ChannelState::PeerDisconnected as u32 | - ChannelState::MonitorUpdateInProgress as u32) == BOTH_SIDES_SHUTDOWN_MASK - } + let max_counterparty_selected_contest_delay = u16::min(config.channel_handshake_limits.their_to_self_delay, MAX_LOCAL_BREAKDOWN_TIMEOUT); + if open_channel_fields.to_self_delay > max_counterparty_selected_contest_delay { + return Err(ChannelError::close(format!("They wanted our payments to be delayed by a needlessly long period. Upper limit: {}. Actual: {}", max_counterparty_selected_contest_delay, open_channel_fields.to_self_delay))); + } + if open_channel_fields.max_accepted_htlcs < 1 { + return Err(ChannelError::close("0 max_accepted_htlcs makes for a useless channel".to_owned())); + } + if open_channel_fields.max_accepted_htlcs > MAX_HTLCS { + return Err(ChannelError::close(format!("max_accepted_htlcs was {}. It must not be larger than {}", open_channel_fields.max_accepted_htlcs, MAX_HTLCS))); + } - /// Returns true if this channel is currently available for use. This is a superset of - /// is_usable() and considers things like the channel being temporarily disabled.
- /// Allowed in any state (including after shutdown) - pub fn is_live(&self) -> bool { - self.is_usable() && (self.channel_state & (ChannelState::PeerDisconnected as u32) == 0) - } + // Now check against optional parameters as set by config... + if channel_value_satoshis < config.channel_handshake_limits.min_funding_satoshis { + return Err(ChannelError::close(format!("Funding satoshis ({}) is less than the user specified limit ({})", channel_value_satoshis, config.channel_handshake_limits.min_funding_satoshis))); + } + if open_channel_fields.htlc_minimum_msat > config.channel_handshake_limits.max_htlc_minimum_msat { + return Err(ChannelError::close(format!("htlc_minimum_msat ({}) is higher than the user specified limit ({})", open_channel_fields.htlc_minimum_msat, config.channel_handshake_limits.max_htlc_minimum_msat))); + } + if open_channel_fields.max_htlc_value_in_flight_msat < config.channel_handshake_limits.min_max_htlc_value_in_flight_msat { + return Err(ChannelError::close(format!("max_htlc_value_in_flight_msat ({}) is less than the user specified limit ({})", open_channel_fields.max_htlc_value_in_flight_msat, config.channel_handshake_limits.min_max_htlc_value_in_flight_msat))); + } + if msg_channel_reserve_satoshis > config.channel_handshake_limits.max_channel_reserve_satoshis { + return Err(ChannelError::close(format!("channel_reserve_satoshis ({}) is higher than the user specified limit ({})", msg_channel_reserve_satoshis, config.channel_handshake_limits.max_channel_reserve_satoshis))); + } + if open_channel_fields.max_accepted_htlcs < config.channel_handshake_limits.min_max_accepted_htlcs { + return Err(ChannelError::close(format!("max_accepted_htlcs ({}) is less than the user specified limit ({})", open_channel_fields.max_accepted_htlcs, config.channel_handshake_limits.min_max_accepted_htlcs))); + } + if open_channel_fields.dust_limit_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS { + return Err(ChannelError::close(format!("dust_limit_satoshis ({}) is less than the implementation limit ({})", open_channel_fields.dust_limit_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS))); + } + if open_channel_fields.dust_limit_satoshis > MAX_CHAN_DUST_LIMIT_SATOSHIS { + return Err(ChannelError::close(format!("dust_limit_satoshis ({}) is greater than the implementation limit ({})", open_channel_fields.dust_limit_satoshis, MAX_CHAN_DUST_LIMIT_SATOSHIS))); + } - // Public utilities: + // Convert things into internal flags and prep our state: - pub fn channel_id(&self) -> ChannelId { - self.channel_id - } + if config.channel_handshake_limits.force_announced_channel_preference { + if config.channel_handshake_config.announced_channel != announced_channel { + return Err(ChannelError::close("Peer tried to open channel but their announcement preference is different from ours".to_owned())); + } + } - // Return the `temporary_channel_id` used during channel establishment. - // - // Will return `None` for channels created prior to LDK version 0.0.115. - pub fn temporary_channel_id(&self) -> Option<ChannelId> { - self.temporary_channel_id - } + if holder_selected_channel_reserve_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS { + // Protocol level safety check in place, although it should never happen because + // of `MIN_THEIR_CHAN_RESERVE_SATOSHIS` + return Err(ChannelError::close(format!("Suitable channel reserve not found. remote_channel_reserve was ({}).
dust_limit_satoshis is ({}).", holder_selected_channel_reserve_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS))); + } + if holder_selected_channel_reserve_satoshis * 1000 >= full_channel_value_msat { + return Err(ChannelError::close(format!("Suitable channel reserve not found. remote_channel_reserve was ({})msats. Channel value is ({} - {})msats.", holder_selected_channel_reserve_satoshis * 1000, full_channel_value_msat, msg_push_msat))); + } + if msg_channel_reserve_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS { + log_debug!(logger, "channel_reserve_satoshis ({}) is smaller than our dust limit ({}). We can broadcast stale states without any risk, implying this channel is very insecure for our counterparty.", + msg_channel_reserve_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS); + } + if holder_selected_channel_reserve_satoshis < open_channel_fields.dust_limit_satoshis { + return Err(ChannelError::close(format!("Dust limit ({}) too high for the channel reserve we require the remote to keep ({})", open_channel_fields.dust_limit_satoshis, holder_selected_channel_reserve_satoshis))); + } - pub fn minimum_depth(&self) -> Option { - self.minimum_depth - } + // check if the funder's amount for the initial commitment tx is sufficient + // for full fee payment plus a few HTLCs to ensure the channel will be useful. + let anchor_outputs_value = if channel_type.supports_anchors_zero_fee_htlc_tx() { + ANCHOR_OUTPUT_VALUE_SATOSHI * 2 + } else { + 0 + }; + let funders_amount_msat = open_channel_fields.funding_satoshis * 1000 - msg_push_msat; + let commitment_tx_fee = commit_tx_fee_msat(open_channel_fields.commitment_feerate_sat_per_1000_weight, MIN_AFFORDABLE_HTLC_COUNT, &channel_type) / 1000; + if (funders_amount_msat / 1000).saturating_sub(anchor_outputs_value) < commitment_tx_fee { + return Err(ChannelError::close(format!("Funding amount ({} sats) can't even pay fee for initial commitment transaction fee of {} sats.", (funders_amount_msat / 1000).saturating_sub(anchor_outputs_value), commitment_tx_fee))); + } - /// Gets the "user_id" value passed into the construction of this channel. It has no special - /// meaning and exists only to allow users to have a persistent identifier of a channel. - pub fn get_user_id(&self) -> u128 { - self.user_id - } + let to_remote_satoshis = funders_amount_msat / 1000 - commitment_tx_fee - anchor_outputs_value; + // While it's reasonable for us to not meet the channel reserve initially (if they don't + // want to push much to us), our counterparty should always have more than our reserve. + if to_remote_satoshis < holder_selected_channel_reserve_satoshis { + return Err(ChannelError::close("Insufficient funding amount for initial reserve".to_owned())); + } - /// Gets the channel's type + let counterparty_shutdown_scriptpubkey = if their_features.supports_upfront_shutdown_script() { + match &open_channel_fields.shutdown_scriptpubkey { + &Some(ref script) => { + // Peer is signaling upfront_shutdown and has opt-out with a 0-length script. We don't enforce anything + if script.len() == 0 { + None + } else { + if !script::is_bolt2_compliant(&script, their_features) { + return Err(ChannelError::close(format!("Peer is signaling upfront_shutdown but has provided an unacceptable scriptpubkey format: {}", script))) + } + Some(script.clone()) + } + }, + // Peer is signaling upfront shutdown but don't opt-out with correct mechanism (a.k.a 0-length script). 
Peer looks buggy, we fail the channel + &None => { + return Err(ChannelError::close("Peer is signaling upfront_shutdown but we don't get any script. Use 0-length script to opt-out".to_owned())); + } + } + } else { None }; + + let shutdown_scriptpubkey = if config.channel_handshake_config.commit_upfront_shutdown_pubkey { + match signer_provider.get_shutdown_scriptpubkey() { + Ok(scriptpubkey) => Some(scriptpubkey), + Err(_) => return Err(ChannelError::close("Failed to get upfront shutdown scriptpubkey".to_owned())), + } + } else { None }; + + if let Some(shutdown_scriptpubkey) = &shutdown_scriptpubkey { + if !shutdown_scriptpubkey.is_compatible(&their_features) { + return Err(ChannelError::close(format!("Provided a scriptpubkey format not accepted by peer: {}", shutdown_scriptpubkey))); + } + } + + let destination_script = match signer_provider.get_destination_script(channel_keys_id) { + Ok(script) => script, + Err(_) => return Err(ChannelError::close("Failed to get destination script".to_owned())), + }; + + let mut secp_ctx = Secp256k1::new(); + secp_ctx.seeded_randomize(&entropy_source.get_secure_random_bytes()); + + let minimum_depth = if is_0conf { + Some(0) + } else { + Some(cmp::max(config.channel_handshake_config.minimum_depth, 1)) + }; + + let value_to_self_msat = our_funding_satoshis * 1000 + msg_push_msat; + + let holder_signer = ChannelSignerType::Ecdsa(holder_signer); + let holder_commitment_point = HolderCommitmentPoint::new(&holder_signer, &secp_ctx); + + // TODO(dual_funding): Checks for `funding_feerate_sat_per_1000_weight`? + + let channel_context = ChannelContext { + user_id, + + config: LegacyChannelConfig { + options: config.channel_config.clone(), + announced_channel, + commit_upfront_shutdown_pubkey: config.channel_handshake_config.commit_upfront_shutdown_pubkey, + }, + + prev_config: None, + + inbound_handshake_limits_override: None, + + temporary_channel_id: Some(open_channel_fields.temporary_channel_id), + channel_id: open_channel_fields.temporary_channel_id, + channel_state: ChannelState::NegotiatingFunding( + NegotiatingFundingFlags::OUR_INIT_SENT | NegotiatingFundingFlags::THEIR_INIT_SENT + ), + announcement_sigs_state: AnnouncementSigsState::NotSent, + secp_ctx, + + latest_monitor_update_id: 0, + + holder_signer, + shutdown_scriptpubkey, + destination_script, + + holder_commitment_point, + cur_counterparty_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER, + value_to_self_msat, + + pending_inbound_htlcs: Vec::new(), + pending_outbound_htlcs: Vec::new(), + holding_cell_htlc_updates: Vec::new(), + pending_update_fee: None, + holding_cell_update_fee: None, + next_holder_htlc_id: 0, + next_counterparty_htlc_id: 0, + update_time_counter: 1, + + resend_order: RAACommitmentOrder::CommitmentFirst, + + monitor_pending_channel_ready: false, + monitor_pending_revoke_and_ack: false, + monitor_pending_commitment_signed: false, + monitor_pending_forwards: Vec::new(), + monitor_pending_failures: Vec::new(), + monitor_pending_finalized_fulfills: Vec::new(), + monitor_pending_update_adds: Vec::new(), + + signer_pending_revoke_and_ack: false, + signer_pending_commitment_update: false, + signer_pending_funding: false, + + + #[cfg(debug_assertions)] + holder_max_commitment_tx_output: Mutex::new((value_to_self_msat, (channel_value_satoshis * 1000 - msg_push_msat).saturating_sub(value_to_self_msat))), + #[cfg(debug_assertions)] + counterparty_max_commitment_tx_output: Mutex::new((value_to_self_msat, (channel_value_satoshis * 1000 - 
msg_push_msat).saturating_sub(value_to_self_msat))),
+
+			last_sent_closing_fee: None,
+			pending_counterparty_closing_signed: None,
+			expecting_peer_commitment_signed: false,
+			closing_fee_limits: None,
+			target_closing_feerate_sats_per_kw: None,
+
+			funding_tx_confirmed_in: None,
+			funding_tx_confirmation_height: 0,
+			short_channel_id: None,
+			channel_creation_height: current_chain_height,
+
+			feerate_per_kw: open_channel_fields.commitment_feerate_sat_per_1000_weight,
+			channel_value_satoshis,
+			counterparty_dust_limit_satoshis: open_channel_fields.dust_limit_satoshis,
+			holder_dust_limit_satoshis: MIN_CHAN_DUST_LIMIT_SATOSHIS,
+			counterparty_max_htlc_value_in_flight_msat: cmp::min(open_channel_fields.max_htlc_value_in_flight_msat, channel_value_satoshis * 1000),
+			holder_max_htlc_value_in_flight_msat: get_holder_max_htlc_value_in_flight_msat(channel_value_satoshis, &config.channel_handshake_config),
+			counterparty_selected_channel_reserve_satoshis: Some(msg_channel_reserve_satoshis),
+			holder_selected_channel_reserve_satoshis,
+			counterparty_htlc_minimum_msat: open_channel_fields.htlc_minimum_msat,
+			holder_htlc_minimum_msat: if config.channel_handshake_config.our_htlc_minimum_msat == 0 { 1 } else { config.channel_handshake_config.our_htlc_minimum_msat },
+			counterparty_max_accepted_htlcs: open_channel_fields.max_accepted_htlcs,
+			holder_max_accepted_htlcs: cmp::min(config.channel_handshake_config.our_max_accepted_htlcs, MAX_HTLCS),
+			minimum_depth,
+
+			counterparty_forwarding_info: None,
+
+			channel_transaction_parameters: ChannelTransactionParameters {
+				holder_pubkeys: pubkeys,
+				holder_selected_contest_delay: config.channel_handshake_config.our_to_self_delay,
+				is_outbound_from_holder: false,
+				counterparty_parameters: Some(CounterpartyChannelTransactionParameters {
+					selected_contest_delay: open_channel_fields.to_self_delay,
+					pubkeys: counterparty_pubkeys,
+				}),
+				funding_outpoint: None,
+				channel_type_features: channel_type.clone()
+			},
+			funding_transaction: None,
+			is_batch_funding: None,
+
+			counterparty_cur_commitment_point: Some(open_channel_fields.first_per_commitment_point),
+			counterparty_prev_commitment_point: None,
+			counterparty_node_id,
+
+			counterparty_shutdown_scriptpubkey,
+
+			commitment_secrets: CounterpartyCommitmentSecrets::new(),
+
+			channel_update_status: ChannelUpdateStatus::Enabled,
+			closing_signed_in_flight: false,
+
+			announcement_sigs: None,
+
+			#[cfg(any(test, fuzzing))]
+			next_local_commitment_tx_fee_info_cached: Mutex::new(None),
+			#[cfg(any(test, fuzzing))]
+			next_remote_commitment_tx_fee_info_cached: Mutex::new(None),
+
+			workaround_lnd_bug_4006: None,
+			sent_message_awaiting_response: None,
+
+			latest_inbound_scid_alias: None,
+			outbound_scid_alias: 0,
+
+			channel_pending_event_emitted: false,
+			channel_ready_event_emitted: false,
+
+			#[cfg(any(test, fuzzing))]
+			historical_inbound_htlc_fulfills: new_hash_set(),
+
+			channel_type,
+			channel_keys_id,
+
+			local_initiated_shutdown: None,
+
+			blocked_monitor_updates: Vec::new(),
+		};
+
+		Ok(channel_context)
+	}
+
+	fn new_for_outbound_channel<'a, ES: Deref, F: Deref, L: Deref>(
+		fee_estimator: &'a LowerBoundedFeeEstimator<F>,
+		entropy_source: &'a ES,
+		signer_provider: &'a SP,
+		counterparty_node_id: PublicKey,
+		their_features: &'a InitFeatures,
+		funding_satoshis: u64,
+		push_msat: u64,
+		user_id: u128,
+		config: &'a UserConfig,
+		current_chain_height: u32,
+		outbound_scid_alias: u64,
+		temporary_channel_id: Option<ChannelId>,
+		holder_selected_channel_reserve_satoshis: u64,
+		channel_keys_id: [u8; 32],
+		holder_signer: <SP::Target as SignerProvider>::EcdsaSigner,
+		pubkeys: ChannelPublicKeys,
+		_logger: L,
+	) -> Result<ChannelContext<SP>, APIError>
+		where
+			ES::Target: EntropySource,
+			F::Target: FeeEstimator,
+			SP::Target: SignerProvider,
+			L::Target: Logger,
+	{
+		// This will be updated with the counterparty contribution if this is a dual-funded channel
+		let channel_value_satoshis = funding_satoshis;
+
+		let holder_selected_contest_delay = config.channel_handshake_config.our_to_self_delay;
+
+		if !their_features.supports_wumbo() && channel_value_satoshis > MAX_FUNDING_SATOSHIS_NO_WUMBO {
+			return Err(APIError::APIMisuseError{err: format!("funding_value must not exceed {}, it was {}", MAX_FUNDING_SATOSHIS_NO_WUMBO, channel_value_satoshis)});
+		}
+		if channel_value_satoshis >= TOTAL_BITCOIN_SUPPLY_SATOSHIS {
+			return Err(APIError::APIMisuseError{err: format!("funding_value must be smaller than the total bitcoin supply, it was {}", channel_value_satoshis)});
+		}
+		let channel_value_msat = channel_value_satoshis * 1000;
+		if push_msat > channel_value_msat {
+			return Err(APIError::APIMisuseError { err: format!("Push value ({}) was larger than channel_value ({})", push_msat, channel_value_msat) });
+		}
+		if holder_selected_contest_delay < BREAKDOWN_TIMEOUT {
+			return Err(APIError::APIMisuseError {err: format!("Configured with an unreasonable our_to_self_delay ({}) putting user funds at risk", holder_selected_contest_delay)});
+		}
+
+		let channel_type = get_initial_channel_type(&config, their_features);
+		debug_assert!(channel_type.is_subset(&channelmanager::provided_channel_type_features(&config)));
+
+		let (commitment_conf_target, anchor_outputs_value_msat)  = if channel_type.supports_anchors_zero_fee_htlc_tx() {
+			(ConfirmationTarget::AnchorChannelFee, ANCHOR_OUTPUT_VALUE_SATOSHI * 2 * 1000)
+		} else {
+			(ConfirmationTarget::NonAnchorChannelFee, 0)
+		};
+		let commitment_feerate = fee_estimator.bounded_sat_per_1000_weight(commitment_conf_target);
+
+		let value_to_self_msat = channel_value_satoshis * 1000 - push_msat;
+		let commitment_tx_fee = commit_tx_fee_msat(commitment_feerate, MIN_AFFORDABLE_HTLC_COUNT, &channel_type);
+		if value_to_self_msat.saturating_sub(anchor_outputs_value_msat) < commitment_tx_fee {
+			return Err(APIError::APIMisuseError{ err: format!("Funding amount ({}) can't even pay the initial commitment transaction fee of {}.", value_to_self_msat / 1000, commitment_tx_fee / 1000) });
+		}
+
+		let mut secp_ctx = Secp256k1::new();
+		secp_ctx.seeded_randomize(&entropy_source.get_secure_random_bytes());
+
+		let shutdown_scriptpubkey = if config.channel_handshake_config.commit_upfront_shutdown_pubkey {
+			match signer_provider.get_shutdown_scriptpubkey() {
+				Ok(scriptpubkey) => Some(scriptpubkey),
+				Err(_) => return Err(APIError::ChannelUnavailable { err: "Failed to get shutdown scriptpubkey".to_owned()}),
+			}
+		} else { None };
+
+		if let Some(shutdown_scriptpubkey) = &shutdown_scriptpubkey {
+			if !shutdown_scriptpubkey.is_compatible(&their_features) {
+				return Err(APIError::IncompatibleShutdownScript { script: shutdown_scriptpubkey.clone() });
+			}
+		}
+
+		let destination_script = match signer_provider.get_destination_script(channel_keys_id) {
+			Ok(script) => script,
+			Err(_) => return Err(APIError::ChannelUnavailable { err: "Failed to get destination script".to_owned()}),
+		};
+
+		let temporary_channel_id = temporary_channel_id.unwrap_or_else(|| ChannelId::temporary_from_entropy_source(entropy_source));
+
+		let holder_signer = ChannelSignerType::Ecdsa(holder_signer);
+		let holder_commitment_point =
HolderCommitmentPoint::new(&holder_signer, &secp_ctx); + + Ok(Self { + user_id, + + config: LegacyChannelConfig { + options: config.channel_config.clone(), + announced_channel: config.channel_handshake_config.announced_channel, + commit_upfront_shutdown_pubkey: config.channel_handshake_config.commit_upfront_shutdown_pubkey, + }, + + prev_config: None, + + inbound_handshake_limits_override: Some(config.channel_handshake_limits.clone()), + + channel_id: temporary_channel_id, + temporary_channel_id: Some(temporary_channel_id), + channel_state: ChannelState::NegotiatingFunding(NegotiatingFundingFlags::OUR_INIT_SENT), + announcement_sigs_state: AnnouncementSigsState::NotSent, + secp_ctx, + // We'll add our counterparty's `funding_satoshis` when we receive `accept_channel2`. + channel_value_satoshis, + + latest_monitor_update_id: 0, + + holder_signer, + shutdown_scriptpubkey, + destination_script, + + holder_commitment_point, + cur_counterparty_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER, + value_to_self_msat, + + pending_inbound_htlcs: Vec::new(), + pending_outbound_htlcs: Vec::new(), + holding_cell_htlc_updates: Vec::new(), + pending_update_fee: None, + holding_cell_update_fee: None, + next_holder_htlc_id: 0, + next_counterparty_htlc_id: 0, + update_time_counter: 1, + + resend_order: RAACommitmentOrder::CommitmentFirst, + + monitor_pending_channel_ready: false, + monitor_pending_revoke_and_ack: false, + monitor_pending_commitment_signed: false, + monitor_pending_forwards: Vec::new(), + monitor_pending_failures: Vec::new(), + monitor_pending_finalized_fulfills: Vec::new(), + monitor_pending_update_adds: Vec::new(), + + signer_pending_revoke_and_ack: false, + signer_pending_commitment_update: false, + signer_pending_funding: false, + + // We'll add our counterparty's `funding_satoshis` to these max commitment output assertions + // when we receive `accept_channel2`. + #[cfg(debug_assertions)] + holder_max_commitment_tx_output: Mutex::new((channel_value_satoshis * 1000 - push_msat, push_msat)), + #[cfg(debug_assertions)] + counterparty_max_commitment_tx_output: Mutex::new((channel_value_satoshis * 1000 - push_msat, push_msat)), + + last_sent_closing_fee: None, + pending_counterparty_closing_signed: None, + expecting_peer_commitment_signed: false, + closing_fee_limits: None, + target_closing_feerate_sats_per_kw: None, + + funding_tx_confirmed_in: None, + funding_tx_confirmation_height: 0, + short_channel_id: None, + channel_creation_height: current_chain_height, + + feerate_per_kw: commitment_feerate, + counterparty_dust_limit_satoshis: 0, + holder_dust_limit_satoshis: MIN_CHAN_DUST_LIMIT_SATOSHIS, + counterparty_max_htlc_value_in_flight_msat: 0, + // We'll adjust this to include our counterparty's `funding_satoshis` when we + // receive `accept_channel2`. 
+ holder_max_htlc_value_in_flight_msat: get_holder_max_htlc_value_in_flight_msat(channel_value_satoshis, &config.channel_handshake_config), + counterparty_selected_channel_reserve_satoshis: None, // Filled in in accept_channel + holder_selected_channel_reserve_satoshis, + counterparty_htlc_minimum_msat: 0, + holder_htlc_minimum_msat: if config.channel_handshake_config.our_htlc_minimum_msat == 0 { 1 } else { config.channel_handshake_config.our_htlc_minimum_msat }, + counterparty_max_accepted_htlcs: 0, + holder_max_accepted_htlcs: cmp::min(config.channel_handshake_config.our_max_accepted_htlcs, MAX_HTLCS), + minimum_depth: None, // Filled in in accept_channel + + counterparty_forwarding_info: None, + + channel_transaction_parameters: ChannelTransactionParameters { + holder_pubkeys: pubkeys, + holder_selected_contest_delay: config.channel_handshake_config.our_to_self_delay, + is_outbound_from_holder: true, + counterparty_parameters: None, + funding_outpoint: None, + channel_type_features: channel_type.clone() + }, + funding_transaction: None, + is_batch_funding: None, + + counterparty_cur_commitment_point: None, + counterparty_prev_commitment_point: None, + counterparty_node_id, + + counterparty_shutdown_scriptpubkey: None, + + commitment_secrets: CounterpartyCommitmentSecrets::new(), + + channel_update_status: ChannelUpdateStatus::Enabled, + closing_signed_in_flight: false, + + announcement_sigs: None, + + #[cfg(any(test, fuzzing))] + next_local_commitment_tx_fee_info_cached: Mutex::new(None), + #[cfg(any(test, fuzzing))] + next_remote_commitment_tx_fee_info_cached: Mutex::new(None), + + workaround_lnd_bug_4006: None, + sent_message_awaiting_response: None, + + latest_inbound_scid_alias: None, + outbound_scid_alias, + + channel_pending_event_emitted: false, + channel_ready_event_emitted: false, + + #[cfg(any(test, fuzzing))] + historical_inbound_htlc_fulfills: new_hash_set(), + + channel_type, + channel_keys_id, + + blocked_monitor_updates: Vec::new(), + local_initiated_shutdown: None, + }) + } + + /// Allowed in any state (including after shutdown) + pub fn get_update_time_counter(&self) -> u32 { + self.update_time_counter + } + + pub fn get_latest_monitor_update_id(&self) -> u64 { + self.latest_monitor_update_id + } + + pub fn should_announce(&self) -> bool { + self.config.announced_channel + } + + pub fn is_outbound(&self) -> bool { + self.channel_transaction_parameters.is_outbound_from_holder + } + + /// Gets the fee we'd want to charge for adding an HTLC output to this Channel + /// Allowed in any state (including after shutdown) + pub fn get_outbound_forwarding_fee_base_msat(&self) -> u32 { + self.config.options.forwarding_fee_base_msat + } + + /// Returns true if we've ever received a message from the remote end for this Channel + pub fn have_received_message(&self) -> bool { + self.channel_state > ChannelState::NegotiatingFunding(NegotiatingFundingFlags::OUR_INIT_SENT) + } + + /// Returns true if this channel is fully established and not known to be closing. 
+	/// Allowed in any state (including after shutdown)
+	pub fn is_usable(&self) -> bool {
+		matches!(self.channel_state, ChannelState::ChannelReady(_)) &&
+			!self.channel_state.is_local_shutdown_sent() &&
+			!self.channel_state.is_remote_shutdown_sent() &&
+			!self.monitor_pending_channel_ready
+	}
+
+	/// shutdown state returns the state of the channel in its various stages of shutdown
+	pub fn shutdown_state(&self) -> ChannelShutdownState {
+		match self.channel_state {
+			ChannelState::AwaitingChannelReady(_)|ChannelState::ChannelReady(_) =>
+				if self.channel_state.is_local_shutdown_sent() && !self.channel_state.is_remote_shutdown_sent() {
+					ChannelShutdownState::ShutdownInitiated
+				} else if (self.channel_state.is_local_shutdown_sent() || self.channel_state.is_remote_shutdown_sent()) && !self.closing_negotiation_ready() {
+					ChannelShutdownState::ResolvingHTLCs
+				} else if (self.channel_state.is_local_shutdown_sent() || self.channel_state.is_remote_shutdown_sent()) && self.closing_negotiation_ready() {
+					ChannelShutdownState::NegotiatingClosingFee
+				} else {
+					ChannelShutdownState::NotShuttingDown
+				},
+			ChannelState::ShutdownComplete => ChannelShutdownState::ShutdownComplete,
+			_ => ChannelShutdownState::NotShuttingDown,
+		}
+	}
+
+	fn closing_negotiation_ready(&self) -> bool {
+		let is_ready_to_close = match self.channel_state {
+			ChannelState::AwaitingChannelReady(flags) =>
+				flags & FundedStateFlags::ALL == FundedStateFlags::LOCAL_SHUTDOWN_SENT | FundedStateFlags::REMOTE_SHUTDOWN_SENT,
+			ChannelState::ChannelReady(flags) =>
+				flags == FundedStateFlags::LOCAL_SHUTDOWN_SENT | FundedStateFlags::REMOTE_SHUTDOWN_SENT,
+			_ => false,
+		};
+		self.pending_inbound_htlcs.is_empty() &&
+			self.pending_outbound_htlcs.is_empty() &&
+			self.pending_update_fee.is_none() &&
+			is_ready_to_close
+	}
+
+	/// Returns true if this channel is currently available for use. This is a superset of
+	/// is_usable() and considers things like the channel being temporarily disabled.
+	/// Allowed in any state (including after shutdown)
+	pub fn is_live(&self) -> bool {
+		self.is_usable() && !self.channel_state.is_peer_disconnected()
+	}
+
+	// Public utilities:
+
+	pub fn channel_id(&self) -> ChannelId {
+		self.channel_id
+	}
+
+	// Return the `temporary_channel_id` used during channel establishment.
+	//
+	// Will return `None` for channels created prior to LDK version 0.0.115.
+	pub fn temporary_channel_id(&self) -> Option<ChannelId> {
+		self.temporary_channel_id
+	}
+
+	pub fn minimum_depth(&self) -> Option<u32> {
+		self.minimum_depth
+	}
+
+	/// Gets the "user_id" value passed into the construction of this channel. It has no special
+	/// meaning and exists only to allow users to have a persistent identifier of a channel.
+	pub fn get_user_id(&self) -> u128 {
+		self.user_id
+	}
+
+	/// Gets the channel's type
 	pub fn get_channel_type(&self) -> &ChannelTypeFeatures {
 		&self.channel_type
 	}
@@ -1095,8 +2168,8 @@ impl<SP: Deref> ChannelContext<SP> where SP::Target: SignerProvider {
 	/// Returns the holder signer for this channel.
 	#[cfg(test)]
-	pub fn get_signer(&self) -> &ChannelSignerType<<SP::Target as SignerProvider>::Signer> {
-		return &self.holder_signer
+	pub fn get_mut_signer(&mut self) -> &mut ChannelSignerType<SP> {
+		return &mut self.holder_signer
 	}
 
 	/// Only allowed immediately after deserialization if get_outbound_scid_alias returns 0,
@@ -1123,26 +2196,163 @@ impl<SP: Deref> ChannelContext<SP> where SP::Target: SignerProvider {
 		}
 	}
 
-	/// Returns the block hash in which our funding transaction was confirmed.
-	pub fn get_funding_tx_confirmed_in(&self) -> Option<BlockHash> {
-		self.funding_tx_confirmed_in
-	}
+	/// Performs checks against necessary constraints after receiving either an `accept_channel` or
+	/// `accept_channel2` message.
+	pub fn do_accept_channel_checks(
+		&mut self, default_limits: &ChannelHandshakeLimits, their_features: &InitFeatures,
+		common_fields: &msgs::CommonAcceptChannelFields, channel_reserve_satoshis: u64,
+	) -> Result<(), ChannelError> {
+		let peer_limits = if let Some(ref limits) = self.inbound_handshake_limits_override { limits } else { default_limits };
 
-	/// Returns the current number of confirmations on the funding transaction.
-	pub fn get_funding_tx_confirmations(&self, height: u32) -> u32 {
-		if self.funding_tx_confirmation_height == 0 {
-			// We either haven't seen any confirmation yet, or observed a reorg.
-			return 0;
+		// Check sanity of message fields:
+		if !self.is_outbound() {
+			return Err(ChannelError::close("Got an accept_channel message from an inbound peer".to_owned()));
+		}
+		if !matches!(self.channel_state, ChannelState::NegotiatingFunding(flags) if flags == NegotiatingFundingFlags::OUR_INIT_SENT) {
+			return Err(ChannelError::close("Got an accept_channel message at a strange time".to_owned()));
+		}
+		if common_fields.dust_limit_satoshis > 21000000 * 100000000 {
+			return Err(ChannelError::close(format!("Peer never wants payout outputs? dust_limit_satoshis was {}", common_fields.dust_limit_satoshis)));
+		}
+		if channel_reserve_satoshis > self.channel_value_satoshis {
+			return Err(ChannelError::close(format!("Bogus channel_reserve_satoshis ({}). Must not be greater than ({})", channel_reserve_satoshis, self.channel_value_satoshis)));
+		}
+		if common_fields.dust_limit_satoshis > self.holder_selected_channel_reserve_satoshis {
+			return Err(ChannelError::close(format!("Dust limit ({}) is bigger than our channel reserve ({})", common_fields.dust_limit_satoshis, self.holder_selected_channel_reserve_satoshis)));
+		}
+		if channel_reserve_satoshis > self.channel_value_satoshis - self.holder_selected_channel_reserve_satoshis {
+			return Err(ChannelError::close(format!("Bogus channel_reserve_satoshis ({}). Must not be greater than channel value minus our reserve ({})",
+				channel_reserve_satoshis, self.channel_value_satoshis - self.holder_selected_channel_reserve_satoshis)));
+		}
+		let full_channel_value_msat = (self.channel_value_satoshis - channel_reserve_satoshis) * 1000;
+		if common_fields.htlc_minimum_msat >= full_channel_value_msat {
+			return Err(ChannelError::close(format!("Minimum htlc value ({}) is full channel value ({})", common_fields.htlc_minimum_msat, full_channel_value_msat)));
+		}
+		let max_delay_acceptable = u16::min(peer_limits.their_to_self_delay, MAX_LOCAL_BREAKDOWN_TIMEOUT);
+		if common_fields.to_self_delay > max_delay_acceptable {
+			return Err(ChannelError::close(format!("They wanted our payments to be delayed by a needlessly long period. Upper limit: {}. Actual: {}", max_delay_acceptable, common_fields.to_self_delay)));
+		}
+		if common_fields.max_accepted_htlcs < 1 {
+			return Err(ChannelError::close("0 max_accepted_htlcs makes for a useless channel".to_owned()));
+		}
+		if common_fields.max_accepted_htlcs > MAX_HTLCS {
+			return Err(ChannelError::close(format!("max_accepted_htlcs was {}. It must not be larger than {}", common_fields.max_accepted_htlcs, MAX_HTLCS)));
+		}
-		height.checked_sub(self.funding_tx_confirmation_height).map_or(0, |c| c + 1)
-	}
-
-	fn get_holder_selected_contest_delay(&self) -> u16 {
-		self.channel_transaction_parameters.holder_selected_contest_delay
-	}
+		// Now check against optional parameters as set by config...
+		if common_fields.htlc_minimum_msat > peer_limits.max_htlc_minimum_msat {
+			return Err(ChannelError::close(format!("htlc_minimum_msat ({}) is higher than the user specified limit ({})", common_fields.htlc_minimum_msat, peer_limits.max_htlc_minimum_msat)));
+		}
+		if common_fields.max_htlc_value_in_flight_msat < peer_limits.min_max_htlc_value_in_flight_msat {
+			return Err(ChannelError::close(format!("max_htlc_value_in_flight_msat ({}) is less than the user specified limit ({})", common_fields.max_htlc_value_in_flight_msat, peer_limits.min_max_htlc_value_in_flight_msat)));
+		}
+		if channel_reserve_satoshis > peer_limits.max_channel_reserve_satoshis {
+			return Err(ChannelError::close(format!("channel_reserve_satoshis ({}) is higher than the user specified limit ({})", channel_reserve_satoshis, peer_limits.max_channel_reserve_satoshis)));
+		}
+		if common_fields.max_accepted_htlcs < peer_limits.min_max_accepted_htlcs {
+			return Err(ChannelError::close(format!("max_accepted_htlcs ({}) is less than the user specified limit ({})", common_fields.max_accepted_htlcs, peer_limits.min_max_accepted_htlcs)));
+		}
+		if common_fields.dust_limit_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
+			return Err(ChannelError::close(format!("dust_limit_satoshis ({}) is less than the implementation limit ({})", common_fields.dust_limit_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS)));
+		}
+		if common_fields.dust_limit_satoshis > MAX_CHAN_DUST_LIMIT_SATOSHIS {
+			return Err(ChannelError::close(format!("dust_limit_satoshis ({}) is greater than the implementation limit ({})", common_fields.dust_limit_satoshis, MAX_CHAN_DUST_LIMIT_SATOSHIS)));
+		}
+		if common_fields.minimum_depth > peer_limits.max_minimum_depth {
+			return Err(ChannelError::close(format!("We consider the minimum depth to be unreasonably large. Expected minimum: ({}). Actual: ({})", peer_limits.max_minimum_depth, common_fields.minimum_depth)));
+		}
-
-	fn get_holder_pubkeys(&self) -> &ChannelPublicKeys {
+		if let Some(ty) = &common_fields.channel_type {
+			if *ty != self.channel_type {
+				return Err(ChannelError::close("Channel Type in accept_channel didn't match the one sent in open_channel.".to_owned()));
+			}
+		} else if their_features.supports_channel_type() {
+			// Assume they've accepted the channel type as they said they understand it.
+		} else {
+			let channel_type = ChannelTypeFeatures::from_init(&their_features);
+			if channel_type != ChannelTypeFeatures::only_static_remote_key() {
+				return Err(ChannelError::close("Only static_remote_key is supported for non-negotiated channel types".to_owned()));
+			}
+			self.channel_type = channel_type.clone();
+			self.channel_transaction_parameters.channel_type_features = channel_type;
+		}
+
+		let counterparty_shutdown_scriptpubkey = if their_features.supports_upfront_shutdown_script() {
+			match &common_fields.shutdown_scriptpubkey {
+				&Some(ref script) => {
+					// Peer is signaling upfront_shutdown and has opted out with a 0-length script. We don't enforce anything
+					if script.len() == 0 {
+						None
+					} else {
+						if !script::is_bolt2_compliant(&script, their_features) {
+							return Err(ChannelError::close(format!("Peer is signaling upfront_shutdown but has provided an unacceptable scriptpubkey format: {}", script)));
+						}
+						Some(script.clone())
+					}
+				},
+				// Peer is signaling upfront shutdown but didn't opt out with the correct mechanism (a 0-length script). Peer looks buggy, we fail the channel
+				&None => {
+					return Err(ChannelError::close("Peer is signaling upfront_shutdown but we don't get any script. Use 0-length script to opt-out".to_owned()));
+				}
+			}
+		} else { None };
+
+		self.counterparty_dust_limit_satoshis = common_fields.dust_limit_satoshis;
+		self.counterparty_max_htlc_value_in_flight_msat = cmp::min(common_fields.max_htlc_value_in_flight_msat, self.channel_value_satoshis * 1000);
+		self.counterparty_selected_channel_reserve_satoshis = Some(channel_reserve_satoshis);
+		self.counterparty_htlc_minimum_msat = common_fields.htlc_minimum_msat;
+		self.counterparty_max_accepted_htlcs = common_fields.max_accepted_htlcs;
+
+		if peer_limits.trust_own_funding_0conf {
+			self.minimum_depth = Some(common_fields.minimum_depth);
+		} else {
+			self.minimum_depth = Some(cmp::max(1, common_fields.minimum_depth));
+		}
+
+		let counterparty_pubkeys = ChannelPublicKeys {
+			funding_pubkey: common_fields.funding_pubkey,
+			revocation_basepoint: RevocationBasepoint::from(common_fields.revocation_basepoint),
+			payment_point: common_fields.payment_basepoint,
+			delayed_payment_basepoint: DelayedPaymentBasepoint::from(common_fields.delayed_payment_basepoint),
+			htlc_basepoint: HtlcBasepoint::from(common_fields.htlc_basepoint)
+		};
+
+		self.channel_transaction_parameters.counterparty_parameters = Some(CounterpartyChannelTransactionParameters {
+			selected_contest_delay: common_fields.to_self_delay,
+			pubkeys: counterparty_pubkeys,
+		});
+
+		self.counterparty_cur_commitment_point = Some(common_fields.first_per_commitment_point);
+		self.counterparty_shutdown_scriptpubkey = counterparty_shutdown_scriptpubkey;
+
+		self.channel_state = ChannelState::NegotiatingFunding(
+			NegotiatingFundingFlags::OUR_INIT_SENT | NegotiatingFundingFlags::THEIR_INIT_SENT
+		);
+		self.inbound_handshake_limits_override = None; // We're done enforcing limits on our peer's handshake now.
+
+		Ok(())
+	}
+
+	/// Returns the block hash in which our funding transaction was confirmed.
+	pub fn get_funding_tx_confirmed_in(&self) -> Option<BlockHash> {
+		self.funding_tx_confirmed_in
+	}
+
+	/// Returns the current number of confirmations on the funding transaction.
+	pub fn get_funding_tx_confirmations(&self, height: u32) -> u32 {
+		if self.funding_tx_confirmation_height == 0 {
+			// We either haven't seen any confirmation yet, or observed a reorg.
+			return 0;
+		}
+
+		height.checked_sub(self.funding_tx_confirmation_height).map_or(0, |c| c + 1)
+	}
+
+	fn get_holder_selected_contest_delay(&self) -> u16 {
+		self.channel_transaction_parameters.holder_selected_contest_delay
+	}
+
+	fn get_holder_pubkeys(&self) -> &ChannelPublicKeys {
 		&self.channel_transaction_parameters.holder_pubkeys
 	}
@@ -1214,15 +2424,16 @@ impl<SP: Deref> ChannelContext<SP> where SP::Target: SignerProvider {
 		cmp::max(self.config.options.cltv_expiry_delta, MIN_CLTV_EXPIRY_DELTA)
 	}
 
-	pub fn get_max_dust_htlc_exposure_msat<F: Deref>(&self,
-		fee_estimator: &LowerBoundedFeeEstimator<F>) -> u64
-	where F::Target: FeeEstimator
-	{
+	fn get_dust_exposure_limiting_feerate<F: Deref>(&self,
+		fee_estimator: &LowerBoundedFeeEstimator<F>,
+	) -> u32 where F::Target: FeeEstimator {
+		fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::OnChainSweep)
+	}
+
+	pub fn get_max_dust_htlc_exposure_msat(&self, limiting_feerate_sat_per_kw: u32) -> u64 {
 		match self.config.options.max_dust_htlc_exposure {
 			MaxDustHTLCExposure::FeeRateMultiplier(multiplier) => {
-				let feerate_per_kw = fee_estimator.bounded_sat_per_1000_weight(
-					ConfirmationTarget::OnChainSweep) as u64;
-				feerate_per_kw.saturating_mul(multiplier)
+				(limiting_feerate_sat_per_kw as u64).saturating_mul(multiplier)
 			},
 			MaxDustHTLCExposure::FixedLimitMsat(limit) => limit,
 		}
@@ -1297,8 +2508,8 @@ impl<SP: Deref> ChannelContext<SP> where SP::Target: SignerProvider {
 	/// Returns true if funding_signed was sent/received and the
 	/// funding transaction has been broadcast if necessary.
 	pub fn is_funding_broadcast(&self) -> bool {
-		self.channel_state & !STATE_FLAGS >= ChannelState::FundingSent as u32 &&
-			self.channel_state & ChannelState::WaitingForBatch as u32 == 0
+		!self.channel_state.is_pre_funded_state() &&
+			!matches!(self.channel_state, ChannelState::AwaitingChannelReady(flags) if flags.is_set(AwaitingChannelReadyFlags::WAITING_FOR_BATCH))
 	}
 
 	/// Transaction nomenclature is somewhat confusing here as there are many different cases - a
@@ -1392,6 +2603,8 @@ impl<SP: Deref> ChannelContext<SP> where SP::Target: SignerProvider {
 			}
 		}
 
+		let mut inbound_htlc_preimages: Vec<PaymentPreimage> = Vec::new();
+
 		for ref htlc in self.pending_inbound_htlcs.iter() {
 			let (include, state_name) = match htlc.state {
 				InboundHTLCState::RemoteAnnounced(_) => (!generated_by_local, "RemoteAnnounced"),
@@ -1409,7 +2622,8 @@ impl<SP: Deref> ChannelContext<SP> where SP::Target: SignerProvider {
 			match &htlc.state {
 				&InboundHTLCState::LocalRemoved(ref reason) => {
 					if generated_by_local {
-						if let &InboundHTLCRemovalReason::Fulfill(_) = reason {
+						if let &InboundHTLCRemovalReason::Fulfill(preimage) = reason {
+							inbound_htlc_preimages.push(preimage);
 							value_to_self_msat_offset += htlc.amount_msat as i64;
 						}
 					}
@@ -1419,7 +2633,8 @@ impl<SP: Deref> ChannelContext<SP> where SP::Target: SignerProvider {
 			}
 		}
 
-		let mut preimages: Vec<PaymentPreimage> = Vec::new();
+
+		let mut outbound_htlc_preimages: Vec<PaymentPreimage> = Vec::new();
 
 		for ref htlc in self.pending_outbound_htlcs.iter() {
 			let (include, state_name) = match htlc.state {
@@ -1438,7 +2653,7 @@ impl<SP: Deref> ChannelContext<SP> where SP::Target: SignerProvider {
 			};
 
 			if let Some(preimage) = preimage_opt {
-				preimages.push(preimage);
+				outbound_htlc_preimages.push(preimage);
 			}
 
 			if include {
@@ -1460,13 +2675,13 @@ impl<SP: Deref> ChannelContext<SP> where SP::Target: SignerProvider {
 			}
 		}
 
-		let mut value_to_self_msat: i64 = (self.value_to_self_msat - local_htlc_total_msat) as i64 + value_to_self_msat_offset;
+		let value_to_self_msat: i64 = (self.value_to_self_msat - local_htlc_total_msat) as i64 + value_to_self_msat_offset;
 		assert!(value_to_self_msat >= 0);
 		// Note that in case they have several just-awaiting-last-RAA fulfills in-progress (ie
		// AwaitingRemoteRevokeToRemove or AwaitingRemovedRemoteRevoke) we may have allowed them to
		// "violate" their reserve value by counting those against it. Thus, we have to convert
		// everything to i64 before subtracting as otherwise we can overflow.
-		let mut value_to_remote_msat: i64 = (self.channel_value_satoshis * 1000) as i64 - (self.value_to_self_msat as i64) - (remote_htlc_total_msat as i64) - value_to_self_msat_offset;
+		let value_to_remote_msat: i64 = (self.channel_value_satoshis * 1000) as i64 - (self.value_to_self_msat as i64) - (remote_htlc_total_msat as i64) - value_to_self_msat_offset;
 		assert!(value_to_remote_msat >= 0);
 
 		#[cfg(debug_assertions)]
@@ -1532,10 +2747,6 @@ impl<SP: Deref> ChannelContext<SP> where SP::Target: SignerProvider {
 		htlcs_included.sort_unstable_by_key(|h| h.0.transaction_output_index.unwrap());
 		htlcs_included.append(&mut included_dust_htlcs);
 
-		// For the stats, trimmed-to-0 the value in msats accordingly
-		value_to_self_msat = if (value_to_self_msat * 1000) < broadcaster_dust_limit_satoshis as i64 { 0 } else { value_to_self_msat };
-		value_to_remote_msat = if (value_to_remote_msat * 1000) < broadcaster_dust_limit_satoshis as i64 { 0 } else { value_to_remote_msat };
-
 		CommitmentStats {
 			tx,
 			feerate_per_kw,
@@ -1544,7 +2755,8 @@ impl<SP: Deref> ChannelContext<SP> where SP::Target: SignerProvider {
 			htlcs_included,
 			local_balance_msat: value_to_self_msat as u64,
 			remote_balance_msat: value_to_remote_msat as u64,
-			preimages
+			inbound_htlc_preimages,
+			outbound_htlc_preimages,
 		}
 	}
 
@@ -1554,8 +2766,8 @@ impl<SP: Deref> ChannelContext<SP> where SP::Target: SignerProvider {
 	/// our counterparty!)
 	/// The result is a transaction which we can revoke broadcastership of (ie a "local" transaction)
 	/// TODO Some magic rust shit to compile-time check this?
-	fn build_holder_transaction_keys(&self, commitment_number: u64) -> TxCreationKeys {
-		let per_commitment_point = self.holder_signer.as_ref().get_per_commitment_point(commitment_number, &self.secp_ctx);
+	fn build_holder_transaction_keys(&self) -> TxCreationKeys {
+		let per_commitment_point = self.holder_commitment_point.current_point();
 		let delayed_payment_base = &self.get_holder_pubkeys().delayed_payment_basepoint;
 		let htlc_basepoint = &self.get_holder_pubkeys().htlc_basepoint;
 		let counterparty_pubkeys = self.get_counterparty_pubkeys();
@@ -1605,7 +2817,8 @@ impl<SP: Deref> ChannelContext<SP> where SP::Target: SignerProvider {
 		if let Some(feerate) = outbound_feerate_update {
 			feerate_per_kw = cmp::max(feerate_per_kw, feerate);
 		}
-		cmp::max(2530, feerate_per_kw * 1250 / 1000)
+		let feerate_plus_quarter = feerate_per_kw.checked_mul(1250).map(|v| v / 1000);
+		cmp::max(feerate_per_kw.saturating_add(2530), feerate_plus_quarter.unwrap_or(u32::MAX))
 	}
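The replacement buffer above takes the larger of a fixed 2530 sat/kW bump and a 25% proportional bump, so the fixed term dominates at low feerates and the proportional term at high ones, while checked_mul guards the multiplication against overflow. A standalone sketch of the same arithmetic, with illustrative feerates rather than values from this diff:

fn dust_buffer_feerate(feerate_per_kw: u32) -> u32 {
	// 2530 sat/kW above the current feerate, or 25% above it, whichever is larger;
	// `checked_mul` protects against overflow on pathologically large feerates.
	let feerate_plus_quarter = feerate_per_kw.checked_mul(1250).map(|v| v / 1000);
	std::cmp::max(feerate_per_kw.saturating_add(2530), feerate_plus_quarter.unwrap_or(u32::MAX))
}

fn main() {
	assert_eq!(dust_buffer_feerate(1_000), 3_530);   // low feerate: the +2530 floor dominates
	assert_eq!(dust_buffer_feerate(50_000), 62_500); // high feerate: the 25% buffer dominates
}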
 	/// Get forwarding information for the counterparty.
@@ -1613,86 +2826,204 @@ impl<SP: Deref> ChannelContext<SP> where SP::Target: SignerProvider {
 		self.counterparty_forwarding_info.clone()
 	}
 
-	/// Returns a HTLCStats about inbound pending htlcs
-	fn get_inbound_pending_htlc_stats(&self, outbound_feerate_update: Option<u32>) -> HTLCStats {
+	/// Returns a HTLCStats about pending htlcs
+	fn get_pending_htlc_stats(&self, outbound_feerate_update: Option<u32>, dust_exposure_limiting_feerate: u32) -> HTLCStats {
 		let context = self;
-		let mut stats = HTLCStats {
-			pending_htlcs: context.pending_inbound_htlcs.len() as u32,
-			pending_htlcs_value_msat: 0,
-			on_counterparty_tx_dust_exposure_msat: 0,
-			on_holder_tx_dust_exposure_msat: 0,
-			holding_cell_msat: 0,
-			on_holder_tx_holding_cell_htlcs_count: 0,
-		};
+		let uses_0_htlc_fee_anchors = self.get_channel_type().supports_anchors_zero_fee_htlc_tx();
 
-		let (htlc_timeout_dust_limit, htlc_success_dust_limit) = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
+		let dust_buffer_feerate = context.get_dust_buffer_feerate(outbound_feerate_update);
+		let (htlc_timeout_dust_limit, htlc_success_dust_limit) = if uses_0_htlc_fee_anchors {
 			(0, 0)
 		} else {
-			let dust_buffer_feerate = context.get_dust_buffer_feerate(outbound_feerate_update) as u64;
-			(dust_buffer_feerate * htlc_timeout_tx_weight(context.get_channel_type()) / 1000,
-				dust_buffer_feerate * htlc_success_tx_weight(context.get_channel_type()) / 1000)
+			(dust_buffer_feerate as u64 * htlc_timeout_tx_weight(context.get_channel_type()) / 1000,
+				dust_buffer_feerate as u64 * htlc_success_tx_weight(context.get_channel_type()) / 1000)
 		};
-		let counterparty_dust_limit_timeout_sat = htlc_timeout_dust_limit + context.counterparty_dust_limit_satoshis;
-		let holder_dust_limit_success_sat = htlc_success_dust_limit + context.holder_dust_limit_satoshis;
-		for ref htlc in context.pending_inbound_htlcs.iter() {
-			stats.pending_htlcs_value_msat += htlc.amount_msat;
-			if htlc.amount_msat / 1000 < counterparty_dust_limit_timeout_sat {
-				stats.on_counterparty_tx_dust_exposure_msat += htlc.amount_msat;
-			}
-			if htlc.amount_msat / 1000 < holder_dust_limit_success_sat {
-				stats.on_holder_tx_dust_exposure_msat += htlc.amount_msat;
+
+		let mut on_holder_tx_dust_exposure_msat = 0;
+		let mut on_counterparty_tx_dust_exposure_msat = 0;
+
+		let mut on_counterparty_tx_offered_nondust_htlcs = 0;
+		let mut on_counterparty_tx_accepted_nondust_htlcs = 0;
+
+		let mut pending_inbound_htlcs_value_msat = 0;
+
+		{
+			let counterparty_dust_limit_timeout_sat = htlc_timeout_dust_limit + context.counterparty_dust_limit_satoshis;
+			let holder_dust_limit_success_sat = htlc_success_dust_limit + context.holder_dust_limit_satoshis;
+			for ref htlc in context.pending_inbound_htlcs.iter() {
+				pending_inbound_htlcs_value_msat += htlc.amount_msat;
+				if htlc.amount_msat / 1000 < counterparty_dust_limit_timeout_sat {
+					on_counterparty_tx_dust_exposure_msat += htlc.amount_msat;
+				} else {
+					on_counterparty_tx_offered_nondust_htlcs += 1;
+				}
+				if htlc.amount_msat / 1000 < holder_dust_limit_success_sat {
+					on_holder_tx_dust_exposure_msat += htlc.amount_msat;
+				}
 			}
 		}
-		stats
-	}
 
-	/// Returns a HTLCStats about pending outbound htlcs, *including* pending adds in our holding cell.
-	fn get_outbound_pending_htlc_stats(&self, outbound_feerate_update: Option<u32>) -> HTLCStats {
-		let context = self;
-		let mut stats = HTLCStats {
-			pending_htlcs: context.pending_outbound_htlcs.len() as u32,
-			pending_htlcs_value_msat: 0,
-			on_counterparty_tx_dust_exposure_msat: 0,
-			on_holder_tx_dust_exposure_msat: 0,
-			holding_cell_msat: 0,
-			on_holder_tx_holding_cell_htlcs_count: 0,
-		};
+		let mut pending_outbound_htlcs_value_msat = 0;
+		let mut outbound_holding_cell_msat = 0;
+		let mut on_holder_tx_outbound_holding_cell_htlcs_count = 0;
+		let mut pending_outbound_htlcs = self.pending_outbound_htlcs.len();
+		{
+			let counterparty_dust_limit_success_sat = htlc_success_dust_limit + context.counterparty_dust_limit_satoshis;
+			let holder_dust_limit_timeout_sat = htlc_timeout_dust_limit + context.holder_dust_limit_satoshis;
+			for ref htlc in context.pending_outbound_htlcs.iter() {
+				pending_outbound_htlcs_value_msat += htlc.amount_msat;
+				if htlc.amount_msat / 1000 < counterparty_dust_limit_success_sat {
+					on_counterparty_tx_dust_exposure_msat += htlc.amount_msat;
+				} else {
+					on_counterparty_tx_accepted_nondust_htlcs += 1;
+				}
+				if htlc.amount_msat / 1000 < holder_dust_limit_timeout_sat {
+					on_holder_tx_dust_exposure_msat += htlc.amount_msat;
+				}
+			}
 
-		let (htlc_timeout_dust_limit, htlc_success_dust_limit) = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
-			(0, 0)
+			for update in context.holding_cell_htlc_updates.iter() {
+				if let &HTLCUpdateAwaitingACK::AddHTLC { ref amount_msat, .. } = update {
+					pending_outbound_htlcs += 1;
+					pending_outbound_htlcs_value_msat += amount_msat;
+					outbound_holding_cell_msat += amount_msat;
+					if *amount_msat / 1000 < counterparty_dust_limit_success_sat {
+						on_counterparty_tx_dust_exposure_msat += amount_msat;
+					} else {
+						on_counterparty_tx_accepted_nondust_htlcs += 1;
+					}
+					if *amount_msat / 1000 < holder_dust_limit_timeout_sat {
+						on_holder_tx_dust_exposure_msat += amount_msat;
+					} else {
+						on_holder_tx_outbound_holding_cell_htlcs_count += 1;
+					}
+				}
+			}
+		}
+
+		// Include any mining "excess" fees in the dust calculation
+		let excess_feerate_opt = outbound_feerate_update
+			.or(self.pending_update_fee.map(|(fee, _)| fee))
+			.unwrap_or(self.feerate_per_kw)
+			.checked_sub(dust_exposure_limiting_feerate);
+		if let Some(excess_feerate) = excess_feerate_opt {
+			let on_counterparty_tx_nondust_htlcs =
+				on_counterparty_tx_accepted_nondust_htlcs + on_counterparty_tx_offered_nondust_htlcs;
+			on_counterparty_tx_dust_exposure_msat +=
+				commit_tx_fee_msat(excess_feerate, on_counterparty_tx_nondust_htlcs, &self.channel_type);
+			if !self.channel_type.supports_anchors_zero_fee_htlc_tx() {
+				on_counterparty_tx_dust_exposure_msat +=
+					on_counterparty_tx_accepted_nondust_htlcs as u64 * htlc_success_tx_weight(&self.channel_type)
+						* excess_feerate as u64 / 1000;
+				on_counterparty_tx_dust_exposure_msat +=
+					on_counterparty_tx_offered_nondust_htlcs as u64 * htlc_timeout_tx_weight(&self.channel_type)
+						* excess_feerate as u64 / 1000;
+			}
+		}
+
+		HTLCStats {
+			pending_inbound_htlcs: self.pending_inbound_htlcs.len(),
+			pending_outbound_htlcs,
+			pending_inbound_htlcs_value_msat,
+			pending_outbound_htlcs_value_msat,
+			on_counterparty_tx_dust_exposure_msat,
+			on_holder_tx_dust_exposure_msat,
+			outbound_holding_cell_msat,
+			on_holder_tx_outbound_holding_cell_htlcs_count,
+		}
+	}
+
+	/// Returns information on all pending inbound HTLCs.
+	pub fn get_pending_inbound_htlc_details(&self) -> Vec<InboundHTLCDetails> {
+		let mut holding_cell_states = new_hash_map();
+		for holding_cell_update in self.holding_cell_htlc_updates.iter() {
+			match holding_cell_update {
+				HTLCUpdateAwaitingACK::ClaimHTLC { htlc_id, .. } => {
+					holding_cell_states.insert(
+						htlc_id,
+						InboundHTLCStateDetails::AwaitingRemoteRevokeToRemoveFulfill,
+					);
+				},
+				HTLCUpdateAwaitingACK::FailHTLC { htlc_id, .. } => {
+					holding_cell_states.insert(
+						htlc_id,
+						InboundHTLCStateDetails::AwaitingRemoteRevokeToRemoveFail,
+					);
+				},
+				HTLCUpdateAwaitingACK::FailMalformedHTLC { htlc_id, .. } => {
+					holding_cell_states.insert(
+						htlc_id,
+						InboundHTLCStateDetails::AwaitingRemoteRevokeToRemoveFail,
+					);
+				},
+				// Outbound HTLC.
+				HTLCUpdateAwaitingACK::AddHTLC { .. } => {},
+			}
+		}
+		let mut inbound_details = Vec::new();
+		let htlc_success_dust_limit = if self.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
+			0
 		} else {
-			let dust_buffer_feerate = context.get_dust_buffer_feerate(outbound_feerate_update) as u64;
-			(dust_buffer_feerate * htlc_timeout_tx_weight(context.get_channel_type()) / 1000,
-				dust_buffer_feerate * htlc_success_tx_weight(context.get_channel_type()) / 1000)
+			let dust_buffer_feerate = self.get_dust_buffer_feerate(None) as u64;
+			dust_buffer_feerate * htlc_success_tx_weight(self.get_channel_type()) / 1000
 		};
-		let counterparty_dust_limit_success_sat = htlc_success_dust_limit + context.counterparty_dust_limit_satoshis;
-		let holder_dust_limit_timeout_sat = htlc_timeout_dust_limit + context.holder_dust_limit_satoshis;
-		for ref htlc in context.pending_outbound_htlcs.iter() {
-			stats.pending_htlcs_value_msat += htlc.amount_msat;
-			if htlc.amount_msat / 1000 < counterparty_dust_limit_success_sat {
-				stats.on_counterparty_tx_dust_exposure_msat += htlc.amount_msat;
-			}
-			if htlc.amount_msat / 1000 < holder_dust_limit_timeout_sat {
-				stats.on_holder_tx_dust_exposure_msat += htlc.amount_msat;
+		let holder_dust_limit_success_sat = htlc_success_dust_limit + self.holder_dust_limit_satoshis;
+		for htlc in self.pending_inbound_htlcs.iter() {
+			if let Some(state_details) = (&htlc.state).into() {
+				inbound_details.push(InboundHTLCDetails{
+					htlc_id: htlc.htlc_id,
+					amount_msat: htlc.amount_msat,
+					cltv_expiry: htlc.cltv_expiry,
+					payment_hash: htlc.payment_hash,
+					state: Some(holding_cell_states.remove(&htlc.htlc_id).unwrap_or(state_details)),
+					is_dust: htlc.amount_msat / 1000 < holder_dust_limit_success_sat,
+				});
 			}
 		}
+		inbound_details
+	}
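The is_dust classification above compares each HTLC's value, in whole satoshis, against the holder dust limit plus the fee on the HTLC-success transaction at the buffered feerate (zero for anchor channels). A minimal sketch of that comparison; the 703-weight success transaction and the dust limit and feerate figures are illustrative assumptions, not values from this diff:

const HTLC_SUCCESS_TX_WEIGHT: u64 = 703; // assumed non-anchor success tx weight

fn is_dust(amount_msat: u64, holder_dust_limit_satoshis: u64, dust_buffer_feerate: u64) -> bool {
	// Non-anchor channels add the HTLC-success transaction fee, at the buffered
	// feerate, on top of the holder's dust limit before classifying.
	let htlc_success_dust_limit = dust_buffer_feerate * HTLC_SUCCESS_TX_WEIGHT / 1000;
	amount_msat / 1000 < htlc_success_dust_limit + holder_dust_limit_satoshis
}

fn main() {
	// 546 sat dust limit, 3530 sat/kW buffered feerate: threshold = 2481 + 546 = 3027 sat.
	assert!(is_dust(3_000_000, 546, 3_530));  // 3000 sat is dust
	assert!(!is_dust(3_100_000, 546, 3_530)); // 3100 sat is non-dust
}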
-		for update in context.holding_cell_htlc_updates.iter() {
-			if let &HTLCUpdateAwaitingACK::AddHTLC { ref amount_msat, .. } = update {
-				stats.pending_htlcs += 1;
-				stats.pending_htlcs_value_msat += amount_msat;
-				stats.holding_cell_msat += amount_msat;
-				if *amount_msat / 1000 < counterparty_dust_limit_success_sat {
-					stats.on_counterparty_tx_dust_exposure_msat += amount_msat;
-				}
-				if *amount_msat / 1000 < holder_dust_limit_timeout_sat {
-					stats.on_holder_tx_dust_exposure_msat += amount_msat;
-				} else {
-					stats.on_holder_tx_holding_cell_htlcs_count += 1;
-				}
+	/// Returns information on all pending outbound HTLCs.
+	pub fn get_pending_outbound_htlc_details(&self) -> Vec<OutboundHTLCDetails> {
+		let mut outbound_details = Vec::new();
+		let htlc_timeout_dust_limit = if self.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
+			0
+		} else {
+			let dust_buffer_feerate = self.get_dust_buffer_feerate(None) as u64;
+			dust_buffer_feerate * htlc_timeout_tx_weight(self.get_channel_type()) / 1000
+		};
+		let holder_dust_limit_timeout_sat = htlc_timeout_dust_limit + self.holder_dust_limit_satoshis;
+		for htlc in self.pending_outbound_htlcs.iter() {
+			outbound_details.push(OutboundHTLCDetails{
+				htlc_id: Some(htlc.htlc_id),
+				amount_msat: htlc.amount_msat,
+				cltv_expiry: htlc.cltv_expiry,
+				payment_hash: htlc.payment_hash,
+				skimmed_fee_msat: htlc.skimmed_fee_msat,
+				state: Some((&htlc.state).into()),
+				is_dust: htlc.amount_msat / 1000 < holder_dust_limit_timeout_sat,
+			});
+		}
+		for holding_cell_update in self.holding_cell_htlc_updates.iter() {
+			if let HTLCUpdateAwaitingACK::AddHTLC {
+				amount_msat,
+				cltv_expiry,
+				payment_hash,
+				skimmed_fee_msat,
+				..
+			} = *holding_cell_update {
+				outbound_details.push(OutboundHTLCDetails{
+					htlc_id: None,
+					amount_msat: amount_msat,
+					cltv_expiry: cltv_expiry,
+					payment_hash: payment_hash,
+					skimmed_fee_msat: skimmed_fee_msat,
+					state: Some(OutboundHTLCStateDetails::AwaitingRemoteRevokeToAdd),
+					is_dust: amount_msat / 1000 < holder_dust_limit_timeout_sat,
+				});
 			}
 		}
-		stats
+		outbound_details
 	}
 
 	/// Get the available balances, see [`AvailableBalances`]'s fields for more info.
@@ -1704,9 +3035,11 @@ impl<SP: Deref> ChannelContext<SP> where SP::Target: SignerProvider {
 		where F::Target: FeeEstimator
 	{
 		let context = &self;
-		// Note that we have to handle overflow due to the above case.
-		let inbound_stats = context.get_inbound_pending_htlc_stats(None);
-		let outbound_stats = context.get_outbound_pending_htlc_stats(None);
+		// Note that we have to handle overflow due to the case mentioned in the docs in general
+		// here.
+
+		let dust_exposure_limiting_feerate = self.get_dust_exposure_limiting_feerate(&fee_estimator);
+		let htlc_stats = context.get_pending_htlc_stats(None, dust_exposure_limiting_feerate);
 
 		let mut balance_msat = context.value_to_self_msat;
 		for ref htlc in context.pending_inbound_htlcs.iter() {
@@ -1714,10 +3047,10 @@ impl<SP: Deref> ChannelContext<SP> where SP::Target: SignerProvider {
 				balance_msat += htlc.amount_msat;
 			}
 		}
-		balance_msat -= outbound_stats.pending_htlcs_value_msat;
+		balance_msat -= htlc_stats.pending_outbound_htlcs_value_msat;
 
 		let outbound_capacity_msat = context.value_to_self_msat
-			.saturating_sub(outbound_stats.pending_htlcs_value_msat)
+			.saturating_sub(htlc_stats.pending_outbound_htlcs_value_msat)
 			.saturating_sub(
 				context.counterparty_selected_channel_reserve_satoshis.unwrap_or(0) * 1000);
 
@@ -1777,7 +3110,7 @@ impl<SP: Deref> ChannelContext<SP> where SP::Target: SignerProvider {
 		let holder_selected_chan_reserve_msat = context.holder_selected_channel_reserve_satoshis * 1000;
 		let remote_balance_msat = (context.channel_value_satoshis * 1000 - context.value_to_self_msat)
-			.saturating_sub(inbound_stats.pending_htlcs_value_msat);
+			.saturating_sub(htlc_stats.pending_inbound_htlcs_value_msat);
 
 		if remote_balance_msat < max_reserved_commit_tx_fee_msat + holder_selected_chan_reserve_msat + anchor_outputs_value_msat {
 			// If another HTLC's fee would reduce the remote's balance below the reserve limit
@@ -1794,7 +3127,7 @@ impl<SP: Deref> ChannelContext<SP> where SP::Target: SignerProvider {
 		// send above the dust limit (as the router can always overpay to meet the dust limit).
let mut remaining_msat_below_dust_exposure_limit = None; let mut dust_exposure_dust_limit_msat = 0; - let max_dust_htlc_exposure_msat = context.get_max_dust_htlc_exposure_msat(fee_estimator); + let max_dust_htlc_exposure_msat = context.get_max_dust_htlc_exposure_msat(dust_exposure_limiting_feerate); let (htlc_success_dust_limit, htlc_timeout_dust_limit) = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() { (context.counterparty_dust_limit_satoshis, context.holder_dust_limit_satoshis) @@ -1803,18 +3136,32 @@ impl ChannelContext where SP::Target: SignerProvider { (context.counterparty_dust_limit_satoshis + dust_buffer_feerate * htlc_success_tx_weight(context.get_channel_type()) / 1000, context.holder_dust_limit_satoshis + dust_buffer_feerate * htlc_timeout_tx_weight(context.get_channel_type()) / 1000) }; - let on_counterparty_dust_htlc_exposure_msat = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat; - if on_counterparty_dust_htlc_exposure_msat as i64 + htlc_success_dust_limit as i64 * 1000 - 1 > max_dust_htlc_exposure_msat.try_into().unwrap_or(i64::max_value()) { + + let excess_feerate_opt = self.feerate_per_kw.checked_sub(dust_exposure_limiting_feerate); + if let Some(excess_feerate) = excess_feerate_opt { + let htlc_dust_exposure_msat = + per_outbound_htlc_counterparty_commit_tx_fee_msat(excess_feerate, &context.channel_type); + let nondust_htlc_counterparty_tx_dust_exposure = + htlc_stats.on_counterparty_tx_dust_exposure_msat.saturating_add(htlc_dust_exposure_msat); + if nondust_htlc_counterparty_tx_dust_exposure > max_dust_htlc_exposure_msat { + // If adding an extra HTLC would put us over the dust limit in total fees, we cannot + // send any non-dust HTLCs. + available_capacity_msat = cmp::min(available_capacity_msat, htlc_success_dust_limit * 1000); + } + } + + if htlc_stats.on_counterparty_tx_dust_exposure_msat.saturating_add(htlc_success_dust_limit * 1000) > max_dust_htlc_exposure_msat.saturating_add(1) { + // Note that we don't use the `counterparty_tx_dust_exposure` (with + // `htlc_dust_exposure_msat`) here as it only applies to non-dust HTLCs. 
 			remaining_msat_below_dust_exposure_limit =
-				Some(max_dust_htlc_exposure_msat.saturating_sub(on_counterparty_dust_htlc_exposure_msat));
+				Some(max_dust_htlc_exposure_msat.saturating_sub(htlc_stats.on_counterparty_tx_dust_exposure_msat));
 			dust_exposure_dust_limit_msat = cmp::max(dust_exposure_dust_limit_msat, htlc_success_dust_limit * 1000);
 		}
 
-		let on_holder_dust_htlc_exposure_msat = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat;
-		if on_holder_dust_htlc_exposure_msat as i64 + htlc_timeout_dust_limit as i64 * 1000 - 1 > max_dust_htlc_exposure_msat.try_into().unwrap_or(i64::max_value()) {
+		if htlc_stats.on_holder_tx_dust_exposure_msat as i64 + htlc_timeout_dust_limit as i64 * 1000 - 1 > max_dust_htlc_exposure_msat.try_into().unwrap_or(i64::max_value()) {
 			remaining_msat_below_dust_exposure_limit = Some(cmp::min(
 				remaining_msat_below_dust_exposure_limit.unwrap_or(u64::max_value()),
-				max_dust_htlc_exposure_msat.saturating_sub(on_holder_dust_htlc_exposure_msat)));
+				max_dust_htlc_exposure_msat.saturating_sub(htlc_stats.on_holder_tx_dust_exposure_msat)));
 			dust_exposure_dust_limit_msat = cmp::max(dust_exposure_dust_limit_msat, htlc_timeout_dust_limit * 1000);
 		}
 
@@ -1827,16 +3174,16 @@ impl<SP: Deref> ChannelContext<SP> where SP::Target: SignerProvider {
 		}
 
 		available_capacity_msat = cmp::min(available_capacity_msat,
-			context.counterparty_max_htlc_value_in_flight_msat - outbound_stats.pending_htlcs_value_msat);
+			context.counterparty_max_htlc_value_in_flight_msat - htlc_stats.pending_outbound_htlcs_value_msat);
 
-		if outbound_stats.pending_htlcs + 1 > context.counterparty_max_accepted_htlcs as u32 {
+		if htlc_stats.pending_outbound_htlcs + 1 > context.counterparty_max_accepted_htlcs as usize {
 			available_capacity_msat = 0;
 		}
 
 		AvailableBalances {
 			inbound_capacity_msat: cmp::max(context.channel_value_satoshis as i64 * 1000
 					- context.value_to_self_msat as i64
-					- context.get_inbound_pending_htlc_stats(None).pending_htlcs_value_msat as i64
+					- htlc_stats.pending_inbound_htlcs_value_msat as i64
 					- context.holder_selected_channel_reserve_satoshis as i64 * 1000,
 				0) as u64,
 			outbound_capacity_msat,
@@ -2045,13 +3392,18 @@ impl<SP: Deref> ChannelContext<SP> where SP::Target: SignerProvider {
 		res
 	}
 
-	fn if_unbroadcasted_funding<F, O>(&self, f: F) -> Option<O>
-		where F: Fn() -> Option<O> {
-		if self.channel_state & ChannelState::FundingCreated as u32 != 0 ||
-			self.channel_state & ChannelState::WaitingForBatch as u32 != 0 {
-			f()
-		} else {
-			None
+	fn if_unbroadcasted_funding<F, O>(&self, f: F) -> Option<O> where F: Fn() -> Option<O> {
+		match self.channel_state {
+			ChannelState::FundingNegotiated => f(),
+			ChannelState::AwaitingChannelReady(flags) =>
+				if flags.is_set(AwaitingChannelReadyFlags::WAITING_FOR_BATCH) ||
+					flags.is_set(FundedStateFlags::MONITOR_UPDATE_IN_PROGRESS.into())
+				{
+					f()
+				} else {
+					None
+				},
+			_ => None,
 		}
 	}
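The match above gates the closure f on states in which the funding transaction cannot yet have been broadcast: funding still being negotiated, or waiting on a batch or an in-flight monitor update. A simplified sketch of the same shape, using a stripped-down stand-in for ChannelState rather than the real flags type:

#[derive(Clone, Copy)]
enum State {
	FundingNegotiated,
	AwaitingChannelReady { waiting_for_batch: bool, monitor_update_in_progress: bool },
	ChannelReady,
}

fn if_unbroadcasted_funding<F, O>(state: State, f: F) -> Option<O> where F: Fn() -> Option<O> {
	match state {
		// Funding signatures not yet exchanged: the tx cannot have been broadcast.
		State::FundingNegotiated => f(),
		// Batch channels, and channels blocked on a monitor update, hold off on
		// broadcasting even after signatures are exchanged.
		State::AwaitingChannelReady { waiting_for_batch: true, .. }
		| State::AwaitingChannelReady { monitor_update_in_progress: true, .. } => f(),
		_ => None,
	}
}

fn main() {
	let funding_txid = || Some("deadbeef");
	assert!(if_unbroadcasted_funding(State::FundingNegotiated, funding_txid).is_some());
	assert!(if_unbroadcasted_funding(State::ChannelReady, funding_txid).is_none());
}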
@@ -2085,12 +3437,12 @@ impl<SP: Deref> ChannelContext<SP> where SP::Target: SignerProvider {
 	/// those explicitly stated to be allowed after shutdown completes, eg some simple getters).
 	/// Also returns the list of payment_hashes for channels which we can safely fail backwards
 	/// immediately (others we will have to allow to time out).
-	pub fn force_shutdown(&mut self, should_broadcast: bool) -> ShutdownResult {
+	pub fn force_shutdown(&mut self, should_broadcast: bool, closure_reason: ClosureReason) -> ShutdownResult {
 		// Note that we MUST only generate a monitor update that indicates force-closure - we're
 		// called during initialization prior to the chain_monitor in the encompassing ChannelManager
 		// being fully configured in some cases. Thus, it's likely any monitor events we generate will
 		// be delayed in being processed! See the docs for `ChannelManagerReadArgs` for more.
-		assert!(self.channel_state != ChannelState::ShutdownComplete as u32);
+		assert!(!matches!(self.channel_state, ChannelState::ShutdownComplete));
 
 		// We go ahead and "free" any holding cell HTLCs or HTLCs we haven't yet committed to and
 		// return them to fail the payment.
@@ -2105,61 +3457,42 @@ impl<SP: Deref> ChannelContext<SP> where SP::Target: SignerProvider {
 			}
 		}
 		let monitor_update = if let Some(funding_txo) = self.get_funding_txo() {
-			// If we haven't yet exchanged funding signatures (ie channel_state < FundingSent),
+			// If we haven't yet exchanged funding signatures (ie channel_state < AwaitingChannelReady),
 			// returning a channel monitor update here would imply a channel monitor update before
 			// we even registered the channel monitor to begin with, which is invalid.
 			// Thus, if we aren't actually at a point where we could conceivably broadcast the
 			// funding transaction, don't return a funding txo (which prevents providing the
 			// monitor update to the user, even if we return one).
 			// See test_duplicate_chan_id and test_pre_lockin_no_chan_closed_update for more.
-			if self.channel_state & (ChannelState::FundingSent as u32 | ChannelState::ChannelReady as u32 | ChannelState::ShutdownComplete as u32) != 0 {
+			if !self.channel_state.is_pre_funded_state() {
 				self.latest_monitor_update_id = CLOSED_CHANNEL_UPDATE_ID;
-				Some((self.get_counterparty_node_id(), funding_txo, ChannelMonitorUpdate {
+				Some((self.get_counterparty_node_id(), funding_txo, self.channel_id(), ChannelMonitorUpdate {
 					update_id: self.latest_monitor_update_id,
+					counterparty_node_id: Some(self.counterparty_node_id),
 					updates: vec![ChannelMonitorUpdateStep::ChannelForceClosed { should_broadcast }],
+					channel_id: Some(self.channel_id()),
 				}))
 			} else { None }
 		} else { None };
 		let unbroadcasted_batch_funding_txid = self.unbroadcasted_batch_funding_txid();
+		let unbroadcasted_funding_tx = self.unbroadcasted_funding();
 
-		self.channel_state = ChannelState::ShutdownComplete as u32;
+		self.channel_state = ChannelState::ShutdownComplete;
 		self.update_time_counter += 1;
 		ShutdownResult {
+			closure_reason,
 			monitor_update,
 			dropped_outbound_htlcs,
 			unbroadcasted_batch_funding_txid,
+			channel_id: self.channel_id,
+			user_channel_id: self.user_id,
+			channel_capacity_satoshis: self.channel_value_satoshis,
+			counterparty_node_id: self.counterparty_node_id,
+			unbroadcasted_funding_tx,
+			channel_funding_txo: self.get_funding_txo(),
 		}
 	}
 
-	/// Only allowed after [`Self::channel_transaction_parameters`] is set.
-	fn get_funding_created_msg<L: Deref>(&mut self, logger: &L) -> Option<msgs::FundingCreated> where L::Target: Logger {
-		let counterparty_keys = self.build_remote_transaction_keys();
-		let counterparty_initial_commitment_tx = self.build_commitment_transaction(self.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, false, logger).tx;
-		let signature = match &self.holder_signer {
-			// TODO (taproot|arik): move match into calling method for Taproot
-			ChannelSignerType::Ecdsa(ecdsa) => {
-				ecdsa.sign_counterparty_commitment(&counterparty_initial_commitment_tx, Vec::new(), &self.secp_ctx)
-					.map(|(sig, _)| sig).ok()?
-			}
-		};
-
-		if self.signer_pending_funding {
-			log_trace!(logger, "Counterparty commitment signature ready for funding_created message: clearing signer_pending_funding");
-			self.signer_pending_funding = false;
-		}
-
-		Some(msgs::FundingCreated {
-			temporary_channel_id: self.temporary_channel_id.unwrap(),
-			funding_txid: self.channel_transaction_parameters.funding_outpoint.as_ref().unwrap().txid,
-			funding_output_index: self.channel_transaction_parameters.funding_outpoint.as_ref().unwrap().index,
-			signature,
-			#[cfg(taproot)]
-			partial_signature_with_nonce: None,
-			#[cfg(taproot)]
-			next_local_nonce: None,
-		})
-	}
-
 	/// Only allowed after [`Self::channel_transaction_parameters`] is set.
 	fn get_funding_signed_msg<L: Deref>(&mut self, logger: &L) -> (CommitmentTransaction, Option<msgs::FundingSigned>) where L::Target: Logger {
 		let counterparty_keys = self.build_remote_transaction_keys();
@@ -2173,7 +3506,7 @@ impl<SP: Deref> ChannelContext<SP> where SP::Target: SignerProvider {
 		match &self.holder_signer {
 			// TODO (arik): move match into calling method for Taproot
 			ChannelSignerType::Ecdsa(ecdsa) => {
-				let funding_signed = ecdsa.sign_counterparty_commitment(&counterparty_initial_commitment_tx, Vec::new(), &self.secp_ctx)
+				let funding_signed = ecdsa.sign_counterparty_commitment(&counterparty_initial_commitment_tx, Vec::new(), Vec::new(), &self.secp_ctx)
 					.map(|(signature, _)| msgs::FundingSigned {
 						channel_id: self.channel_id(),
 						signature,
@@ -2183,8 +3516,13 @@ impl<SP: Deref> ChannelContext<SP> where SP::Target: SignerProvider {
 					.ok();
 
 				if funding_signed.is_none() {
-					log_trace!(logger, "Counterparty commitment signature not available for funding_signed message; setting signer_pending_funding");
-					self.signer_pending_funding = true;
+					#[cfg(not(async_signing))] {
+						panic!("Failed to get signature for funding_signed");
+					}
+					#[cfg(async_signing)] {
+						log_trace!(logger, "Counterparty commitment signature not available for funding_signed message; setting signer_pending_funding");
+						self.signer_pending_funding = true;
+					}
 				} else if self.signer_pending_funding {
 					log_trace!(logger, "Counterparty commitment signature available for funding_signed message; clearing signer_pending_funding");
 					self.signer_pending_funding = false;
@@ -2192,8 +3530,54 @@ impl<SP: Deref> ChannelContext<SP> where SP::Target: SignerProvider {
 
 				// We sign "counterparty" commitment transaction, allowing them to broadcast the tx if they wish.
 				(counterparty_initial_commitment_tx, funding_signed)
-			}
+			},
+			// TODO (taproot|arik)
+			#[cfg(taproot)]
+			_ => todo!()
+		}
+	}
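The cfg-gated fallback above is a recurring pattern in this diff: without the async_signing feature a missing signature is treated as a hard failure, while with it the channel records that the signer still owes a signature and retries later. A minimal sketch of the pattern with stand-in types, not the real LDK signer interfaces:

type SignResult = Option<u64>; // stand-in for a signature result

struct Ctx { signer_pending_funding: bool }

impl Ctx {
	fn on_funding_signature(&mut self, sig: SignResult) -> SignResult {
		if sig.is_none() {
			#[cfg(not(async_signing))] {
				panic!("Failed to get signature for funding_signed");
			}
			#[cfg(async_signing)] {
				// Remember that the signer still owes us this signature; a later
				// "signer unblocked" notification would retry building the message.
				self.signer_pending_funding = true;
			}
		}
		sig
	}
}

fn main() {
	let mut ctx = Ctx { signer_pending_funding: false };
	assert_eq!(ctx.on_funding_signature(Some(7)), Some(7));
}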
+	/// If we receive an error message when attempting to open a channel, it may only be a rejection
+	/// of the channel type we tried, not of our ability to open any channel at all. We can see if a
+	/// downgrade of channel features would be possible so that we can still open the channel.
+	pub(crate) fn maybe_downgrade_channel_features<F: Deref>(
+		&mut self, fee_estimator: &LowerBoundedFeeEstimator<F>
+	) -> Result<(), ()>
+	where
+		F::Target: FeeEstimator
+	{
+		if !self.is_outbound() ||
+			!matches!(
+				self.channel_state, ChannelState::NegotiatingFunding(flags)
+				if flags == NegotiatingFundingFlags::OUR_INIT_SENT
+			)
+		{
+			return Err(());
+		}
+		if self.channel_type == ChannelTypeFeatures::only_static_remote_key() {
+			// We've exhausted our options
+			return Err(());
+		}
+		// We support opening a few different types of channels. Try removing our additional
+		// features one by one until we've either arrived at our default or the counterparty has
+		// accepted one.
+		//
+		// Due to the order below, we may not negotiate `option_anchors_zero_fee_htlc_tx` if the
+		// counterparty doesn't support `option_scid_privacy`. Since `get_initial_channel_type`
+		// checks whether the counterparty supports every feature, this would only happen if the
+		// counterparty is advertising the feature, but rejecting channels proposing the feature for
+		// whatever reason.
+		if self.channel_type.supports_anchors_zero_fee_htlc_tx() {
+			self.channel_type.clear_anchors_zero_fee_htlc_tx();
+			self.feerate_per_kw = fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::NonAnchorChannelFee);
+			assert!(!self.channel_transaction_parameters.channel_type_features.supports_anchors_nonzero_fee_htlc_tx());
+		} else if self.channel_type.supports_scid_privacy() {
+			self.channel_type.clear_scid_privacy();
+		} else {
+			self.channel_type = ChannelTypeFeatures::only_static_remote_key();
+		}
+		self.channel_transaction_parameters.channel_type_features = self.channel_type.clone();
+		Ok(())
+	}
 }
 
@@ -2239,6 +3623,20 @@ pub(crate) fn get_legacy_default_holder_selected_channel_reserve_satoshis(channe
 	cmp::min(channel_value_satoshis, cmp::max(q, 1000))
 }
 
+/// Returns a minimum channel reserve value each party needs to maintain, fixed in the spec to a
+/// default of 1% of the total channel value.
+///
+/// Guaranteed to return a value no larger than `channel_value_satoshis`.
+///
+/// This is used both for outbound and inbound channels and has a lower bound
+/// of `dust_limit_satoshis`.
+#[cfg(any(dual_funding, splicing))]
+fn get_v2_channel_reserve_satoshis(channel_value_satoshis: u64, dust_limit_satoshis: u64) -> u64 {
+	// Fixed at 1% of channel value by spec.
+	let (q, _) = channel_value_satoshis.overflowing_div(100);
+	cmp::min(channel_value_satoshis, cmp::max(q, dust_limit_satoshis))
+}
+
 // Get the fee cost in SATS of a commitment tx with a given number of HTLC outputs.
 // Note that num_htlcs should not include dust HTLCs.
 #[inline]
@@ -2254,10 +3652,37 @@ pub(crate) fn commit_tx_fee_msat(feerate_per_kw: u32, num_htlcs: usize, channel_
 	(commitment_tx_base_weight(channel_type_features) + num_htlcs as u64 * COMMITMENT_TX_WEIGHT_PER_HTLC) * feerate_per_kw as u64 / 1000 * 1000
 }
 
+pub(crate) fn per_outbound_htlc_counterparty_commit_tx_fee_msat(feerate_per_kw: u32, channel_type_features: &ChannelTypeFeatures) -> u64 {
+	// Note that we need to divide before multiplying to round properly,
+	// since the lowest denomination of bitcoin on-chain is the satoshi.
+	let commitment_tx_fee = COMMITMENT_TX_WEIGHT_PER_HTLC * feerate_per_kw as u64 / 1000 * 1000;
+	if channel_type_features.supports_anchors_zero_fee_htlc_tx() {
+		commitment_tx_fee + htlc_success_tx_weight(channel_type_features) * feerate_per_kw as u64 / 1000
+	} else {
+		commitment_tx_fee
+	}
+}
+
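// Illustrative sketch (editor's addition, not part of the diff above): the 1%
// v2 reserve computation with concrete numbers. This helper mirrors the logic
// of get_v2_channel_reserve_satoshis; the values below are hypothetical.
fn sketch_v2_reserve(channel_value_satoshis: u64, dust_limit_satoshis: u64) -> u64 {
	// 1% of the channel value, floored at the dust limit, capped at the full value.
	let one_percent = channel_value_satoshis / 100;
	core::cmp::min(channel_value_satoshis, core::cmp::max(one_percent, dust_limit_satoshis))
}
// sketch_v2_reserve(1_000_000, 546) == 10_000 (the 1% term dominates)
// sketch_v2_reserve(20_000, 546) == 546 (the dust-limit floor dominates)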
+/// Context for dual-funded channels.
+#[cfg(any(dual_funding, splicing))]
+pub(super) struct DualFundingChannelContext {
+	/// The amount in satoshis we will be contributing to the channel.
+	pub our_funding_satoshis: u64,
+	/// The amount in satoshis our counterparty will be contributing to the channel.
+	pub their_funding_satoshis: u64,
+	/// The funding transaction locktime suggested by the initiator. If set by us, it is always set
+	/// to the current block height to align incentives against fee-sniping.
+	pub funding_tx_locktime: u32,
+	/// The feerate set by the initiator to be used for the funding transaction.
+	pub funding_feerate_sat_per_1000_weight: u32,
+}
+
 // Holder designates channel data owned for the benefit of the user client.
 // Counterparty designates channel data owned by the another channel participant entity.
 pub(super) struct Channel<SP: Deref> where SP::Target: SignerProvider {
 	pub context: ChannelContext<SP>,
+	#[cfg(any(dual_funding, splicing))]
+	pub dual_funding_channel_context: Option<DualFundingChannelContext>,
 }
 
 #[cfg(any(test, fuzzing))]
@@ -2269,9 +3694,65 @@ struct CommitmentTxInfoCached {
 	feerate: u32,
 }
 
+/// Contents of a wire message that fails an HTLC backwards. Useful for [`Channel::fail_htlc`] to
+/// fail with either [`msgs::UpdateFailMalformedHTLC`] or [`msgs::UpdateFailHTLC`] as needed.
+trait FailHTLCContents {
+	type Message: FailHTLCMessageName;
+	fn to_message(self, htlc_id: u64, channel_id: ChannelId) -> Self::Message;
+	fn to_inbound_htlc_state(self) -> InboundHTLCState;
+	fn to_htlc_update_awaiting_ack(self, htlc_id: u64) -> HTLCUpdateAwaitingACK;
+}
+impl FailHTLCContents for msgs::OnionErrorPacket {
+	type Message = msgs::UpdateFailHTLC;
+	fn to_message(self, htlc_id: u64, channel_id: ChannelId) -> Self::Message {
+		msgs::UpdateFailHTLC { htlc_id, channel_id, reason: self }
+	}
+	fn to_inbound_htlc_state(self) -> InboundHTLCState {
+		InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailRelay(self))
+	}
+	fn to_htlc_update_awaiting_ack(self, htlc_id: u64) -> HTLCUpdateAwaitingACK {
+		HTLCUpdateAwaitingACK::FailHTLC { htlc_id, err_packet: self }
+	}
+}
+impl FailHTLCContents for ([u8; 32], u16) {
+	type Message = msgs::UpdateFailMalformedHTLC;
+	fn to_message(self, htlc_id: u64, channel_id: ChannelId) -> Self::Message {
+		msgs::UpdateFailMalformedHTLC {
+			htlc_id,
+			channel_id,
+			sha256_of_onion: self.0,
+			failure_code: self.1
+		}
+	}
+	fn to_inbound_htlc_state(self) -> InboundHTLCState {
+		InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailMalformed(self))
+	}
+	fn to_htlc_update_awaiting_ack(self, htlc_id: u64) -> HTLCUpdateAwaitingACK {
+		HTLCUpdateAwaitingACK::FailMalformedHTLC {
+			htlc_id,
+			sha256_of_onion: self.0,
+			failure_code: self.1
+		}
+	}
+}
+
+trait FailHTLCMessageName {
+	fn name() -> &'static str;
+}
+impl FailHTLCMessageName for msgs::UpdateFailHTLC {
+	fn name() -> &'static str {
+		"update_fail_htlc"
+	}
+}
+impl FailHTLCMessageName for msgs::UpdateFailMalformedHTLC {
+	fn name() -> &'static str {
+		"update_fail_malformed_htlc"
+	}
+}
+
 impl<SP: Deref> Channel<SP> where
 	SP::Target: SignerProvider,
-	<SP::Target as SignerProvider>::Signer: WriteableEcdsaChannelSigner
+	<SP::Target as SignerProvider>::EcdsaSigner: EcdsaChannelSigner
 {
 	fn check_remote_fee<F: Deref, L: Deref>(
 		channel_type: &ChannelTypeFeatures, fee_estimator: &LowerBoundedFeeEstimator<F>,
@@ -2293,7 +3774,12 @@ impl<SP: Deref> Channel<SP> where
 				return Ok(());
 			}
 		}
-		return Err(ChannelError::Close(format!("Peer's feerate much too low. Actual: {}. Our expected lower limit: {}", feerate_per_kw, lower_limit)));
+		return Err(ChannelError::Close((format!(
+			"Peer's feerate much too low. Actual: {}. 
Our expected lower limit: {}", feerate_per_kw, lower_limit + ), ClosureReason::PeerFeerateTooLow { + peer_feerate_sat_per_kw: feerate_per_kw, + required_feerate_sat_per_kw: lower_limit, + }))); } Ok(()) } @@ -2385,7 +3871,7 @@ impl Channel where where L::Target: Logger { // Assert that we'll add the HTLC claim to the holding cell in `get_update_fulfill_htlc` // (see equivalent if condition there). - assert!(self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32) != 0); + assert!(!self.context.channel_state.can_generate_new_commitment()); let mon_update_id = self.context.latest_monitor_update_id; // Forget the ChannelMonitor update let fulfill_resp = self.get_update_fulfill_htlc(htlc_id_arg, payment_preimage_arg, logger); self.context.latest_monitor_update_id = mon_update_id; @@ -2399,10 +3885,9 @@ impl Channel where // caller thought we could have something claimed (cause we wouldn't have accepted in an // incoming HTLC anyway). If we got to ShutdownComplete, callers aren't allowed to call us, // either. - if (self.context.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) { + if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) { panic!("Was asked to fulfill an HTLC when channel was not in an operational state"); } - assert_eq!(self.context.channel_state & ChannelState::ShutdownComplete as u32, 0); // ChannelManager may generate duplicate claims/fails due to HTLC update events from // on-chain ChannelsMonitors during block rescan. Ideally we'd figure out a way to drop @@ -2450,12 +3935,14 @@ impl Channel where self.context.latest_monitor_update_id += 1; let monitor_update = ChannelMonitorUpdate { update_id: self.context.latest_monitor_update_id, + counterparty_node_id: Some(self.context.counterparty_node_id), updates: vec![ChannelMonitorUpdateStep::PaymentPreimage { payment_preimage: payment_preimage_arg.clone(), }], + channel_id: Some(self.context.channel_id()), }; - if (self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32)) != 0 { + if !self.context.channel_state.can_generate_new_commitment() { // Note that this condition is the same as the assertion in // `claim_htlc_while_disconnected_dropping_mon_update` and must match exactly - // `claim_htlc_while_disconnected_dropping_mon_update` would not work correctly if we @@ -2471,7 +3958,9 @@ impl Channel where return UpdateFulfillFetch::DuplicateClaim {}; } }, - &HTLCUpdateAwaitingACK::FailHTLC { htlc_id, .. } => { + &HTLCUpdateAwaitingACK::FailHTLC { htlc_id, .. } | + &HTLCUpdateAwaitingACK::FailMalformedHTLC { htlc_id, .. } => + { if htlc_id_arg == htlc_id { log_warn!(logger, "Have preimage and want to fulfill HTLC with pending failure against channel {}", &self.context.channel_id()); // TODO: We may actually be able to switch to a fulfill here, though its @@ -2483,7 +3972,7 @@ impl Channel where _ => {} } } - log_trace!(logger, "Adding HTLC claim to holding_cell in channel {}! Current state: {}", &self.context.channel_id(), self.context.channel_state); + log_trace!(logger, "Adding HTLC claim to holding_cell in channel {}! 
Current state: {}", &self.context.channel_id(), self.context.channel_state.to_u32());
 		self.context.holding_cell_htlc_updates.push(HTLCUpdateAwaitingACK::ClaimHTLC {
 			payment_preimage: payment_preimage_arg, htlc_id: htlc_id_arg,
 		});
@@ -2568,6 +4057,17 @@ impl<SP: Deref> Channel<SP> where
 			.map(|msg_opt| assert!(msg_opt.is_none(), "We forced holding cell?"))
 	}
 
+	/// Used for failing back with [`msgs::UpdateFailMalformedHTLC`]. For now, this is used when we
+	/// want to fail blinded HTLCs where we are not the intro node.
+	///
+	/// See [`Self::queue_fail_htlc`] for more info.
+	pub fn queue_fail_malformed_htlc<L: Deref>(
+		&mut self, htlc_id_arg: u64, failure_code: u16, sha256_of_onion: [u8; 32], logger: &L
+	) -> Result<(), ChannelError> where L::Target: Logger {
+		self.fail_htlc(htlc_id_arg, (sha256_of_onion, failure_code), true, logger)
+			.map(|msg_opt| assert!(msg_opt.is_none(), "We forced holding cell?"))
+	}
+
 	/// We can only have one resolution per HTLC. In some cases around reconnect, we may fulfill
 	/// an HTLC more than once or fulfill once and then attempt to fail after reconnect. We cannot,
 	/// however, fail more than once as we wait for an upstream failure to be irrevocably committed
@@ -2576,12 +4076,13 @@ impl<SP: Deref> Channel<SP> where
 	/// If we do fail twice, we `debug_assert!(false)` and return `Ok(None)`. Thus, this will always
 	/// return `Ok(_)` if preconditions are met. In any case, `Err`s will only be
 	/// [`ChannelError::Ignore`].
-	fn fail_htlc<L: Deref>(&mut self, htlc_id_arg: u64, err_packet: msgs::OnionErrorPacket, mut force_holding_cell: bool, logger: &L)
-	-> Result<Option<msgs::UpdateFailHTLC>, ChannelError> where L::Target: Logger {
-		if (self.context.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) {
+	fn fail_htlc<E: FailHTLCContents + Clone, L: Deref>(
+		&mut self, htlc_id_arg: u64, err_contents: E, mut force_holding_cell: bool,
+		logger: &L
+	) -> Result<Option<E::Message>, ChannelError> where L::Target: Logger {
+		if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
 			panic!("Was asked to fail an HTLC when channel was not in an operational state");
 		}
-		assert_eq!(self.context.channel_state & ChannelState::ShutdownComplete as u32, 0);
 
 		// ChannelManager may generate duplicate claims/fails due to HTLC update events from
 		// on-chain ChannelsMonitors during block rescan. Ideally we'd figure out a way to drop
@@ -2615,7 +4116,7 @@ impl<SP: Deref> Channel<SP> where
 			return Ok(None);
 		}
 
-		if (self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32)) != 0 {
+		if !self.context.channel_state.can_generate_new_commitment() {
 			debug_assert!(force_holding_cell, "!force_holding_cell is only called when emptying the holding cell, so we shouldn't end up back in it!");
 			force_holding_cell = true;
 		}
@@ -2631,7 +4132,9 @@ impl<SP: Deref> Channel<SP> where
 						return Ok(None);
 					}
 				},
-				&HTLCUpdateAwaitingACK::FailHTLC { htlc_id, .. } => {
+				&HTLCUpdateAwaitingACK::FailHTLC { htlc_id, .. } |
+					&HTLCUpdateAwaitingACK::FailMalformedHTLC { htlc_id, .. 
} =>
+				{
 					if htlc_id_arg == htlc_id {
 						debug_assert!(false, "Tried to fail an HTLC that was already failed");
 						return Err(ChannelError::Ignore("Unable to find a pending HTLC which matched the given HTLC ID".to_owned()));
@@ -2641,144 +4144,58 @@ impl<SP: Deref> Channel<SP> where
 				}
 			}
 			log_trace!(logger, "Placing failure for HTLC ID {} in holding cell in channel {}.", htlc_id_arg, &self.context.channel_id());
-			self.context.holding_cell_htlc_updates.push(HTLCUpdateAwaitingACK::FailHTLC {
-				htlc_id: htlc_id_arg,
-				err_packet,
-			});
+			self.context.holding_cell_htlc_updates.push(err_contents.to_htlc_update_awaiting_ack(htlc_id_arg));
 			return Ok(None);
 		}
 
-		log_trace!(logger, "Failing HTLC ID {} back with a update_fail_htlc message in channel {}.", htlc_id_arg, &self.context.channel_id());
+		log_trace!(logger, "Failing HTLC ID {} back with {} message in channel {}.", htlc_id_arg,
+			E::Message::name(), &self.context.channel_id());
 		{
 			let htlc = &mut self.context.pending_inbound_htlcs[pending_idx];
-			htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailRelay(err_packet.clone()));
+			htlc.state = err_contents.clone().to_inbound_htlc_state();
 		}
 
-		Ok(Some(msgs::UpdateFailHTLC {
-			channel_id: self.context.channel_id(),
-			htlc_id: htlc_id_arg,
-			reason: err_packet
-		}))
+		Ok(Some(err_contents.to_message(htlc_id_arg, self.context.channel_id())))
 	}
 
 	// Message handlers:
+	/// Updates the state of the channel to indicate that all channels in the batch have received
+	/// funding_signed and persisted their monitors.
+	/// The funding transaction is consequently allowed to be broadcast, and the channel can be
+	/// treated as a non-batch channel going forward.
+	pub fn set_batch_ready(&mut self) {
+		self.context.is_batch_funding = None;
+		self.context.channel_state.clear_waiting_for_batch();
+	}
 
-	/// Handles a funding_signed message from the remote end.
-	/// If this call is successful, broadcast the funding transaction (and not before!)
-	pub fn funding_signed<L: Deref>(
-		&mut self, msg: &msgs::FundingSigned, best_block: BestBlock, signer_provider: &SP, logger: &L
-	) -> Result<ChannelMonitor<<SP::Target as SignerProvider>::Signer>, ChannelError>
+	/// Unsets the existing funding information.
+	///
+	/// This must only be used if the channel has not yet completed funding and has not been used.
+	///
+	/// Further, the channel must be immediately shut down after this with a call to
+	/// [`ChannelContext::force_shutdown`].
+	pub fn unset_funding_info(&mut self, temporary_channel_id: ChannelId) {
+		debug_assert!(matches!(
+			self.context.channel_state, ChannelState::AwaitingChannelReady(_)
+		));
+		self.context.channel_transaction_parameters.funding_outpoint = None;
+		self.context.channel_id = temporary_channel_id;
+	}
+
+	/// Handles a channel_ready message from our peer. If we've already sent our channel_ready
+	/// and the channel is now usable (and public), this may generate an announcement_signatures to
	/// reply with.
+	pub fn channel_ready<NS: Deref, L: Deref>(
+		&mut self, msg: &msgs::ChannelReady, node_signer: &NS, chain_hash: ChainHash,
+		user_config: &UserConfig, best_block: &BestBlock, logger: &L
+	) -> Result<Option<msgs::AnnouncementSignatures>, ChannelError>
+	where
+		NS::Target: NodeSigner,
+		L::Target: Logger
 	{
-		if !self.context.is_outbound() {
-			return Err(ChannelError::Close("Received funding_signed for an inbound channel?".to_owned()));
-		}
-		if self.context.channel_state & !(ChannelState::MonitorUpdateInProgress as u32) != ChannelState::FundingCreated as u32 {
-			return Err(ChannelError::Close("Received funding_signed in strange state!".to_owned()));
-		}
-		if self.context.commitment_secrets.get_min_seen_secret() != (1 << 48) ||
-			self.context.cur_counterparty_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER ||
-			self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
-			panic!("Should not have advanced channel commitment tx numbers prior to funding_created");
-		}
-
-		let funding_script = self.context.get_funding_redeemscript();
-
-		let counterparty_keys = self.context.build_remote_transaction_keys();
-		let counterparty_initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, false, logger).tx;
-		let counterparty_trusted_tx = counterparty_initial_commitment_tx.trust();
-		let counterparty_initial_bitcoin_tx = counterparty_trusted_tx.built_transaction();
-
-		log_trace!(logger, "Initial counterparty tx for channel {} is: txid {} tx {}",
-			&self.context.channel_id(), counterparty_initial_bitcoin_tx.txid, encode::serialize_hex(&counterparty_initial_bitcoin_tx.transaction));
-
-		let holder_signer = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number);
-		let initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &holder_signer, true, false, logger).tx;
-		{
-			let trusted_tx = initial_commitment_tx.trust();
-			let initial_commitment_bitcoin_tx = trusted_tx.built_transaction();
-			let sighash = initial_commitment_bitcoin_tx.get_sighash_all(&funding_script, self.context.channel_value_satoshis);
-			// They sign our commitment transaction, allowing us to broadcast the tx if we wish.
- if let Err(_) = self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, &self.context.get_counterparty_pubkeys().funding_pubkey) { - return Err(ChannelError::Close("Invalid funding_signed signature from peer".to_owned())); - } - } - - let holder_commitment_tx = HolderCommitmentTransaction::new( - initial_commitment_tx, - msg.signature, - Vec::new(), - &self.context.get_holder_pubkeys().funding_pubkey, - self.context.counterparty_funding_pubkey() - ); - - self.context.holder_signer.as_ref().validate_holder_commitment(&holder_commitment_tx, Vec::new()) - .map_err(|_| ChannelError::Close("Failed to validate our commitment".to_owned()))?; - - - let funding_redeemscript = self.context.get_funding_redeemscript(); - let funding_txo = self.context.get_funding_txo().unwrap(); - let funding_txo_script = funding_redeemscript.to_v0_p2wsh(); - let obscure_factor = get_commitment_transaction_number_obscure_factor(&self.context.get_holder_pubkeys().payment_point, &self.context.get_counterparty_pubkeys().payment_point, self.context.is_outbound()); - let shutdown_script = self.context.shutdown_scriptpubkey.clone().map(|script| script.into_inner()); - let mut monitor_signer = signer_provider.derive_channel_signer(self.context.channel_value_satoshis, self.context.channel_keys_id); - monitor_signer.provide_channel_parameters(&self.context.channel_transaction_parameters); - let channel_monitor = ChannelMonitor::new(self.context.secp_ctx.clone(), monitor_signer, - shutdown_script, self.context.get_holder_selected_contest_delay(), - &self.context.destination_script, (funding_txo, funding_txo_script), - &self.context.channel_transaction_parameters, - funding_redeemscript.clone(), self.context.channel_value_satoshis, - obscure_factor, - holder_commitment_tx, best_block, self.context.counterparty_node_id); - - channel_monitor.provide_initial_counterparty_commitment_tx( - counterparty_initial_bitcoin_tx.txid, Vec::new(), - self.context.cur_counterparty_commitment_transaction_number, - self.context.counterparty_cur_commitment_point.unwrap(), - counterparty_initial_commitment_tx.feerate_per_kw(), - counterparty_initial_commitment_tx.to_broadcaster_value_sat(), - counterparty_initial_commitment_tx.to_countersignatory_value_sat(), logger); - - assert_eq!(self.context.channel_state & (ChannelState::MonitorUpdateInProgress as u32), 0); // We have no had any monitor(s) yet to fail update! - if self.context.is_batch_funding() { - self.context.channel_state = ChannelState::FundingSent as u32 | ChannelState::WaitingForBatch as u32; - } else { - self.context.channel_state = ChannelState::FundingSent as u32; - } - self.context.cur_holder_commitment_transaction_number -= 1; - self.context.cur_counterparty_commitment_transaction_number -= 1; - - log_info!(logger, "Received funding_signed from peer for channel {}", &self.context.channel_id()); - - let need_channel_ready = self.check_get_channel_ready(0).is_some(); - self.monitor_updating_paused(false, false, need_channel_ready, Vec::new(), Vec::new(), Vec::new()); - Ok(channel_monitor) - } - - /// Updates the state of the channel to indicate that all channels in the batch have received - /// funding_signed and persisted their monitors. - /// The funding transaction is consequently allowed to be broadcast, and the channel can be - /// treated as a non-batch channel going forward. 
- pub fn set_batch_ready(&mut self) { - self.context.is_batch_funding = None; - self.context.channel_state &= !(ChannelState::WaitingForBatch as u32); - } - - /// Handles a channel_ready message from our peer. If we've already sent our channel_ready - /// and the channel is now usable (and public), this may generate an announcement_signatures to - /// reply with. - pub fn channel_ready( - &mut self, msg: &msgs::ChannelReady, node_signer: &NS, chain_hash: ChainHash, - user_config: &UserConfig, best_block: &BestBlock, logger: &L - ) -> Result, ChannelError> - where - NS::Target: NodeSigner, - L::Target: Logger - { - if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 { - self.context.workaround_lnd_bug_4006 = Some(msg.clone()); - return Err(ChannelError::Ignore("Peer sent channel_ready when we needed a channel_reestablish. The peer is likely lnd, see https://github.com/lightningnetwork/lnd/issues/4006".to_owned())); + if self.context.channel_state.is_peer_disconnected() { + self.context.workaround_lnd_bug_4006 = Some(msg.clone()); + return Err(ChannelError::Ignore("Peer sent channel_ready when we needed a channel_reestablish. The peer is likely lnd, see https://github.com/lightningnetwork/lnd/issues/4006".to_owned())); } if let Some(scid_alias) = msg.short_channel_id_alias { @@ -2790,24 +4207,31 @@ impl Channel where } } - let non_shutdown_state = self.context.channel_state & (!MULTI_STATE_FLAGS); - // Our channel_ready shouldn't have been sent if we are waiting for other channels in the // batch, but we can receive channel_ready messages. - debug_assert!( - non_shutdown_state & ChannelState::OurChannelReady as u32 == 0 || - non_shutdown_state & ChannelState::WaitingForBatch as u32 == 0 - ); - if non_shutdown_state & !(ChannelState::WaitingForBatch as u32) == ChannelState::FundingSent as u32 { - self.context.channel_state |= ChannelState::TheirChannelReady as u32; - } else if non_shutdown_state == (ChannelState::FundingSent as u32 | ChannelState::OurChannelReady as u32) { - self.context.channel_state = ChannelState::ChannelReady as u32 | (self.context.channel_state & MULTI_STATE_FLAGS); - self.context.update_time_counter += 1; - } else if self.context.channel_state & (ChannelState::ChannelReady as u32) != 0 || - // If we reconnected before sending our `channel_ready` they may still resend theirs: - (self.context.channel_state & (ChannelState::FundingSent as u32 | ChannelState::TheirChannelReady as u32) == - (ChannelState::FundingSent as u32 | ChannelState::TheirChannelReady as u32)) - { + let mut check_reconnection = false; + match &self.context.channel_state { + ChannelState::AwaitingChannelReady(flags) => { + let flags = flags.clone().clear(FundedStateFlags::ALL.into()); + debug_assert!(!flags.is_set(AwaitingChannelReadyFlags::OUR_CHANNEL_READY) || !flags.is_set(AwaitingChannelReadyFlags::WAITING_FOR_BATCH)); + if flags.clone().clear(AwaitingChannelReadyFlags::WAITING_FOR_BATCH) == AwaitingChannelReadyFlags::THEIR_CHANNEL_READY { + // If we reconnected before sending our `channel_ready` they may still resend theirs. 
+					check_reconnection = true;
+				} else if flags.clone().clear(AwaitingChannelReadyFlags::WAITING_FOR_BATCH).is_empty() {
+					self.context.channel_state.set_their_channel_ready();
+				} else if flags == AwaitingChannelReadyFlags::OUR_CHANNEL_READY {
+					self.context.channel_state = ChannelState::ChannelReady(self.context.channel_state.with_funded_state_flags_mask().into());
+					self.context.update_time_counter += 1;
+				} else {
+					// We're in `WAITING_FOR_BATCH`, so we should wait until we're ready.
+					debug_assert!(flags.is_set(AwaitingChannelReadyFlags::WAITING_FOR_BATCH));
+				}
+			}
+			// If we reconnected before sending our `channel_ready` they may still resend theirs.
+			ChannelState::ChannelReady(_) => check_reconnection = true,
+			_ => return Err(ChannelError::close("Peer sent a channel_ready at a strange time".to_owned())),
+		}
+		if check_reconnection {
 			// They probably disconnected/reconnected and re-sent the channel_ready, which is
 			// required, or they're sending a fresh SCID alias.
 			let expected_point =
@@ -2828,11 +4252,9 @@ impl<SP: Deref> Channel<SP> where
 				).expect("We already advanced, so previous secret keys should have been validated already")))
 			};
 			if expected_point != Some(msg.next_per_commitment_point) {
-				return Err(ChannelError::Close("Peer sent a reconnect channel_ready with a different point".to_owned()));
+				return Err(ChannelError::close("Peer sent a reconnect channel_ready with a different point".to_owned()));
 			}
 			return Ok(None);
-		} else {
-			return Err(ChannelError::Close("Peer sent a channel_ready at a strange time".to_owned()));
 		}
 
 		self.context.counterparty_prev_commitment_point = self.context.counterparty_cur_commitment_point;
@@ -2840,46 +4262,40 @@ impl<SP: Deref> Channel<SP> where
 
 		log_info!(logger, "Received channel_ready from peer for channel {}", &self.context.channel_id());
 
-		Ok(self.get_announcement_sigs(node_signer, chain_hash, user_config, best_block.height(), logger))
+		Ok(self.get_announcement_sigs(node_signer, chain_hash, user_config, best_block.height, logger))
 	}
 
-	pub fn update_add_htlc<F, FE: Deref, L: Deref>(
-		&mut self, msg: &msgs::UpdateAddHTLC, mut pending_forward_status: PendingHTLCStatus,
-		create_pending_htlc_status: F, fee_estimator: &LowerBoundedFeeEstimator<FE>, logger: &L
-	) -> Result<(), ChannelError>
-	where F: for<'a> Fn(&'a Self, PendingHTLCStatus, u16) -> PendingHTLCStatus,
-		FE::Target: FeeEstimator, L::Target: Logger,
-	{
-		// We can't accept HTLCs sent after we've sent a shutdown.
-		let local_sent_shutdown = (self.context.channel_state & (ChannelState::ChannelReady as u32 | ChannelState::LocalShutdownSent as u32)) != (ChannelState::ChannelReady as u32);
-		if local_sent_shutdown {
-			pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x4000|8);
+	pub fn update_add_htlc<F: Deref>(
+		&mut self, msg: &msgs::UpdateAddHTLC, pending_forward_status: PendingHTLCStatus,
+		fee_estimator: &LowerBoundedFeeEstimator<F>,
+	) -> Result<(), ChannelError> where F::Target: FeeEstimator {
+		if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
+			return Err(ChannelError::close("Got add HTLC message when channel was not in an operational state".to_owned()));
 		}
 
 		// If the remote has sent a shutdown prior to adding this HTLC, then they are in violation of the spec.
- let remote_sent_shutdown = (self.context.channel_state & (ChannelState::ChannelReady as u32 | ChannelState::RemoteShutdownSent as u32)) != (ChannelState::ChannelReady as u32); - if remote_sent_shutdown { - return Err(ChannelError::Close("Got add HTLC message when channel was not in an operational state".to_owned())); + if self.context.channel_state.is_remote_shutdown_sent() { + return Err(ChannelError::close("Got add HTLC message when channel was not in an operational state".to_owned())); } - if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 { - return Err(ChannelError::Close("Peer sent update_add_htlc when we needed a channel_reestablish".to_owned())); + if self.context.channel_state.is_peer_disconnected() { + return Err(ChannelError::close("Peer sent update_add_htlc when we needed a channel_reestablish".to_owned())); } if msg.amount_msat > self.context.channel_value_satoshis * 1000 { - return Err(ChannelError::Close("Remote side tried to send more than the total value of the channel".to_owned())); + return Err(ChannelError::close("Remote side tried to send more than the total value of the channel".to_owned())); } if msg.amount_msat == 0 { - return Err(ChannelError::Close("Remote side tried to send a 0-msat HTLC".to_owned())); + return Err(ChannelError::close("Remote side tried to send a 0-msat HTLC".to_owned())); } if msg.amount_msat < self.context.holder_htlc_minimum_msat { - return Err(ChannelError::Close(format!("Remote side tried to send less than our minimum HTLC value. Lower limit: ({}). Actual: ({})", self.context.holder_htlc_minimum_msat, msg.amount_msat))); + return Err(ChannelError::close(format!("Remote side tried to send less than our minimum HTLC value. Lower limit: ({}). 
Actual: ({})", self.context.holder_htlc_minimum_msat, msg.amount_msat))); } - let inbound_stats = self.context.get_inbound_pending_htlc_stats(None); - let outbound_stats = self.context.get_outbound_pending_htlc_stats(None); - if inbound_stats.pending_htlcs + 1 > self.context.holder_max_accepted_htlcs as u32 { - return Err(ChannelError::Close(format!("Remote tried to push more than our max accepted HTLCs ({})", self.context.holder_max_accepted_htlcs))); + let dust_exposure_limiting_feerate = self.context.get_dust_exposure_limiting_feerate(&fee_estimator); + let htlc_stats = self.context.get_pending_htlc_stats(None, dust_exposure_limiting_feerate); + if htlc_stats.pending_inbound_htlcs + 1 > self.context.holder_max_accepted_htlcs as usize { + return Err(ChannelError::close(format!("Remote tried to push more than our max accepted HTLCs ({})", self.context.holder_max_accepted_htlcs))); } - if inbound_stats.pending_htlcs_value_msat + msg.amount_msat > self.context.holder_max_htlc_value_in_flight_msat { - return Err(ChannelError::Close(format!("Remote HTLC add would put them over our max HTLC value ({})", self.context.holder_max_htlc_value_in_flight_msat))); + if htlc_stats.pending_inbound_htlcs_value_msat + msg.amount_msat > self.context.holder_max_htlc_value_in_flight_msat { + return Err(ChannelError::close(format!("Remote HTLC add would put them over our max HTLC value ({})", self.context.holder_max_htlc_value_in_flight_msat))); } // Check holder_selected_channel_reserve_satoshis (we're getting paid, so they have to at least meet @@ -2903,40 +4319,12 @@ impl Channel where } } - let max_dust_htlc_exposure_msat = self.context.get_max_dust_htlc_exposure_msat(fee_estimator); - let (htlc_timeout_dust_limit, htlc_success_dust_limit) = if self.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() { - (0, 0) - } else { - let dust_buffer_feerate = self.context.get_dust_buffer_feerate(None) as u64; - (dust_buffer_feerate * htlc_timeout_tx_weight(self.context.get_channel_type()) / 1000, - dust_buffer_feerate * htlc_success_tx_weight(self.context.get_channel_type()) / 1000) - }; - let exposure_dust_limit_timeout_sats = htlc_timeout_dust_limit + self.context.counterparty_dust_limit_satoshis; - if msg.amount_msat / 1000 < exposure_dust_limit_timeout_sats { - let on_counterparty_tx_dust_htlc_exposure_msat = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat + msg.amount_msat; - if on_counterparty_tx_dust_htlc_exposure_msat > max_dust_htlc_exposure_msat { - log_info!(logger, "Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on counterparty commitment tx", - on_counterparty_tx_dust_htlc_exposure_msat, max_dust_htlc_exposure_msat); - pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x1000|7); - } - } - - let exposure_dust_limit_success_sats = htlc_success_dust_limit + self.context.holder_dust_limit_satoshis; - if msg.amount_msat / 1000 < exposure_dust_limit_success_sats { - let on_holder_tx_dust_htlc_exposure_msat = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat + msg.amount_msat; - if on_holder_tx_dust_htlc_exposure_msat > max_dust_htlc_exposure_msat { - log_info!(logger, "Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on holder commitment tx", - on_holder_tx_dust_htlc_exposure_msat, max_dust_htlc_exposure_msat); - pending_forward_status = create_pending_htlc_status(self, 
pending_forward_status, 0x1000|7); - } - } - let pending_value_to_self_msat = - self.context.value_to_self_msat + inbound_stats.pending_htlcs_value_msat - removed_outbound_total_msat; + self.context.value_to_self_msat + htlc_stats.pending_inbound_htlcs_value_msat - removed_outbound_total_msat; let pending_remote_value_msat = self.context.channel_value_satoshis * 1000 - pending_value_to_self_msat; if pending_remote_value_msat < msg.amount_msat { - return Err(ChannelError::Close("Remote HTLC add would overdraw remaining funds".to_owned())); + return Err(ChannelError::close("Remote HTLC add would overdraw remaining funds".to_owned())); } // Check that the remote can afford to pay for this HTLC on-chain at the current @@ -2952,10 +4340,10 @@ impl Channel where 0 }; if pending_remote_value_msat.saturating_sub(msg.amount_msat).saturating_sub(anchor_outputs_value_msat) < remote_commit_tx_fee_msat { - return Err(ChannelError::Close("Remote HTLC add would not leave enough to pay for fees".to_owned())); + return Err(ChannelError::close("Remote HTLC add would not leave enough to pay for fees".to_owned())); }; if pending_remote_value_msat.saturating_sub(msg.amount_msat).saturating_sub(remote_commit_tx_fee_msat).saturating_sub(anchor_outputs_value_msat) < self.context.holder_selected_channel_reserve_satoshis * 1000 { - return Err(ChannelError::Close("Remote HTLC add would put them under remote reserve value".to_owned())); + return Err(ChannelError::close("Remote HTLC add would put them under remote reserve value".to_owned())); } } @@ -2964,38 +4352,22 @@ impl Channel where } else { 0 }; - if !self.context.is_outbound() { - // `Some(())` is for the fee spike buffer we keep for the remote. This deviates from - // the spec because the fee spike buffer requirement doesn't exist on the receiver's - // side, only on the sender's. Note that with anchor outputs we are no longer as - // sensitive to fee spikes, so we need to account for them. - let htlc_candidate = HTLCCandidate::new(msg.amount_msat, HTLCInitiator::RemoteOffered); - let mut remote_fee_cost_incl_stuck_buffer_msat = self.context.next_remote_commit_tx_fee_msat(htlc_candidate, Some(())); - if !self.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() { - remote_fee_cost_incl_stuck_buffer_msat *= FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE; - } - if pending_remote_value_msat.saturating_sub(msg.amount_msat).saturating_sub(self.context.holder_selected_channel_reserve_satoshis * 1000).saturating_sub(anchor_outputs_value_msat) < remote_fee_cost_incl_stuck_buffer_msat { - // Note that if the pending_forward_status is not updated here, then it's because we're already failing - // the HTLC, i.e. its status is already set to failing. - log_info!(logger, "Attempting to fail HTLC due to fee spike buffer violation in channel {}. Rebalancing is required.", &self.context.channel_id()); - pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x1000|7); - } - } else { + if self.context.is_outbound() { // Check that they won't violate our local required channel reserve by adding this HTLC. 
let htlc_candidate = HTLCCandidate::new(msg.amount_msat, HTLCInitiator::RemoteOffered); let local_commit_tx_fee_msat = self.context.next_local_commit_tx_fee_msat(htlc_candidate, None); if self.context.value_to_self_msat < self.context.counterparty_selected_channel_reserve_satoshis.unwrap() * 1000 + local_commit_tx_fee_msat + anchor_outputs_value_msat { - return Err(ChannelError::Close("Cannot accept HTLC that would put our balance under counterparty-announced channel reserve value".to_owned())); + return Err(ChannelError::close("Cannot accept HTLC that would put our balance under counterparty-announced channel reserve value".to_owned())); } } if self.context.next_counterparty_htlc_id != msg.htlc_id { - return Err(ChannelError::Close(format!("Remote skipped HTLC ID (skipped ID: {})", self.context.next_counterparty_htlc_id))); + return Err(ChannelError::close(format!("Remote skipped HTLC ID (skipped ID: {})", self.context.next_counterparty_htlc_id))); } if msg.cltv_expiry >= 500000000 { - return Err(ChannelError::Close("Remote provided CLTV expiry in seconds instead of block height".to_owned())); + return Err(ChannelError::close("Remote provided CLTV expiry in seconds instead of block height".to_owned())); } - if self.context.channel_state & ChannelState::LocalShutdownSent as u32 != 0 { + if self.context.channel_state.is_local_shutdown_sent() { if let PendingHTLCStatus::Forward(_) = pending_forward_status { panic!("ChannelManager shouldn't be trying to add a forwardable HTLC after we've started closing"); } @@ -3008,7 +4380,9 @@ impl Channel where amount_msat: msg.amount_msat, payment_hash: msg.payment_hash, cltv_expiry: msg.cltv_expiry, - state: InboundHTLCState::RemoteAnnounced(pending_forward_status), + state: InboundHTLCState::RemoteAnnounced(InboundHTLCResolution::Resolved { + pending_htlc_status: pending_forward_status + }), }); Ok(()) } @@ -3024,43 +4398,43 @@ impl Channel where Some(payment_preimage) => { let payment_hash = PaymentHash(Sha256::hash(&payment_preimage.0[..]).to_byte_array()); if payment_hash != htlc.payment_hash { - return Err(ChannelError::Close(format!("Remote tried to fulfill HTLC ({}) with an incorrect preimage", htlc_id))); + return Err(ChannelError::close(format!("Remote tried to fulfill HTLC ({}) with an incorrect preimage", htlc_id))); } OutboundHTLCOutcome::Success(Some(payment_preimage)) } }; match htlc.state { OutboundHTLCState::LocalAnnounced(_) => - return Err(ChannelError::Close(format!("Remote tried to fulfill/fail HTLC ({}) before it had been committed", htlc_id))), + return Err(ChannelError::close(format!("Remote tried to fulfill/fail HTLC ({}) before it had been committed", htlc_id))), OutboundHTLCState::Committed => { htlc.state = OutboundHTLCState::RemoteRemoved(outcome); }, OutboundHTLCState::AwaitingRemoteRevokeToRemove(_) | OutboundHTLCState::AwaitingRemovedRemoteRevoke(_) | OutboundHTLCState::RemoteRemoved(_) => - return Err(ChannelError::Close(format!("Remote tried to fulfill/fail HTLC ({}) that they'd already fulfilled/failed", htlc_id))), + return Err(ChannelError::close(format!("Remote tried to fulfill/fail HTLC ({}) that they'd already fulfilled/failed", htlc_id))), } return Ok(htlc); } } - Err(ChannelError::Close("Remote tried to fulfill/fail an HTLC we couldn't find".to_owned())) + Err(ChannelError::close("Remote tried to fulfill/fail an HTLC we couldn't find".to_owned())) } - pub fn update_fulfill_htlc(&mut self, msg: &msgs::UpdateFulfillHTLC) -> Result<(HTLCSource, u64), ChannelError> { - if (self.context.channel_state & 
(ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) {
-			return Err(ChannelError::Close("Got fulfill HTLC message when channel was not in an operational state".to_owned()));
+	pub fn update_fulfill_htlc(&mut self, msg: &msgs::UpdateFulfillHTLC) -> Result<(HTLCSource, u64, Option<u64>), ChannelError> {
+		if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
+			return Err(ChannelError::close("Got fulfill HTLC message when channel was not in an operational state".to_owned()));
 		}
-		if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
-			return Err(ChannelError::Close("Peer sent update_fulfill_htlc when we needed a channel_reestablish".to_owned()));
+		if self.context.channel_state.is_peer_disconnected() {
+			return Err(ChannelError::close("Peer sent update_fulfill_htlc when we needed a channel_reestablish".to_owned()));
 		}
 
-		self.mark_outbound_htlc_removed(msg.htlc_id, Some(msg.payment_preimage), None).map(|htlc| (htlc.source.clone(), htlc.amount_msat))
+		self.mark_outbound_htlc_removed(msg.htlc_id, Some(msg.payment_preimage), None).map(|htlc| (htlc.source.clone(), htlc.amount_msat, htlc.skimmed_fee_msat))
 	}
 
 	pub fn update_fail_htlc(&mut self, msg: &msgs::UpdateFailHTLC, fail_reason: HTLCFailReason) -> Result<(), ChannelError> {
-		if (self.context.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) {
-			return Err(ChannelError::Close("Got fail HTLC message when channel was not in an operational state".to_owned()));
+		if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
+			return Err(ChannelError::close("Got fail HTLC message when channel was not in an operational state".to_owned()));
 		}
-		if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
-			return Err(ChannelError::Close("Peer sent update_fail_htlc when we needed a channel_reestablish".to_owned()));
+		if self.context.channel_state.is_peer_disconnected() {
+			return Err(ChannelError::close("Peer sent update_fail_htlc when we needed a channel_reestablish".to_owned()));
 		}
 
 		self.mark_outbound_htlc_removed(msg.htlc_id, None, Some(fail_reason))?;
@@ -3068,11 +4442,11 @@ impl<SP: Deref> Channel<SP> where
 	}
 
 	pub fn update_fail_malformed_htlc(&mut self, msg: &msgs::UpdateFailMalformedHTLC, fail_reason: HTLCFailReason) -> Result<(), ChannelError> {
-		if (self.context.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) {
-			return Err(ChannelError::Close("Got fail malformed HTLC message when channel was not in an operational state".to_owned()));
+		if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
+			return Err(ChannelError::close("Got fail malformed HTLC message when channel was not in an operational state".to_owned()));
 		}
-		if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
-			return Err(ChannelError::Close("Peer sent update_fail_malformed_htlc when we needed a channel_reestablish".to_owned()));
+		if self.context.channel_state.is_peer_disconnected() {
+			return Err(ChannelError::close("Peer sent update_fail_malformed_htlc when we needed a channel_reestablish".to_owned()));
 		}
 
 		self.mark_outbound_htlc_removed(msg.htlc_id, None, Some(fail_reason))?;
@@ -3082,21 +4456,21 @@ impl<SP: Deref> Channel<SP> where
 	}
 
 	pub fn commitment_signed<L: Deref>(&mut self, msg: &msgs::CommitmentSigned, logger: &L) -> Result<Option<ChannelMonitorUpdate>, ChannelError>
 		where L::Target: Logger
 	{
-		if 
(self.context.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) { - return Err(ChannelError::Close("Got commitment signed message when channel was not in an operational state".to_owned())); + if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) { + return Err(ChannelError::close("Got commitment signed message when channel was not in an operational state".to_owned())); } - if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 { - return Err(ChannelError::Close("Peer sent commitment_signed when we needed a channel_reestablish".to_owned())); + if self.context.channel_state.is_peer_disconnected() { + return Err(ChannelError::close("Peer sent commitment_signed when we needed a channel_reestablish".to_owned())); } - if self.context.channel_state & BOTH_SIDES_SHUTDOWN_MASK == BOTH_SIDES_SHUTDOWN_MASK && self.context.last_sent_closing_fee.is_some() { - return Err(ChannelError::Close("Peer sent commitment_signed after we'd started exchanging closing_signeds".to_owned())); + if self.context.channel_state.is_both_sides_shutdown() && self.context.last_sent_closing_fee.is_some() { + return Err(ChannelError::close("Peer sent commitment_signed after we'd started exchanging closing_signeds".to_owned())); } let funding_script = self.context.get_funding_redeemscript(); - let keys = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number); + let keys = self.context.build_holder_transaction_keys(); - let commitment_stats = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &keys, true, false, logger); + let commitment_stats = self.context.build_commitment_transaction(self.context.holder_commitment_point.transaction_number(), &keys, true, false, logger); let commitment_txid = { let trusted_tx = commitment_stats.tx.trust(); let bitcoin_tx = trusted_tx.built_transaction(); @@ -3107,7 +4481,7 @@ impl Channel where log_bytes!(self.context.counterparty_funding_pubkey().serialize()), encode::serialize_hex(&bitcoin_tx.transaction), log_bytes!(sighash[..]), encode::serialize_hex(&funding_script), &self.context.channel_id()); if let Err(_) = self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, &self.context.counterparty_funding_pubkey()) { - return Err(ChannelError::Close("Invalid commitment tx signature from peer".to_owned())); + return Err(ChannelError::close("Invalid commitment tx signature from peer".to_owned())); } bitcoin_tx.txid }; @@ -3122,7 +4496,7 @@ impl Channel where debug_assert!(!self.context.is_outbound()); let counterparty_reserve_we_require_msat = self.context.holder_selected_channel_reserve_satoshis * 1000; if commitment_stats.remote_balance_msat < commitment_stats.total_fee_sat * 1000 + counterparty_reserve_we_require_msat { - return Err(ChannelError::Close("Funding remote cannot afford proposed new fee".to_owned())); + return Err(ChannelError::close("Funding remote cannot afford proposed new fee".to_owned())); } } #[cfg(any(test, fuzzing))] @@ -3144,7 +4518,7 @@ impl Channel where } if msg.htlc_signatures.len() != commitment_stats.num_nondust_htlcs { - return Err(ChannelError::Close(format!("Got wrong number of HTLC signatures ({}) from remote. It must be {}", msg.htlc_signatures.len(), commitment_stats.num_nondust_htlcs))); + return Err(ChannelError::close(format!("Got wrong number of HTLC signatures ({}) from remote. 
It must be {}", msg.htlc_signatures.len(), commitment_stats.num_nondust_htlcs))); } // Up to LDK 0.0.115, HTLC information was required to be duplicated in the @@ -3172,12 +4546,12 @@ impl Channel where let htlc_redeemscript = chan_utils::get_htlc_redeemscript(&htlc, &self.context.channel_type, &keys); let htlc_sighashtype = if self.context.channel_type.supports_anchors_zero_fee_htlc_tx() { EcdsaSighashType::SinglePlusAnyoneCanPay } else { EcdsaSighashType::All }; - let htlc_sighash = hash_to_message!(&sighash::SighashCache::new(&htlc_tx).segwit_signature_hash(0, &htlc_redeemscript, htlc.amount_msat / 1000, htlc_sighashtype).unwrap()[..]); + let htlc_sighash = hash_to_message!(&sighash::SighashCache::new(&htlc_tx).p2wsh_signature_hash(0, &htlc_redeemscript, htlc.to_bitcoin_amount(), htlc_sighashtype).unwrap()[..]); log_trace!(logger, "Checking HTLC tx signature {} by key {} against tx {} (sighash {}) with redeemscript {} in channel {}.", log_bytes!(msg.htlc_signatures[idx].serialize_compact()[..]), log_bytes!(keys.countersignatory_htlc_key.to_public_key().serialize()), encode::serialize_hex(&htlc_tx), log_bytes!(htlc_sighash[..]), encode::serialize_hex(&htlc_redeemscript), &self.context.channel_id()); if let Err(_) = self.context.secp_ctx.verify_ecdsa(&htlc_sighash, &msg.htlc_signatures[idx], &keys.countersignatory_htlc_key.to_public_key()) { - return Err(ChannelError::Close("Invalid HTLC tx signature from peer".to_owned())); + return Err(ChannelError::close("Invalid HTLC tx signature from peer".to_owned())); } if !separate_nondust_htlc_sources { htlcs_and_sigs.push((htlc, Some(msg.htlc_signatures[idx]), source_opt.take())); @@ -3201,8 +4575,8 @@ impl Channel where self.context.counterparty_funding_pubkey() ); - self.context.holder_signer.as_ref().validate_holder_commitment(&holder_commitment_tx, commitment_stats.preimages) - .map_err(|_| ChannelError::Close("Failed to validate our commitment".to_owned()))?; + self.context.holder_signer.as_ref().validate_holder_commitment(&holder_commitment_tx, commitment_stats.outbound_htlc_preimages) + .map_err(|_| ChannelError::close("Failed to validate our commitment".to_owned()))?; // Update state now that we've passed all the can-fail calls... 
let mut need_commitment = false; @@ -3214,13 +4588,13 @@ impl Channel where } for htlc in self.context.pending_inbound_htlcs.iter_mut() { - let new_forward = if let &InboundHTLCState::RemoteAnnounced(ref forward_info) = &htlc.state { - Some(forward_info.clone()) + let htlc_resolution = if let &InboundHTLCState::RemoteAnnounced(ref resolution) = &htlc.state { + Some(resolution.clone()) } else { None }; - if let Some(forward_info) = new_forward { + if let Some(htlc_resolution) = htlc_resolution { log_trace!(logger, "Updating HTLC {} to AwaitingRemoteRevokeToAnnounce due to commitment_signed in channel {}.", &htlc.payment_hash, &self.context.channel_id); - htlc.state = InboundHTLCState::AwaitingRemoteRevokeToAnnounce(forward_info); + htlc.state = InboundHTLCState::AwaitingRemoteRevokeToAnnounce(htlc_resolution); need_commitment = true; } } @@ -3249,25 +4623,38 @@ impl Channel where self.context.latest_monitor_update_id += 1; let mut monitor_update = ChannelMonitorUpdate { update_id: self.context.latest_monitor_update_id, + counterparty_node_id: Some(self.context.counterparty_node_id), updates: vec![ChannelMonitorUpdateStep::LatestHolderCommitmentTXInfo { commitment_tx: holder_commitment_tx, htlc_outputs: htlcs_and_sigs, claimed_htlcs, nondust_htlc_sources, - }] + }], + channel_id: Some(self.context.channel_id()), }; - self.context.cur_holder_commitment_transaction_number -= 1; + if self.context.holder_commitment_point.advance(&self.context.holder_signer, &self.context.secp_ctx, logger).is_err() { + // We only fail to advance our commitment point/number if we're currently + // waiting for our signer to unblock and provide a commitment point. + // During post-funding channel operation, we only advance our point upon + // receiving a commitment_signed, and our counterparty cannot send us + // another commitment signed until we've provided a new commitment point + // in revoke_and_ack, which requires unblocking our signer and completing + // the advance to the next point. This should be unreachable since + // a new commitment_signed should fail at our signature checks above. + debug_assert!(false, "We should be ready to advance our commitment point by the time we receive commitment_signed"); + return Err(ChannelError::close("Failed to advance our commitment point".to_owned())); + } self.context.expecting_peer_commitment_signed = false; // Note that if we need_commitment & !AwaitingRemoteRevoke we'll call // build_commitment_no_status_check() next which will reset this to RAAFirst. self.context.resend_order = RAACommitmentOrder::CommitmentFirst; - if (self.context.channel_state & ChannelState::MonitorUpdateInProgress as u32) != 0 { + if self.context.channel_state.is_monitor_update_in_progress() { // In case we initially failed monitor updating without requiring a response, we need // to make sure the RAA gets sent first. self.context.monitor_pending_revoke_and_ack = true; - if need_commitment && (self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32)) == 0 { + if need_commitment && !self.context.channel_state.is_awaiting_remote_revoke() { // If we were going to send a commitment_signed after the RAA, go ahead and do all // the corresponding HTLC status updates so that // get_last_commitment_update_for_send includes the right HTLCs. 
@@ -3283,7 +4670,7 @@ impl<SP: Deref> Channel<SP> where
 			return Ok(self.push_ret_blockable_mon_update(monitor_update));
 		}
 
-		let need_commitment_signed = if need_commitment && (self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32)) == 0 {
+		let need_commitment_signed = if need_commitment && !self.context.channel_state.is_awaiting_remote_revoke() {
 			// If we're AwaitingRemoteRevoke we can't send a new commitment here, but that's ok -
 			// we'll send one right away when we get the revoke_and_ack when we
 			// free_holding_cell_htlcs().
@@ -3309,8 +4696,7 @@ impl<SP: Deref> Channel<SP> where
 	) -> (Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>)
 	where F::Target: FeeEstimator, L::Target: Logger
 	{
-		if self.context.channel_state & !STATE_FLAGS >= ChannelState::ChannelReady as u32 &&
-			(self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32)) == 0 {
+		if matches!(self.context.channel_state, ChannelState::ChannelReady(_)) && self.context.channel_state.can_generate_new_commitment() {
 			self.free_holding_cell_htlcs(fee_estimator, logger)
 		} else { (None, Vec::new()) }
 	}
@@ -3322,14 +4708,16 @@ impl<SP: Deref> Channel<SP> where
 	) -> (Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>)
 	where F::Target: FeeEstimator, L::Target: Logger
 	{
-		assert_eq!(self.context.channel_state & ChannelState::MonitorUpdateInProgress as u32, 0);
+		assert!(!self.context.channel_state.is_monitor_update_in_progress());
 		if self.context.holding_cell_htlc_updates.len() != 0 || self.context.holding_cell_update_fee.is_some() {
 			log_trace!(logger, "Freeing holding cell with {} HTLC updates{} in channel {}", self.context.holding_cell_htlc_updates.len(),
 				if self.context.holding_cell_update_fee.is_some() { " and a fee update" } else { "" }, &self.context.channel_id());
 
 			let mut monitor_update = ChannelMonitorUpdate {
 				update_id: self.context.latest_monitor_update_id + 1, // We don't increment this yet!
+				counterparty_node_id: Some(self.context.counterparty_node_id),
 				updates: Vec::new(),
+				channel_id: Some(self.context.channel_id()),
 			};
 
 			let mut htlc_updates = Vec::new();
@@ -3344,14 +4732,15 @@ impl<SP: Deref> Channel<SP> where
 				// the limit. In case it's less rare than I anticipate, we may want to revisit
 				// handling this case better and maybe fulfilling some of the HTLCs while attempting
 				// to rebalance channels.
-				match &htlc_update {
+				let fail_htlc_res = match &htlc_update {
 					&HTLCUpdateAwaitingACK::AddHTLC {
 						amount_msat, cltv_expiry, ref payment_hash, ref source, ref onion_routing_packet,
-						skimmed_fee_msat, ..
+						skimmed_fee_msat, blinding_point, ..
 					} => {
-						match self.send_htlc(amount_msat, *payment_hash, cltv_expiry, source.clone(),
-							onion_routing_packet.clone(), false, skimmed_fee_msat, fee_estimator, logger)
-						{
+						match self.send_htlc(
+							amount_msat, *payment_hash, cltv_expiry, source.clone(), onion_routing_packet.clone(),
+							false, skimmed_fee_msat, blinding_point, fee_estimator, logger
+						) {
 							Ok(_) => update_add_count += 1,
 							Err(e) => {
 								match e {
@@ -3371,6 +4760,7 @@ impl<SP: Deref> Channel<SP> where
 							}
 						}
 					}
+						None
 					},
 					&HTLCUpdateAwaitingACK::ClaimHTLC { ref payment_preimage, htlc_id, .. 
} => {
 						// If an HTLC claim was previously added to the holding cell (via
@@ -3384,26 +4774,33 @@
 							{ monitor_update } else { unreachable!() };
 						update_fulfill_count += 1;
 						monitor_update.updates.append(&mut additional_monitor_update.updates);
+						None
 					},
 					&HTLCUpdateAwaitingACK::FailHTLC { htlc_id, ref err_packet } => {
-						match self.fail_htlc(htlc_id, err_packet.clone(), false, logger) {
-							Ok(update_fail_msg_option) => {
-								// If an HTLC failure was previously added to the holding cell (via
-								// `queue_fail_htlc`) then generating the fail message itself must
-								// not fail - we should never end up in a state where we double-fail
-								// an HTLC or fail-then-claim an HTLC as it indicates we didn't wait
-								// for a full revocation before failing.
-								debug_assert!(update_fail_msg_option.is_some());
-								update_fail_count += 1;
-							},
-							Err(e) => {
-								if let ChannelError::Ignore(_) = e {}
-								else {
-									panic!("Got a non-IgnoreError action trying to fail holding cell HTLC");
-								}
-							}
-						}
+						Some(self.fail_htlc(htlc_id, err_packet.clone(), false, logger)
+							.map(|fail_msg_opt| fail_msg_opt.map(|_| ())))
 					},
+					&HTLCUpdateAwaitingACK::FailMalformedHTLC { htlc_id, failure_code, sha256_of_onion } => {
+						Some(self.fail_htlc(htlc_id, (sha256_of_onion, failure_code), false, logger)
+							.map(|fail_msg_opt| fail_msg_opt.map(|_| ())))
+					}
+				};
+				if let Some(res) = fail_htlc_res {
+					match res {
+						Ok(fail_msg_opt) => {
+							// If an HTLC failure was previously added to the holding cell (via
+							// `queue_fail_{malformed_}htlc`) then generating the fail message itself must
+							// not fail - we should never end up in a state where we double-fail
+							// an HTLC or fail-then-claim an HTLC as it indicates we didn't wait
+							// for a full revocation before failing.
+							debug_assert!(fail_msg_opt.is_some());
+							update_fail_count += 1;
+						},
+						Err(ChannelError::Ignore(_)) => {},
+						Err(_) => {
+							panic!("Got a non-IgnoreError action trying to fail holding cell HTLC");
+						},
+					}
+				}
 			}
 			if update_add_count == 0 && update_fulfill_count == 0 && update_fail_count == 0 && self.context.holding_cell_update_fee.is_none() {
@@ -3442,25 +4839,25 @@
 	) -> Result<(Vec<(HTLCSource, PaymentHash)>, Option<ChannelMonitorUpdate>), ChannelError>
 	where F::Target: FeeEstimator, L::Target: Logger,
 	{
-		if (self.context.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) {
-			return Err(ChannelError::Close("Got revoke/ACK message when channel was not in an operational state".to_owned()));
+		if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
+			return Err(ChannelError::close("Got revoke/ACK message when channel was not in an operational state".to_owned()));
 		}
-		if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
-			return Err(ChannelError::Close("Peer sent revoke_and_ack when we needed a channel_reestablish".to_owned()));
+		if self.context.channel_state.is_peer_disconnected() {
+			return Err(ChannelError::close("Peer sent revoke_and_ack when we needed a channel_reestablish".to_owned()));
 		}
-		if self.context.channel_state & BOTH_SIDES_SHUTDOWN_MASK == BOTH_SIDES_SHUTDOWN_MASK && self.context.last_sent_closing_fee.is_some() {
-			return Err(ChannelError::Close("Peer sent revoke_and_ack after we'd started exchanging closing_signeds".to_owned()));
+		if self.context.channel_state.is_both_sides_shutdown() && self.context.last_sent_closing_fee.is_some() {
+			return Err(ChannelError::close("Peer sent revoke_and_ack after we'd started exchanging 
closing_signeds".to_owned())); } let secret = secp_check!(SecretKey::from_slice(&msg.per_commitment_secret), "Peer provided an invalid per_commitment_secret".to_owned()); if let Some(counterparty_prev_commitment_point) = self.context.counterparty_prev_commitment_point { if PublicKey::from_secret_key(&self.context.secp_ctx, &secret) != counterparty_prev_commitment_point { - return Err(ChannelError::Close("Got a revoke commitment secret which didn't correspond to their current pubkey".to_owned())); + return Err(ChannelError::close("Got a revoke commitment secret which didn't correspond to their current pubkey".to_owned())); } } - if self.context.channel_state & ChannelState::AwaitingRemoteRevoke as u32 == 0 { + if !self.context.channel_state.is_awaiting_remote_revoke() { // Our counterparty seems to have burned their coins to us (by revoking a state when we // haven't given them a new commitment transaction to broadcast). We should probably // take advantage of this by updating our channel monitor, sending them an error, and @@ -3468,7 +4865,7 @@ impl Channel where // lot of work, and there's some chance this is all a misunderstanding anyway. // We have to do *something*, though, since our signer may get mad at us for otherwise // jumping a remote commitment number, so best to just force-close and move on. - return Err(ChannelError::Close("Received an unexpected revoke_and_ack".to_owned())); + return Err(ChannelError::close("Received an unexpected revoke_and_ack".to_owned())); } #[cfg(any(test, fuzzing))] @@ -3482,26 +4879,31 @@ impl Channel where ecdsa.validate_counterparty_revocation( self.context.cur_counterparty_commitment_transaction_number + 1, &secret - ).map_err(|_| ChannelError::Close("Failed to validate revocation from peer".to_owned()))?; - } + ).map_err(|_| ChannelError::close("Failed to validate revocation from peer".to_owned()))?; + }, + // TODO (taproot|arik) + #[cfg(taproot)] + _ => todo!() }; self.context.commitment_secrets.provide_secret(self.context.cur_counterparty_commitment_transaction_number + 1, msg.per_commitment_secret) - .map_err(|_| ChannelError::Close("Previous secrets did not match new one".to_owned()))?; + .map_err(|_| ChannelError::close("Previous secrets did not match new one".to_owned()))?; self.context.latest_monitor_update_id += 1; let mut monitor_update = ChannelMonitorUpdate { update_id: self.context.latest_monitor_update_id, + counterparty_node_id: Some(self.context.counterparty_node_id), updates: vec![ChannelMonitorUpdateStep::CommitmentSecret { idx: self.context.cur_counterparty_commitment_transaction_number + 1, secret: msg.per_commitment_secret, }], + channel_id: Some(self.context.channel_id()), }; // Update state now that we've passed all the can-fail calls... // (note that we may still fail to generate the new commitment_signed message, but that's // OK, we step the channel here and *then* if the new generation fails we can fail the // channel based on that, but stepping stuff here should be safe either way. 
- self.context.channel_state &= !(ChannelState::AwaitingRemoteRevoke as u32); + self.context.channel_state.clear_awaiting_remote_revoke(); self.context.sent_message_awaiting_response = None; self.context.counterparty_prev_commitment_point = self.context.counterparty_cur_commitment_point; self.context.counterparty_cur_commitment_point = Some(msg.next_per_commitment_point); @@ -3513,6 +4915,7 @@ impl Channel where log_trace!(logger, "Updating HTLCs on receipt of RAA in channel {}...", &self.context.channel_id()); let mut to_forward_infos = Vec::new(); + let mut pending_update_adds = Vec::new(); let mut revoked_htlcs = Vec::new(); let mut finalized_claimed_htlcs = Vec::new(); let mut update_fail_htlcs = Vec::new(); @@ -3560,29 +4963,37 @@ impl Channel where let mut state = InboundHTLCState::Committed; mem::swap(&mut state, &mut htlc.state); - if let InboundHTLCState::AwaitingRemoteRevokeToAnnounce(forward_info) = state { + if let InboundHTLCState::AwaitingRemoteRevokeToAnnounce(resolution) = state { log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce {} to AwaitingAnnouncedRemoteRevoke", &htlc.payment_hash); - htlc.state = InboundHTLCState::AwaitingAnnouncedRemoteRevoke(forward_info); + htlc.state = InboundHTLCState::AwaitingAnnouncedRemoteRevoke(resolution); require_commitment = true; - } else if let InboundHTLCState::AwaitingAnnouncedRemoteRevoke(forward_info) = state { - match forward_info { - PendingHTLCStatus::Fail(fail_msg) => { - log_trace!(logger, " ...promoting inbound AwaitingAnnouncedRemoteRevoke {} to LocalRemoved due to PendingHTLCStatus indicating failure", &htlc.payment_hash); - require_commitment = true; - match fail_msg { - HTLCFailureMsg::Relay(msg) => { - htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailRelay(msg.reason.clone())); - update_fail_htlcs.push(msg) - }, - HTLCFailureMsg::Malformed(msg) => { - htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailMalformed((msg.sha256_of_onion, msg.failure_code))); - update_fail_malformed_htlcs.push(msg) + } else if let InboundHTLCState::AwaitingAnnouncedRemoteRevoke(resolution) = state { + match resolution { + InboundHTLCResolution::Resolved { pending_htlc_status } => + match pending_htlc_status { + PendingHTLCStatus::Fail(fail_msg) => { + log_trace!(logger, " ...promoting inbound AwaitingAnnouncedRemoteRevoke {} to LocalRemoved due to PendingHTLCStatus indicating failure", &htlc.payment_hash); + require_commitment = true; + match fail_msg { + HTLCFailureMsg::Relay(msg) => { + htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailRelay(msg.reason.clone())); + update_fail_htlcs.push(msg) + }, + HTLCFailureMsg::Malformed(msg) => { + htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailMalformed((msg.sha256_of_onion, msg.failure_code))); + update_fail_malformed_htlcs.push(msg) + }, + } }, + PendingHTLCStatus::Forward(forward_info) => { + log_trace!(logger, " ...promoting inbound AwaitingAnnouncedRemoteRevoke {} to Committed, attempting to forward", &htlc.payment_hash); + to_forward_infos.push((forward_info, htlc.htlc_id)); + htlc.state = InboundHTLCState::Committed; + } } - }, - PendingHTLCStatus::Forward(forward_info) => { + InboundHTLCResolution::Pending { update_add_htlc } => { log_trace!(logger, " ...promoting inbound AwaitingAnnouncedRemoteRevoke {} to Committed", &htlc.payment_hash); - to_forward_infos.push((forward_info, htlc.htlc_id)); + pending_update_adds.push(update_add_htlc); htlc.state = 
InboundHTLCState::Committed; } } @@ -3643,7 +5054,9 @@ impl Channel where } } - if (self.context.channel_state & ChannelState::MonitorUpdateInProgress as u32) == ChannelState::MonitorUpdateInProgress as u32 { + self.context.monitor_pending_update_adds.append(&mut pending_update_adds); + + if self.context.channel_state.is_monitor_update_in_progress() { // We can't actually generate a new commitment transaction (incl by freeing holding // cells) while we can't update the monitor, so we just return what we have. if require_commitment { @@ -3740,12 +5153,12 @@ impl Channel where } // Before proposing a feerate update, check that we can actually afford the new fee. - let inbound_stats = self.context.get_inbound_pending_htlc_stats(Some(feerate_per_kw)); - let outbound_stats = self.context.get_outbound_pending_htlc_stats(Some(feerate_per_kw)); - let keys = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number); - let commitment_stats = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &keys, true, true, logger); - let buffer_fee_msat = commit_tx_fee_sat(feerate_per_kw, commitment_stats.num_nondust_htlcs + outbound_stats.on_holder_tx_holding_cell_htlcs_count as usize + CONCURRENT_INBOUND_HTLC_FEE_BUFFER as usize, self.context.get_channel_type()) * 1000; - let holder_balance_msat = commitment_stats.local_balance_msat - outbound_stats.holding_cell_msat; + let dust_exposure_limiting_feerate = self.context.get_dust_exposure_limiting_feerate(&fee_estimator); + let htlc_stats = self.context.get_pending_htlc_stats(Some(feerate_per_kw), dust_exposure_limiting_feerate); + let keys = self.context.build_holder_transaction_keys(); + let commitment_stats = self.context.build_commitment_transaction(self.context.holder_commitment_point.transaction_number(), &keys, true, true, logger); + let buffer_fee_msat = commit_tx_fee_sat(feerate_per_kw, commitment_stats.num_nondust_htlcs + htlc_stats.on_holder_tx_outbound_holding_cell_htlcs_count as usize + CONCURRENT_INBOUND_HTLC_FEE_BUFFER as usize, self.context.get_channel_type()) * 1000; + let holder_balance_msat = commitment_stats.local_balance_msat - htlc_stats.outbound_holding_cell_msat; if holder_balance_msat < buffer_fee_msat + self.context.counterparty_selected_channel_reserve_satoshis.unwrap() * 1000 { //TODO: auto-close after a number of failures? log_debug!(logger, "Cannot afford to send new feerate at {}", feerate_per_kw); @@ -3753,19 +5166,17 @@ impl Channel where } // Note, we evaluate pending htlc "preemptive" trimmed-to-dust threshold at the proposed `feerate_per_kw`. 
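// A minimal sketch of the trimmed-to-dust rule referenced in the note above
// (helper and parameters are illustrative, not LDK API): on non-anchor
// channels an HTLC is dust when its value cannot cover its claim
// transaction's fee at the evaluated feerate plus the dust limit, which is
// why a higher proposed feerate can only grow the dust exposure totals.
fn htlc_is_trimmed_to_dust(
	amount_msat: u64, feerate_per_kw: u32, claim_tx_weight: u64, dust_limit_sat: u64,
) -> bool {
	// Fee the HTLC-success/HTLC-timeout claim transaction would pay.
	let claim_tx_fee_sat = feerate_per_kw as u64 * claim_tx_weight / 1000;
	// Below this threshold the HTLC is not materialized as a commitment output.
	amount_msat / 1000 < dust_limit_sat + claim_tx_fee_sat
}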
- let holder_tx_dust_exposure = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat; - let counterparty_tx_dust_exposure = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat; - let max_dust_htlc_exposure_msat = self.context.get_max_dust_htlc_exposure_msat(fee_estimator); - if holder_tx_dust_exposure > max_dust_htlc_exposure_msat { + let max_dust_htlc_exposure_msat = self.context.get_max_dust_htlc_exposure_msat(dust_exposure_limiting_feerate); + if htlc_stats.on_holder_tx_dust_exposure_msat > max_dust_htlc_exposure_msat { log_debug!(logger, "Cannot afford to send new feerate at {} without infringing max dust htlc exposure", feerate_per_kw); return None; } - if counterparty_tx_dust_exposure > max_dust_htlc_exposure_msat { + if htlc_stats.on_counterparty_tx_dust_exposure_msat > max_dust_htlc_exposure_msat { log_debug!(logger, "Cannot afford to send new feerate at {} without infringing max dust htlc exposure", feerate_per_kw); return None; } - if (self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::MonitorUpdateInProgress as u32)) != 0 { + if self.context.channel_state.is_awaiting_remote_revoke() || self.context.channel_state.is_monitor_update_in_progress() { force_holding_cell = true; } @@ -3790,12 +5201,12 @@ impl Channel where /// completed. /// May return `Err(())`, which implies [`ChannelContext::force_shutdown`] should be called immediately. pub fn remove_uncommitted_htlcs_and_mark_paused(&mut self, logger: &L) -> Result<(), ()> where L::Target: Logger { - assert_eq!(self.context.channel_state & ChannelState::ShutdownComplete as u32, 0); - if self.context.channel_state & !STATE_FLAGS < ChannelState::FundingSent as u32 { - return Err(()); + assert!(!matches!(self.context.channel_state, ChannelState::ShutdownComplete)); + if self.context.channel_state.is_pre_funded_state() { + return Err(()) } - if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == (ChannelState::PeerDisconnected as u32) { + if self.context.channel_state.is_peer_disconnected() { // While the below code should be idempotent, it's simpler to just return early, as // redundant disconnect events can fire, though they should be rare. 
return Ok(()); @@ -3857,7 +5268,7 @@ impl Channel where self.context.sent_message_awaiting_response = None; - self.context.channel_state |= ChannelState::PeerDisconnected as u32; + self.context.channel_state.set_peer_disconnected(); log_trace!(logger, "Peer disconnection resulted in {} remote-announced HTLC drops on channel {}", inbound_drop_count, &self.context.channel_id()); Ok(()) } @@ -3884,7 +5295,7 @@ impl Channel where self.context.monitor_pending_forwards.append(&mut pending_forwards); self.context.monitor_pending_failures.append(&mut pending_fails); self.context.monitor_pending_finalized_fulfills.append(&mut pending_finalized_claimed_htlcs); - self.context.channel_state |= ChannelState::MonitorUpdateInProgress as u32; + self.context.channel_state.set_monitor_update_in_progress(); } /// Indicates that the latest ChannelMonitor update has been committed by the client @@ -3898,19 +5309,22 @@ impl Channel where L::Target: Logger, NS::Target: NodeSigner { - assert_eq!(self.context.channel_state & ChannelState::MonitorUpdateInProgress as u32, ChannelState::MonitorUpdateInProgress as u32); - self.context.channel_state &= !(ChannelState::MonitorUpdateInProgress as u32); + assert!(self.context.channel_state.is_monitor_update_in_progress()); + self.context.channel_state.clear_monitor_update_in_progress(); - // If we're past (or at) the FundingSent stage on an outbound channel, try to + // If we're past (or at) the AwaitingChannelReady stage on an outbound channel, try to // (re-)broadcast the funding transaction as we may have declined to broadcast it when we // first received the funding_signed. let mut funding_broadcastable = - if self.context.is_outbound() && self.context.channel_state & !STATE_FLAGS >= ChannelState::FundingSent as u32 && self.context.channel_state & ChannelState::WaitingForBatch as u32 == 0 { + if self.context.is_outbound() && + (matches!(self.context.channel_state, ChannelState::AwaitingChannelReady(flags) if !flags.is_set(AwaitingChannelReadyFlags::WAITING_FOR_BATCH)) || + matches!(self.context.channel_state, ChannelState::ChannelReady(_))) + { self.context.funding_transaction.take() } else { None }; // That said, if the funding transaction is already confirmed (ie we're active with a // minimum_depth over 0) don't bother re-broadcasting the confirmed funding tx. 
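// A minimal sketch of the re-broadcast decision implemented here (enum and
// helper are illustrative, not LDK API): only an outbound channel that has
// exchanged funding, and is not held back by a funding batch, re-broadcasts;
// a channel that is already ChannelReady only does so when it was 0-conf
// (minimum_depth of zero), since otherwise the funding tx is confirmed.
enum SimpleChannelStage { PreFunding, AwaitingChannelReady { waiting_for_batch: bool }, ChannelReady }

fn should_rebroadcast_funding(is_outbound: bool, stage: &SimpleChannelStage, minimum_depth: Option<u32>) -> bool {
	if !is_outbound { return false; }
	match stage {
		// Nothing has been funded yet, so there is nothing to broadcast.
		SimpleChannelStage::PreFunding => false,
		// Hold off while other channels in the same funding batch are pending.
		SimpleChannelStage::AwaitingChannelReady { waiting_for_batch } => !waiting_for_batch,
		// Already usable: only 0-conf channels may still need the broadcast.
		SimpleChannelStage::ChannelReady => minimum_depth == Some(0),
	}
}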
- if self.context.channel_state & !STATE_FLAGS >= ChannelState::ChannelReady as u32 && self.context.minimum_depth != Some(0) { + if matches!(self.context.channel_state, ChannelState::ChannelReady(_)) && self.context.minimum_depth != Some(0) { funding_broadcastable = None; } @@ -3924,12 +5338,7 @@ impl Channel where assert!(!self.context.is_outbound() || self.context.minimum_depth == Some(0), "Funding transaction broadcast by the local client before it should have - LDK didn't do it!"); self.context.monitor_pending_channel_ready = false; - let next_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx); - Some(msgs::ChannelReady { - channel_id: self.context.channel_id(), - next_per_commitment_point, - short_channel_id_alias: Some(self.context.outbound_scid_alias), - }) + Some(self.get_channel_ready()) } else { None }; let announcement_sigs = self.get_announcement_sigs(node_signer, chain_hash, user_config, best_block_height, logger); @@ -3940,22 +5349,36 @@ impl Channel where mem::swap(&mut failed_htlcs, &mut self.context.monitor_pending_failures); let mut finalized_claimed_htlcs = Vec::new(); mem::swap(&mut finalized_claimed_htlcs, &mut self.context.monitor_pending_finalized_fulfills); + let mut pending_update_adds = Vec::new(); + mem::swap(&mut pending_update_adds, &mut self.context.monitor_pending_update_adds); - if self.context.channel_state & (ChannelState::PeerDisconnected as u32) != 0 { + if self.context.channel_state.is_peer_disconnected() { self.context.monitor_pending_revoke_and_ack = false; self.context.monitor_pending_commitment_signed = false; return MonitorRestoreUpdates { raa: None, commitment_update: None, order: RAACommitmentOrder::RevokeAndACKFirst, - accepted_htlcs, failed_htlcs, finalized_claimed_htlcs, funding_broadcastable, channel_ready, announcement_sigs + accepted_htlcs, failed_htlcs, finalized_claimed_htlcs, pending_update_adds, + funding_broadcastable, channel_ready, announcement_sigs }; } - let raa = if self.context.monitor_pending_revoke_and_ack { - Some(self.get_last_revoke_and_ack()) + let mut raa = if self.context.monitor_pending_revoke_and_ack { + self.get_last_revoke_and_ack(logger) } else { None }; - let commitment_update = if self.context.monitor_pending_commitment_signed { + let mut commitment_update = if self.context.monitor_pending_commitment_signed { self.get_last_commitment_update_for_send(logger).ok() } else { None }; + if self.context.resend_order == RAACommitmentOrder::CommitmentFirst + && self.context.signer_pending_commitment_update && raa.is_some() { + self.context.signer_pending_revoke_and_ack = true; + raa = None; + } + if self.context.resend_order == RAACommitmentOrder::RevokeAndACKFirst + && self.context.signer_pending_revoke_and_ack && commitment_update.is_some() { + self.context.signer_pending_commitment_update = true; + commitment_update = None; + } + if commitment_update.is_some() { self.mark_awaiting_response(); } @@ -3968,7 +5391,28 @@ impl Channel where if commitment_update.is_some() { "a" } else { "no" }, if raa.is_some() { "an" } else { "no" }, match order { RAACommitmentOrder::CommitmentFirst => "commitment", RAACommitmentOrder::RevokeAndACKFirst => "RAA"}); MonitorRestoreUpdates { - raa, commitment_update, order, accepted_htlcs, failed_htlcs, finalized_claimed_htlcs, funding_broadcastable, channel_ready, announcement_sigs + raa, commitment_update, order, accepted_htlcs, failed_htlcs, finalized_claimed_htlcs, + 
pending_update_adds, funding_broadcastable, channel_ready, announcement_sigs + } + } + + pub fn check_for_stale_feerate<L: Logger>(&mut self, logger: &L, min_feerate: u32) -> Result<(), ClosureReason> { + if self.context.is_outbound() { + // While it's possible our fee is too low for an outbound channel because we've been + // unable to increase the fee, we don't try to force-close directly here. + return Ok(()); + } + if self.context.feerate_per_kw < min_feerate { + log_info!(logger, + "Closing channel as feerate of {} is below required {} (the minimum required rate over the past day)", + self.context.feerate_per_kw, min_feerate + ); + Err(ClosureReason::PeerFeerateTooLow { + peer_feerate_sat_per_kw: self.context.feerate_per_kw, + required_feerate_sat_per_kw: min_feerate, + }) + } else { + Ok(()) } } @@ -3976,74 +5420,122 @@ impl<SP: Deref> Channel<SP> where where F::Target: FeeEstimator, L::Target: Logger { if self.context.is_outbound() { - return Err(ChannelError::Close("Non-funding remote tried to update channel fee".to_owned())); + return Err(ChannelError::close("Non-funding remote tried to update channel fee".to_owned())); } - if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 { - return Err(ChannelError::Close("Peer sent update_fee when we needed a channel_reestablish".to_owned())); + if self.context.channel_state.is_peer_disconnected() { + return Err(ChannelError::close("Peer sent update_fee when we needed a channel_reestablish".to_owned())); } Channel::<SP>::check_remote_fee(&self.context.channel_type, fee_estimator, msg.feerate_per_kw, Some(self.context.feerate_per_kw), logger)?; self.context.pending_update_fee = Some((msg.feerate_per_kw, FeeUpdateState::RemoteAnnounced)); self.context.update_time_counter += 1; // Check that we won't be pushed over our dust exposure limit by the feerate increase.
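// A minimal sketch of the gate applied just below (struct and helper are
// illustrative, not LDK API): the remote update_fee is rejected when, with
// pending HTLCs re-evaluated at the new feerate, either side's commitment
// transaction would carry more dust-HTLC value than we are willing to risk.
struct DustExposure { on_holder_tx_msat: u64, on_counterparty_tx_msat: u64 }

fn update_fee_within_dust_limit(exposure: &DustExposure, max_dust_htlc_exposure_msat: u64) -> bool {
	// Both commitment transactions must stay at or under the configured limit.
	exposure.on_holder_tx_msat <= max_dust_htlc_exposure_msat
		&& exposure.on_counterparty_tx_msat <= max_dust_htlc_exposure_msat
}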
- if !self.context.channel_type.supports_anchors_zero_fee_htlc_tx() { - let inbound_stats = self.context.get_inbound_pending_htlc_stats(None); - let outbound_stats = self.context.get_outbound_pending_htlc_stats(None); - let holder_tx_dust_exposure = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat; - let counterparty_tx_dust_exposure = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat; - let max_dust_htlc_exposure_msat = self.context.get_max_dust_htlc_exposure_msat(fee_estimator); - if holder_tx_dust_exposure > max_dust_htlc_exposure_msat { - return Err(ChannelError::Close(format!("Peer sent update_fee with a feerate ({}) which may over-expose us to dust-in-flight on our own transactions (totaling {} msat)", - msg.feerate_per_kw, holder_tx_dust_exposure))); - } - if counterparty_tx_dust_exposure > max_dust_htlc_exposure_msat { - return Err(ChannelError::Close(format!("Peer sent update_fee with a feerate ({}) which may over-expose us to dust-in-flight on our counterparty's transactions (totaling {} msat)", - msg.feerate_per_kw, counterparty_tx_dust_exposure))); - } + let dust_exposure_limiting_feerate = self.context.get_dust_exposure_limiting_feerate(&fee_estimator); + let htlc_stats = self.context.get_pending_htlc_stats(None, dust_exposure_limiting_feerate); + let max_dust_htlc_exposure_msat = self.context.get_max_dust_htlc_exposure_msat(dust_exposure_limiting_feerate); + if htlc_stats.on_holder_tx_dust_exposure_msat > max_dust_htlc_exposure_msat { + return Err(ChannelError::close(format!("Peer sent update_fee with a feerate ({}) which may over-expose us to dust-in-flight on our own transactions (totaling {} msat)", + msg.feerate_per_kw, htlc_stats.on_holder_tx_dust_exposure_msat))); + } + if htlc_stats.on_counterparty_tx_dust_exposure_msat > max_dust_htlc_exposure_msat { + return Err(ChannelError::close(format!("Peer sent update_fee with a feerate ({}) which may over-expose us to dust-in-flight on our counterparty's transactions (totaling {} msat)", + msg.feerate_per_kw, htlc_stats.on_counterparty_tx_dust_exposure_msat))); } Ok(()) } /// Indicates that the signer may have some signatures for us, so we should retry if we're /// blocked. 
- #[allow(unused)] + #[cfg(async_signing)] pub fn signer_maybe_unblocked(&mut self, logger: &L) -> SignerResumeUpdates where L::Target: Logger { - let commitment_update = if self.context.signer_pending_commitment_update { - self.get_last_commitment_update_for_send(logger).ok() - } else { None }; + if !self.context.holder_commitment_point.is_available() { + log_trace!(logger, "Attempting to update holder per-commitment point..."); + self.context.holder_commitment_point.try_resolve_pending(&self.context.holder_signer, &self.context.secp_ctx, logger); + } let funding_signed = if self.context.signer_pending_funding && !self.context.is_outbound() { self.context.get_funding_signed_msg(logger).1 } else { None }; let channel_ready = if funding_signed.is_some() { - self.check_get_channel_ready(0) + self.check_get_channel_ready(0, logger) + } else { None }; + + let mut commitment_update = if self.context.signer_pending_commitment_update { + log_trace!(logger, "Attempting to generate pending commitment update..."); + self.get_last_commitment_update_for_send(logger).ok() } else { None }; - let funding_created = if self.context.signer_pending_funding && self.context.is_outbound() { - self.context.get_funding_created_msg(logger) + let mut revoke_and_ack = if self.context.signer_pending_revoke_and_ack { + log_trace!(logger, "Attempting to generate pending revoke and ack..."); + self.get_last_revoke_and_ack(logger) } else { None }; - log_trace!(logger, "Signer unblocked with {} commitment_update, {} funding_signed, {} funding_created, and {} channel_ready", + if self.context.resend_order == RAACommitmentOrder::CommitmentFirst + && self.context.signer_pending_commitment_update && revoke_and_ack.is_some() { + log_trace!(logger, "Signer unblocked for revoke and ack, but unable to send due to resend order, waiting on signer for commitment update"); + self.context.signer_pending_revoke_and_ack = true; + revoke_and_ack = None; + } + if self.context.resend_order == RAACommitmentOrder::RevokeAndACKFirst + && self.context.signer_pending_revoke_and_ack && commitment_update.is_some() { + log_trace!(logger, "Signer unblocked for commitment update, but unable to send due to resend order, waiting on signer for revoke and ack"); + self.context.signer_pending_commitment_update = true; + commitment_update = None; + } + + log_trace!(logger, "Signer unblocked with {} commitment_update, {} revoke_and_ack, {} funding_signed and {} channel_ready, with resend order {:?}", if commitment_update.is_some() { "a" } else { "no" }, + if revoke_and_ack.is_some() { "a" } else { "no" }, if funding_signed.is_some() { "a" } else { "no" }, - if funding_created.is_some() { "a" } else { "no" }, - if channel_ready.is_some() { "a" } else { "no" }); + if channel_ready.is_some() { "a" } else { "no" }, + self.context.resend_order); SignerResumeUpdates { commitment_update, + revoke_and_ack, funding_signed, - funding_created, channel_ready, + order: self.context.resend_order.clone(), } } - fn get_last_revoke_and_ack(&self) -> msgs::RevokeAndACK { - let next_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx); - let per_commitment_secret = self.context.holder_signer.as_ref().release_commitment_secret(self.context.cur_holder_commitment_transaction_number + 2); - msgs::RevokeAndACK { - channel_id: self.context.channel_id, - per_commitment_secret, - next_per_commitment_point, - #[cfg(taproot)] - next_local_nonce: None, + fn 
get_last_revoke_and_ack(&mut self, logger: &L) -> Option where L::Target: Logger { + debug_assert!(self.context.holder_commitment_point.transaction_number() <= INITIAL_COMMITMENT_NUMBER - 2); + self.context.holder_commitment_point.try_resolve_pending(&self.context.holder_signer, &self.context.secp_ctx, logger); + let per_commitment_secret = self.context.holder_signer.as_ref() + .release_commitment_secret(self.context.holder_commitment_point.transaction_number() + 2).ok(); + if let (HolderCommitmentPoint::Available { current, .. }, Some(per_commitment_secret)) = + (self.context.holder_commitment_point, per_commitment_secret) { + self.context.signer_pending_revoke_and_ack = false; + return Some(msgs::RevokeAndACK { + channel_id: self.context.channel_id, + per_commitment_secret, + next_per_commitment_point: current, + #[cfg(taproot)] + next_local_nonce: None, + }) + } + if !self.context.holder_commitment_point.is_available() { + log_trace!(logger, "Last revoke-and-ack pending in channel {} for sequence {} because the next per-commitment point is not available", + &self.context.channel_id(), self.context.holder_commitment_point.transaction_number()); + } + if per_commitment_secret.is_none() { + log_trace!(logger, "Last revoke-and-ack pending in channel {} for sequence {} because the next per-commitment secret for {} is not available", + &self.context.channel_id(), self.context.holder_commitment_point.transaction_number(), + self.context.holder_commitment_point.transaction_number() + 2); + } + #[cfg(not(async_signing))] { + panic!("Holder commitment point and per commitment secret must be available when generating revoke_and_ack"); + } + #[cfg(async_signing)] { + // Technically if we're at HolderCommitmentPoint::PendingNext, + // we have a commitment point ready to send in an RAA, however we + // choose to wait since if we send RAA now, we could get another + // CS before we have any commitment point available. Blocking our + // RAA here is a convenient way to make sure that post-funding + // we're only ever waiting on one commitment point at a time. + log_trace!(logger, "Last revoke-and-ack pending in channel {} for sequence {} because the next per-commitment point is not available", + &self.context.channel_id(), self.context.holder_commitment_point.transaction_number()); + self.context.signer_pending_revoke_and_ack = true; + None } } @@ -4064,6 +5556,7 @@ impl Channel where cltv_expiry: htlc.cltv_expiry, onion_routing_packet: (**onion_packet).clone(), skimmed_fee_msat: htlc.skimmed_fee_msat, + blinding_point: htlc.blinding_point, }); } } @@ -4114,11 +5607,16 @@ impl Channel where } update } else { - if !self.context.signer_pending_commitment_update { - log_trace!(logger, "Commitment update awaiting signer: setting signer_pending_commitment_update"); - self.context.signer_pending_commitment_update = true; + #[cfg(not(async_signing))] { + panic!("Failed to get signature for new commitment state"); + } + #[cfg(async_signing)] { + if !self.context.signer_pending_commitment_update { + log_trace!(logger, "Commitment update awaiting signer: setting signer_pending_commitment_update"); + self.context.signer_pending_commitment_update = true; + } + return Err(()); } - return Err(()); }; Ok(msgs::CommitmentUpdate { update_add_htlcs, update_fulfill_htlcs, update_fail_htlcs, update_fail_malformed_htlcs, update_fee, @@ -4128,7 +5626,7 @@ impl Channel where /// Gets the `Shutdown` message we should send our peer on reconnect, if any. 
pub fn get_outbound_shutdown(&self) -> Option { - if self.context.channel_state & (ChannelState::LocalShutdownSent as u32) != 0 { + if self.context.channel_state.is_local_shutdown_sent() { assert!(self.context.shutdown_scriptpubkey.is_some()); Some(msgs::Shutdown { channel_id: self.context.channel_id, @@ -4152,26 +5650,29 @@ impl Channel where L::Target: Logger, NS::Target: NodeSigner { - if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == 0 { + if !self.context.channel_state.is_peer_disconnected() { // While BOLT 2 doesn't indicate explicitly we should error this channel here, it // almost certainly indicates we are going to end up out-of-sync in some way, so we // just close here instead of trying to recover. - return Err(ChannelError::Close("Peer sent a loose channel_reestablish not after reconnect".to_owned())); + return Err(ChannelError::close("Peer sent a loose channel_reestablish not after reconnect".to_owned())); } if msg.next_local_commitment_number >= INITIAL_COMMITMENT_NUMBER || msg.next_remote_commitment_number >= INITIAL_COMMITMENT_NUMBER || msg.next_local_commitment_number == 0 { - return Err(ChannelError::Close("Peer sent an invalid channel_reestablish to force close in a non-standard way".to_owned())); + return Err(ChannelError::close("Peer sent an invalid channel_reestablish to force close in a non-standard way".to_owned())); } + let our_commitment_transaction = INITIAL_COMMITMENT_NUMBER - self.context.holder_commitment_point.transaction_number() - 1; if msg.next_remote_commitment_number > 0 { - let expected_point = self.context.holder_signer.as_ref().get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - msg.next_remote_commitment_number + 1, &self.context.secp_ctx); + let expected_point = self.context.holder_signer.as_ref() + .get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - msg.next_remote_commitment_number + 1, &self.context.secp_ctx) + .expect("TODO: async signing is not yet supported for per commitment points upon channel reestablishment"); let given_secret = SecretKey::from_slice(&msg.your_last_per_commitment_secret) - .map_err(|_| ChannelError::Close("Peer sent a garbage channel_reestablish with unparseable secret key".to_owned()))?; + .map_err(|_| ChannelError::close("Peer sent a garbage channel_reestablish with unparseable secret key".to_owned()))?; if expected_point != PublicKey::from_secret_key(&self.context.secp_ctx, &given_secret) { - return Err(ChannelError::Close("Peer sent a garbage channel_reestablish with secret key not matching the commitment height provided".to_owned())); + return Err(ChannelError::close("Peer sent a garbage channel_reestablish with secret key not matching the commitment height provided".to_owned())); } - if msg.next_remote_commitment_number > INITIAL_COMMITMENT_NUMBER - self.context.cur_holder_commitment_transaction_number { + if msg.next_remote_commitment_number > our_commitment_transaction { macro_rules! log_and_panic { ($err_msg: expr) => { log_error!(logger, $err_msg, &self.context.channel_id, log_pubkey!(self.context.counterparty_node_id)); @@ -4191,28 +5692,29 @@ impl Channel where // Before we change the state of the channel, we check if the peer is sending a very old // commitment transaction number, if yes we send a warning message. 
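// A minimal sketch of how msg.next_remote_commitment_number is classified
// across this handler (enum and helper are illustrative, not LDK API): the
// "very old" case below draws a warning, while the resend logic further down
// handles the caught-up, one-behind, and impossible future cases.
enum RaaResendAction { Nothing, ResendLastRaa, WarnVeryOldState, CloseClaimedFutureState }

fn classify_next_remote_commitment_number(received: u64, our_commitment_transaction: u64) -> RaaResendAction {
	if received + 1 < our_commitment_transaction {
		// Peer is more than one revocation behind: warn rather than close.
		RaaResendAction::WarnVeryOldState
	} else if received == our_commitment_transaction {
		// Peer is fully caught up: no revoke_and_ack needs resending.
		RaaResendAction::Nothing
	} else if received + 1 == our_commitment_transaction {
		// Peer missed exactly our last revoke_and_ack: resend it.
		RaaResendAction::ResendLastRaa
	} else {
		// Peer claims a commitment number we have never reached.
		RaaResendAction::CloseClaimedFutureState
	}
}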
- let our_commitment_transaction = INITIAL_COMMITMENT_NUMBER - self.context.cur_holder_commitment_transaction_number - 1; - if msg.next_remote_commitment_number + 1 < our_commitment_transaction { - return Err( - ChannelError::Warn(format!("Peer attempted to reestablish channel with a very old local commitment transaction: {} (received) vs {} (expected)", msg.next_remote_commitment_number, our_commitment_transaction)) - ); + if msg.next_remote_commitment_number + 1 < our_commitment_transaction { + return Err(ChannelError::Warn(format!( + "Peer attempted to reestablish channel with a very old local commitment transaction: {} (received) vs {} (expected)", + msg.next_remote_commitment_number, + our_commitment_transaction + ))); } // Go ahead and unmark PeerDisconnected as various calls we may make check for it (and all // remaining cases either succeed or ErrorMessage-fail). - self.context.channel_state &= !(ChannelState::PeerDisconnected as u32); + self.context.channel_state.clear_peer_disconnected(); self.context.sent_message_awaiting_response = None; let shutdown_msg = self.get_outbound_shutdown(); - let announcement_sigs = self.get_announcement_sigs(node_signer, chain_hash, user_config, best_block.height(), logger); + let announcement_sigs = self.get_announcement_sigs(node_signer, chain_hash, user_config, best_block.height, logger); - if self.context.channel_state & (ChannelState::FundingSent as u32) == ChannelState::FundingSent as u32 { + if matches!(self.context.channel_state, ChannelState::AwaitingChannelReady(_)) { // If we're waiting on a monitor update, we shouldn't re-send any channel_ready's. - if self.context.channel_state & (ChannelState::OurChannelReady as u32) == 0 || - self.context.channel_state & (ChannelState::MonitorUpdateInProgress as u32) != 0 { + if !self.context.channel_state.is_our_channel_ready() || + self.context.channel_state.is_monitor_update_in_progress() { if msg.next_remote_commitment_number != 0 { - return Err(ChannelError::Close("Peer claimed they saw a revoke_and_ack but we haven't sent channel_ready yet".to_owned())); + return Err(ChannelError::close("Peer claimed they saw a revoke_and_ack but we haven't sent channel_ready yet".to_owned())); } // Short circuit the whole handler as there is nothing we can resend them return Ok(ReestablishResponses { @@ -4224,56 +5726,51 @@ impl Channel where } // We have OurChannelReady set! - let next_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx); return Ok(ReestablishResponses { - channel_ready: Some(msgs::ChannelReady { - channel_id: self.context.channel_id(), - next_per_commitment_point, - short_channel_id_alias: Some(self.context.outbound_scid_alias), - }), + channel_ready: Some(self.get_channel_ready()), raa: None, commitment_update: None, order: RAACommitmentOrder::CommitmentFirst, shutdown_msg, announcement_sigs, }); } - let required_revoke = if msg.next_remote_commitment_number + 1 == INITIAL_COMMITMENT_NUMBER - self.context.cur_holder_commitment_transaction_number { + let required_revoke = if msg.next_remote_commitment_number == our_commitment_transaction { // Remote isn't waiting on any RevokeAndACK from us! // Note that if we need to repeat our ChannelReady we'll do that in the next if block. 
None - } else if msg.next_remote_commitment_number + 1 == (INITIAL_COMMITMENT_NUMBER - 1) - self.context.cur_holder_commitment_transaction_number { - if self.context.channel_state & (ChannelState::MonitorUpdateInProgress as u32) != 0 { + } else if msg.next_remote_commitment_number + 1 == our_commitment_transaction { + if self.context.channel_state.is_monitor_update_in_progress() { self.context.monitor_pending_revoke_and_ack = true; None } else { - Some(self.get_last_revoke_and_ack()) + self.get_last_revoke_and_ack(logger) } } else { - return Err(ChannelError::Close("Peer attempted to reestablish channel with a very old local commitment transaction".to_owned())); + debug_assert!(false, "All values should have been handled in the four cases above"); + return Err(ChannelError::close(format!( + "Peer attempted to reestablish channel expecting a future local commitment transaction: {} (received) vs {} (expected)", + msg.next_remote_commitment_number, + our_commitment_transaction + ))); }; // We increment cur_counterparty_commitment_transaction_number only upon receipt of // revoke_and_ack, not on sending commitment_signed, so we add one if have // AwaitingRemoteRevoke set, which indicates we sent a commitment_signed but haven't gotten // the corresponding revoke_and_ack back yet. - let is_awaiting_remote_revoke = self.context.channel_state & ChannelState::AwaitingRemoteRevoke as u32 != 0; + let is_awaiting_remote_revoke = self.context.channel_state.is_awaiting_remote_revoke(); if is_awaiting_remote_revoke && !self.is_awaiting_monitor_update() { self.mark_awaiting_response(); } let next_counterparty_commitment_number = INITIAL_COMMITMENT_NUMBER - self.context.cur_counterparty_commitment_transaction_number + if is_awaiting_remote_revoke { 1 } else { 0 }; - let channel_ready = if msg.next_local_commitment_number == 1 && INITIAL_COMMITMENT_NUMBER - self.context.cur_holder_commitment_transaction_number == 1 { + let channel_ready = if msg.next_local_commitment_number == 1 && INITIAL_COMMITMENT_NUMBER - self.context.holder_commitment_point.transaction_number() == 1 { // We should never have to worry about MonitorUpdateInProgress resending ChannelReady - let next_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx); - Some(msgs::ChannelReady { - channel_id: self.context.channel_id(), - next_per_commitment_point, - short_channel_id_alias: Some(self.context.outbound_scid_alias), - }) + Some(self.get_channel_ready()) } else { None }; if msg.next_local_commitment_number == next_counterparty_commitment_number { - if required_revoke.is_some() { + if required_revoke.is_some() || self.context.signer_pending_revoke_and_ack { log_debug!(logger, "Reconnected channel {} with only lost outbound RAA", &self.context.channel_id()); } else { log_debug!(logger, "Reconnected channel {} with no loss", &self.context.channel_id()); @@ -4286,13 +5783,13 @@ impl Channel where order: self.context.resend_order.clone(), }) } else if msg.next_local_commitment_number == next_counterparty_commitment_number - 1 { - if required_revoke.is_some() { + if required_revoke.is_some() || self.context.signer_pending_revoke_and_ack { log_debug!(logger, "Reconnected channel {} with lost outbound RAA and lost remote commitment tx", &self.context.channel_id()); } else { log_debug!(logger, "Reconnected channel {} with only lost remote commitment tx", &self.context.channel_id()); } - if self.context.channel_state & 
(ChannelState::MonitorUpdateInProgress as u32) != 0 { + if self.context.channel_state.is_monitor_update_in_progress() { self.context.monitor_pending_commitment_signed = true; Ok(ReestablishResponses { channel_ready, shutdown_msg, announcement_sigs, @@ -4300,15 +5797,40 @@ impl Channel where order: self.context.resend_order.clone(), }) } else { + let commitment_update = if self.context.resend_order == RAACommitmentOrder::RevokeAndACKFirst + && self.context.signer_pending_revoke_and_ack { + log_trace!(logger, "Reconnected channel {} with lost outbound RAA and lost remote commitment tx, but unable to send due to resend order, waiting on signer for revoke and ack", &self.context.channel_id()); + self.context.signer_pending_commitment_update = true; + None + } else { + self.get_last_commitment_update_for_send(logger).ok() + }; + let raa = if self.context.resend_order == RAACommitmentOrder::CommitmentFirst + && self.context.signer_pending_commitment_update && required_revoke.is_some() { + log_trace!(logger, "Reconnected channel {} with lost outbound RAA and lost remote commitment tx, but unable to send due to resend order, waiting on signer for commitment update", &self.context.channel_id()); + self.context.signer_pending_revoke_and_ack = true; + None + } else { + required_revoke + }; Ok(ReestablishResponses { channel_ready, shutdown_msg, announcement_sigs, - raa: required_revoke, - commitment_update: self.get_last_commitment_update_for_send(logger).ok(), + raa, commitment_update, order: self.context.resend_order.clone(), }) } + } else if msg.next_local_commitment_number < next_counterparty_commitment_number { + Err(ChannelError::close(format!( + "Peer attempted to reestablish channel with a very old remote commitment transaction: {} (received) vs {} (expected)", + msg.next_local_commitment_number, + next_counterparty_commitment_number, + ))) } else { - Err(ChannelError::Close("Peer attempted to reestablish channel with a very old remote commitment transaction".to_owned())) + Err(ChannelError::close(format!( + "Peer attempted to reestablish channel with a future remote commitment transaction: {} (received) vs {} (expected)", + msg.next_local_commitment_number, + next_counterparty_commitment_number, + ))) } } @@ -4379,7 +5901,7 @@ impl Channel where pub fn timer_check_closing_negotiation_progress(&mut self) -> Result<(), ChannelError> { if self.closing_negotiation_ready() { if self.context.closing_signed_in_flight { - return Err(ChannelError::Close("closing_signed negotiation failed to finish within two timer ticks".to_owned())); + return Err(ChannelError::close("closing_signed negotiation failed to finish within two timer ticks".to_owned())); } else { self.context.closing_signed_in_flight = true; } @@ -4424,7 +5946,7 @@ impl Channel where ChannelSignerType::Ecdsa(ecdsa) => { let sig = ecdsa .sign_closing_transaction(&closing_tx, &self.context.secp_ctx) - .map_err(|()| ChannelError::Close("Failed to get signature for closing transaction.".to_owned()))?; + .map_err(|()| ChannelError::close("Failed to get signature for closing transaction.".to_owned()))?; self.context.last_sent_closing_fee = Some((total_fee_satoshis, sig.clone())); Ok((Some(msgs::ClosingSigned { @@ -4436,7 +5958,10 @@ impl Channel where max_fee_satoshis: our_max_fee, }), }), None, None)) - } + }, + // TODO (taproot|arik) + #[cfg(taproot)] + _ => todo!() } } @@ -4466,21 +5991,21 @@ impl Channel where &mut self, signer_provider: &SP, their_features: &InitFeatures, msg: &msgs::Shutdown ) -> Result<(Option, Option, Vec<(HTLCSource, 
PaymentHash)>), ChannelError> { - if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 { - return Err(ChannelError::Close("Peer sent shutdown when we needed a channel_reestablish".to_owned())); + if self.context.channel_state.is_peer_disconnected() { + return Err(ChannelError::close("Peer sent shutdown when we needed a channel_reestablish".to_owned())); } - if self.context.channel_state & !STATE_FLAGS < ChannelState::FundingSent as u32 { + if self.context.channel_state.is_pre_funded_state() { // Spec says we should fail the connection, not the channel, but that's nonsense, there // are plenty of reasons you may want to fail a channel pre-funding, and spec says you // can do that via error message without getting a connection fail anyway... - return Err(ChannelError::Close("Peer sent shutdown pre-funding generation".to_owned())); + return Err(ChannelError::close("Peer sent shutdown pre-funding generation".to_owned())); } for htlc in self.context.pending_inbound_htlcs.iter() { if let InboundHTLCState::RemoteAnnounced(_) = htlc.state { - return Err(ChannelError::Close("Got shutdown with remote pending HTLCs".to_owned())); + return Err(ChannelError::close("Got shutdown with remote pending HTLCs".to_owned())); } } - assert_eq!(self.context.channel_state & ChannelState::ShutdownComplete as u32, 0); + assert!(!matches!(self.context.channel_state, ChannelState::ShutdownComplete)); if !script::is_bolt2_compliant(&msg.scriptpubkey, their_features) { return Err(ChannelError::Warn(format!("Got a nonstandard scriptpubkey ({}) from remote peer", msg.scriptpubkey.to_hex_string()))); @@ -4497,7 +6022,7 @@ impl Channel where // If we have any LocalAnnounced updates we'll probably just get back an update_fail_htlc // immediately after the commitment dance, but we can send a Shutdown because we won't send // any further commitment updates after we set LocalShutdownSent. - let send_shutdown = (self.context.channel_state & ChannelState::LocalShutdownSent as u32) != ChannelState::LocalShutdownSent as u32; + let send_shutdown = !self.context.channel_state.is_local_shutdown_sent(); let update_shutdown_script = match self.context.shutdown_scriptpubkey { Some(_) => false, @@ -4505,10 +6030,10 @@ impl Channel where assert!(send_shutdown); let shutdown_scriptpubkey = match signer_provider.get_shutdown_scriptpubkey() { Ok(scriptpubkey) => scriptpubkey, - Err(_) => return Err(ChannelError::Close("Failed to get shutdown scriptpubkey".to_owned())), + Err(_) => return Err(ChannelError::close("Failed to get shutdown scriptpubkey".to_owned())), }; if !shutdown_scriptpubkey.is_compatible(their_features) { - return Err(ChannelError::Close(format!("Provided a scriptpubkey format not accepted by peer: {}", shutdown_scriptpubkey))); + return Err(ChannelError::close(format!("Provided a scriptpubkey format not accepted by peer: {}", shutdown_scriptpubkey))); } self.context.shutdown_scriptpubkey = Some(shutdown_scriptpubkey); true @@ -4517,16 +6042,18 @@ impl Channel where // From here on out, we may not fail! 
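// A minimal sketch of why the holding cell is split once shutdown begins
// (types and names are illustrative, not LDK API): queued update_add_htlcs
// were never committed to a transaction, so they are returned as the dropped
// HTLCs in this function's return value and failed back upstream, while
// claims and fails of already-committed inbound HTLCs must still be sent.
enum QueuedUpdate { AddHtlc { source_id: u64 }, ClaimHtlc { htlc_id: u64 }, FailHtlc { htlc_id: u64 } }

fn split_holding_cell_on_shutdown(cell: Vec<QueuedUpdate>) -> (Vec<QueuedUpdate>, Vec<u64>) {
	let mut keep = Vec::new();
	let mut dropped_source_ids = Vec::new();
	for update in cell {
		match update {
			// Uncommitted outbound adds are safe to drop and fail back.
			QueuedUpdate::AddHtlc { source_id } => dropped_source_ids.push(source_id),
			// Claims/fails reference committed HTLCs and must survive shutdown.
			other => keep.push(other),
		}
	}
	(keep, dropped_source_ids)
}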
- self.context.channel_state |= ChannelState::RemoteShutdownSent as u32; + self.context.channel_state.set_remote_shutdown_sent(); self.context.update_time_counter += 1; let monitor_update = if update_shutdown_script { self.context.latest_monitor_update_id += 1; let monitor_update = ChannelMonitorUpdate { update_id: self.context.latest_monitor_update_id, + counterparty_node_id: Some(self.context.counterparty_node_id), updates: vec![ChannelMonitorUpdateStep::ShutdownScript { scriptpubkey: self.get_closing_scriptpubkey(), }], + channel_id: Some(self.context.channel_id()), }; self.monitor_updating_paused(false, false, false, Vec::new(), Vec::new(), Vec::new()); self.push_ret_blockable_mon_update(monitor_update) @@ -4553,7 +6080,7 @@ impl Channel where } }); - self.context.channel_state |= ChannelState::LocalShutdownSent as u32; + self.context.channel_state.set_local_shutdown_sent(); self.context.update_time_counter += 1; Ok((shutdown, monitor_update, dropped_outbound_htlcs)) @@ -4587,24 +6114,24 @@ impl Channel where -> Result<(Option, Option, Option), ChannelError> where F::Target: FeeEstimator { - if self.context.channel_state & BOTH_SIDES_SHUTDOWN_MASK != BOTH_SIDES_SHUTDOWN_MASK { - return Err(ChannelError::Close("Remote end sent us a closing_signed before both sides provided a shutdown".to_owned())); + if !self.context.channel_state.is_both_sides_shutdown() { + return Err(ChannelError::close("Remote end sent us a closing_signed before both sides provided a shutdown".to_owned())); } - if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 { - return Err(ChannelError::Close("Peer sent closing_signed when we needed a channel_reestablish".to_owned())); + if self.context.channel_state.is_peer_disconnected() { + return Err(ChannelError::close("Peer sent closing_signed when we needed a channel_reestablish".to_owned())); } if !self.context.pending_inbound_htlcs.is_empty() || !self.context.pending_outbound_htlcs.is_empty() { - return Err(ChannelError::Close("Remote end sent us a closing_signed while there were still pending HTLCs".to_owned())); + return Err(ChannelError::close("Remote end sent us a closing_signed while there were still pending HTLCs".to_owned())); } if msg.fee_satoshis > TOTAL_BITCOIN_SUPPLY_SATOSHIS { // this is required to stop potential overflow in build_closing_transaction - return Err(ChannelError::Close("Remote tried to send us a closing tx with > 21 million BTC fee".to_owned())); + return Err(ChannelError::close("Remote tried to send us a closing tx with > 21 million BTC fee".to_owned())); } if self.context.is_outbound() && self.context.last_sent_closing_fee.is_none() { - return Err(ChannelError::Close("Remote tried to send a closing_signed when we were supposed to propose the first one".to_owned())); + return Err(ChannelError::close("Remote tried to send a closing_signed when we were supposed to propose the first one".to_owned())); } - if self.context.channel_state & ChannelState::MonitorUpdateInProgress as u32 != 0 { + if self.context.channel_state.is_monitor_update_in_progress() { self.context.pending_counterparty_closing_signed = Some(msg.clone()); return Ok((None, None, None)); } @@ -4612,7 +6139,7 @@ impl Channel where let funding_redeemscript = self.context.get_funding_redeemscript(); let (mut closing_tx, used_total_fee) = self.build_closing_transaction(msg.fee_satoshis, false); if used_total_fee != msg.fee_satoshis { - return Err(ChannelError::Close(format!("Remote sent us a closing_signed with a fee other 
than the value they can claim. Fee in message: {}. Actual closing tx fee: {}", msg.fee_satoshis, used_total_fee))); + return Err(ChannelError::close(format!("Remote sent us a closing_signed with a fee other than the value they can claim. Fee in message: {}. Actual closing tx fee: {}", msg.fee_satoshis, used_total_fee))); } let sighash = closing_tx.trust().get_sighash_all(&funding_redeemscript, self.context.channel_value_satoshis); @@ -4628,21 +6155,34 @@ impl Channel where }; for outp in closing_tx.trust().built_transaction().output.iter() { - if !outp.script_pubkey.is_witness_program() && outp.value < MAX_STD_OUTPUT_DUST_LIMIT_SATOSHIS { - return Err(ChannelError::Close("Remote sent us a closing_signed with a dust output. Always use segwit closing scripts!".to_owned())); + if !outp.script_pubkey.is_witness_program() && outp.value < Amount::from_sat(MAX_STD_OUTPUT_DUST_LIMIT_SATOSHIS) { + return Err(ChannelError::close("Remote sent us a closing_signed with a dust output. Always use segwit closing scripts!".to_owned())); } } + let closure_reason = if self.initiated_shutdown() { + ClosureReason::LocallyInitiatedCooperativeClosure + } else { + ClosureReason::CounterpartyInitiatedCooperativeClosure + }; + assert!(self.context.shutdown_scriptpubkey.is_some()); if let Some((last_fee, sig)) = self.context.last_sent_closing_fee { if last_fee == msg.fee_satoshis { let shutdown_result = ShutdownResult { + closure_reason, monitor_update: None, dropped_outbound_htlcs: Vec::new(), unbroadcasted_batch_funding_txid: self.context.unbroadcasted_batch_funding_txid(), + channel_id: self.context.channel_id, + user_channel_id: self.context.user_id, + channel_capacity_satoshis: self.context.channel_value_satoshis, + counterparty_node_id: self.context.counterparty_node_id, + unbroadcasted_funding_tx: self.context.unbroadcasted_funding(), + channel_funding_txo: self.context.get_funding_txo(), }; let tx = self.build_signed_closing_transaction(&mut closing_tx, &msg.signature, &sig); - self.context.channel_state = ChannelState::ShutdownComplete as u32; + self.context.channel_state = ChannelState::ShutdownComplete; self.context.update_time_counter += 1; return Ok((None, Some(tx), Some(shutdown_result))); } @@ -4662,14 +6202,21 @@ impl Channel where ChannelSignerType::Ecdsa(ecdsa) => { let sig = ecdsa .sign_closing_transaction(&closing_tx, &self.context.secp_ctx) - .map_err(|_| ChannelError::Close("External signer refused to sign closing transaction".to_owned()))?; + .map_err(|_| ChannelError::close("External signer refused to sign closing transaction".to_owned()))?; let (signed_tx, shutdown_result) = if $new_fee == msg.fee_satoshis { let shutdown_result = ShutdownResult { + closure_reason, monitor_update: None, dropped_outbound_htlcs: Vec::new(), unbroadcasted_batch_funding_txid: self.context.unbroadcasted_batch_funding_txid(), + channel_id: self.context.channel_id, + user_channel_id: self.context.user_id, + channel_capacity_satoshis: self.context.channel_value_satoshis, + counterparty_node_id: self.context.counterparty_node_id, + unbroadcasted_funding_tx: self.context.unbroadcasted_funding(), + channel_funding_txo: self.context.get_funding_txo(), }; - self.context.channel_state = ChannelState::ShutdownComplete as u32; + self.context.channel_state = ChannelState::ShutdownComplete; self.context.update_time_counter += 1; let tx = self.build_signed_closing_transaction(&closing_tx, &msg.signature, &sig); (Some(tx), Some(shutdown_result)) @@ -4687,14 +6234,17 @@ impl Channel where max_fee_satoshis: our_max_fee, }), }), 
signed_tx, shutdown_result)) - } + }, + // TODO (taproot|arik) + #[cfg(taproot)] + _ => todo!() } } } if let Some(msgs::ClosingSignedFeeRange { min_fee_satoshis, max_fee_satoshis }) = msg.fee_range { if msg.fee_satoshis < min_fee_satoshis || msg.fee_satoshis > max_fee_satoshis { - return Err(ChannelError::Close(format!("Peer sent a bogus closing_signed - suggested fee of {} sat was not in their desired range of {} sat - {} sat", msg.fee_satoshis, min_fee_satoshis, max_fee_satoshis))); + return Err(ChannelError::close(format!("Peer sent a bogus closing_signed - suggested fee of {} sat was not in their desired range of {} sat - {} sat", msg.fee_satoshis, min_fee_satoshis, max_fee_satoshis))); } if max_fee_satoshis < our_min_fee { return Err(ChannelError::Warn(format!("Unable to come to consensus about closing feerate, remote's max fee ({} sat) was smaller than our min fee ({} sat)", max_fee_satoshis, our_min_fee))); @@ -4710,7 +6260,7 @@ impl<SP: Deref> Channel<SP> where propose_fee!(cmp::min(max_fee_satoshis, our_max_fee)); } else { if msg.fee_satoshis < our_min_fee || msg.fee_satoshis > our_max_fee { - return Err(ChannelError::Close(format!("Peer sent a bogus closing_signed - suggested fee of {} sat was not in our desired range of {} sat - {} sat after we informed them of our range.", + return Err(ChannelError::close(format!("Peer sent a bogus closing_signed - suggested fee of {} sat was not in our desired range of {} sat - {} sat after we informed them of our range.", msg.fee_satoshis, our_min_fee, our_max_fee))); } // The proposed fee is in our acceptable range, accept it and broadcast! @@ -4726,7 +6276,7 @@ impl<SP: Deref> Channel<SP> where } else if last_fee < our_max_fee { propose_fee!(our_max_fee); } else { - return Err(ChannelError::Close(format!("Unable to come to consensus about closing feerate, remote wants something ({} sat) higher than our max fee ({} sat)", msg.fee_satoshis, our_max_fee))); + return Err(ChannelError::close(format!("Unable to come to consensus about closing feerate, remote wants something ({} sat) higher than our max fee ({} sat)", msg.fee_satoshis, our_max_fee))); } } else { if msg.fee_satoshis > our_min_fee { @@ -4734,7 +6284,7 @@ impl<SP: Deref> Channel<SP> where } else if last_fee > our_min_fee { propose_fee!(our_min_fee); } else { - return Err(ChannelError::Close(format!("Unable to come to consensus about closing feerate, remote wants something ({} sat) lower than our min fee ({} sat)", msg.fee_satoshis, our_min_fee))); + return Err(ChannelError::close(format!("Unable to come to consensus about closing feerate, remote wants something ({} sat) lower than our min fee ({} sat)", msg.fee_satoshis, our_min_fee))); } } } else { @@ -4786,27 +6336,117 @@ impl<SP: Deref> Channel<SP> where }) } - pub fn get_cur_holder_commitment_transaction_number(&self) -> u64 { - self.context.cur_holder_commitment_transaction_number + 1 - } + pub fn can_accept_incoming_htlc<F: Deref, L: Deref>( + &self, msg: &msgs::UpdateAddHTLC, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: L + ) -> Result<(), (&'static str, u16)> + where + F::Target: FeeEstimator, + L::Target: Logger + { + if self.context.channel_state.is_local_shutdown_sent() { + return Err(("Shutdown was already sent", 0x4000|8)) + } - pub fn get_cur_counterparty_commitment_transaction_number(&self) -> u64 { - self.context.cur_counterparty_commitment_transaction_number + 1 - if self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32) != 0 { 1 } else { 0 } - } + let dust_exposure_limiting_feerate = self.context.get_dust_exposure_limiting_feerate(&fee_estimator); + let htlc_stats = 
self.context.get_pending_htlc_stats(None, dust_exposure_limiting_feerate); + let max_dust_htlc_exposure_msat = self.context.get_max_dust_htlc_exposure_msat(dust_exposure_limiting_feerate); + let (htlc_timeout_dust_limit, htlc_success_dust_limit) = if self.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() { + (0, 0) + } else { + let dust_buffer_feerate = self.context.get_dust_buffer_feerate(None) as u64; + (dust_buffer_feerate * htlc_timeout_tx_weight(self.context.get_channel_type()) / 1000, + dust_buffer_feerate * htlc_success_tx_weight(self.context.get_channel_type()) / 1000) + }; + let exposure_dust_limit_timeout_sats = htlc_timeout_dust_limit + self.context.counterparty_dust_limit_satoshis; + if msg.amount_msat / 1000 < exposure_dust_limit_timeout_sats { + let on_counterparty_tx_dust_htlc_exposure_msat = htlc_stats.on_counterparty_tx_dust_exposure_msat + msg.amount_msat; + if on_counterparty_tx_dust_htlc_exposure_msat > max_dust_htlc_exposure_msat { + log_info!(logger, "Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on counterparty commitment tx", + on_counterparty_tx_dust_htlc_exposure_msat, max_dust_htlc_exposure_msat); + return Err(("Exceeded our dust exposure limit on counterparty commitment tx", 0x1000|7)) + } + } else { + let htlc_dust_exposure_msat = + per_outbound_htlc_counterparty_commit_tx_fee_msat(self.context.feerate_per_kw, &self.context.channel_type); + let counterparty_tx_dust_exposure = + htlc_stats.on_counterparty_tx_dust_exposure_msat.saturating_add(htlc_dust_exposure_msat); + if counterparty_tx_dust_exposure > max_dust_htlc_exposure_msat { + log_info!(logger, "Cannot accept value that would put our exposure to tx fee dust at {} over the limit {} on counterparty commitment tx", + counterparty_tx_dust_exposure, max_dust_htlc_exposure_msat); + return Err(("Exceeded our tx fee dust exposure limit on counterparty commitment tx", 0x1000|7)) + } + } - pub fn get_revoked_counterparty_commitment_transaction_number(&self) -> u64 { - self.context.cur_counterparty_commitment_transaction_number + 2 - } + let exposure_dust_limit_success_sats = htlc_success_dust_limit + self.context.holder_dust_limit_satoshis; + if msg.amount_msat / 1000 < exposure_dust_limit_success_sats { + let on_holder_tx_dust_htlc_exposure_msat = htlc_stats.on_holder_tx_dust_exposure_msat + msg.amount_msat; + if on_holder_tx_dust_htlc_exposure_msat > max_dust_htlc_exposure_msat { + log_info!(logger, "Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on holder commitment tx", + on_holder_tx_dust_htlc_exposure_msat, max_dust_htlc_exposure_msat); + return Err(("Exceeded our dust exposure limit on holder commitment tx", 0x1000|7)) + } + } - #[cfg(test)] - pub fn get_signer(&self) -> &ChannelSignerType<::Signer> { - &self.context.holder_signer - } + let anchor_outputs_value_msat = if self.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() { + ANCHOR_OUTPUT_VALUE_SATOSHI * 2 * 1000 + } else { + 0 + }; - #[cfg(test)] - pub fn get_value_stat(&self) -> ChannelValueStat { - ChannelValueStat { - value_to_self_msat: self.context.value_to_self_msat, + let mut removed_outbound_total_msat = 0; + for ref htlc in self.context.pending_outbound_htlcs.iter() { + if let OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(_)) = htlc.state { + removed_outbound_total_msat += htlc.amount_msat; + } else if let OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(_)) = htlc.state { + 
removed_outbound_total_msat += htlc.amount_msat;
+			}
+		}
+
+		let pending_value_to_self_msat =
+			self.context.value_to_self_msat + htlc_stats.pending_inbound_htlcs_value_msat - removed_outbound_total_msat;
+		let pending_remote_value_msat =
+			self.context.channel_value_satoshis * 1000 - pending_value_to_self_msat;
+
+		if !self.context.is_outbound() {
+			// `Some(())` is for the fee spike buffer we keep for the remote. This deviates from
+			// the spec because the fee spike buffer requirement doesn't exist on the receiver's
+			// side, only on the sender's. Note that with anchor outputs we are no longer as
+			// sensitive to fee spikes, so we need to account for them.
+			let htlc_candidate = HTLCCandidate::new(msg.amount_msat, HTLCInitiator::RemoteOffered);
+			let mut remote_fee_cost_incl_stuck_buffer_msat = self.context.next_remote_commit_tx_fee_msat(htlc_candidate, Some(()));
+			if !self.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
+				remote_fee_cost_incl_stuck_buffer_msat *= FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE;
+			}
+			if pending_remote_value_msat.saturating_sub(msg.amount_msat).saturating_sub(self.context.holder_selected_channel_reserve_satoshis * 1000).saturating_sub(anchor_outputs_value_msat) < remote_fee_cost_incl_stuck_buffer_msat {
+				log_info!(logger, "Attempting to fail HTLC due to fee spike buffer violation in channel {}. Rebalancing is required.", &self.context.channel_id());
+				return Err(("Fee spike buffer violation", 0x1000|7));
+			}
+		}
+
+		Ok(())
+	}
+
+	pub fn get_cur_holder_commitment_transaction_number(&self) -> u64 {
+		self.context.holder_commitment_point.transaction_number() + 1
+	}
+
+	pub fn get_cur_counterparty_commitment_transaction_number(&self) -> u64 {
+		self.context.cur_counterparty_commitment_transaction_number + 1 - if self.context.channel_state.is_awaiting_remote_revoke() { 1 } else { 0 }
+	}
+
+	pub fn get_revoked_counterparty_commitment_transaction_number(&self) -> u64 {
+		self.context.cur_counterparty_commitment_transaction_number + 2
+	}
+
+	#[cfg(test)]
+	pub fn get_signer(&self) -> &ChannelSignerType<SP> {
+		&self.context.holder_signer
+	}
+
+	#[cfg(test)]
+	pub fn get_value_stat(&self) -> ChannelValueStat {
+		ChannelValueStat {
+			value_to_self_msat: self.context.value_to_self_msat,
 			channel_value_msat: self.context.channel_value_satoshis * 1000,
 			channel_reserve_msat: self.context.counterparty_selected_channel_reserve_satoshis.unwrap() * 1000,
 			pending_outbound_htlcs_amount_msat: self.context.pending_outbound_htlcs.iter().map(|ref h| h.amount_msat).sum::<u64>(),
@@ -4831,7 +6471,7 @@ impl<SP: Deref> Channel<SP> where
 	/// Returns true if this channel has been marked as awaiting a monitor update to move forward.
 	/// Allowed in any state (including after shutdown)
 	pub fn is_awaiting_monitor_update(&self) -> bool {
-		(self.context.channel_state & ChannelState::MonitorUpdateInProgress as u32) != 0
+		self.context.channel_state.is_monitor_update_in_progress()
 	}

 	/// Gets the latest [`ChannelMonitorUpdate`] ID which has been released and is in-flight.
@@ -4863,6 +6503,26 @@ impl<SP: Deref> Channel<SP> where
 		}
 	}

+	/// On startup, it's possible we detect some monitor updates have actually completed (and the
+	/// ChannelManager was simply stale). In that case, we should simply drop them, which we do
+	/// here after logging them.
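The `can_accept_incoming_htlc` path above gates an inbound HTLC on two budgets: dust exposure on either commitment transaction, and, when we are not the funder, a fee spike buffer on the remote's balance. A standalone sketch of that arithmetic follows; the types, weights, and helper names are simplified stand-ins for illustration, with the buffer multiple assumed to be 2:

// All names local to this sketch; not LDK API.
const FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE: u64 = 2; // assumed buffer multiple

struct DustParams {
	htlc_success_tx_weight: u64, // second-stage claim tx weights (non-anchor)
	htlc_timeout_tx_weight: u64,
	counterparty_dust_limit_sat: u64,
	dust_buffer_feerate_per_kw: u64,
}

// An HTLC is "dust" on the counterparty's commitment tx if, after paying the
// second-stage fee at the buffered feerate, it would fall below their dust limit.
fn is_dust_on_counterparty_tx(p: &DustParams, offered_by_us: bool, amount_msat: u64) -> bool {
	let weight = if offered_by_us { p.htlc_timeout_tx_weight } else { p.htlc_success_tx_weight };
	let second_stage_fee_sat = p.dust_buffer_feerate_per_kw * weight / 1000;
	amount_msat / 1000 < second_stage_fee_sat + p.counterparty_dust_limit_sat
}

// Fee spike buffer: only accept if the funder could still pay the commitment
// fee after a feerate increase (the multiple is skipped on anchor channels).
fn passes_fee_spike_buffer(
	funder_balance_msat: u64, new_htlc_amount_msat: u64, reserve_msat: u64,
	commit_tx_fee_msat: u64, anchors: bool,
) -> bool {
	let mut fee_with_buffer_msat = commit_tx_fee_msat;
	if !anchors {
		fee_with_buffer_msat *= FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE;
	}
	funder_balance_msat
		.saturating_sub(new_htlc_amount_msat)
		.saturating_sub(reserve_msat)
		>= fee_with_buffer_msat
}

fn main() {
	let p = DustParams {
		htlc_success_tx_weight: 703, htlc_timeout_tx_weight: 663,
		counterparty_dust_limit_sat: 546, dust_buffer_feerate_per_kw: 2500,
	};
	// A 1000-sat inbound HTLC is dust at this feerate: 2500 * 703 / 1000 + 546 = 2303 sat.
	assert!(is_dust_on_counterparty_tx(&p, false, 1_000_000));
	assert!(passes_fee_spike_buffer(5_000_000, 1_000_000, 1_000_000, 1_000_000, false));
}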
+ pub fn on_startup_drop_completed_blocked_mon_updates_through(&mut self, logger: &L, loaded_mon_update_id: u64) { + let channel_id = self.context.channel_id(); + self.context.blocked_monitor_updates.retain(|update| { + if update.update.update_id <= loaded_mon_update_id { + log_info!( + logger, + "Dropping completed ChannelMonitorUpdate id {} on channel {} due to a stale ChannelManager", + update.update.update_id, + channel_id, + ); + false + } else { + true + } + }); + } + pub fn blocked_monitor_updates_pending(&self) -> usize { self.context.blocked_monitor_updates.len() } @@ -4873,17 +6533,18 @@ impl Channel where /// advanced state. pub fn is_awaiting_initial_mon_persist(&self) -> bool { if !self.is_awaiting_monitor_update() { return false; } - if self.context.channel_state & - !(ChannelState::TheirChannelReady as u32 | ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32 | ChannelState::WaitingForBatch as u32) - == ChannelState::FundingSent as u32 { + if matches!( + self.context.channel_state, ChannelState::AwaitingChannelReady(flags) + if flags.clone().clear(AwaitingChannelReadyFlags::THEIR_CHANNEL_READY | FundedStateFlags::PEER_DISCONNECTED | FundedStateFlags::MONITOR_UPDATE_IN_PROGRESS | AwaitingChannelReadyFlags::WAITING_FOR_BATCH).is_empty() + ) { // If we're not a 0conf channel, we'll be waiting on a monitor update with only - // FundingSent set, though our peer could have sent their channel_ready. + // AwaitingChannelReady set, though our peer could have sent their channel_ready. debug_assert!(self.context.minimum_depth.unwrap_or(1) > 0); return true; } - if self.context.cur_holder_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 1 && + if self.context.holder_commitment_point.transaction_number() == INITIAL_COMMITMENT_NUMBER - 1 && self.context.cur_counterparty_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 1 { - // If we're a 0-conf channel, we'll move beyond FundingSent immediately even while + // If we're a 0-conf channel, we'll move beyond AwaitingChannelReady immediately even while // waiting for the initial monitor persistence. Thus, we check if our commitment // transaction numbers have both been iterated only exactly once (for the // funding_signed), and we're awaiting monitor update. @@ -4905,27 +6566,30 @@ impl Channel where /// Returns true if our channel_ready has been sent pub fn is_our_channel_ready(&self) -> bool { - (self.context.channel_state & ChannelState::OurChannelReady as u32) != 0 || self.context.channel_state & !STATE_FLAGS >= ChannelState::ChannelReady as u32 + matches!(self.context.channel_state, ChannelState::AwaitingChannelReady(flags) if flags.is_set(AwaitingChannelReadyFlags::OUR_CHANNEL_READY)) || + matches!(self.context.channel_state, ChannelState::ChannelReady(_)) } /// Returns true if our peer has either initiated or agreed to shut down the channel. pub fn received_shutdown(&self) -> bool { - (self.context.channel_state & ChannelState::RemoteShutdownSent as u32) != 0 + self.context.channel_state.is_remote_shutdown_sent() } /// Returns true if we either initiated or agreed to shut down the channel. pub fn sent_shutdown(&self) -> bool { - (self.context.channel_state & ChannelState::LocalShutdownSent as u32) != 0 + self.context.channel_state.is_local_shutdown_sent() + } + + /// Returns true if we initiated to shut down the channel. + pub fn initiated_shutdown(&self) -> bool { + self.context.local_initiated_shutdown.is_some() } /// Returns true if this channel is fully shut down. 
True here implies that no further actions /// may/will be taken on this channel, and thus this object should be freed. Any future changes /// will be handled appropriately by the chain monitor. pub fn is_shutdown(&self) -> bool { - if (self.context.channel_state & ChannelState::ShutdownComplete as u32) == ChannelState::ShutdownComplete as u32 { - assert!(self.context.channel_state == ChannelState::ShutdownComplete as u32); - true - } else { false } + matches!(self.context.channel_state, ChannelState::ShutdownComplete) } pub fn channel_update_status(&self) -> ChannelUpdateStatus { @@ -4937,7 +6601,9 @@ impl Channel where self.context.channel_update_status = status; } - fn check_get_channel_ready(&mut self, height: u32) -> Option { + fn check_get_channel_ready(&mut self, height: u32, logger: &L) -> Option + where L::Target: Logger + { // Called: // * always when a new block/transactions are confirmed with the new height // * when funding is signed with a height of 0 @@ -4957,53 +6623,69 @@ impl Channel where // If we're still pending the signature on a funding transaction, then we're not ready to send a // channel_ready yet. if self.context.signer_pending_funding { + // TODO: set signer_pending_channel_ready + log_debug!(logger, "Can't produce channel_ready: the signer is pending funding."); return None; } // Note that we don't include ChannelState::WaitingForBatch as we don't want to send // channel_ready until the entire batch is ready. - let non_shutdown_state = self.context.channel_state & (!MULTI_STATE_FLAGS); - let need_commitment_update = if non_shutdown_state == ChannelState::FundingSent as u32 { - self.context.channel_state |= ChannelState::OurChannelReady as u32; + let need_commitment_update = if matches!(self.context.channel_state, ChannelState::AwaitingChannelReady(f) if f.clone().clear(FundedStateFlags::ALL.into()).is_empty()) { + self.context.channel_state.set_our_channel_ready(); true - } else if non_shutdown_state == (ChannelState::FundingSent as u32 | ChannelState::TheirChannelReady as u32) { - self.context.channel_state = ChannelState::ChannelReady as u32 | (self.context.channel_state & MULTI_STATE_FLAGS); + } else if matches!(self.context.channel_state, ChannelState::AwaitingChannelReady(f) if f.clone().clear(FundedStateFlags::ALL.into()) == AwaitingChannelReadyFlags::THEIR_CHANNEL_READY) { + self.context.channel_state = ChannelState::ChannelReady(self.context.channel_state.with_funded_state_flags_mask().into()); self.context.update_time_counter += 1; true - } else if non_shutdown_state == (ChannelState::FundingSent as u32 | ChannelState::OurChannelReady as u32) { + } else if matches!(self.context.channel_state, ChannelState::AwaitingChannelReady(f) if f.clone().clear(FundedStateFlags::ALL.into()) == AwaitingChannelReadyFlags::OUR_CHANNEL_READY) { // We got a reorg but not enough to trigger a force close, just ignore. false } else { - if self.context.funding_tx_confirmation_height != 0 && self.context.channel_state & !STATE_FLAGS < ChannelState::ChannelReady as u32 { + if self.context.funding_tx_confirmation_height != 0 && + self.context.channel_state < ChannelState::ChannelReady(ChannelReadyFlags::new()) + { // We should never see a funding transaction on-chain until we've received // funding_signed (if we're an outbound channel), or seen funding_generated (if we're // an inbound channel - before that we have no known funding TXID). The fuzzer, // however, may do this and we shouldn't treat it as a bug. 
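Much of this hunk is mechanical fallout from replacing the old `u32` state bitmask with a typed `ChannelState` enum whose funded variants carry flag sets. A minimal sketch of that pattern, with illustrative names rather than the real definitions:

// Illustrative names; the real flag types live in ln::channel_state.
#[derive(Clone, Copy, PartialEq, Eq)]
struct Flags(u32);

impl Flags {
	const OUR_CHANNEL_READY: Flags = Flags(1 << 0);
	const THEIR_CHANNEL_READY: Flags = Flags(1 << 1);

	fn new() -> Flags { Flags(0) }
	fn set(&mut self, f: Flags) { self.0 |= f.0 }
	fn is_set(&self, f: Flags) -> bool { self.0 & f.0 == f.0 }
	fn clear(&self, f: Flags) -> Flags { Flags(self.0 & !f.0) }
	fn is_empty(&self) -> bool { self.0 == 0 }
}

// Variants carry only the flags valid in that state, so illegal flag/state
// combinations stop type-checking instead of becoming runtime bugs.
enum ChannelState {
	AwaitingChannelReady(Flags),
	ChannelReady(Flags),
}

impl ChannelState {
	fn is_our_channel_ready(&self) -> bool {
		matches!(self, ChannelState::AwaitingChannelReady(f) if f.is_set(Flags::OUR_CHANNEL_READY))
			|| matches!(self, ChannelState::ChannelReady(_))
	}
}

fn main() {
	let mut flags = Flags::new();
	flags.set(Flags::OUR_CHANNEL_READY);
	let state = ChannelState::AwaitingChannelReady(flags);
	assert!(state.is_our_channel_ready());
	// The `flags.clone().clear(..).is_empty()` idiom in the hunk asks:
	// "is anything set besides the flags I am about to ignore?"
	assert!(flags.clear(Flags::OUR_CHANNEL_READY).is_empty());
	assert!(!flags.is_set(Flags::THEIR_CHANNEL_READY));
}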
#[cfg(not(fuzzing))] - panic!("Started confirming a channel in a state pre-FundingSent: {}.\n\ + panic!("Started confirming a channel in a state pre-AwaitingChannelReady: {}.\n\ Do NOT broadcast a funding transaction manually - let LDK do it for you!", - self.context.channel_state); + self.context.channel_state.to_u32()); } // We got a reorg but not enough to trigger a force close, just ignore. false }; - if need_commitment_update { - if self.context.channel_state & (ChannelState::MonitorUpdateInProgress as u32) == 0 { - if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == 0 { - let next_per_commitment_point = - self.context.holder_signer.as_ref().get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - 1, &self.context.secp_ctx); - return Some(msgs::ChannelReady { - channel_id: self.context.channel_id, - next_per_commitment_point, - short_channel_id_alias: Some(self.context.outbound_scid_alias), - }); - } - } else { - self.context.monitor_pending_channel_ready = true; - } + if !need_commitment_update { + log_debug!(logger, "Not producing channel_ready: we do not need a commitment update"); + return None; + } + + if self.context.channel_state.is_monitor_update_in_progress() { + log_debug!(logger, "Not producing channel_ready: a monitor update is in progress. Setting monitor_pending_channel_ready."); + self.context.monitor_pending_channel_ready = true; + return None; + } + + if self.context.channel_state.is_peer_disconnected() { + log_debug!(logger, "Not producing channel_ready: the peer is disconnected."); + return None; + } + + // TODO: when get_per_commiment_point becomes async, check if the point is + // available, if not, set signer_pending_channel_ready and return None + + Some(self.get_channel_ready()) + } + + fn get_channel_ready(&self) -> msgs::ChannelReady { + debug_assert!(self.context.holder_commitment_point.is_available()); + msgs::ChannelReady { + channel_id: self.context.channel_id(), + next_per_commitment_point: self.context.holder_commitment_point.current_point(), + short_channel_id_alias: Some(self.context.outbound_scid_alias), } - None } /// When a transaction is confirmed, we check whether it is or spends the funding transaction @@ -5025,8 +6707,8 @@ impl Channel where if self.context.funding_tx_confirmation_height == 0 { if tx.txid() == funding_txo.txid { let txo_idx = funding_txo.index as usize; - if txo_idx >= tx.output.len() || tx.output[txo_idx].script_pubkey != self.context.get_funding_redeemscript().to_v0_p2wsh() || - tx.output[txo_idx].value != self.context.channel_value_satoshis { + if txo_idx >= tx.output.len() || tx.output[txo_idx].script_pubkey != self.context.get_funding_redeemscript().to_p2wsh() || + tx.output[txo_idx].value.to_sat() != self.context.channel_value_satoshis { if self.context.is_outbound() { // If we generated the funding transaction and it doesn't match what it // should, the client is really broken and we should just panic and @@ -5041,7 +6723,7 @@ impl Channel where return Err(ClosureReason::ProcessingError { err: err_reason.to_owned() }); } else { if self.context.is_outbound() { - if !tx.is_coin_base() { + if !tx.is_coinbase() { for input in tx.input.iter() { if input.witness.is_empty() { // We generated a malleable funding transaction, implying we've @@ -5061,7 +6743,7 @@ impl Channel where } // If this is a coinbase transaction and not a 0-conf channel // we should update our min_depth to 100 to handle coinbase maturity - if tx.is_coin_base() && + if tx.is_coinbase() && self.context.minimum_depth.unwrap_or(0) > 0 && 
self.context.minimum_depth.unwrap_or(0) < COINBASE_MATURITY { self.context.minimum_depth = Some(COINBASE_MATURITY); @@ -5070,7 +6752,7 @@ impl Channel where // If we allow 1-conf funding, we may need to check for channel_ready here and // send it immediately instead of waiting for a best_block_updated call (which // may have already happened for this block). - if let Some(channel_ready) = self.check_get_channel_ready(height) { + if let Some(channel_ready) = self.check_get_channel_ready(height, logger) { log_info!(logger, "Sending a channel_ready to our peer for channel {}", &self.context.channel_id); let announcement_sigs = self.get_announcement_sigs(node_signer, chain_hash, user_config, height, logger); msgs = (Some(channel_ready), announcement_sigs); @@ -5136,7 +6818,7 @@ impl Channel where self.context.update_time_counter = cmp::max(self.context.update_time_counter, highest_header_time); - if let Some(channel_ready) = self.check_get_channel_ready(height) { + if let Some(channel_ready) = self.check_get_channel_ready(height, logger) { let announcement_sigs = if let Some((chain_hash, node_signer, user_config)) = chain_node_signer { self.get_announcement_sigs(node_signer, chain_hash, user_config, height, logger) } else { None }; @@ -5144,9 +6826,8 @@ impl Channel where return Ok((Some(channel_ready), timed_out_htlcs, announcement_sigs)); } - let non_shutdown_state = self.context.channel_state & (!MULTI_STATE_FLAGS); - if non_shutdown_state & !STATE_FLAGS >= ChannelState::ChannelReady as u32 || - (non_shutdown_state & ChannelState::OurChannelReady as u32) == ChannelState::OurChannelReady as u32 { + if matches!(self.context.channel_state, ChannelState::ChannelReady(_)) || + self.context.channel_state.is_our_channel_ready() { let mut funding_tx_confirmations = height as i64 - self.context.funding_tx_confirmation_height as i64 + 1; if self.context.funding_tx_confirmation_height == 0 { // Note that check_get_channel_ready may reset funding_tx_confirmation_height to @@ -5173,8 +6854,8 @@ impl Channel where height >= self.context.channel_creation_height + FUNDING_CONF_DEADLINE_BLOCKS { log_info!(logger, "Closing channel {} due to funding timeout", &self.context.channel_id); // If funding_tx_confirmed_in is unset, the channel must not be active - assert!(non_shutdown_state & !STATE_FLAGS <= ChannelState::ChannelReady as u32); - assert_eq!(non_shutdown_state & ChannelState::OurChannelReady as u32, 0); + assert!(self.context.channel_state <= ChannelState::ChannelReady(ChannelReadyFlags::new())); + assert!(!self.context.channel_state.is_our_channel_ready()); return Err(ClosureReason::FundingTimedOut); } @@ -5196,7 +6877,7 @@ impl Channel where // larger. If we don't know that time has moved forward, we can just set it to the last // time we saw and it will be ignored. 
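The coinbase branch above encodes one rule: a channel funded by a coinbase output must wait out Bitcoin's 100-block coinbase maturity unless it was explicitly negotiated as 0-conf. A self-contained sketch of that depth adjustment, using a made-up helper name:

// Sketch only; `effective_minimum_depth` is not an LDK function.
const COINBASE_MATURITY: u32 = 100;

fn effective_minimum_depth(configured: Option<u32>, funding_is_coinbase: bool) -> Option<u32> {
	match configured {
		// 0-conf channels (minimum depth 0) are deliberately left alone.
		Some(depth) if funding_is_coinbase && depth > 0 && depth < COINBASE_MATURITY => {
			Some(COINBASE_MATURITY)
		},
		other => other,
	}
}

fn main() {
	assert_eq!(effective_minimum_depth(Some(3), true), Some(100)); // coinbase funding: wait for maturity
	assert_eq!(effective_minimum_depth(Some(0), true), Some(0));   // 0-conf: unchanged
	assert_eq!(effective_minimum_depth(Some(3), false), Some(3));  // normal funding: unchanged
}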
let best_time = self.context.update_time_counter; - match self.do_best_block_updated(reorg_height, best_time, None::<(ChainHash, &&NodeSigner, &UserConfig)>, logger) { + match self.do_best_block_updated(reorg_height, best_time, None::<(ChainHash, &&dyn NodeSigner, &UserConfig)>, logger) { Ok((channel_ready, timed_out_htlcs, announcement_sigs)) => { assert!(channel_ready.is_none(), "We can't generate a funding with 0 confirmations?"); assert!(timed_out_htlcs.is_empty(), "We can't have accepted HTLCs with a timeout before our funding confirmation?"); @@ -5272,7 +6953,7 @@ impl Channel where return None; } - if self.context.channel_state & ChannelState::PeerDisconnected as u32 != 0 { + if self.context.channel_state.is_peer_disconnected() { log_trace!(logger, "Cannot create an announcement_signatures as our peer is disconnected"); return None; } @@ -5318,7 +6999,10 @@ impl Channel where node_signature: our_node_sig, bitcoin_signature: our_bitcoin_sig, }) - } + }, + // TODO (taproot|arik) + #[cfg(taproot)] + _ => todo!() } } @@ -5345,7 +7029,10 @@ impl Channel where bitcoin_signature_2: if were_node_one { their_bitcoin_sig } else { our_bitcoin_sig }, contents: announcement, }) - } + }, + // TODO (taproot|arik) + #[cfg(taproot)] + _ => todo!() } } else { Err(ChannelError::Ignore("Attempted to sign channel announcement before we'd received announcement_signatures".to_string())) @@ -5364,12 +7051,12 @@ impl Channel where let msghash = hash_to_message!(&Sha256d::hash(&announcement.encode()[..])[..]); if self.context.secp_ctx.verify_ecdsa(&msghash, &msg.node_signature, &self.context.get_counterparty_node_id()).is_err() { - return Err(ChannelError::Close(format!( + return Err(ChannelError::close(format!( "Bad announcement_signatures. Failed to verify node_signature. UnsignedChannelAnnouncement used for verification is {:?}. their_node_key is {:?}", &announcement, self.context.get_counterparty_node_id()))); } if self.context.secp_ctx.verify_ecdsa(&msghash, &msg.bitcoin_signature, self.context.counterparty_funding_pubkey()).is_err() { - return Err(ChannelError::Close(format!( + return Err(ChannelError::close(format!( "Bad announcement_signatures. Failed to verify bitcoin_signature. UnsignedChannelAnnouncement used for verification is {:?}. their_bitcoin_key is ({:?})", &announcement, self.context.counterparty_funding_pubkey()))); } @@ -5404,7 +7091,7 @@ impl Channel where /// May panic if called on a channel that wasn't immediately-previously /// self.remove_uncommitted_htlcs_and_mark_paused()'d pub fn get_channel_reestablish(&mut self, logger: &L) -> msgs::ChannelReestablish where L::Target: Logger { - assert_eq!(self.context.channel_state & ChannelState::PeerDisconnected as u32, ChannelState::PeerDisconnected as u32); + assert!(self.context.channel_state.is_peer_disconnected()); assert_ne!(self.context.cur_counterparty_commitment_transaction_number, INITIAL_COMMITMENT_NUMBER); // Prior to static_remotekey, my_current_per_commitment_point was critical to claiming // current to_remote balances. However, it no longer has any use, and thus is now simply @@ -5434,13 +7121,13 @@ impl Channel where // next_local_commitment_number is the next commitment_signed number we expect to // receive (indicating if they need to resend one that we missed). 
- next_local_commitment_number: INITIAL_COMMITMENT_NUMBER - self.context.cur_holder_commitment_transaction_number, + next_local_commitment_number: INITIAL_COMMITMENT_NUMBER - self.context.holder_commitment_point.transaction_number(), // We have to set next_remote_commitment_number to the next revoke_and_ack we expect to // receive, however we track it by the next commitment number for a remote transaction // (which is one further, as they always revoke previous commitment transaction, not // the one we send) so we have to decrement by 1. Note that if // cur_counterparty_commitment_transaction_number is INITIAL_COMMITMENT_NUMBER we will have - // dropped this channel on disconnect as it hasn't yet reached FundingSent so we can't + // dropped this channel on disconnect as it hasn't yet reached AwaitingChannelReady so we can't // overflow here. next_remote_commitment_number: INITIAL_COMMITMENT_NUMBER - self.context.cur_counterparty_commitment_transaction_number - 1, your_last_per_commitment_secret: remote_last_secret, @@ -5463,13 +7150,13 @@ impl Channel where pub fn queue_add_htlc( &mut self, amount_msat: u64, payment_hash: PaymentHash, cltv_expiry: u32, source: HTLCSource, onion_routing_packet: msgs::OnionPacket, skimmed_fee_msat: Option, - fee_estimator: &LowerBoundedFeeEstimator, logger: &L + blinding_point: Option, fee_estimator: &LowerBoundedFeeEstimator, logger: &L ) -> Result<(), ChannelError> where F::Target: FeeEstimator, L::Target: Logger { self .send_htlc(amount_msat, payment_hash, cltv_expiry, source, onion_routing_packet, true, - skimmed_fee_msat, fee_estimator, logger) + skimmed_fee_msat, blinding_point, fee_estimator, logger) .map(|msg_opt| assert!(msg_opt.is_none(), "We forced holding cell?")) .map_err(|err| { if let ChannelError::Ignore(_) = err { /* fine */ } @@ -5497,11 +7184,15 @@ impl Channel where fn send_htlc( &mut self, amount_msat: u64, payment_hash: PaymentHash, cltv_expiry: u32, source: HTLCSource, onion_routing_packet: msgs::OnionPacket, mut force_holding_cell: bool, - skimmed_fee_msat: Option, fee_estimator: &LowerBoundedFeeEstimator, logger: &L + skimmed_fee_msat: Option, blinding_point: Option, + fee_estimator: &LowerBoundedFeeEstimator, logger: &L ) -> Result, ChannelError> where F::Target: FeeEstimator, L::Target: Logger { - if (self.context.channel_state & (ChannelState::ChannelReady as u32 | BOTH_SIDES_SHUTDOWN_MASK)) != (ChannelState::ChannelReady as u32) { + if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) || + self.context.channel_state.is_local_shutdown_sent() || + self.context.channel_state.is_remote_shutdown_sent() + { return Err(ChannelError::Ignore("Cannot send HTLC until channel is fully established and we haven't started shutting down".to_owned())); } let channel_total_msat = self.context.channel_value_satoshis * 1000; @@ -5524,7 +7215,7 @@ impl Channel where available_balances.next_outbound_htlc_limit_msat))); } - if (self.context.channel_state & (ChannelState::PeerDisconnected as u32)) != 0 { + if self.context.channel_state.is_peer_disconnected() { // Note that this should never really happen, if we're !is_live() on receipt of an // incoming HTLC for relay will result in us rejecting the HTLC and we won't allow // the user to send directly into a !is_live() channel. 
However, if we @@ -5534,7 +7225,7 @@ impl Channel where return Err(ChannelError::Ignore("Cannot send an HTLC while disconnected from channel counterparty".to_owned())); } - let need_holding_cell = (self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::MonitorUpdateInProgress as u32)) != 0; + let need_holding_cell = !self.context.channel_state.can_generate_new_commitment(); log_debug!(logger, "Pushing new outbound HTLC with hash {} for {} msat {}", payment_hash, amount_msat, if force_holding_cell { "into holding cell" } @@ -5554,6 +7245,7 @@ impl Channel where source, onion_routing_packet, skimmed_fee_msat, + blinding_point, }); return Ok(None); } @@ -5565,6 +7257,7 @@ impl Channel where cltv_expiry, state: OutboundHTLCState::LocalAnnounced(Box::new(onion_routing_packet.clone())), source, + blinding_point, skimmed_fee_msat, }); @@ -5576,6 +7269,7 @@ impl Channel where cltv_expiry, onion_routing_packet, skimmed_fee_msat, + blinding_point, }; self.context.next_holder_htlc_id += 1; @@ -5628,6 +7322,7 @@ impl Channel where self.context.latest_monitor_update_id += 1; let monitor_update = ChannelMonitorUpdate { update_id: self.context.latest_monitor_update_id, + counterparty_node_id: Some(self.context.counterparty_node_id), updates: vec![ChannelMonitorUpdateStep::LatestCounterpartyCommitmentTXInfo { commitment_txid: counterparty_commitment_txid, htlc_outputs: htlcs.clone(), @@ -5636,9 +7331,10 @@ impl Channel where feerate_per_kw: Some(counterparty_commitment_tx.feerate_per_kw()), to_broadcaster_value_sat: Some(counterparty_commitment_tx.to_broadcaster_value_sat()), to_countersignatory_value_sat: Some(counterparty_commitment_tx.to_countersignatory_value_sat()), - }] + }], + channel_id: Some(self.context.channel_id()), }; - self.context.channel_state |= ChannelState::AwaitingRemoteRevoke as u32; + self.context.channel_state.set_awaiting_remote_revoke(); monitor_update } @@ -5692,8 +7388,12 @@ impl Channel where htlcs.push(htlc); } - let res = ecdsa.sign_counterparty_commitment(&commitment_stats.tx, commitment_stats.preimages, &self.context.secp_ctx) - .map_err(|_| ChannelError::Ignore("Failed to get signatures for new commitment_signed".to_owned()))?; + let res = ecdsa.sign_counterparty_commitment( + &commitment_stats.tx, + commitment_stats.inbound_htlc_preimages, + commitment_stats.outbound_htlc_preimages, + &self.context.secp_ctx, + ).map_err(|_| ChannelError::Ignore("Failed to get signatures for new commitment_signed".to_owned()))?; signature = res.0; htlc_signatures = res.1; @@ -5715,10 +7415,14 @@ impl Channel where channel_id: self.context.channel_id, signature, htlc_signatures, + batch: None, #[cfg(taproot)] partial_signature_with_nonce: None, }, (counterparty_commitment_txid, commitment_stats.htlcs_included))) - } + }, + // TODO (taproot|arik) + #[cfg(taproot)] + _ => todo!() } } @@ -5735,7 +7439,7 @@ impl Channel where where F::Target: FeeEstimator, L::Target: Logger { let send_res = self.send_htlc(amount_msat, payment_hash, cltv_expiry, source, - onion_routing_packet, false, skimmed_fee_msat, fee_estimator, logger); + onion_routing_packet, false, skimmed_fee_msat, None, fee_estimator, logger); if let Err(e) = &send_res { if let ChannelError::Ignore(_) = e {} else { debug_assert!(false, "Sending cannot trigger channel failure"); } } match send_res? { Some(_) => { @@ -5765,44 +7469,32 @@ impl Channel where /// Begins the shutdown process, getting a message for the remote peer and returning all /// holding cell HTLCs for payment failure. 
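For the `send_htlc` changes above, the essential control flow is: reject outright if the channel is not fully established (or is shutting down, or the peer is disconnected), and otherwise either queue the HTLC in the holding cell or emit `update_add_htlc` now, depending on whether a new commitment can currently be generated. A simplified standalone sketch, not the LDK structs:

struct ChannelFlags {
	awaiting_remote_revoke: bool,
	monitor_update_in_progress: bool,
	peer_disconnected: bool,
	shutdown_sent: bool,
}

enum SendOutcome {
	Rejected(&'static str), // surfaced as ChannelError::Ignore in the hunk above
	QueuedInHoldingCell,    // freed later, once a new commitment can be built
	UpdateAddHtlc,          // sent to the peer immediately
}

fn send_htlc(flags: &ChannelFlags, force_holding_cell: bool) -> SendOutcome {
	if flags.shutdown_sent {
		return SendOutcome::Rejected("Cannot send HTLC until channel is fully established and we haven't started shutting down");
	}
	if flags.peer_disconnected {
		return SendOutcome::Rejected("Cannot send an HTLC while disconnected from channel counterparty");
	}
	// Mirrors `!can_generate_new_commitment()`: queue while awaiting the
	// peer's revoke_and_ack or while a monitor update is in flight.
	if flags.awaiting_remote_revoke || flags.monitor_update_in_progress || force_holding_cell {
		SendOutcome::QueuedInHoldingCell
	} else {
		SendOutcome::UpdateAddHtlc
	}
}

fn main() {
	let flags = ChannelFlags {
		awaiting_remote_revoke: true, monitor_update_in_progress: false,
		peer_disconnected: false, shutdown_sent: false,
	};
	assert!(matches!(send_htlc(&flags, false), SendOutcome::QueuedInHoldingCell));
}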
- /// - /// May jump to the channel being fully shutdown (see [`Self::is_shutdown`]) in which case no - /// [`ChannelMonitorUpdate`] will be returned). pub fn get_shutdown(&mut self, signer_provider: &SP, their_features: &InitFeatures, target_feerate_sats_per_kw: Option, override_shutdown_script: Option) - -> Result<(msgs::Shutdown, Option, Vec<(HTLCSource, PaymentHash)>, Option), APIError> + -> Result<(msgs::Shutdown, Option, Vec<(HTLCSource, PaymentHash)>), APIError> { for htlc in self.context.pending_outbound_htlcs.iter() { if let OutboundHTLCState::LocalAnnounced(_) = htlc.state { return Err(APIError::APIMisuseError{err: "Cannot begin shutdown with pending HTLCs. Process pending events first".to_owned()}); } } - if self.context.channel_state & BOTH_SIDES_SHUTDOWN_MASK != 0 { - if (self.context.channel_state & ChannelState::LocalShutdownSent as u32) == ChannelState::LocalShutdownSent as u32 { - return Err(APIError::APIMisuseError{err: "Shutdown already in progress".to_owned()}); - } - else if (self.context.channel_state & ChannelState::RemoteShutdownSent as u32) == ChannelState::RemoteShutdownSent as u32 { - return Err(APIError::ChannelUnavailable{err: "Shutdown already in progress by remote".to_owned()}); - } + if self.context.channel_state.is_local_shutdown_sent() { + return Err(APIError::APIMisuseError{err: "Shutdown already in progress".to_owned()}); + } + else if self.context.channel_state.is_remote_shutdown_sent() { + return Err(APIError::ChannelUnavailable{err: "Shutdown already in progress by remote".to_owned()}); } if self.context.shutdown_scriptpubkey.is_some() && override_shutdown_script.is_some() { return Err(APIError::APIMisuseError{err: "Cannot override shutdown script for a channel with one already set".to_owned()}); } - assert_eq!(self.context.channel_state & ChannelState::ShutdownComplete as u32, 0); - if self.context.channel_state & (ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32) != 0 { + assert!(!matches!(self.context.channel_state, ChannelState::ShutdownComplete)); + if self.context.channel_state.is_peer_disconnected() || self.context.channel_state.is_monitor_update_in_progress() { return Err(APIError::ChannelUnavailable{err: "Cannot begin shutdown while peer is disconnected or we're waiting on a monitor update, maybe force-close instead?".to_owned()}); } - // If we haven't funded the channel yet, we don't need to bother ensuring the shutdown - // script is set, we just force-close and call it a day. - let mut chan_closed = false; - if self.context.channel_state & !STATE_FLAGS < ChannelState::FundingSent as u32 { - chan_closed = true; - } - let update_shutdown_script = match self.context.shutdown_scriptpubkey { Some(_) => false, - None if !chan_closed => { + None => { // use override shutdown script if provided let shutdown_scriptpubkey = match override_shutdown_script { Some(script) => script, @@ -5820,32 +7512,23 @@ impl Channel where self.context.shutdown_scriptpubkey = Some(shutdown_scriptpubkey); true }, - None => false, }; // From here on out, we may not fail! 
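The `update_shutdown_script` match above reduces to a small decision table: a script committed upfront always wins and may not be overridden, while a channel without one takes the caller's override or derives a fresh script, and only a newly chosen script needs persisting via a `ShutdownScript` monitor update. A sketch under those assumptions, with the script as a bare byte vector rather than LDK's `ShutdownScript`:

fn select_shutdown_script(
	committed_upfront: Option<Vec<u8>>,
	override_script: Option<Vec<u8>>,
	derive_fresh: impl FnOnce() -> Vec<u8>,
) -> Result<(Vec<u8>, bool), &'static str> {
	match committed_upfront {
		Some(script) => {
			if override_script.is_some() {
				// Mirrors the APIMisuseError above.
				return Err("Cannot override shutdown script for a channel with one already set");
			}
			Ok((script, false)) // already persisted at open; no monitor update
		},
		None => {
			// An override wins; otherwise derive one from the signer.
			let script = override_script.unwrap_or_else(derive_fresh);
			Ok((script, true)) // newly chosen: persist via a ShutdownScript update
		},
	}
}

fn main() {
	let fresh = || vec![0x00, 0x14]; // stand-in for a real witness program
	let (script, needs_monitor_update) = select_shutdown_script(None, None, fresh).unwrap();
	assert!(needs_monitor_update && !script.is_empty());
	assert!(select_shutdown_script(Some(vec![1]), Some(vec![2]), || vec![]).is_err());
}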
self.context.target_closing_feerate_sats_per_kw = target_feerate_sats_per_kw; - let shutdown_result = if self.context.channel_state & !STATE_FLAGS < ChannelState::FundingSent as u32 { - let shutdown_result = ShutdownResult { - monitor_update: None, - dropped_outbound_htlcs: Vec::new(), - unbroadcasted_batch_funding_txid: self.context.unbroadcasted_batch_funding_txid(), - }; - self.context.channel_state = ChannelState::ShutdownComplete as u32; - Some(shutdown_result) - } else { - self.context.channel_state |= ChannelState::LocalShutdownSent as u32; - None - }; + self.context.channel_state.set_local_shutdown_sent(); + self.context.local_initiated_shutdown = Some(()); self.context.update_time_counter += 1; let monitor_update = if update_shutdown_script { self.context.latest_monitor_update_id += 1; let monitor_update = ChannelMonitorUpdate { update_id: self.context.latest_monitor_update_id, + counterparty_node_id: Some(self.context.counterparty_node_id), updates: vec![ChannelMonitorUpdateStep::ShutdownScript { scriptpubkey: self.get_closing_scriptpubkey(), }], + channel_id: Some(self.context.channel_id()), }; self.monitor_updating_paused(false, false, false, Vec::new(), Vec::new(), Vec::new()); self.push_ret_blockable_mon_update(monitor_update) @@ -5872,7 +7555,7 @@ impl Channel where debug_assert!(!self.is_shutdown() || monitor_update.is_none(), "we can't both complete shutdown and return a monitor update"); - Ok((shutdown, monitor_update, dropped_outbound_htlcs, shutdown_result)) + Ok((shutdown, monitor_update, dropped_outbound_htlcs)) } pub fn inflight_htlc_sources(&self) -> impl Iterator { @@ -5895,208 +7578,81 @@ pub(super) struct OutboundV1Channel where SP::Target: SignerProvider } impl OutboundV1Channel where SP::Target: SignerProvider { - pub fn new( + pub fn new( fee_estimator: &LowerBoundedFeeEstimator, entropy_source: &ES, signer_provider: &SP, counterparty_node_id: PublicKey, their_features: &InitFeatures, channel_value_satoshis: u64, push_msat: u64, user_id: u128, config: &UserConfig, current_chain_height: u32, - outbound_scid_alias: u64, temporary_channel_id: Option + outbound_scid_alias: u64, temporary_channel_id: Option, logger: L ) -> Result, APIError> where ES::Target: EntropySource, - F::Target: FeeEstimator + F::Target: FeeEstimator, + L::Target: Logger, { - let holder_selected_contest_delay = config.channel_handshake_config.our_to_self_delay; - let channel_keys_id = signer_provider.generate_channel_keys_id(false, channel_value_satoshis, user_id); - let holder_signer = signer_provider.derive_channel_signer(channel_value_satoshis, channel_keys_id); - let pubkeys = holder_signer.pubkeys().clone(); - - if !their_features.supports_wumbo() && channel_value_satoshis > MAX_FUNDING_SATOSHIS_NO_WUMBO { - return Err(APIError::APIMisuseError{err: format!("funding_value must not exceed {}, it was {}", MAX_FUNDING_SATOSHIS_NO_WUMBO, channel_value_satoshis)}); - } - if channel_value_satoshis >= TOTAL_BITCOIN_SUPPLY_SATOSHIS { - return Err(APIError::APIMisuseError{err: format!("funding_value must be smaller than the total bitcoin supply, it was {}", channel_value_satoshis)}); - } - let channel_value_msat = channel_value_satoshis * 1000; - if push_msat > channel_value_msat { - return Err(APIError::APIMisuseError { err: format!("Push value ({}) was larger than channel_value ({})", push_msat, channel_value_msat) }); - } - if holder_selected_contest_delay < BREAKDOWN_TIMEOUT { - return Err(APIError::APIMisuseError {err: format!("Configured with an unreasonable our_to_self_delay ({}) putting 
user funds at risks", holder_selected_contest_delay)}); - } let holder_selected_channel_reserve_satoshis = get_holder_selected_channel_reserve_satoshis(channel_value_satoshis, config); if holder_selected_channel_reserve_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS { // Protocol level safety check in place, although it should never happen because // of `MIN_THEIR_CHAN_RESERVE_SATOSHIS` - return Err(APIError::APIMisuseError { err: format!("Holder selected channel reserve below implemention limit dust_limit_satoshis {}", holder_selected_channel_reserve_satoshis) }); - } - - let channel_type = Self::get_initial_channel_type(&config, their_features); - debug_assert!(channel_type.is_subset(&channelmanager::provided_channel_type_features(&config))); - - let (commitment_conf_target, anchor_outputs_value_msat) = if channel_type.supports_anchors_zero_fee_htlc_tx() { - (ConfirmationTarget::AnchorChannelFee, ANCHOR_OUTPUT_VALUE_SATOSHI * 2 * 1000) - } else { - (ConfirmationTarget::NonAnchorChannelFee, 0) - }; - let commitment_feerate = fee_estimator.bounded_sat_per_1000_weight(commitment_conf_target); - - let value_to_self_msat = channel_value_satoshis * 1000 - push_msat; - let commitment_tx_fee = commit_tx_fee_msat(commitment_feerate, MIN_AFFORDABLE_HTLC_COUNT, &channel_type); - if value_to_self_msat.saturating_sub(anchor_outputs_value_msat) < commitment_tx_fee { - return Err(APIError::APIMisuseError{ err: format!("Funding amount ({}) can't even pay fee for initial commitment transaction fee of {}.", value_to_self_msat / 1000, commitment_tx_fee / 1000) }); - } - - let mut secp_ctx = Secp256k1::new(); - secp_ctx.seeded_randomize(&entropy_source.get_secure_random_bytes()); - - let shutdown_scriptpubkey = if config.channel_handshake_config.commit_upfront_shutdown_pubkey { - match signer_provider.get_shutdown_scriptpubkey() { - Ok(scriptpubkey) => Some(scriptpubkey), - Err(_) => return Err(APIError::ChannelUnavailable { err: "Failed to get shutdown scriptpubkey".to_owned()}), - } - } else { None }; - - if let Some(shutdown_scriptpubkey) = &shutdown_scriptpubkey { - if !shutdown_scriptpubkey.is_compatible(&their_features) { - return Err(APIError::IncompatibleShutdownScript { script: shutdown_scriptpubkey.clone() }); - } + return Err(APIError::APIMisuseError { err: format!("Holder selected channel reserve below \ + implemention limit dust_limit_satoshis {}", holder_selected_channel_reserve_satoshis) }); } - let destination_script = match signer_provider.get_destination_script() { - Ok(script) => script, - Err(_) => return Err(APIError::ChannelUnavailable { err: "Failed to get destination script".to_owned()}), - }; - - let temporary_channel_id = temporary_channel_id.unwrap_or_else(|| ChannelId::temporary_from_entropy_source(entropy_source)); - - Ok(Self { - context: ChannelContext { - user_id, - - config: LegacyChannelConfig { - options: config.channel_config.clone(), - announced_channel: config.channel_handshake_config.announced_channel, - commit_upfront_shutdown_pubkey: config.channel_handshake_config.commit_upfront_shutdown_pubkey, - }, - - prev_config: None, - - inbound_handshake_limits_override: Some(config.channel_handshake_limits.clone()), - - channel_id: temporary_channel_id, - temporary_channel_id: Some(temporary_channel_id), - channel_state: ChannelState::OurInitSent as u32, - announcement_sigs_state: AnnouncementSigsState::NotSent, - secp_ctx, - channel_value_satoshis, - - latest_monitor_update_id: 0, - - holder_signer: ChannelSignerType::Ecdsa(holder_signer), - shutdown_scriptpubkey, - 
destination_script, - - cur_holder_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER, - cur_counterparty_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER, - value_to_self_msat, - - pending_inbound_htlcs: Vec::new(), - pending_outbound_htlcs: Vec::new(), - holding_cell_htlc_updates: Vec::new(), - pending_update_fee: None, - holding_cell_update_fee: None, - next_holder_htlc_id: 0, - next_counterparty_htlc_id: 0, - update_time_counter: 1, - - resend_order: RAACommitmentOrder::CommitmentFirst, - - monitor_pending_channel_ready: false, - monitor_pending_revoke_and_ack: false, - monitor_pending_commitment_signed: false, - monitor_pending_forwards: Vec::new(), - monitor_pending_failures: Vec::new(), - monitor_pending_finalized_fulfills: Vec::new(), - - signer_pending_commitment_update: false, - signer_pending_funding: false, - - #[cfg(debug_assertions)] - holder_max_commitment_tx_output: Mutex::new((channel_value_satoshis * 1000 - push_msat, push_msat)), - #[cfg(debug_assertions)] - counterparty_max_commitment_tx_output: Mutex::new((channel_value_satoshis * 1000 - push_msat, push_msat)), - - last_sent_closing_fee: None, - pending_counterparty_closing_signed: None, - expecting_peer_commitment_signed: false, - closing_fee_limits: None, - target_closing_feerate_sats_per_kw: None, - - funding_tx_confirmed_in: None, - funding_tx_confirmation_height: 0, - short_channel_id: None, - channel_creation_height: current_chain_height, - - feerate_per_kw: commitment_feerate, - counterparty_dust_limit_satoshis: 0, - holder_dust_limit_satoshis: MIN_CHAN_DUST_LIMIT_SATOSHIS, - counterparty_max_htlc_value_in_flight_msat: 0, - holder_max_htlc_value_in_flight_msat: get_holder_max_htlc_value_in_flight_msat(channel_value_satoshis, &config.channel_handshake_config), - counterparty_selected_channel_reserve_satoshis: None, // Filled in in accept_channel - holder_selected_channel_reserve_satoshis, - counterparty_htlc_minimum_msat: 0, - holder_htlc_minimum_msat: if config.channel_handshake_config.our_htlc_minimum_msat == 0 { 1 } else { config.channel_handshake_config.our_htlc_minimum_msat }, - counterparty_max_accepted_htlcs: 0, - holder_max_accepted_htlcs: cmp::min(config.channel_handshake_config.our_max_accepted_htlcs, MAX_HTLCS), - minimum_depth: None, // Filled in in accept_channel - - counterparty_forwarding_info: None, - - channel_transaction_parameters: ChannelTransactionParameters { - holder_pubkeys: pubkeys, - holder_selected_contest_delay: config.channel_handshake_config.our_to_self_delay, - is_outbound_from_holder: true, - counterparty_parameters: None, - funding_outpoint: None, - channel_type_features: channel_type.clone() - }, - funding_transaction: None, - is_batch_funding: None, + let channel_keys_id = signer_provider.generate_channel_keys_id(false, channel_value_satoshis, user_id); + let holder_signer = signer_provider.derive_channel_signer(channel_value_satoshis, channel_keys_id); + let pubkeys = holder_signer.pubkeys().clone(); - counterparty_cur_commitment_point: None, - counterparty_prev_commitment_point: None, + let chan = Self { + context: ChannelContext::new_for_outbound_channel( + fee_estimator, + entropy_source, + signer_provider, counterparty_node_id, - - counterparty_shutdown_scriptpubkey: None, - - commitment_secrets: CounterpartyCommitmentSecrets::new(), - - channel_update_status: ChannelUpdateStatus::Enabled, - closing_signed_in_flight: false, - - announcement_sigs: None, - - #[cfg(any(test, fuzzing))] - next_local_commitment_tx_fee_info_cached: Mutex::new(None), - 
#[cfg(any(test, fuzzing))] - next_remote_commitment_tx_fee_info_cached: Mutex::new(None), - - workaround_lnd_bug_4006: None, - sent_message_awaiting_response: None, - - latest_inbound_scid_alias: None, + their_features, + channel_value_satoshis, + push_msat, + user_id, + config, + current_chain_height, outbound_scid_alias, - - channel_pending_event_emitted: false, - channel_ready_event_emitted: false, - - #[cfg(any(test, fuzzing))] - historical_inbound_htlc_fulfills: HashSet::new(), - - channel_type, + temporary_channel_id, + holder_selected_channel_reserve_satoshis, channel_keys_id, + holder_signer, + pubkeys, + logger, + )?, + unfunded_context: UnfundedChannelContext { unfunded_channel_age_ticks: 0 } + }; + Ok(chan) + } - blocked_monitor_updates: Vec::new(), + /// Only allowed after [`ChannelContext::channel_transaction_parameters`] is set. + fn get_funding_created_msg(&mut self, logger: &L) -> Option where L::Target: Logger { + let counterparty_keys = self.context.build_remote_transaction_keys(); + let counterparty_initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, false, logger).tx; + let signature = match &self.context.holder_signer { + // TODO (taproot|arik): move match into calling method for Taproot + ChannelSignerType::Ecdsa(ecdsa) => { + ecdsa.sign_counterparty_commitment(&counterparty_initial_commitment_tx, Vec::new(), Vec::new(), &self.context.secp_ctx) + .map(|(sig, _)| sig).ok()? }, - unfunded_context: UnfundedChannelContext { unfunded_channel_age_ticks: 0 } + // TODO (taproot|arik) + #[cfg(taproot)] + _ => todo!() + }; + + if self.context.signer_pending_funding { + log_trace!(logger, "Counterparty commitment signature ready for funding_created message: clearing signer_pending_funding"); + self.context.signer_pending_funding = false; + } + + Some(msgs::FundingCreated { + temporary_channel_id: self.context.temporary_channel_id.unwrap(), + funding_txid: self.context.channel_transaction_parameters.funding_outpoint.as_ref().unwrap().txid, + funding_output_index: self.context.channel_transaction_parameters.funding_outpoint.as_ref().unwrap().index, + signature, + #[cfg(taproot)] + partial_signature_with_nonce: None, + #[cfg(taproot)] + next_local_nonce: None, }) } @@ -6107,17 +7663,20 @@ impl OutboundV1Channel where SP::Target: SignerProvider { /// Note that channel_id changes during this call! /// Do NOT broadcast the funding transaction until after a successful funding_signed call! /// If an Err is returned, it is a ChannelError::Close. 
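The new `get_funding_created_msg` shown above is written for a possibly-asynchronous signer: if the counterparty commitment signature is not yet available, the channel records `signer_pending_funding` and returns nothing rather than failing. A standalone sketch of that contract, with stand-in types and the signer abstracted as a closure:

struct FundingCreated {
	funding_txid: [u8; 32],
	funding_output_index: u16,
	signature: Vec<u8>,
}

fn get_funding_created_msg(
	sign_counterparty_commitment: impl FnOnce() -> Option<Vec<u8>>,
	funding_txid: [u8; 32],
	funding_output_index: u16,
	signer_pending_funding: &mut bool,
) -> Option<FundingCreated> {
	match sign_counterparty_commitment() {
		Some(signature) => {
			// Signature ready: clear the pending flag, as the hunk above logs.
			*signer_pending_funding = false;
			Some(FundingCreated { funding_txid, funding_output_index, signature })
		},
		None => {
			// Async signer not ready: remember to retry rather than erroring.
			*signer_pending_funding = true;
			None
		},
	}
}

fn main() {
	let mut pending = false;
	let msg = get_funding_created_msg(|| Some(vec![0u8; 64]), [0u8; 32], 0, &mut pending);
	assert!(msg.is_some() && !pending);
	let msg = get_funding_created_msg(|| None, [0u8; 32], 0, &mut pending);
	assert!(msg.is_none() && pending);
}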
- pub fn get_funding_created(mut self, funding_transaction: Transaction, funding_txo: OutPoint, is_batch_funding: bool, logger: &L) - -> Result<(Channel, Option), (Self, ChannelError)> where L::Target: Logger { + pub fn get_funding_created(&mut self, funding_transaction: Transaction, funding_txo: OutPoint, is_batch_funding: bool, logger: &L) + -> Result, (Self, ChannelError)> where L::Target: Logger { if !self.context.is_outbound() { panic!("Tried to create outbound funding_created message on an inbound channel!"); } - if self.context.channel_state != (ChannelState::OurInitSent as u32 | ChannelState::TheirInitSent as u32) { + if !matches!( + self.context.channel_state, ChannelState::NegotiatingFunding(flags) + if flags == (NegotiatingFundingFlags::OUR_INIT_SENT | NegotiatingFundingFlags::THEIR_INIT_SENT) + ) { panic!("Tried to get a funding_created messsage at a time other than immediately after initial handshake completion (or tried to get funding_created twice)"); } if self.context.commitment_secrets.get_min_seen_secret() != (1 << 48) || self.context.cur_counterparty_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER || - self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER { + self.context.holder_commitment_point.transaction_number() != INITIAL_COMMITMENT_NUMBER { panic!("Should not have advanced channel commitment tx numbers prior to funding_created"); } @@ -6126,12 +7685,12 @@ impl OutboundV1Channel where SP::Target: SignerProvider { // Now that we're past error-generating stuff, update our local state: - self.context.channel_state = ChannelState::FundingCreated as u32; - self.context.channel_id = funding_txo.to_channel_id(); + self.context.channel_state = ChannelState::FundingNegotiated; + self.context.channel_id = ChannelId::v1_from_funding_outpoint(funding_txo); // If the funding transaction is a coinbase transaction, we need to set the minimum depth to 100. // We can skip this if it is a zero-conf channel. - if funding_transaction.is_coin_base() && + if funding_transaction.is_coinbase() && self.context.minimum_depth.unwrap_or(0) > 0 && self.context.minimum_depth.unwrap_or(0) < COINBASE_MATURITY { self.context.minimum_depth = Some(COINBASE_MATURITY); @@ -6140,42 +7699,20 @@ impl OutboundV1Channel where SP::Target: SignerProvider { self.context.funding_transaction = Some(funding_transaction); self.context.is_batch_funding = Some(()).filter(|_| is_batch_funding); - let funding_created = self.context.get_funding_created_msg(logger); + let funding_created = self.get_funding_created_msg(logger); if funding_created.is_none() { - if !self.context.signer_pending_funding { - log_trace!(logger, "funding_created awaiting signer; setting signer_pending_funding"); - self.context.signer_pending_funding = true; + #[cfg(not(async_signing))] { + panic!("Failed to get signature for new funding creation"); + } + #[cfg(async_signing)] { + if !self.context.signer_pending_funding { + log_trace!(logger, "funding_created awaiting signer; setting signer_pending_funding"); + self.context.signer_pending_funding = true; + } } } - let channel = Channel { - context: self.context, - }; - - Ok((channel, funding_created)) - } - - fn get_initial_channel_type(config: &UserConfig, their_features: &InitFeatures) -> ChannelTypeFeatures { - // The default channel type (ie the first one we try) depends on whether the channel is - // public - if it is, we just go with `only_static_remotekey` as it's the only option - // available. 
If it's private, we first try `scid_privacy` as it provides better privacy - // with no other changes, and fall back to `only_static_remotekey`. - let mut ret = ChannelTypeFeatures::only_static_remote_key(); - if !config.channel_handshake_config.announced_channel && - config.channel_handshake_config.negotiate_scid_privacy && - their_features.supports_scid_privacy() { - ret.set_scid_privacy_required(); - } - - // Optionally, if the user would like to negotiate the `anchors_zero_fee_htlc_tx` option, we - // set it now. If they don't understand it, we'll fall back to our default of - // `only_static_remotekey`. - if config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx && - their_features.supports_anchors_zero_fee_htlc_tx() { - ret.set_anchors_zero_fee_htlc_tx_required(); - } - - ret + Ok(funding_created) } /// If we receive an error message, it may only be a rejection of the channel type we tried, @@ -6187,204 +7724,184 @@ impl OutboundV1Channel where SP::Target: SignerProvider { where F::Target: FeeEstimator { - if !self.context.is_outbound() || self.context.channel_state != ChannelState::OurInitSent as u32 { return Err(()); } - if self.context.channel_type == ChannelTypeFeatures::only_static_remote_key() { - // We've exhausted our options - return Err(()); - } - // We support opening a few different types of channels. Try removing our additional - // features one by one until we've either arrived at our default or the counterparty has - // accepted one. - // - // Due to the order below, we may not negotiate `option_anchors_zero_fee_htlc_tx` if the - // counterparty doesn't support `option_scid_privacy`. Since `get_initial_channel_type` - // checks whether the counterparty supports every feature, this would only happen if the - // counterparty is advertising the feature, but rejecting channels proposing the feature for - // whatever reason. - if self.context.channel_type.supports_anchors_zero_fee_htlc_tx() { - self.context.channel_type.clear_anchors_zero_fee_htlc_tx(); - self.context.feerate_per_kw = fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::NonAnchorChannelFee); - assert!(!self.context.channel_transaction_parameters.channel_type_features.supports_anchors_nonzero_fee_htlc_tx()); - } else if self.context.channel_type.supports_scid_privacy() { - self.context.channel_type.clear_scid_privacy(); - } else { - self.context.channel_type = ChannelTypeFeatures::only_static_remote_key(); - } - self.context.channel_transaction_parameters.channel_type_features = self.context.channel_type.clone(); + self.context.maybe_downgrade_channel_features(fee_estimator)?; Ok(self.get_open_channel(chain_hash)) } + /// Returns true if we can resume the channel by sending the [`msgs::OpenChannel`] again. 
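The removed downgrade ladder (now delegated to `maybe_downgrade_channel_features`) strips one optional channel-type feature per retry, in a fixed order, until only `static_remote_key` remains. A sketch of that fallback with an illustrative two-feature type:

#[derive(Clone, Copy, PartialEq, Eq, Debug)]
struct ChannelType {
	anchors_zero_fee_htlc_tx: bool,
	scid_privacy: bool, // static_remote_key is always implied
}

fn downgrade_once(ty: ChannelType) -> Result<ChannelType, ()> {
	if ty.anchors_zero_fee_htlc_tx {
		// Dropping anchors also means re-quoting the commitment feerate in LDK.
		Ok(ChannelType { anchors_zero_fee_htlc_tx: false, ..ty })
	} else if ty.scid_privacy {
		Ok(ChannelType { scid_privacy: false, ..ty })
	} else {
		Err(()) // already at only_static_remote_key: out of options
	}
}

fn main() {
	let mut ty = ChannelType { anchors_zero_fee_htlc_tx: true, scid_privacy: true };
	ty = downgrade_once(ty).unwrap();
	assert_eq!(ty, ChannelType { anchors_zero_fee_htlc_tx: false, scid_privacy: true });
	ty = downgrade_once(ty).unwrap();
	assert!(downgrade_once(ty).is_err()); // exhausted: the channel open fails
}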
+ pub fn is_resumable(&self) -> bool { + !self.context.have_received_message() && + self.context.holder_commitment_point.transaction_number() == INITIAL_COMMITMENT_NUMBER + } + pub fn get_open_channel(&self, chain_hash: ChainHash) -> msgs::OpenChannel { if !self.context.is_outbound() { panic!("Tried to open a channel for an inbound channel?"); } - if self.context.channel_state != ChannelState::OurInitSent as u32 { + if self.context.have_received_message() { panic!("Cannot generate an open_channel after we've moved forward"); } - if self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER { + if self.context.holder_commitment_point.transaction_number() != INITIAL_COMMITMENT_NUMBER { panic!("Tried to send an open_channel for a channel that has already advanced"); } - let first_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx); + debug_assert!(self.context.holder_commitment_point.is_available()); + let first_per_commitment_point = self.context.holder_commitment_point.current_point(); let keys = self.context.get_holder_pubkeys(); msgs::OpenChannel { - chain_hash, - temporary_channel_id: self.context.channel_id, - funding_satoshis: self.context.channel_value_satoshis, + common_fields: msgs::CommonOpenChannelFields { + chain_hash, + temporary_channel_id: self.context.channel_id, + funding_satoshis: self.context.channel_value_satoshis, + dust_limit_satoshis: self.context.holder_dust_limit_satoshis, + max_htlc_value_in_flight_msat: self.context.holder_max_htlc_value_in_flight_msat, + htlc_minimum_msat: self.context.holder_htlc_minimum_msat, + commitment_feerate_sat_per_1000_weight: self.context.feerate_per_kw as u32, + to_self_delay: self.context.get_holder_selected_contest_delay(), + max_accepted_htlcs: self.context.holder_max_accepted_htlcs, + funding_pubkey: keys.funding_pubkey, + revocation_basepoint: keys.revocation_basepoint.to_public_key(), + payment_basepoint: keys.payment_point, + delayed_payment_basepoint: keys.delayed_payment_basepoint.to_public_key(), + htlc_basepoint: keys.htlc_basepoint.to_public_key(), + first_per_commitment_point, + channel_flags: if self.context.config.announced_channel {1} else {0}, + shutdown_scriptpubkey: Some(match &self.context.shutdown_scriptpubkey { + Some(script) => script.clone().into_inner(), + None => Builder::new().into_script(), + }), + channel_type: Some(self.context.channel_type.clone()), + }, push_msat: self.context.channel_value_satoshis * 1000 - self.context.value_to_self_msat, - dust_limit_satoshis: self.context.holder_dust_limit_satoshis, - max_htlc_value_in_flight_msat: self.context.holder_max_htlc_value_in_flight_msat, channel_reserve_satoshis: self.context.holder_selected_channel_reserve_satoshis, - htlc_minimum_msat: self.context.holder_htlc_minimum_msat, - feerate_per_kw: self.context.feerate_per_kw as u32, - to_self_delay: self.context.get_holder_selected_contest_delay(), - max_accepted_htlcs: self.context.holder_max_accepted_htlcs, - funding_pubkey: keys.funding_pubkey, - revocation_basepoint: keys.revocation_basepoint.to_public_key(), - payment_point: keys.payment_point, - delayed_payment_basepoint: keys.delayed_payment_basepoint.to_public_key(), - htlc_basepoint: keys.htlc_basepoint.to_public_key(), - first_per_commitment_point, - channel_flags: if self.context.config.announced_channel {1} else {0}, - shutdown_scriptpubkey: Some(match &self.context.shutdown_scriptpubkey { - Some(script) => 
script.clone().into_inner(), - None => Builder::new().into_script(), - }), - channel_type: Some(self.context.channel_type.clone()), } } // Message handlers - pub fn accept_channel(&mut self, msg: &msgs::AcceptChannel, default_limits: &ChannelHandshakeLimits, their_features: &InitFeatures) -> Result<(), ChannelError> { - let peer_limits = if let Some(ref limits) = self.context.inbound_handshake_limits_override { limits } else { default_limits }; + pub fn accept_channel( + &mut self, msg: &msgs::AcceptChannel, default_limits: &ChannelHandshakeLimits, + their_features: &InitFeatures + ) -> Result<(), ChannelError> { + self.context.do_accept_channel_checks(default_limits, their_features, &msg.common_fields, msg.channel_reserve_satoshis) + } - // Check sanity of message fields: + /// Handles a funding_signed message from the remote end. + /// If this call is successful, broadcast the funding transaction (and not before!) + pub fn funding_signed( + mut self, msg: &msgs::FundingSigned, best_block: BestBlock, signer_provider: &SP, logger: &L + ) -> Result<(Channel, ChannelMonitor<::EcdsaSigner>), (OutboundV1Channel, ChannelError)> + where + L::Target: Logger + { if !self.context.is_outbound() { - return Err(ChannelError::Close("Got an accept_channel message from an inbound peer".to_owned())); - } - if self.context.channel_state != ChannelState::OurInitSent as u32 { - return Err(ChannelError::Close("Got an accept_channel message at a strange time".to_owned())); - } - if msg.dust_limit_satoshis > 21000000 * 100000000 { - return Err(ChannelError::Close(format!("Peer never wants payout outputs? dust_limit_satoshis was {}", msg.dust_limit_satoshis))); + return Err((self, ChannelError::close("Received funding_signed for an inbound channel?".to_owned()))); } - if msg.channel_reserve_satoshis > self.context.channel_value_satoshis { - return Err(ChannelError::Close(format!("Bogus channel_reserve_satoshis ({}). Must not be greater than ({})", msg.channel_reserve_satoshis, self.context.channel_value_satoshis))); + if !matches!(self.context.channel_state, ChannelState::FundingNegotiated) { + return Err((self, ChannelError::close("Received funding_signed in strange state!".to_owned()))); } - if msg.dust_limit_satoshis > self.context.holder_selected_channel_reserve_satoshis { - return Err(ChannelError::Close(format!("Dust limit ({}) is bigger than our channel reserve ({})", msg.dust_limit_satoshis, self.context.holder_selected_channel_reserve_satoshis))); - } - if msg.channel_reserve_satoshis > self.context.channel_value_satoshis - self.context.holder_selected_channel_reserve_satoshis { - return Err(ChannelError::Close(format!("Bogus channel_reserve_satoshis ({}). Must not be greater than channel value minus our reserve ({})", - msg.channel_reserve_satoshis, self.context.channel_value_satoshis - self.context.holder_selected_channel_reserve_satoshis))); - } - let full_channel_value_msat = (self.context.channel_value_satoshis - msg.channel_reserve_satoshis) * 1000; - if msg.htlc_minimum_msat >= full_channel_value_msat { - return Err(ChannelError::Close(format!("Minimum htlc value ({}) is full channel value ({})", msg.htlc_minimum_msat, full_channel_value_msat))); - } - let max_delay_acceptable = u16::min(peer_limits.their_to_self_delay, MAX_LOCAL_BREAKDOWN_TIMEOUT); - if msg.to_self_delay > max_delay_acceptable { - return Err(ChannelError::Close(format!("They wanted our payments to be delayed by a needlessly long period. Upper limit: {}. 
Actual: {}", max_delay_acceptable, msg.to_self_delay))); - } - if msg.max_accepted_htlcs < 1 { - return Err(ChannelError::Close("0 max_accepted_htlcs makes for a useless channel".to_owned())); - } - if msg.max_accepted_htlcs > MAX_HTLCS { - return Err(ChannelError::Close(format!("max_accepted_htlcs was {}. It must not be larger than {}", msg.max_accepted_htlcs, MAX_HTLCS))); + if self.context.commitment_secrets.get_min_seen_secret() != (1 << 48) || + self.context.cur_counterparty_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER || + self.context.holder_commitment_point.transaction_number() != INITIAL_COMMITMENT_NUMBER { + panic!("Should not have advanced channel commitment tx numbers prior to funding_created"); } - // Now check against optional parameters as set by config... - if msg.htlc_minimum_msat > peer_limits.max_htlc_minimum_msat { - return Err(ChannelError::Close(format!("htlc_minimum_msat ({}) is higher than the user specified limit ({})", msg.htlc_minimum_msat, peer_limits.max_htlc_minimum_msat))); - } - if msg.max_htlc_value_in_flight_msat < peer_limits.min_max_htlc_value_in_flight_msat { - return Err(ChannelError::Close(format!("max_htlc_value_in_flight_msat ({}) is less than the user specified limit ({})", msg.max_htlc_value_in_flight_msat, peer_limits.min_max_htlc_value_in_flight_msat))); - } - if msg.channel_reserve_satoshis > peer_limits.max_channel_reserve_satoshis { - return Err(ChannelError::Close(format!("channel_reserve_satoshis ({}) is higher than the user specified limit ({})", msg.channel_reserve_satoshis, peer_limits.max_channel_reserve_satoshis))); - } - if msg.max_accepted_htlcs < peer_limits.min_max_accepted_htlcs { - return Err(ChannelError::Close(format!("max_accepted_htlcs ({}) is less than the user specified limit ({})", msg.max_accepted_htlcs, peer_limits.min_max_accepted_htlcs))); - } - if msg.dust_limit_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS { - return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is less than the implementation limit ({})", msg.dust_limit_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS))); - } - if msg.dust_limit_satoshis > MAX_CHAN_DUST_LIMIT_SATOSHIS { - return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is greater than the implementation limit ({})", msg.dust_limit_satoshis, MAX_CHAN_DUST_LIMIT_SATOSHIS))); - } - if msg.minimum_depth > peer_limits.max_minimum_depth { - return Err(ChannelError::Close(format!("We consider the minimum depth to be unreasonably large. Expected minimum: ({}). Actual: ({})", peer_limits.max_minimum_depth, msg.minimum_depth))); - } + let funding_script = self.context.get_funding_redeemscript(); - if let Some(ty) = &msg.channel_type { - if *ty != self.context.channel_type { - return Err(ChannelError::Close("Channel Type in accept_channel didn't match the one sent in open_channel.".to_owned())); - } - } else if their_features.supports_channel_type() { - // Assume they've accepted the channel type as they said they understand it. 
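The `accept_channel` checks being removed here (they now live in `do_accept_channel_checks`) all follow one shape: each peer-supplied field is validated against protocol constants and then against our configured handshake limits, closing the channel on the first violation. A condensed standalone sketch with a few representative checks and made-up limit names:

struct PeerLimits { max_htlc_minimum_msat: u64, max_minimum_depth: u32 }

fn check_accept_channel(
	channel_value_sat: u64, reserve_sat: u64, htlc_minimum_msat: u64,
	minimum_depth: u32, limits: &PeerLimits,
) -> Result<(), String> {
	if reserve_sat > channel_value_sat {
		return Err(format!("Bogus channel_reserve_satoshis ({})", reserve_sat));
	}
	let full_channel_value_msat = (channel_value_sat - reserve_sat) * 1000;
	if htlc_minimum_msat >= full_channel_value_msat {
		return Err(format!("Minimum htlc value ({}) is full channel value ({})",
			htlc_minimum_msat, full_channel_value_msat));
	}
	if htlc_minimum_msat > limits.max_htlc_minimum_msat {
		return Err(format!("htlc_minimum_msat ({}) above user limit ({})",
			htlc_minimum_msat, limits.max_htlc_minimum_msat));
	}
	if minimum_depth > limits.max_minimum_depth {
		return Err(format!("minimum depth unreasonably large: {}", minimum_depth));
	}
	Ok(())
}

fn main() {
	let limits = PeerLimits { max_htlc_minimum_msat: 1_000, max_minimum_depth: 6 };
	assert!(check_accept_channel(1_000_000, 10_000, 1, 3, &limits).is_ok());
	assert!(check_accept_channel(1_000_000, 2_000_000, 1, 3, &limits).is_err());
}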
- } else { - let channel_type = ChannelTypeFeatures::from_init(&their_features); - if channel_type != ChannelTypeFeatures::only_static_remote_key() { - return Err(ChannelError::Close("Only static_remote_key is supported for non-negotiated channel types".to_owned())); + let counterparty_keys = self.context.build_remote_transaction_keys(); + let counterparty_initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, false, logger).tx; + let counterparty_trusted_tx = counterparty_initial_commitment_tx.trust(); + let counterparty_initial_bitcoin_tx = counterparty_trusted_tx.built_transaction(); + + log_trace!(logger, "Initial counterparty tx for channel {} is: txid {} tx {}", + &self.context.channel_id(), counterparty_initial_bitcoin_tx.txid, encode::serialize_hex(&counterparty_initial_bitcoin_tx.transaction)); + + let holder_signer = self.context.build_holder_transaction_keys(); + let initial_commitment_tx = self.context.build_commitment_transaction(self.context.holder_commitment_point.transaction_number(), &holder_signer, true, false, logger).tx; + { + let trusted_tx = initial_commitment_tx.trust(); + let initial_commitment_bitcoin_tx = trusted_tx.built_transaction(); + let sighash = initial_commitment_bitcoin_tx.get_sighash_all(&funding_script, self.context.channel_value_satoshis); + // They sign our commitment transaction, allowing us to broadcast the tx if we wish. + if let Err(_) = self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, &self.context.get_counterparty_pubkeys().funding_pubkey) { + return Err((self, ChannelError::close("Invalid funding_signed signature from peer".to_owned()))); } - self.context.channel_type = channel_type.clone(); - self.context.channel_transaction_parameters.channel_type_features = channel_type; } - let counterparty_shutdown_scriptpubkey = if their_features.supports_upfront_shutdown_script() { - match &msg.shutdown_scriptpubkey { - &Some(ref script) => { - // Peer is signaling upfront_shutdown and has opt-out with a 0-length script. We don't enforce anything - if script.len() == 0 { - None - } else { - if !script::is_bolt2_compliant(&script, their_features) { - return Err(ChannelError::Close(format!("Peer is signaling upfront_shutdown but has provided an unacceptable scriptpubkey format: {}", script))); - } - Some(script.clone()) - } - }, - // Peer is signaling upfront shutdown but don't opt-out with correct mechanism (a.k.a 0-length script). Peer looks buggy, we fail the channel - &None => { - return Err(ChannelError::Close("Peer is signaling upfront_shutdown but we don't get any script. 
Use 0-length script to opt-out".to_owned()));
-			}
-		}
-	} else { None };
+		let holder_commitment_tx = HolderCommitmentTransaction::new(
+			initial_commitment_tx,
+			msg.signature,
+			Vec::new(),
+			&self.context.get_holder_pubkeys().funding_pubkey,
+			self.context.counterparty_funding_pubkey()
+		);
 
-		self.context.counterparty_dust_limit_satoshis = msg.dust_limit_satoshis;
-		self.context.counterparty_max_htlc_value_in_flight_msat = cmp::min(msg.max_htlc_value_in_flight_msat, self.context.channel_value_satoshis * 1000);
-		self.context.counterparty_selected_channel_reserve_satoshis = Some(msg.channel_reserve_satoshis);
-		self.context.counterparty_htlc_minimum_msat = msg.htlc_minimum_msat;
-		self.context.counterparty_max_accepted_htlcs = msg.max_accepted_htlcs;
+		let validated =
+			self.context.holder_signer.as_ref().validate_holder_commitment(&holder_commitment_tx, Vec::new());
+		if validated.is_err() {
+			return Err((self, ChannelError::close("Failed to validate our commitment".to_owned())));
+		}
 
-		if peer_limits.trust_own_funding_0conf {
-			self.context.minimum_depth = Some(msg.minimum_depth);
+		let funding_redeemscript = self.context.get_funding_redeemscript();
+		let funding_txo = self.context.get_funding_txo().unwrap();
+		let funding_txo_script = funding_redeemscript.to_p2wsh();
+		let obscure_factor = get_commitment_transaction_number_obscure_factor(&self.context.get_holder_pubkeys().payment_point, &self.context.get_counterparty_pubkeys().payment_point, self.context.is_outbound());
+		let shutdown_script = self.context.shutdown_scriptpubkey.clone().map(|script| script.into_inner());
+		let mut monitor_signer = signer_provider.derive_channel_signer(self.context.channel_value_satoshis, self.context.channel_keys_id);
+		monitor_signer.provide_channel_parameters(&self.context.channel_transaction_parameters);
+		let channel_monitor = ChannelMonitor::new(self.context.secp_ctx.clone(), monitor_signer,
+			shutdown_script, self.context.get_holder_selected_contest_delay(),
+			&self.context.destination_script, (funding_txo, funding_txo_script),
+			&self.context.channel_transaction_parameters,
+			funding_redeemscript.clone(), self.context.channel_value_satoshis,
+			obscure_factor,
+			holder_commitment_tx, best_block, self.context.counterparty_node_id, self.context.channel_id());
+		channel_monitor.provide_initial_counterparty_commitment_tx(
+			counterparty_initial_bitcoin_tx.txid, Vec::new(),
+			self.context.cur_counterparty_commitment_transaction_number,
+			self.context.counterparty_cur_commitment_point.unwrap(),
+			counterparty_initial_commitment_tx.feerate_per_kw(),
+			counterparty_initial_commitment_tx.to_broadcaster_value_sat(),
+			counterparty_initial_commitment_tx.to_countersignatory_value_sat(), logger);
+
+		assert!(!self.context.channel_state.is_monitor_update_in_progress()); // We have not had any monitor(s) yet to fail updates!
+		if self.context.is_batch_funding() {
+			self.context.channel_state = ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::WAITING_FOR_BATCH);
 		} else {
-			self.context.minimum_depth = Some(cmp::max(1, msg.minimum_depth));
+			self.context.channel_state = ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::new());
 		}
+		if self.context.holder_commitment_point.advance(&self.context.holder_signer, &self.context.secp_ctx, logger).is_err() {
+			// We only fail to advance our commitment point/number if we're currently
+			// waiting for our signer to unblock and provide a commitment point.
+			// We cannot send open_channel before this has occurred, so if we
+			// err here by the time we receive funding_signed, something has gone wrong.
+			debug_assert!(false, "We should be ready to advance our commitment point by the time we receive funding_signed");
+			return Err((self, ChannelError::close("Failed to advance holder commitment point".to_owned())));
+		}
+		self.context.cur_counterparty_commitment_transaction_number -= 1;
 
-		let counterparty_pubkeys = ChannelPublicKeys {
-			funding_pubkey: msg.funding_pubkey,
-			revocation_basepoint: RevocationBasepoint::from(msg.revocation_basepoint),
-			payment_point: msg.payment_point,
-			delayed_payment_basepoint: DelayedPaymentBasepoint::from(msg.delayed_payment_basepoint),
-			htlc_basepoint: HtlcBasepoint::from(msg.htlc_basepoint)
-		};
-
-		self.context.channel_transaction_parameters.counterparty_parameters = Some(CounterpartyChannelTransactionParameters {
-			selected_contest_delay: msg.to_self_delay,
-			pubkeys: counterparty_pubkeys,
-		});
+		log_info!(logger, "Received funding_signed from peer for channel {}", &self.context.channel_id());
 
-		self.context.counterparty_cur_commitment_point = Some(msg.first_per_commitment_point);
-		self.context.counterparty_shutdown_scriptpubkey = counterparty_shutdown_scriptpubkey;
+		let mut channel = Channel {
+			context: self.context,
+			#[cfg(any(dual_funding, splicing))]
+			dual_funding_channel_context: None,
+		};
 
-		self.context.channel_state = ChannelState::OurInitSent as u32 | ChannelState::TheirInitSent as u32;
-		self.context.inbound_handshake_limits_override = None; // We're done enforcing limits on our peer's handshake now.
+		let need_channel_ready = channel.check_get_channel_ready(0, logger).is_some();
+		channel.monitor_updating_paused(false, false, need_channel_ready, Vec::new(), Vec::new(), Vec::new());
+		Ok((channel, channel_monitor))
+	}
 
-		Ok(())
+	/// Indicates that the signer may have some signatures for us, so we should retry if we're
+	/// blocked.
+	#[cfg(async_signing)]
+	pub fn signer_maybe_unblocked<L: Deref>(&mut self, logger: &L) -> Option<msgs::FundingCreated> where L::Target: Logger {
+		if self.context.signer_pending_funding && self.context.is_outbound() {
+			log_trace!(logger, "Signer unblocked a funding_created");
+			self.get_funding_created_msg(logger)
+		} else { None }
 	}
 }
 
@@ -6394,348 +7911,92 @@ pub(super) struct InboundV1Channel<SP: Deref> where SP::Target: SignerProvider {
 	pub unfunded_context: UnfundedChannelContext,
 }
 
-impl<SP: Deref> InboundV1Channel<SP> where SP::Target: SignerProvider {
-	/// Creates a new channel from a remote sides' request for one.
-	/// Assumes chain_hash has already been checked and corresponds with what we expect!
-	pub fn new<ES: Deref, F: Deref, L: Deref>(
-		fee_estimator: &LowerBoundedFeeEstimator<F>, entropy_source: &ES, signer_provider: &SP,
-		counterparty_node_id: PublicKey, our_supported_features: &ChannelTypeFeatures,
-		their_features: &InitFeatures, msg: &msgs::OpenChannel, user_id: u128, config: &UserConfig,
-		current_chain_height: u32, logger: &L, is_0conf: bool,
-	) -> Result<InboundV1Channel<SP>, ChannelError>
-		where ES::Target: EntropySource,
-			F::Target: FeeEstimator,
-			L::Target: Logger,
-	{
-		let announced_channel = if (msg.channel_flags & 1) == 1 { true } else { false };
-
-		// First check the channel type is known, failing before we do anything else if we don't
-		// support this channel type.
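The inline checks removed below were factored out into the new channel_type_from_open_channel helper added later in this patch. For orientation while reading the removed block, here is a condensed sketch of the negotiation rule; it uses this file's real ChannelTypeFeatures predicates, but the function itself is hypothetical:

// Sketch only: condenses the channel_type negotiation performed here (and by
// the new channel_type_from_open_channel helper further down in this diff).
fn negotiate_channel_type(
	explicit: Option<ChannelTypeFeatures>, their_init: &InitFeatures,
	ours: &ChannelTypeFeatures, announced: bool,
) -> Result<ChannelTypeFeatures, &'static str> {
	match explicit {
		Some(ty) if ty.supports_any_optional_bits() => Err("optional bits not allowed"),
		Some(ty) if !ty.requires_static_remote_key() => Err("static_remote_key is required"),
		Some(ty) if !ty.is_subset(ours) => Err("contains unsupported features"),
		Some(ty) if ty.requires_scid_privacy() && announced => Err("scid_privacy on a public channel"),
		Some(ty) => Ok(ty),
		None => {
			// Without an explicit type, infer one from the peer's init features;
			// only plain static_remote_key is acceptable un-negotiated.
			let implied = ChannelTypeFeatures::from_init(their_init);
			if implied != ChannelTypeFeatures::only_static_remote_key() {
				return Err("only static_remote_key for non-negotiated types");
			}
			Ok(implied)
		}
	}
}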
- let channel_type = if let Some(channel_type) = &msg.channel_type { - if channel_type.supports_any_optional_bits() { - return Err(ChannelError::Close("Channel Type field contained optional bits - this is not allowed".to_owned())); - } - - // We only support the channel types defined by the `ChannelManager` in - // `provided_channel_type_features`. The channel type must always support - // `static_remote_key`. - if !channel_type.requires_static_remote_key() { - return Err(ChannelError::Close("Channel Type was not understood - we require static remote key".to_owned())); - } - // Make sure we support all of the features behind the channel type. - if !channel_type.is_subset(our_supported_features) { - return Err(ChannelError::Close("Channel Type contains unsupported features".to_owned())); - } - if channel_type.requires_scid_privacy() && announced_channel { - return Err(ChannelError::Close("SCID Alias/Privacy Channel Type cannot be set on a public channel".to_owned())); - } - channel_type.clone() - } else { - let channel_type = ChannelTypeFeatures::from_init(&their_features); - if channel_type != ChannelTypeFeatures::only_static_remote_key() { - return Err(ChannelError::Close("Only static_remote_key is supported for non-negotiated channel types".to_owned())); - } - channel_type - }; - - let channel_keys_id = signer_provider.generate_channel_keys_id(true, msg.funding_satoshis, user_id); - let holder_signer = signer_provider.derive_channel_signer(msg.funding_satoshis, channel_keys_id); - let pubkeys = holder_signer.pubkeys().clone(); - let counterparty_pubkeys = ChannelPublicKeys { - funding_pubkey: msg.funding_pubkey, - revocation_basepoint: RevocationBasepoint::from(msg.revocation_basepoint), - payment_point: msg.payment_point, - delayed_payment_basepoint: DelayedPaymentBasepoint::from(msg.delayed_payment_basepoint), - htlc_basepoint: HtlcBasepoint::from(msg.htlc_basepoint) - }; - - if config.channel_handshake_config.our_to_self_delay < BREAKDOWN_TIMEOUT { - return Err(ChannelError::Close(format!("Configured with an unreasonable our_to_self_delay ({}) putting user funds at risks. It must be greater than {}", config.channel_handshake_config.our_to_self_delay, BREAKDOWN_TIMEOUT))); - } - - // Check sanity of message fields: - if msg.funding_satoshis > config.channel_handshake_limits.max_funding_satoshis { - return Err(ChannelError::Close(format!("Per our config, funding must be at most {}. It was {}", config.channel_handshake_limits.max_funding_satoshis, msg.funding_satoshis))); - } - if msg.funding_satoshis >= TOTAL_BITCOIN_SUPPLY_SATOSHIS { - return Err(ChannelError::Close(format!("Funding must be smaller than the total bitcoin supply. It was {}", msg.funding_satoshis))); - } - if msg.channel_reserve_satoshis > msg.funding_satoshis { - return Err(ChannelError::Close(format!("Bogus channel_reserve_satoshis ({}). Must be not greater than funding_satoshis: {}", msg.channel_reserve_satoshis, msg.funding_satoshis))); - } - let full_channel_value_msat = (msg.funding_satoshis - msg.channel_reserve_satoshis) * 1000; - if msg.push_msat > full_channel_value_msat { - return Err(ChannelError::Close(format!("push_msat {} was larger than channel amount minus reserve ({})", msg.push_msat, full_channel_value_msat))); - } - if msg.dust_limit_satoshis > msg.funding_satoshis { - return Err(ChannelError::Close(format!("dust_limit_satoshis {} was larger than funding_satoshis {}. 
Peer never wants payout outputs?", msg.dust_limit_satoshis, msg.funding_satoshis))); - } - if msg.htlc_minimum_msat >= full_channel_value_msat { - return Err(ChannelError::Close(format!("Minimum htlc value ({}) was larger than full channel value ({})", msg.htlc_minimum_msat, full_channel_value_msat))); - } - Channel::::check_remote_fee(&channel_type, fee_estimator, msg.feerate_per_kw, None, logger)?; - - let max_counterparty_selected_contest_delay = u16::min(config.channel_handshake_limits.their_to_self_delay, MAX_LOCAL_BREAKDOWN_TIMEOUT); - if msg.to_self_delay > max_counterparty_selected_contest_delay { - return Err(ChannelError::Close(format!("They wanted our payments to be delayed by a needlessly long period. Upper limit: {}. Actual: {}", max_counterparty_selected_contest_delay, msg.to_self_delay))); - } - if msg.max_accepted_htlcs < 1 { - return Err(ChannelError::Close("0 max_accepted_htlcs makes for a useless channel".to_owned())); - } - if msg.max_accepted_htlcs > MAX_HTLCS { - return Err(ChannelError::Close(format!("max_accepted_htlcs was {}. It must not be larger than {}", msg.max_accepted_htlcs, MAX_HTLCS))); - } - - // Now check against optional parameters as set by config... - if msg.funding_satoshis < config.channel_handshake_limits.min_funding_satoshis { - return Err(ChannelError::Close(format!("Funding satoshis ({}) is less than the user specified limit ({})", msg.funding_satoshis, config.channel_handshake_limits.min_funding_satoshis))); - } - if msg.htlc_minimum_msat > config.channel_handshake_limits.max_htlc_minimum_msat { - return Err(ChannelError::Close(format!("htlc_minimum_msat ({}) is higher than the user specified limit ({})", msg.htlc_minimum_msat, config.channel_handshake_limits.max_htlc_minimum_msat))); - } - if msg.max_htlc_value_in_flight_msat < config.channel_handshake_limits.min_max_htlc_value_in_flight_msat { - return Err(ChannelError::Close(format!("max_htlc_value_in_flight_msat ({}) is less than the user specified limit ({})", msg.max_htlc_value_in_flight_msat, config.channel_handshake_limits.min_max_htlc_value_in_flight_msat))); - } - if msg.channel_reserve_satoshis > config.channel_handshake_limits.max_channel_reserve_satoshis { - return Err(ChannelError::Close(format!("channel_reserve_satoshis ({}) is higher than the user specified limit ({})", msg.channel_reserve_satoshis, config.channel_handshake_limits.max_channel_reserve_satoshis))); - } - if msg.max_accepted_htlcs < config.channel_handshake_limits.min_max_accepted_htlcs { - return Err(ChannelError::Close(format!("max_accepted_htlcs ({}) is less than the user specified limit ({})", msg.max_accepted_htlcs, config.channel_handshake_limits.min_max_accepted_htlcs))); - } - if msg.dust_limit_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS { - return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is less than the implementation limit ({})", msg.dust_limit_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS))); - } - if msg.dust_limit_satoshis > MAX_CHAN_DUST_LIMIT_SATOSHIS { - return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is greater than the implementation limit ({})", msg.dust_limit_satoshis, MAX_CHAN_DUST_LIMIT_SATOSHIS))); - } - - // Convert things into internal flags and prep our state: - - if config.channel_handshake_limits.force_announced_channel_preference { - if config.channel_handshake_config.announced_channel != announced_channel { - return Err(ChannelError::Close("Peer tried to open channel but their announcement preference is different from ours".to_owned())); - } +/// Fetches the 
[`ChannelTypeFeatures`] that will be used for a channel built from a given
+/// [`msgs::CommonOpenChannelFields`].
+pub(super) fn channel_type_from_open_channel(
+	common_fields: &msgs::CommonOpenChannelFields, their_features: &InitFeatures,
+	our_supported_features: &ChannelTypeFeatures
+) -> Result<ChannelTypeFeatures, ChannelError> {
+	if let Some(channel_type) = &common_fields.channel_type {
+		if channel_type.supports_any_optional_bits() {
+			return Err(ChannelError::close("Channel Type field contained optional bits - this is not allowed".to_owned()));
 		}
 
-		let holder_selected_channel_reserve_satoshis = get_holder_selected_channel_reserve_satoshis(msg.funding_satoshis, config);
-		if holder_selected_channel_reserve_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
-			// Protocol level safety check in place, although it should never happen because
-			// of `MIN_THEIR_CHAN_RESERVE_SATOSHIS`
-			return Err(ChannelError::Close(format!("Suitable channel reserve not found. remote_channel_reserve was ({}). dust_limit_satoshis is ({}).", holder_selected_channel_reserve_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS)));
+		// We only support the channel types defined by the `ChannelManager` in
+		// `provided_channel_type_features`. The channel type must always support
+		// `static_remote_key`.
+		if !channel_type.requires_static_remote_key() {
+			return Err(ChannelError::close("Channel Type was not understood - we require static remote key".to_owned()));
 		}
-		if holder_selected_channel_reserve_satoshis * 1000 >= full_channel_value_msat {
-			return Err(ChannelError::Close(format!("Suitable channel reserve not found. remote_channel_reserve was ({})msats. Channel value is ({} - {})msats.", holder_selected_channel_reserve_satoshis * 1000, full_channel_value_msat, msg.push_msat)));
+		// Make sure we support all of the features behind the channel type.
+		if !channel_type.is_subset(our_supported_features) {
+			return Err(ChannelError::close("Channel Type contains unsupported features".to_owned()));
 		}
-		if msg.channel_reserve_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
-			log_debug!(logger, "channel_reserve_satoshis ({}) is smaller than our dust limit ({}). We can broadcast stale states without any risk, implying this channel is very insecure for our counterparty.",
-				msg.channel_reserve_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS);
+		let announced_channel = if (common_fields.channel_flags & 1) == 1 { true } else { false };
+		if channel_type.requires_scid_privacy() && announced_channel {
+			return Err(ChannelError::close("SCID Alias/Privacy Channel Type cannot be set on a public channel".to_owned()));
 		}
-		if holder_selected_channel_reserve_satoshis < msg.dust_limit_satoshis {
-			return Err(ChannelError::Close(format!("Dust limit ({}) too high for the channel reserve we require the remote to keep ({})", msg.dust_limit_satoshis, holder_selected_channel_reserve_satoshis)));
-		}
-
-		// check if the funder's amount for the initial commitment tx is sufficient
-		// for full fee payment plus a few HTLCs to ensure the channel will be useful.
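A worked version of the affordability rule this comment describes may help. The sketch below is illustrative only: the weights (724 WU commitment base for non-anchor channels, 172 WU per HTLC, per BOLT 3) and the 330-sat anchor output value are assumptions stated here, not a restatement of this file's exact commit_tx_fee_msat internals:

// Sketch: the funder's balance must cover the commitment fee for a useful
// number of HTLCs, plus the two anchor outputs on anchor channels.
const COMMIT_BASE_WEIGHT: u64 = 724; // assumed non-anchor base weight (BOLT 3)
const WEIGHT_PER_HTLC: u64 = 172;

fn funder_can_afford(
	funders_amount_msat: u64, feerate_per_kw: u64, min_htlcs: u64, anchors_value_sat: u64,
) -> bool {
	let fee_sat = (COMMIT_BASE_WEIGHT + min_htlcs * WEIGHT_PER_HTLC) * feerate_per_kw / 1000;
	(funders_amount_msat / 1000).saturating_sub(anchors_value_sat) >= fee_sat
}

// E.g. at 2_500 sat/kW with 4 affordable HTLCs:
// (724 + 4 * 172) * 2_500 / 1_000 = 3_530 sat of commitment fee.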
- let anchor_outputs_value = if channel_type.supports_anchors_zero_fee_htlc_tx() { - ANCHOR_OUTPUT_VALUE_SATOSHI * 2 - } else { - 0 - }; - let funders_amount_msat = msg.funding_satoshis * 1000 - msg.push_msat; - let commitment_tx_fee = commit_tx_fee_msat(msg.feerate_per_kw, MIN_AFFORDABLE_HTLC_COUNT, &channel_type) / 1000; - if (funders_amount_msat / 1000).saturating_sub(anchor_outputs_value) < commitment_tx_fee { - return Err(ChannelError::Close(format!("Funding amount ({} sats) can't even pay fee for initial commitment transaction fee of {} sats.", (funders_amount_msat / 1000).saturating_sub(anchor_outputs_value), commitment_tx_fee))); - } - - let to_remote_satoshis = funders_amount_msat / 1000 - commitment_tx_fee - anchor_outputs_value; - // While it's reasonable for us to not meet the channel reserve initially (if they don't - // want to push much to us), our counterparty should always have more than our reserve. - if to_remote_satoshis < holder_selected_channel_reserve_satoshis { - return Err(ChannelError::Close("Insufficient funding amount for initial reserve".to_owned())); - } - - let counterparty_shutdown_scriptpubkey = if their_features.supports_upfront_shutdown_script() { - match &msg.shutdown_scriptpubkey { - &Some(ref script) => { - // Peer is signaling upfront_shutdown and has opt-out with a 0-length script. We don't enforce anything - if script.len() == 0 { - None - } else { - if !script::is_bolt2_compliant(&script, their_features) { - return Err(ChannelError::Close(format!("Peer is signaling upfront_shutdown but has provided an unacceptable scriptpubkey format: {}", script))) - } - Some(script.clone()) - } - }, - // Peer is signaling upfront shutdown but don't opt-out with correct mechanism (a.k.a 0-length script). Peer looks buggy, we fail the channel - &None => { - return Err(ChannelError::Close("Peer is signaling upfront_shutdown but we don't get any script. 
Use 0-length script to opt-out".to_owned())); - } - } - } else { None }; - - let shutdown_scriptpubkey = if config.channel_handshake_config.commit_upfront_shutdown_pubkey { - match signer_provider.get_shutdown_scriptpubkey() { - Ok(scriptpubkey) => Some(scriptpubkey), - Err(_) => return Err(ChannelError::Close("Failed to get upfront shutdown scriptpubkey".to_owned())), - } - } else { None }; - - if let Some(shutdown_scriptpubkey) = &shutdown_scriptpubkey { - if !shutdown_scriptpubkey.is_compatible(&their_features) { - return Err(ChannelError::Close(format!("Provided a scriptpubkey format not accepted by peer: {}", shutdown_scriptpubkey))); - } + Ok(channel_type.clone()) + } else { + let channel_type = ChannelTypeFeatures::from_init(&their_features); + if channel_type != ChannelTypeFeatures::only_static_remote_key() { + return Err(ChannelError::close("Only static_remote_key is supported for non-negotiated channel types".to_owned())); } + Ok(channel_type) + } +} - let destination_script = match signer_provider.get_destination_script() { - Ok(script) => script, - Err(_) => return Err(ChannelError::Close("Failed to get destination script".to_owned())), - }; - - let mut secp_ctx = Secp256k1::new(); - secp_ctx.seeded_randomize(&entropy_source.get_secure_random_bytes()); - - let minimum_depth = if is_0conf { - Some(0) - } else { - Some(cmp::max(config.channel_handshake_config.minimum_depth, 1)) - }; - - let chan = Self { - context: ChannelContext { - user_id, - - config: LegacyChannelConfig { - options: config.channel_config.clone(), - announced_channel, - commit_upfront_shutdown_pubkey: config.channel_handshake_config.commit_upfront_shutdown_pubkey, - }, - - prev_config: None, - - inbound_handshake_limits_override: None, - - temporary_channel_id: Some(msg.temporary_channel_id), - channel_id: msg.temporary_channel_id, - channel_state: (ChannelState::OurInitSent as u32) | (ChannelState::TheirInitSent as u32), - announcement_sigs_state: AnnouncementSigsState::NotSent, - secp_ctx, - - latest_monitor_update_id: 0, - - holder_signer: ChannelSignerType::Ecdsa(holder_signer), - shutdown_scriptpubkey, - destination_script, - - cur_holder_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER, - cur_counterparty_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER, - value_to_self_msat: msg.push_msat, - - pending_inbound_htlcs: Vec::new(), - pending_outbound_htlcs: Vec::new(), - holding_cell_htlc_updates: Vec::new(), - pending_update_fee: None, - holding_cell_update_fee: None, - next_holder_htlc_id: 0, - next_counterparty_htlc_id: 0, - update_time_counter: 1, - - resend_order: RAACommitmentOrder::CommitmentFirst, - - monitor_pending_channel_ready: false, - monitor_pending_revoke_and_ack: false, - monitor_pending_commitment_signed: false, - monitor_pending_forwards: Vec::new(), - monitor_pending_failures: Vec::new(), - monitor_pending_finalized_fulfills: Vec::new(), - - signer_pending_commitment_update: false, - signer_pending_funding: false, - - #[cfg(debug_assertions)] - holder_max_commitment_tx_output: Mutex::new((msg.push_msat, msg.funding_satoshis * 1000 - msg.push_msat)), - #[cfg(debug_assertions)] - counterparty_max_commitment_tx_output: Mutex::new((msg.push_msat, msg.funding_satoshis * 1000 - msg.push_msat)), - - last_sent_closing_fee: None, - pending_counterparty_closing_signed: None, - expecting_peer_commitment_signed: false, - closing_fee_limits: None, - target_closing_feerate_sats_per_kw: None, - - funding_tx_confirmed_in: None, - funding_tx_confirmation_height: 0, - short_channel_id: 
None, - channel_creation_height: current_chain_height, - - feerate_per_kw: msg.feerate_per_kw, - channel_value_satoshis: msg.funding_satoshis, - counterparty_dust_limit_satoshis: msg.dust_limit_satoshis, - holder_dust_limit_satoshis: MIN_CHAN_DUST_LIMIT_SATOSHIS, - counterparty_max_htlc_value_in_flight_msat: cmp::min(msg.max_htlc_value_in_flight_msat, msg.funding_satoshis * 1000), - holder_max_htlc_value_in_flight_msat: get_holder_max_htlc_value_in_flight_msat(msg.funding_satoshis, &config.channel_handshake_config), - counterparty_selected_channel_reserve_satoshis: Some(msg.channel_reserve_satoshis), - holder_selected_channel_reserve_satoshis, - counterparty_htlc_minimum_msat: msg.htlc_minimum_msat, - holder_htlc_minimum_msat: if config.channel_handshake_config.our_htlc_minimum_msat == 0 { 1 } else { config.channel_handshake_config.our_htlc_minimum_msat }, - counterparty_max_accepted_htlcs: msg.max_accepted_htlcs, - holder_max_accepted_htlcs: cmp::min(config.channel_handshake_config.our_max_accepted_htlcs, MAX_HTLCS), - minimum_depth, - - counterparty_forwarding_info: None, - - channel_transaction_parameters: ChannelTransactionParameters { - holder_pubkeys: pubkeys, - holder_selected_contest_delay: config.channel_handshake_config.our_to_self_delay, - is_outbound_from_holder: false, - counterparty_parameters: Some(CounterpartyChannelTransactionParameters { - selected_contest_delay: msg.to_self_delay, - pubkeys: counterparty_pubkeys, - }), - funding_outpoint: None, - channel_type_features: channel_type.clone() - }, - funding_transaction: None, - is_batch_funding: None, - - counterparty_cur_commitment_point: Some(msg.first_per_commitment_point), - counterparty_prev_commitment_point: None, - counterparty_node_id, - - counterparty_shutdown_scriptpubkey, - - commitment_secrets: CounterpartyCommitmentSecrets::new(), - - channel_update_status: ChannelUpdateStatus::Enabled, - closing_signed_in_flight: false, - - announcement_sigs: None, - - #[cfg(any(test, fuzzing))] - next_local_commitment_tx_fee_info_cached: Mutex::new(None), - #[cfg(any(test, fuzzing))] - next_remote_commitment_tx_fee_info_cached: Mutex::new(None), - - workaround_lnd_bug_4006: None, - sent_message_awaiting_response: None, +impl InboundV1Channel where SP::Target: SignerProvider { + /// Creates a new channel from a remote sides' request for one. + /// Assumes chain_hash has already been checked and corresponds with what we expect! + pub fn new( + fee_estimator: &LowerBoundedFeeEstimator, entropy_source: &ES, signer_provider: &SP, + counterparty_node_id: PublicKey, our_supported_features: &ChannelTypeFeatures, + their_features: &InitFeatures, msg: &msgs::OpenChannel, user_id: u128, config: &UserConfig, + current_chain_height: u32, logger: &L, is_0conf: bool, + ) -> Result, ChannelError> + where ES::Target: EntropySource, + F::Target: FeeEstimator, + L::Target: Logger, + { + let logger = WithContext::from(logger, Some(counterparty_node_id), Some(msg.common_fields.temporary_channel_id), None); - latest_inbound_scid_alias: None, - outbound_scid_alias: 0, + // First check the channel type is known, failing before we do anything else if we don't + // support this channel type. 
+ let channel_type = channel_type_from_open_channel(&msg.common_fields, their_features, our_supported_features)?; - channel_pending_event_emitted: false, - channel_ready_event_emitted: false, + let holder_selected_channel_reserve_satoshis = get_holder_selected_channel_reserve_satoshis(msg.common_fields.funding_satoshis, config); + let counterparty_pubkeys = ChannelPublicKeys { + funding_pubkey: msg.common_fields.funding_pubkey, + revocation_basepoint: RevocationBasepoint::from(msg.common_fields.revocation_basepoint), + payment_point: msg.common_fields.payment_basepoint, + delayed_payment_basepoint: DelayedPaymentBasepoint::from(msg.common_fields.delayed_payment_basepoint), + htlc_basepoint: HtlcBasepoint::from(msg.common_fields.htlc_basepoint) + }; - #[cfg(any(test, fuzzing))] - historical_inbound_htlc_fulfills: HashSet::new(), + let chan = Self { + context: ChannelContext::new_for_inbound_channel( + fee_estimator, + entropy_source, + signer_provider, + counterparty_node_id, + their_features, + user_id, + config, + current_chain_height, + &&logger, + is_0conf, + 0, + counterparty_pubkeys, channel_type, - channel_keys_id, - - blocked_monitor_updates: Vec::new(), - }, + holder_selected_channel_reserve_satoshis, + msg.channel_reserve_satoshis, + msg.push_msat, + msg.common_fields.clone(), + )?, unfunded_context: UnfundedChannelContext { unfunded_channel_age_ticks: 0 } }; - Ok(chan) } @@ -6747,10 +8008,13 @@ impl InboundV1Channel where SP::Target: SignerProvider { if self.context.is_outbound() { panic!("Tried to send accept_channel for an outbound channel?"); } - if self.context.channel_state != (ChannelState::OurInitSent as u32) | (ChannelState::TheirInitSent as u32) { + if !matches!( + self.context.channel_state, ChannelState::NegotiatingFunding(flags) + if flags == (NegotiatingFundingFlags::OUR_INIT_SENT | NegotiatingFundingFlags::THEIR_INIT_SENT) + ) { panic!("Tried to send accept_channel after channel had moved forward"); } - if self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER { + if self.context.holder_commitment_point.transaction_number() != INITIAL_COMMITMENT_NUMBER { panic!("Tried to send an accept_channel for a channel that has already advanced"); } @@ -6763,29 +8027,32 @@ impl InboundV1Channel where SP::Target: SignerProvider { /// /// [`msgs::AcceptChannel`]: crate::ln::msgs::AcceptChannel fn generate_accept_channel_message(&self) -> msgs::AcceptChannel { - let first_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx); + debug_assert!(self.context.holder_commitment_point.is_available()); + let first_per_commitment_point = self.context.holder_commitment_point.current_point(); let keys = self.context.get_holder_pubkeys(); msgs::AcceptChannel { - temporary_channel_id: self.context.channel_id, - dust_limit_satoshis: self.context.holder_dust_limit_satoshis, - max_htlc_value_in_flight_msat: self.context.holder_max_htlc_value_in_flight_msat, + common_fields: msgs::CommonAcceptChannelFields { + temporary_channel_id: self.context.channel_id, + dust_limit_satoshis: self.context.holder_dust_limit_satoshis, + max_htlc_value_in_flight_msat: self.context.holder_max_htlc_value_in_flight_msat, + htlc_minimum_msat: self.context.holder_htlc_minimum_msat, + minimum_depth: self.context.minimum_depth.unwrap(), + to_self_delay: self.context.get_holder_selected_contest_delay(), + max_accepted_htlcs: self.context.holder_max_accepted_htlcs, + funding_pubkey: 
keys.funding_pubkey,
+				revocation_basepoint: keys.revocation_basepoint.to_public_key(),
+				payment_basepoint: keys.payment_point,
+				delayed_payment_basepoint: keys.delayed_payment_basepoint.to_public_key(),
+				htlc_basepoint: keys.htlc_basepoint.to_public_key(),
+				first_per_commitment_point,
+				shutdown_scriptpubkey: Some(match &self.context.shutdown_scriptpubkey {
+					Some(script) => script.clone().into_inner(),
+					None => Builder::new().into_script(),
+				}),
+				channel_type: Some(self.context.channel_type.clone()),
+			},
 			channel_reserve_satoshis: self.context.holder_selected_channel_reserve_satoshis,
-			htlc_minimum_msat: self.context.holder_htlc_minimum_msat,
-			minimum_depth: self.context.minimum_depth.unwrap(),
-			to_self_delay: self.context.get_holder_selected_contest_delay(),
-			max_accepted_htlcs: self.context.holder_max_accepted_htlcs,
-			funding_pubkey: keys.funding_pubkey,
-			revocation_basepoint: keys.revocation_basepoint.to_public_key(),
-			payment_point: keys.payment_point,
-			delayed_payment_basepoint: keys.delayed_payment_basepoint.to_public_key(),
-			htlc_basepoint: keys.htlc_basepoint.to_public_key(),
-			first_per_commitment_point,
-			shutdown_scriptpubkey: Some(match &self.context.shutdown_scriptpubkey {
-				Some(script) => script.clone().into_inner(),
-				None => Builder::new().into_script(),
-			}),
-			channel_type: Some(self.context.channel_type.clone()),
 			#[cfg(taproot)]
 			next_local_nonce: None,
 		}
@@ -6803,8 +8070,8 @@ impl<SP: Deref> InboundV1Channel<SP> where SP::Target: SignerProvider {
 	fn check_funding_created_signature<L: Deref>(&mut self, sig: &Signature, logger: &L) -> Result<CommitmentTransaction, ChannelError> where L::Target: Logger {
 		let funding_script = self.context.get_funding_redeemscript();
 
-		let keys = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number);
-		let initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &keys, true, false, logger).tx;
+		let keys = self.context.build_holder_transaction_keys();
+		let initial_commitment_tx = self.context.build_commitment_transaction(self.context.holder_commitment_point.transaction_number(), &keys, true, false, logger).tx;
 		let trusted_tx = initial_commitment_tx.trust();
 		let initial_commitment_bitcoin_tx = trusted_tx.built_transaction();
 		let sighash = initial_commitment_bitcoin_tx.get_sighash_all(&funding_script, self.context.channel_value_satoshis);
@@ -6820,22 +8087,25 @@ impl<SP: Deref> InboundV1Channel<SP> where SP::Target: SignerProvider {
 	pub fn funding_created<L: Deref>(
 		mut self, msg: &msgs::FundingCreated, best_block: BestBlock, signer_provider: &SP, logger: &L
-	) -> Result<(Channel<SP>, Option<msgs::FundingSigned>, ChannelMonitor<<SP::Target as SignerProvider>::Signer>), (Self, ChannelError)>
+	) -> Result<(Channel<SP>, Option<msgs::FundingSigned>, ChannelMonitor<<SP::Target as SignerProvider>::EcdsaSigner>), (Self, ChannelError)>
 	where
 		L::Target: Logger
 	{
 		if self.context.is_outbound() {
-			return Err((self, ChannelError::Close("Received funding_created for an outbound channel?".to_owned())));
+			return Err((self, ChannelError::close("Received funding_created for an outbound channel?".to_owned())));
 		}
-		if self.context.channel_state != (ChannelState::OurInitSent as u32 | ChannelState::TheirInitSent as u32) {
+		if !matches!(
+			self.context.channel_state, ChannelState::NegotiatingFunding(flags)
+			if flags == (NegotiatingFundingFlags::OUR_INIT_SENT | NegotiatingFundingFlags::THEIR_INIT_SENT)
+		) {
 			// BOLT 2 says that if we disconnect before we send funding_signed we SHOULD NOT
 			// remember the channel, so it's safe to just send an error_message here and drop the
 			// channel.
-			return Err((self, ChannelError::Close("Received funding_created after we got the channel!".to_owned())));
+			return Err((self, ChannelError::close("Received funding_created after we got the channel!".to_owned())));
 		}
 		if self.context.commitment_secrets.get_min_seen_secret() != (1 << 48) ||
 				self.context.cur_counterparty_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER ||
-				self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
+				self.context.holder_commitment_point.transaction_number() != INITIAL_COMMITMENT_NUMBER {
			panic!("Should not have advanced channel commitment tx numbers prior to funding_created");
 		}
@@ -6867,20 +8137,27 @@ impl<SP: Deref> InboundV1Channel<SP> where SP::Target: SignerProvider {
 		);
 		if let Err(_) = self.context.holder_signer.as_ref().validate_holder_commitment(&holder_commitment_tx, Vec::new()) {
-			return Err((self, ChannelError::Close("Failed to validate our commitment".to_owned())));
+			return Err((self, ChannelError::close("Failed to validate our commitment".to_owned())));
 		}
 
 		// Now that we're past error-generating stuff, update our local state:
 
-		self.context.channel_state = ChannelState::FundingSent as u32;
-		self.context.channel_id = funding_txo.to_channel_id();
+		self.context.channel_state = ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::new());
+		self.context.channel_id = ChannelId::v1_from_funding_outpoint(funding_txo);
 		self.context.cur_counterparty_commitment_transaction_number -= 1;
-		self.context.cur_holder_commitment_transaction_number -= 1;
+		if self.context.holder_commitment_point.advance(&self.context.holder_signer, &self.context.secp_ctx, logger).is_err() {
+			// We only fail to advance our commitment point/number if we're currently
+			// waiting for our signer to unblock and provide a commitment point.
+			// We cannot send accept_channel before this has occurred, so if we
+			// err here by the time we receive funding_created, something has gone wrong.
+ debug_assert!(false, "We should be ready to advance our commitment point by the time we receive funding_created"); + return Err((self, ChannelError::close("Failed to advance holder commitment point".to_owned()))); + } let (counterparty_initial_commitment_tx, funding_signed) = self.context.get_funding_signed_msg(logger); let funding_redeemscript = self.context.get_funding_redeemscript(); - let funding_txo_script = funding_redeemscript.to_v0_p2wsh(); + let funding_txo_script = funding_redeemscript.to_p2wsh(); let obscure_factor = get_commitment_transaction_number_obscure_factor(&self.context.get_holder_pubkeys().payment_point, &self.context.get_counterparty_pubkeys().payment_point, self.context.is_outbound()); let shutdown_script = self.context.shutdown_scriptpubkey.clone().map(|script| script.into_inner()); let mut monitor_signer = signer_provider.derive_channel_signer(self.context.channel_value_satoshis, self.context.channel_keys_id); @@ -6891,8 +8168,7 @@ impl InboundV1Channel where SP::Target: SignerProvider { &self.context.channel_transaction_parameters, funding_redeemscript.clone(), self.context.channel_value_satoshis, obscure_factor, - holder_commitment_tx, best_block, self.context.counterparty_node_id); - + holder_commitment_tx, best_block, self.context.counterparty_node_id, self.context.channel_id()); channel_monitor.provide_initial_counterparty_commitment_tx( counterparty_initial_commitment_tx.trust().txid(), Vec::new(), self.context.cur_counterparty_commitment_transaction_number + 1, @@ -6907,18 +8183,329 @@ impl InboundV1Channel where SP::Target: SignerProvider { // `ChannelMonitor`. let mut channel = Channel { context: self.context, + #[cfg(any(dual_funding, splicing))] + dual_funding_channel_context: None, }; - let need_channel_ready = channel.check_get_channel_ready(0).is_some(); + let need_channel_ready = channel.check_get_channel_ready(0, logger).is_some(); channel.monitor_updating_paused(false, false, need_channel_ready, Vec::new(), Vec::new(), Vec::new()); Ok((channel, funding_signed, channel_monitor)) } } -const SERIALIZATION_VERSION: u8 = 3; +// A not-yet-funded outbound (from holder) channel using V2 channel establishment. 
+#[cfg(any(dual_funding, splicing))]
+pub(super) struct OutboundV2Channel<SP: Deref> where SP::Target: SignerProvider {
+	pub context: ChannelContext<SP>,
+	pub unfunded_context: UnfundedChannelContext,
+	#[cfg(any(dual_funding, splicing))]
+	pub dual_funding_context: DualFundingChannelContext,
+}
+
+#[cfg(any(dual_funding, splicing))]
+impl<SP: Deref> OutboundV2Channel<SP> where SP::Target: SignerProvider {
+	pub fn new<ES: Deref, F: Deref, L: Deref>(
+		fee_estimator: &LowerBoundedFeeEstimator<F>, entropy_source: &ES, signer_provider: &SP,
+		counterparty_node_id: PublicKey, their_features: &InitFeatures, funding_satoshis: u64,
+		user_id: u128, config: &UserConfig, current_chain_height: u32, outbound_scid_alias: u64,
+		funding_confirmation_target: ConfirmationTarget, logger: L,
+	) -> Result<OutboundV2Channel<SP>, APIError>
+	where ES::Target: EntropySource,
+		F::Target: FeeEstimator,
+		L::Target: Logger,
+	{
+		let channel_keys_id = signer_provider.generate_channel_keys_id(false, funding_satoshis, user_id);
+		let holder_signer = signer_provider.derive_channel_signer(funding_satoshis, channel_keys_id);
+		let pubkeys = holder_signer.pubkeys().clone();
+
+		let temporary_channel_id = Some(ChannelId::temporary_v2_from_revocation_basepoint(&pubkeys.revocation_basepoint));
+
+		let holder_selected_channel_reserve_satoshis = get_v2_channel_reserve_satoshis(
+			funding_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS);
+
+		let funding_feerate_sat_per_1000_weight = fee_estimator.bounded_sat_per_1000_weight(funding_confirmation_target);
+		let funding_tx_locktime = current_chain_height;
+
+		let chan = Self {
+			context: ChannelContext::new_for_outbound_channel(
+				fee_estimator,
+				entropy_source,
+				signer_provider,
+				counterparty_node_id,
+				their_features,
+				funding_satoshis,
+				0,
+				user_id,
+				config,
+				current_chain_height,
+				outbound_scid_alias,
+				temporary_channel_id,
+				holder_selected_channel_reserve_satoshis,
+				channel_keys_id,
+				holder_signer,
+				pubkeys,
+				logger,
+			)?,
+			unfunded_context: UnfundedChannelContext { unfunded_channel_age_ticks: 0 },
+			dual_funding_context: DualFundingChannelContext {
+				our_funding_satoshis: funding_satoshis,
+				their_funding_satoshis: 0,
+				funding_tx_locktime,
+				funding_feerate_sat_per_1000_weight,
+			}
+		};
+		Ok(chan)
+	}
+
+	/// If we receive an error message, it may only be a rejection of the channel type we tried,
+	/// not of our ability to open any channel at all. Thus, on error, we should first call this
+	/// and see if we get a new `OpenChannelV2` message, otherwise the channel is failed.
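A sketch of how a caller might drive this retry path (the handler below is hypothetical; in LDK the real call sites live in ChannelManager, and the Err type is assumed from the V1 counterpart of this method, since the return type was elided above):

// Hypothetical caller: on a peer error for a not-yet-funded outbound V2
// channel, first try downgrading features and re-sending open_channel2,
// and only fail the channel when nothing is left to downgrade.
fn on_peer_error<SP: Deref, F: Deref>(
	chan: &mut OutboundV2Channel<SP>, chain_hash: ChainHash,
	fee_estimator: &LowerBoundedFeeEstimator<F>,
) -> Option<msgs::OpenChannelV2>
where SP::Target: SignerProvider, F::Target: FeeEstimator
{
	match chan.maybe_handle_error_without_close(chain_hash, fee_estimator) {
		Ok(retry) => Some(retry), // re-send open_channel2 with downgraded features
		Err(_) => None,           // nothing left to try; fail the channel
	}
}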
+ pub(crate) fn maybe_handle_error_without_close( + &mut self, chain_hash: ChainHash, fee_estimator: &LowerBoundedFeeEstimator + ) -> Result + where + F::Target: FeeEstimator + { + self.context.maybe_downgrade_channel_features(fee_estimator)?; + Ok(self.get_open_channel_v2(chain_hash)) + } + + pub fn get_open_channel_v2(&self, chain_hash: ChainHash) -> msgs::OpenChannelV2 { + if self.context.have_received_message() { + debug_assert!(false, "Cannot generate an open_channel2 after we've moved forward"); + } + + if self.context.holder_commitment_point.transaction_number() != INITIAL_COMMITMENT_NUMBER { + debug_assert!(false, "Tried to send an open_channel2 for a channel that has already advanced"); + } + + let first_per_commitment_point = self.context.holder_signer.as_ref() + .get_per_commitment_point(self.context.holder_commitment_point.transaction_number(), + &self.context.secp_ctx) + .expect("TODO: async signing is not yet supported for commitment points in v2 channel establishment"); + let second_per_commitment_point = self.context.holder_signer.as_ref() + .get_per_commitment_point(self.context.holder_commitment_point.transaction_number() - 1, + &self.context.secp_ctx) + .expect("TODO: async signing is not yet supported for commitment points in v2 channel establishment"); + let keys = self.context.get_holder_pubkeys(); + + msgs::OpenChannelV2 { + common_fields: msgs::CommonOpenChannelFields { + chain_hash, + temporary_channel_id: self.context.temporary_channel_id.unwrap(), + funding_satoshis: self.context.channel_value_satoshis, + dust_limit_satoshis: self.context.holder_dust_limit_satoshis, + max_htlc_value_in_flight_msat: self.context.holder_max_htlc_value_in_flight_msat, + htlc_minimum_msat: self.context.holder_htlc_minimum_msat, + commitment_feerate_sat_per_1000_weight: self.context.feerate_per_kw, + to_self_delay: self.context.get_holder_selected_contest_delay(), + max_accepted_htlcs: self.context.holder_max_accepted_htlcs, + funding_pubkey: keys.funding_pubkey, + revocation_basepoint: keys.revocation_basepoint.to_public_key(), + payment_basepoint: keys.payment_point, + delayed_payment_basepoint: keys.delayed_payment_basepoint.to_public_key(), + htlc_basepoint: keys.htlc_basepoint.to_public_key(), + first_per_commitment_point, + channel_flags: if self.context.config.announced_channel {1} else {0}, + shutdown_scriptpubkey: Some(match &self.context.shutdown_scriptpubkey { + Some(script) => script.clone().into_inner(), + None => Builder::new().into_script(), + }), + channel_type: Some(self.context.channel_type.clone()), + }, + funding_feerate_sat_per_1000_weight: self.context.feerate_per_kw, + second_per_commitment_point, + locktime: self.dual_funding_context.funding_tx_locktime, + require_confirmed_inputs: None, + } + } +} + +// A not-yet-funded inbound (from counterparty) channel using V2 channel establishment. +#[cfg(any(dual_funding, splicing))] +pub(super) struct InboundV2Channel where SP::Target: SignerProvider { + pub context: ChannelContext, + pub unfunded_context: UnfundedChannelContext, + pub dual_funding_context: DualFundingChannelContext, +} + +#[cfg(any(dual_funding, splicing))] +impl InboundV2Channel where SP::Target: SignerProvider { + /// Creates a new dual-funded channel from a remote side's request for one. + /// Assumes chain_hash has already been checked and corresponds with what we expect! 
+ pub fn new( + fee_estimator: &LowerBoundedFeeEstimator, entropy_source: &ES, signer_provider: &SP, + counterparty_node_id: PublicKey, our_supported_features: &ChannelTypeFeatures, + their_features: &InitFeatures, msg: &msgs::OpenChannelV2, funding_satoshis: u64, user_id: u128, + config: &UserConfig, current_chain_height: u32, logger: &L, + ) -> Result, ChannelError> + where ES::Target: EntropySource, + F::Target: FeeEstimator, + L::Target: Logger, + { + let channel_value_satoshis = funding_satoshis.saturating_add(msg.common_fields.funding_satoshis); + let counterparty_selected_channel_reserve_satoshis = get_v2_channel_reserve_satoshis( + channel_value_satoshis, msg.common_fields.dust_limit_satoshis); + let holder_selected_channel_reserve_satoshis = get_v2_channel_reserve_satoshis( + channel_value_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS); + + // First check the channel type is known, failing before we do anything else if we don't + // support this channel type. + if msg.common_fields.channel_type.is_none() { + return Err(ChannelError::close(format!("Rejecting V2 channel {} missing channel_type", + msg.common_fields.temporary_channel_id))) + } + let channel_type = channel_type_from_open_channel(&msg.common_fields, their_features, our_supported_features)?; + + let counterparty_pubkeys = ChannelPublicKeys { + funding_pubkey: msg.common_fields.funding_pubkey, + revocation_basepoint: RevocationBasepoint(msg.common_fields.revocation_basepoint), + payment_point: msg.common_fields.payment_basepoint, + delayed_payment_basepoint: DelayedPaymentBasepoint(msg.common_fields.delayed_payment_basepoint), + htlc_basepoint: HtlcBasepoint(msg.common_fields.htlc_basepoint) + }; + + let mut context = ChannelContext::new_for_inbound_channel( + fee_estimator, + entropy_source, + signer_provider, + counterparty_node_id, + their_features, + user_id, + config, + current_chain_height, + logger, + false, + + funding_satoshis, + + counterparty_pubkeys, + channel_type, + holder_selected_channel_reserve_satoshis, + counterparty_selected_channel_reserve_satoshis, + 0 /* push_msat not used in dual-funding */, + msg.common_fields.clone(), + )?; + let channel_id = ChannelId::v2_from_revocation_basepoints( + &context.get_holder_pubkeys().revocation_basepoint, + &context.get_counterparty_pubkeys().revocation_basepoint); + context.channel_id = channel_id; + + let chan = Self { + context, + unfunded_context: UnfundedChannelContext { unfunded_channel_age_ticks: 0 }, + dual_funding_context: DualFundingChannelContext { + our_funding_satoshis: funding_satoshis, + their_funding_satoshis: msg.common_fields.funding_satoshis, + funding_tx_locktime: msg.locktime, + funding_feerate_sat_per_1000_weight: msg.funding_feerate_sat_per_1000_weight, + } + }; + + Ok(chan) + } + + /// Marks an inbound channel as accepted and generates a [`msgs::AcceptChannelV2`] message which + /// should be sent back to the counterparty node. 
+ /// + /// [`msgs::AcceptChannelV2`]: crate::ln::msgs::AcceptChannelV2 + pub fn accept_inbound_dual_funded_channel(&mut self) -> msgs::AcceptChannelV2 { + if self.context.is_outbound() { + debug_assert!(false, "Tried to send accept_channel for an outbound channel?"); + } + if !matches!( + self.context.channel_state, ChannelState::NegotiatingFunding(flags) + if flags == (NegotiatingFundingFlags::OUR_INIT_SENT | NegotiatingFundingFlags::THEIR_INIT_SENT) + ) { + debug_assert!(false, "Tried to send accept_channel2 after channel had moved forward"); + } + if self.context.holder_commitment_point.transaction_number() != INITIAL_COMMITMENT_NUMBER { + debug_assert!(false, "Tried to send an accept_channel2 for a channel that has already advanced"); + } + + self.generate_accept_channel_v2_message() + } + + /// This function is used to explicitly generate a [`msgs::AcceptChannel`] message for an + /// inbound channel. If the intention is to accept an inbound channel, use + /// [`InboundV1Channel::accept_inbound_channel`] instead. + /// + /// [`msgs::AcceptChannelV2`]: crate::ln::msgs::AcceptChannelV2 + fn generate_accept_channel_v2_message(&self) -> msgs::AcceptChannelV2 { + let first_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point( + self.context.holder_commitment_point.transaction_number(), &self.context.secp_ctx) + .expect("TODO: async signing is not yet supported for commitment points in v2 channel establishment"); + let second_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point( + self.context.holder_commitment_point.transaction_number() - 1, &self.context.secp_ctx) + .expect("TODO: async signing is not yet supported for commitment points in v2 channel establishment"); + let keys = self.context.get_holder_pubkeys(); + + msgs::AcceptChannelV2 { + common_fields: msgs::CommonAcceptChannelFields { + temporary_channel_id: self.context.temporary_channel_id.unwrap(), + dust_limit_satoshis: self.context.holder_dust_limit_satoshis, + max_htlc_value_in_flight_msat: self.context.holder_max_htlc_value_in_flight_msat, + htlc_minimum_msat: self.context.holder_htlc_minimum_msat, + minimum_depth: self.context.minimum_depth.unwrap(), + to_self_delay: self.context.get_holder_selected_contest_delay(), + max_accepted_htlcs: self.context.holder_max_accepted_htlcs, + funding_pubkey: keys.funding_pubkey, + revocation_basepoint: keys.revocation_basepoint.to_public_key(), + payment_basepoint: keys.payment_point, + delayed_payment_basepoint: keys.delayed_payment_basepoint.to_public_key(), + htlc_basepoint: keys.htlc_basepoint.to_public_key(), + first_per_commitment_point, + shutdown_scriptpubkey: Some(match &self.context.shutdown_scriptpubkey { + Some(script) => script.clone().into_inner(), + None => Builder::new().into_script(), + }), + channel_type: Some(self.context.channel_type.clone()), + }, + funding_satoshis: self.dual_funding_context.our_funding_satoshis, + second_per_commitment_point, + require_confirmed_inputs: None, + } + } + + /// Enables the possibility for tests to extract a [`msgs::AcceptChannelV2`] message for an + /// inbound channel without accepting it. 
+ /// + /// [`msgs::AcceptChannelV2`]: crate::ln::msgs::AcceptChannelV2 + #[cfg(test)] + pub fn get_accept_channel_v2_message(&self) -> msgs::AcceptChannelV2 { + self.generate_accept_channel_v2_message() + } +} + +// Unfunded channel utilities + +fn get_initial_channel_type(config: &UserConfig, their_features: &InitFeatures) -> ChannelTypeFeatures { + // The default channel type (ie the first one we try) depends on whether the channel is + // public - if it is, we just go with `only_static_remotekey` as it's the only option + // available. If it's private, we first try `scid_privacy` as it provides better privacy + // with no other changes, and fall back to `only_static_remotekey`. + let mut ret = ChannelTypeFeatures::only_static_remote_key(); + if !config.channel_handshake_config.announced_channel && + config.channel_handshake_config.negotiate_scid_privacy && + their_features.supports_scid_privacy() { + ret.set_scid_privacy_required(); + } + + // Optionally, if the user would like to negotiate the `anchors_zero_fee_htlc_tx` option, we + // set it now. If they don't understand it, we'll fall back to our default of + // `only_static_remotekey`. + if config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx && + their_features.supports_anchors_zero_fee_htlc_tx() { + ret.set_anchors_zero_fee_htlc_tx_required(); + } + + ret +} + +const SERIALIZATION_VERSION: u8 = 4; const MIN_SERIALIZATION_VERSION: u8 = 3; -impl_writeable_tlv_based_enum!(InboundHTLCRemovalReason,; +impl_writeable_tlv_based_enum_legacy!(InboundHTLCRemovalReason,; (0, FailRelay), (1, FailMalformed), (2, Fulfill), @@ -6977,7 +8564,18 @@ impl Writeable for Channel where SP::Target: SignerProvider { // Note that we write out as if remove_uncommitted_htlcs_and_mark_paused had just been // called. - write_ver_prefix!(writer, MIN_SERIALIZATION_VERSION, MIN_SERIALIZATION_VERSION); + let version_to_write = if self.context.pending_inbound_htlcs.iter().any(|htlc| match htlc.state { + InboundHTLCState::AwaitingRemoteRevokeToAnnounce(ref htlc_resolution)| + InboundHTLCState::AwaitingAnnouncedRemoteRevoke(ref htlc_resolution) => { + matches!(htlc_resolution, InboundHTLCResolution::Pending { .. }) + }, + _ => false, + }) { + SERIALIZATION_VERSION + } else { + MIN_SERIALIZATION_VERSION + }; + write_ver_prefix!(writer, version_to_write, MIN_SERIALIZATION_VERSION); // `user_id` used to be a single u64 value. In order to remain backwards compatible with // versions prior to 0.0.113, the u128 is serialized as two separate u64 values. 
We write @@ -6991,7 +8589,15 @@ impl Writeable for Channel where SP::Target: SignerProvider { writer.write_all(&[0; 8])?; self.context.channel_id.write(writer)?; - (self.context.channel_state | ChannelState::PeerDisconnected as u32).write(writer)?; + { + let mut channel_state = self.context.channel_state; + if matches!(channel_state, ChannelState::AwaitingChannelReady(_)|ChannelState::ChannelReady(_)) { + channel_state.set_peer_disconnected(); + } else { + debug_assert!(false, "Pre-funded/shutdown channels should not be written"); + } + channel_state.to_u32().write(writer)?; + } self.context.channel_value_satoshis.write(writer)?; self.context.latest_monitor_update_id.write(writer)?; @@ -7004,7 +8610,7 @@ impl Writeable for Channel where SP::Target: SignerProvider { } self.context.destination_script.write(writer)?; - self.context.cur_holder_commitment_transaction_number.write(writer)?; + self.context.holder_commitment_point.transaction_number().write(writer)?; self.context.cur_counterparty_commitment_transaction_number.write(writer)?; self.context.value_to_self_msat.write(writer)?; @@ -7025,13 +8631,29 @@ impl Writeable for Channel where SP::Target: SignerProvider { htlc.payment_hash.write(writer)?; match &htlc.state { &InboundHTLCState::RemoteAnnounced(_) => unreachable!(), - &InboundHTLCState::AwaitingRemoteRevokeToAnnounce(ref htlc_state) => { + &InboundHTLCState::AwaitingRemoteRevokeToAnnounce(ref htlc_resolution) => { 1u8.write(writer)?; - htlc_state.write(writer)?; + if version_to_write <= 3 { + if let InboundHTLCResolution::Resolved { pending_htlc_status } = htlc_resolution { + pending_htlc_status.write(writer)?; + } else { + panic!(); + } + } else { + htlc_resolution.write(writer)?; + } }, - &InboundHTLCState::AwaitingAnnouncedRemoteRevoke(ref htlc_state) => { + &InboundHTLCState::AwaitingAnnouncedRemoteRevoke(ref htlc_resolution) => { 2u8.write(writer)?; - htlc_state.write(writer)?; + if version_to_write <= 3 { + if let InboundHTLCResolution::Resolved { pending_htlc_status } = htlc_resolution { + pending_htlc_status.write(writer)?; + } else { + panic!(); + } + } else { + htlc_resolution.write(writer)?; + } }, &InboundHTLCState::Committed => { 3u8.write(writer)?; @@ -7045,9 +8667,10 @@ impl Writeable for Channel where SP::Target: SignerProvider { let mut preimages: Vec<&Option> = vec![]; let mut pending_outbound_skimmed_fees: Vec> = Vec::new(); + let mut pending_outbound_blinding_points: Vec> = Vec::new(); (self.context.pending_outbound_htlcs.len() as u64).write(writer)?; - for (idx, htlc) in self.context.pending_outbound_htlcs.iter().enumerate() { + for htlc in self.context.pending_outbound_htlcs.iter() { htlc.htlc_id.write(writer)?; htlc.amount_msat.write(writer)?; htlc.cltv_expiry.write(writer)?; @@ -7083,23 +8706,20 @@ impl Writeable for Channel where SP::Target: SignerProvider { reason.write(writer)?; } } - if let Some(skimmed_fee) = htlc.skimmed_fee_msat { - if pending_outbound_skimmed_fees.is_empty() { - for _ in 0..idx { pending_outbound_skimmed_fees.push(None); } - } - pending_outbound_skimmed_fees.push(Some(skimmed_fee)); - } else if !pending_outbound_skimmed_fees.is_empty() { - pending_outbound_skimmed_fees.push(None); - } + pending_outbound_skimmed_fees.push(htlc.skimmed_fee_msat); + pending_outbound_blinding_points.push(htlc.blinding_point); } let mut holding_cell_skimmed_fees: Vec> = Vec::new(); + let mut holding_cell_blinding_points: Vec> = Vec::new(); + // Vec of (htlc_id, failure_code, sha256_of_onion) + let mut malformed_htlcs: Vec<(u64, u16, [u8; 32])> = 
 		(self.context.holding_cell_htlc_updates.len() as u64).write(writer)?;
-		for (idx, update) in self.context.holding_cell_htlc_updates.iter().enumerate() {
+		for update in self.context.holding_cell_htlc_updates.iter() {
 			match update {
 				&HTLCUpdateAwaitingACK::AddHTLC {
 					ref amount_msat, ref cltv_expiry, ref payment_hash, ref source, ref onion_routing_packet,
-					skimmed_fee_msat,
+					blinding_point, skimmed_fee_msat,
 				} => {
 					0u8.write(writer)?;
 					amount_msat.write(writer)?;
@@ -7108,12 +8728,8 @@ impl<SP: Deref> Writeable for Channel<SP> where SP::Target: SignerProvider {
 					source.write(writer)?;
 					onion_routing_packet.write(writer)?;

-					if let Some(skimmed_fee) = skimmed_fee_msat {
-						if holding_cell_skimmed_fees.is_empty() {
-							for _ in 0..idx { holding_cell_skimmed_fees.push(None); }
-						}
-						holding_cell_skimmed_fees.push(Some(skimmed_fee));
-					} else if !holding_cell_skimmed_fees.is_empty() { holding_cell_skimmed_fees.push(None); }
+					holding_cell_skimmed_fees.push(skimmed_fee_msat);
+					holding_cell_blinding_points.push(blinding_point);
 				},
 				&HTLCUpdateAwaitingACK::ClaimHTLC { ref payment_preimage, ref htlc_id } => {
 					1u8.write(writer)?;
@@ -7125,6 +8741,18 @@ impl<SP: Deref> Writeable for Channel<SP> where SP::Target: SignerProvider {
 					htlc_id.write(writer)?;
 					err_packet.write(writer)?;
 				}
+				&HTLCUpdateAwaitingACK::FailMalformedHTLC {
+					htlc_id, failure_code, sha256_of_onion
+				} => {
+					// We don't want to break downgrading by adding a new variant, so write a dummy
+					// `::FailHTLC` variant and write the real malformed error as an optional TLV.
+					malformed_htlcs.push((htlc_id, failure_code, sha256_of_onion));
+
+					let dummy_err_packet = msgs::OnionErrorPacket { data: Vec::new() };
+					2u8.write(writer)?;
+					htlc_id.write(writer)?;
+					dummy_err_packet.write(writer)?;
+				}
 			}
 		}
@@ -7251,6 +8879,15 @@ impl<SP: Deref> Writeable for Channel<SP> where SP::Target: SignerProvider {
 		let holder_max_accepted_htlcs = if self.context.holder_max_accepted_htlcs == DEFAULT_MAX_HTLCS { None } else { Some(self.context.holder_max_accepted_htlcs) };

+		let mut monitor_pending_update_adds = None;
+		if !self.context.monitor_pending_update_adds.is_empty() {
+			monitor_pending_update_adds = Some(&self.context.monitor_pending_update_adds);
+		}
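The `FailMalformedHTLC` arm above illustrates a downgrade-safe encoding: old readers still see a well-formed legacy `FailHTLC` record (with an empty error packet), while the real malformed data travels in an optional trailing field they ignore. A simplified sketch of the write side (the byte layout, types, and failure code below are illustrative, not LDK's wire format); the diff continues below with the trailing TLV fields being written:

// Stand-in for the malformed-HTLC data carried in the trailing optional field.
struct MalformedHtlc { htlc_id: u64, failure_code: u16, sha256_of_onion: [u8; 32] }

// Write a legacy-shaped placeholder inline and park the real payload in a side
// list that is later serialized as an optional trailing field.
fn write_fail_malformed(inline: &mut Vec<u8>, trailing: &mut Vec<MalformedHtlc>, m: MalformedHtlc) {
    inline.push(2u8);                                   // legacy FailHTLC tag
    inline.extend_from_slice(&m.htlc_id.to_be_bytes()); // htlc_id, unchanged
    inline.extend_from_slice(&0u16.to_be_bytes());      // empty dummy error packet
    trailing.push(m);                                   // real data, for new readers only
}

fn main() {
    let (mut inline, mut trailing) = (Vec::new(), Vec::new());
    let m = MalformedHtlc { htlc_id: 7, failure_code: 0x4000 | 22, sha256_of_onion: [0; 32] };
    write_fail_malformed(&mut inline, &mut trailing, m);
    assert_eq!(inline[0], 2u8);
    assert_eq!(trailing[0].failure_code, 0x4000 | 22);
    assert_eq!(trailing[0].sha256_of_onion, [0u8; 32]);
}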
+		// `current_point` will become optional when async signing is implemented.
+		let cur_holder_commitment_point = Some(self.context.holder_commitment_point.current_point());
+		let next_holder_commitment_point = self.context.holder_commitment_point.next_point();
+
 		write_tlv_fields!(writer, {
 			(0, self.context.announcement_sigs, option),
 			// minimum_depth and counterparty_selected_channel_reserve_satoshis used to have a
@@ -7268,6 +8905,7 @@ impl<SP: Deref> Writeable for Channel<SP> where SP::Target: SignerProvider {
 			(7, self.context.shutdown_scriptpubkey, option),
 			(8, self.context.blocked_monitor_updates, optional_vec),
 			(9, self.context.target_closing_feerate_sats_per_kw, option),
+			(10, monitor_pending_update_adds, option), // Added in 0.0.122
 			(11, self.context.monitor_pending_finalized_fulfills, required_vec),
 			(13, self.context.channel_creation_height, required),
 			(15, preimages, required_vec),
@@ -7283,6 +8921,12 @@ impl<SP: Deref> Writeable for Channel<SP> where SP::Target: SignerProvider {
 			(35, pending_outbound_skimmed_fees, optional_vec),
 			(37, holding_cell_skimmed_fees, optional_vec),
 			(38, self.context.is_batch_funding, option),
+			(39, pending_outbound_blinding_points, optional_vec),
+			(41, holding_cell_blinding_points, optional_vec),
+			(43, malformed_htlcs, optional_vec), // Added in 0.0.119
+			(45, cur_holder_commitment_point, option),
+			(47, next_holder_commitment_point, option),
+			(49, self.context.local_initiated_shutdown, option), // Added in 0.0.122
 		});

 		Ok(())
@@ -7317,7 +8961,7 @@ impl<'a, 'b, 'c, ES: Deref, SP: Deref> ReadableArgs<(&'a ES, &'b SP, u32, &'c Ch
 		}

 		let channel_id = Readable::read(reader)?;
-		let channel_state = Readable::read(reader)?;
+		let channel_state = ChannelState::from_u32(Readable::read(reader)?).map_err(|_| DecodeError::InvalidValue)?;
 		let channel_value_satoshis = Readable::read(reader)?;

 		let latest_monitor_update_id = Readable::read(reader)?;
@@ -7358,8 +9002,22 @@ impl<'a, 'b, 'c, ES: Deref, SP: Deref> ReadableArgs<(&'a ES, &'b SP, u32, &'c Ch
 				cltv_expiry: Readable::read(reader)?,
 				payment_hash: Readable::read(reader)?,
 				state: match <u8 as Readable>::read(reader)? {
-					1 => InboundHTLCState::AwaitingRemoteRevokeToAnnounce(Readable::read(reader)?),
-					2 => InboundHTLCState::AwaitingAnnouncedRemoteRevoke(Readable::read(reader)?),
+					1 => {
+						let resolution = if ver <= 3 {
+							InboundHTLCResolution::Resolved { pending_htlc_status: Readable::read(reader)? }
+						} else {
+							Readable::read(reader)?
+						};
+						InboundHTLCState::AwaitingRemoteRevokeToAnnounce(resolution)
+					},
+					2 => {
+						let resolution = if ver <= 3 {
+							InboundHTLCResolution::Resolved { pending_htlc_status: Readable::read(reader)? }
+						} else {
+							Readable::read(reader)?
+						};
+						InboundHTLCState::AwaitingAnnouncedRemoteRevoke(resolution)
+					},
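On the read side, the stream version selects the decoder: version-3 streams can only ever contain the legacy resolved form, while version-4 streams carry a self-describing variant tag. A toy sketch of that dispatch (the byte format here is invented for illustration); the surrounding `match` in the diff resumes below with the `Committed` and `LocalRemoved` arms:

#[derive(Debug, PartialEq)]
enum Resolution { Resolved(u8), Pending(u8) }

// Toy decoder: legacy streams (ver <= 3) only ever encoded the resolved form;
// newer streams prefix the payload with an explicit variant tag.
fn read_resolution(ver: u8, bytes: &[u8]) -> Option<Resolution> {
    if ver <= 3 {
        Some(Resolution::Resolved(*bytes.first()?))
    } else {
        match bytes {
            [0, payload] => Some(Resolution::Resolved(*payload)),
            [2, payload] => Some(Resolution::Pending(*payload)),
            _ => None, // unknown tag: reject rather than guess
        }
    }
}

fn main() {
    assert_eq!(read_resolution(3, &[7]), Some(Resolution::Resolved(7)));
    assert_eq!(read_resolution(4, &[2, 7]), Some(Resolution::Pending(7)));
    assert_eq!(read_resolution(4, &[9, 7]), None);
}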
 					3 => InboundHTLCState::Committed,
 					4 => InboundHTLCState::LocalRemoved(Readable::read(reader)?),
 					_ => return Err(DecodeError::InvalidValue),
@@ -7394,6 +9052,7 @@ impl<'a, 'b, 'c, ES: Deref, SP: Deref> ReadableArgs<(&'a ES, &'b SP, u32, &'c Ch
 					_ => return Err(DecodeError::InvalidValue),
 				},
 				skimmed_fee_msat: None,
+				blinding_point: None,
 			});
 		}

@@ -7408,6 +9067,7 @@ impl<'a, 'b, 'c, ES: Deref, SP: Deref> ReadableArgs<(&'a ES, &'b SP, u32, &'c Ch
 					source: Readable::read(reader)?,
 					onion_routing_packet: Readable::read(reader)?,
 					skimmed_fee_msat: None,
+					blinding_point: None,
 				},
 				1 => HTLCUpdateAwaitingACK::ClaimHTLC {
 					payment_preimage: Readable::read(reader)?,
@@ -7518,7 +9178,7 @@ impl<'a, 'b, 'c, ES: Deref, SP: Deref> ReadableArgs<(&'a ES, &'b SP, u32, &'c Ch
 		let channel_update_status = Readable::read(reader)?;

 		#[cfg(any(test, fuzzing))]
-		let mut historical_inbound_htlc_fulfills = HashSet::new();
+		let mut historical_inbound_htlc_fulfills = new_hash_set();
 		#[cfg(any(test, fuzzing))]
 		{
 			let htlc_fulfills_len: u64 = Readable::read(reader)?;
@@ -7568,6 +9228,17 @@ impl<'a, 'b, 'c, ES: Deref, SP: Deref> ReadableArgs<(&'a ES, &'b SP, u32, &'c Ch

 		let mut is_batch_funding: Option<()> = None;

+		let mut local_initiated_shutdown: Option<()> = None;
+
+		let mut pending_outbound_blinding_points_opt: Option<Vec<Option<PublicKey>>> = None;
+		let mut holding_cell_blinding_points_opt: Option<Vec<Option<PublicKey>>> = None;
+
+		let mut malformed_htlcs: Option<Vec<(u64, u16, [u8; 32])>> = None;
+		let mut monitor_pending_update_adds: Option<Vec<msgs::UpdateAddHTLC>> = None;
+
+		let mut cur_holder_commitment_point_opt: Option<PublicKey> = None;
+		let mut next_holder_commitment_point_opt: Option<PublicKey> = None;
+
 		read_tlv_fields!(reader, {
 			(0, announcement_sigs, option),
 			(1, minimum_depth, option),
@@ -7579,6 +9250,7 @@ impl<'a, 'b, 'c, ES: Deref, SP: Deref> ReadableArgs<(&'a ES, &'b SP, u32, &'c Ch
 			(7, shutdown_scriptpubkey, option),
 			(8, blocked_monitor_updates, optional_vec),
 			(9, target_closing_feerate_sats_per_kw, option),
+			(10, monitor_pending_update_adds, option), // Added in 0.0.122
 			(11, monitor_pending_finalized_fulfills, optional_vec),
 			(13, channel_creation_height, option),
 			(15, preimages_opt, optional_vec),
@@ -7594,14 +9266,19 @@ impl<'a, 'b, 'c, ES: Deref, SP: Deref> ReadableArgs<(&'a ES, &'b SP, u32, &'c Ch
 			(35, pending_outbound_skimmed_fees_opt, optional_vec),
 			(37, holding_cell_skimmed_fees_opt, optional_vec),
 			(38, is_batch_funding, option),
+			(39, pending_outbound_blinding_points_opt, optional_vec),
+			(41, holding_cell_blinding_points_opt, optional_vec),
+			(43, malformed_htlcs, optional_vec), // Added in 0.0.119
+			(45, cur_holder_commitment_point_opt, option),
+			(47, next_holder_commitment_point_opt, option),
+			(49, local_initiated_shutdown, option),
 		});

 		let (channel_keys_id, holder_signer) = if let Some(channel_keys_id) = channel_keys_id {
 			let mut holder_signer = signer_provider.derive_channel_signer(channel_value_satoshis, channel_keys_id);
 			// If we've gotten to the funding stage of the channel, populate the signer with its
 			// required channel parameters.
- let non_shutdown_state = channel_state & (!MULTI_STATE_FLAGS); - if non_shutdown_state & !STATE_FLAGS >= (ChannelState::FundingCreated as u32) { + if channel_state >= ChannelState::FundingNegotiated { holder_signer.provide_channel_parameters(&channel_parameters); } (channel_keys_id, holder_signer) @@ -7670,6 +9347,60 @@ impl<'a, 'b, 'c, ES: Deref, SP: Deref> ReadableArgs<(&'a ES, &'b SP, u32, &'c Ch // We expect all skimmed fees to be consumed above if iter.next().is_some() { return Err(DecodeError::InvalidValue) } } + if let Some(blinding_pts) = pending_outbound_blinding_points_opt { + let mut iter = blinding_pts.into_iter(); + for htlc in pending_outbound_htlcs.iter_mut() { + htlc.blinding_point = iter.next().ok_or(DecodeError::InvalidValue)?; + } + // We expect all blinding points to be consumed above + if iter.next().is_some() { return Err(DecodeError::InvalidValue) } + } + if let Some(blinding_pts) = holding_cell_blinding_points_opt { + let mut iter = blinding_pts.into_iter(); + for htlc in holding_cell_htlc_updates.iter_mut() { + if let HTLCUpdateAwaitingACK::AddHTLC { ref mut blinding_point, .. } = htlc { + *blinding_point = iter.next().ok_or(DecodeError::InvalidValue)?; + } + } + // We expect all blinding points to be consumed above + if iter.next().is_some() { return Err(DecodeError::InvalidValue) } + } + + if let Some(malformed_htlcs) = malformed_htlcs { + for (malformed_htlc_id, failure_code, sha256_of_onion) in malformed_htlcs { + let htlc_idx = holding_cell_htlc_updates.iter().position(|htlc| { + if let HTLCUpdateAwaitingACK::FailHTLC { htlc_id, err_packet } = htlc { + let matches = *htlc_id == malformed_htlc_id; + if matches { debug_assert!(err_packet.data.is_empty()) } + matches + } else { false } + }).ok_or(DecodeError::InvalidValue)?; + let malformed_htlc = HTLCUpdateAwaitingACK::FailMalformedHTLC { + htlc_id: malformed_htlc_id, failure_code, sha256_of_onion + }; + let _ = core::mem::replace(&mut holding_cell_htlc_updates[htlc_idx], malformed_htlc); + } + } + + // If we're restoring this channel for the first time after an upgrade, then we require that the + // signer be available so that we can immediately populate the current commitment point. Channel + // restoration will fail if this is not possible. 
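The restoration loop above completes the round trip for the downgrade trick: each `(htlc_id, failure_code, sha256_of_onion)` tuple read from the optional field locates its placeholder `FailHTLC` entry and swaps the real variant back in, failing deserialization if no placeholder matches. A compact sketch with stand-in types (the failure code in the example is arbitrary); the diff resumes below with the holder commitment point being rebuilt:

#[derive(Debug, PartialEq)]
enum Update {
    FailHTLC { htlc_id: u64, err_packet: Vec<u8> },
    FailMalformedHTLC { htlc_id: u64, failure_code: u16, sha256_of_onion: [u8; 32] },
}

// Find the placeholder written for downgrade compatibility and replace it in
// place; a missing placeholder means the stream is corrupt.
fn restore(updates: &mut [Update], id: u64, code: u16, onion: [u8; 32]) -> Result<(), ()> {
    let idx = updates.iter().position(|u| matches!(
        u, Update::FailHTLC { htlc_id, .. } if *htlc_id == id
    )).ok_or(())?;
    updates[idx] = Update::FailMalformedHTLC { htlc_id: id, failure_code: code, sha256_of_onion: onion };
    Ok(())
}

fn main() {
    let mut updates = vec![Update::FailHTLC { htlc_id: 7, err_packet: Vec::new() }];
    restore(&mut updates, 7, 0x4000 | 22, [0; 32]).unwrap();
    assert!(matches!(updates[0], Update::FailMalformedHTLC { htlc_id: 7, .. }));
}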
+ let holder_commitment_point = match (cur_holder_commitment_point_opt, next_holder_commitment_point_opt) { + (Some(current), Some(next)) => HolderCommitmentPoint::Available { + transaction_number: cur_holder_commitment_transaction_number, current, next + }, + (Some(current), _) => HolderCommitmentPoint::PendingNext { + transaction_number: cur_holder_commitment_transaction_number, current, + }, + (_, _) => { + // TODO(async_signing): remove this expect with the Uninitialized variant + let current = holder_signer.get_per_commitment_point(cur_holder_commitment_transaction_number, &secp_ctx) + .expect("Must be able to derive the current commitment point upon channel restoration"); + HolderCommitmentPoint::PendingNext { + transaction_number: cur_holder_commitment_transaction_number, current, + } + }, + }; Ok(Channel { context: ChannelContext { @@ -7696,7 +9427,7 @@ impl<'a, 'b, 'c, ES: Deref, SP: Deref> ReadableArgs<(&'a ES, &'b SP, u32, &'c Ch shutdown_scriptpubkey, destination_script, - cur_holder_commitment_transaction_number, + holder_commitment_point, cur_counterparty_commitment_transaction_number, value_to_self_msat, @@ -7713,7 +9444,9 @@ impl<'a, 'b, 'c, ES: Deref, SP: Deref> ReadableArgs<(&'a ES, &'b SP, u32, &'c Ch monitor_pending_forwards, monitor_pending_failures, monitor_pending_finalized_fulfills: monitor_pending_finalized_fulfills.unwrap(), + monitor_pending_update_adds: monitor_pending_update_adds.unwrap_or(Vec::new()), + signer_pending_revoke_and_ack: false, signer_pending_commitment_update: false, signer_pending_funding: false, @@ -7791,8 +9524,12 @@ impl<'a, 'b, 'c, ES: Deref, SP: Deref> ReadableArgs<(&'a ES, &'b SP, u32, &'c Ch channel_type: channel_type.unwrap(), channel_keys_id, + local_initiated_shutdown, + blocked_monitor_updates: blocked_monitor_updates.unwrap(), - } + }, + #[cfg(any(dual_funding, splicing))] + dual_funding_channel_context: None, }) } } @@ -7800,18 +9537,21 @@ impl<'a, 'b, 'c, ES: Deref, SP: Deref> ReadableArgs<(&'a ES, &'b SP, u32, &'c Ch #[cfg(test)] mod tests { use std::cmp; + use bitcoin::amount::Amount; use bitcoin::blockdata::constants::ChainHash; use bitcoin::blockdata::script::{ScriptBuf, Builder}; - use bitcoin::blockdata::transaction::{Transaction, TxOut}; + use bitcoin::blockdata::transaction::{Transaction, TxOut, Version}; use bitcoin::blockdata::opcodes; - use bitcoin::network::constants::Network; - use crate::ln::PaymentHash; + use bitcoin::network::Network; + use crate::ln::onion_utils::INVALID_ONION_BLINDING; + use crate::ln::types::{PaymentHash, PaymentPreimage}; use crate::ln::channel_keys::{RevocationKey, RevocationBasepoint}; -use crate::ln::channelmanager::{self, HTLCSource, PaymentId}; + use crate::ln::channelmanager::{self, HTLCSource, PaymentId}; use crate::ln::channel::InitFeatures; - use crate::ln::channel::{ChannelState, InboundHTLCOutput, OutboundV1Channel, InboundV1Channel, OutboundHTLCOutput, InboundHTLCState, OutboundHTLCState, HTLCCandidate, HTLCInitiator, commit_tx_fee_msat}; + use crate::ln::channel::{AwaitingChannelReadyFlags, Channel, ChannelState, InboundHTLCOutput, OutboundV1Channel, InboundV1Channel, OutboundHTLCOutput, InboundHTLCState, OutboundHTLCState, HTLCCandidate, HTLCInitiator, HTLCUpdateAwaitingACK, commit_tx_fee_msat}; use crate::ln::channel::{MAX_FUNDING_SATOSHIS_NO_WUMBO, TOTAL_BITCOIN_SUPPLY_SATOSHIS, MIN_THEIR_CHAN_RESERVE_SATOSHIS}; - use crate::ln::features::ChannelTypeFeatures; + use crate::ln::features::{ChannelFeatures, ChannelTypeFeatures, NodeFeatures}; + use crate::ln::msgs; use 
crate::ln::msgs::{ChannelUpdate, DecodeError, UnsignedChannelUpdate, MAX_VALUE_MSAT};
 	use crate::ln::script::ShutdownScript;
 	use crate::ln::chan_utils::{self, htlc_success_tx_weight, htlc_timeout_tx_weight};
@@ -7819,9 +9559,10 @@ use crate::ln::channelmanager::{self, HTLCSource, PaymentId};
 	use crate::chain::chaininterface::{FeeEstimator, LowerBoundedFeeEstimator, ConfirmationTarget};
 	use crate::sign::{ChannelSigner, InMemorySigner, EntropySource, SignerProvider};
 	use crate::chain::transaction::OutPoint;
-	use crate::routing::router::Path;
+	use crate::routing::router::{Path, RouteHop};
 	use crate::util::config::UserConfig;
 	use crate::util::errors::APIError;
+	use crate::util::ser::{ReadableArgs, Writeable};
 	use crate::util::test_utils;
 	use crate::util::test_utils::{OnGetShutdownScriptpubkey, TestKeysInterface};
 	use bitcoin::secp256k1::{Secp256k1, ecdsa::Signature};
@@ -7830,11 +9571,22 @@ use crate::ln::channelmanager::{self, HTLCSource, PaymentId};
 	use bitcoin::hashes::sha256::Hash as Sha256;
 	use bitcoin::hashes::Hash;
 	use bitcoin::hashes::hex::FromHex;
-	use bitcoin::hash_types::WPubkeyHash;
 	use bitcoin::blockdata::locktime::absolute::LockTime;
-	use bitcoin::address::{WitnessProgram, WitnessVersion};
+	use bitcoin::{WitnessProgram, WitnessVersion, WPubkeyHash};
 	use crate::prelude::*;

+	#[test]
+	fn test_channel_state_order() {
+		use crate::ln::channel::NegotiatingFundingFlags;
+		use crate::ln::channel::AwaitingChannelReadyFlags;
+		use crate::ln::channel::ChannelReadyFlags;
+
+		assert!(ChannelState::NegotiatingFunding(NegotiatingFundingFlags::new()) < ChannelState::FundingNegotiated);
+		assert!(ChannelState::FundingNegotiated < ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::new()));
+		assert!(ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::new()) < ChannelState::ChannelReady(ChannelReadyFlags::new()));
+		assert!(ChannelState::ChannelReady(ChannelReadyFlags::new()) < ChannelState::ShutdownComplete);
+	}
+
 	struct TestFeeEstimator {
 		fee_est: u32
 	}
@@ -7860,19 +9612,21 @@ use crate::ln::channelmanager::{self, HTLCSource, PaymentId};
 	}

 	impl SignerProvider for Keys {
-		type Signer = InMemorySigner;
+		type EcdsaSigner = InMemorySigner;
+		#[cfg(taproot)]
+		type TaprootSigner = InMemorySigner;

 		fn generate_channel_keys_id(&self, _inbound: bool, _channel_value_satoshis: u64, _user_channel_id: u128) -> [u8; 32] {
 			self.signer.channel_keys_id()
 		}

-		fn derive_channel_signer(&self, _channel_value_satoshis: u64, _channel_keys_id: [u8; 32]) -> Self::Signer {
+		fn derive_channel_signer(&self, _channel_value_satoshis: u64, _channel_keys_id: [u8; 32]) -> Self::EcdsaSigner {
 			self.signer.clone()
 		}

-		fn read_chan_signer(&self, _data: &[u8]) -> Result<Self::Signer, DecodeError> { panic!(); }
+		fn read_chan_signer(&self, _data: &[u8]) -> Result<Self::EcdsaSigner, DecodeError> { panic!(); }

-		fn get_destination_script(&self) -> Result<ScriptBuf, ()> {
+		fn get_destination_script(&self, _channel_keys_id: [u8; 32]) -> Result<ScriptBuf, ()> {
 			let secp_ctx = Secp256k1::signing_only();
 			let channel_monitor_claim_key = SecretKey::from_slice(&<Vec<u8>>::from_hex("0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap()[..]).unwrap();
 			let channel_monitor_claim_key_hash = WPubkeyHash::hash(&PublicKey::from_secret_key(&secp_ctx, &channel_monitor_claim_key).serialize());
@@ -7904,11 +9658,12 @@ use crate::ln::channelmanager::{self, HTLCSource, PaymentId};
 		keys_provider.expect(OnGetShutdownScriptpubkey {
 			returns: non_v0_segwit_shutdown_script.clone(),
 		});
+		let logger = test_utils::TestLogger::new();

 		let secp_ctx = Secp256k1::new();
 		let node_id =
PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap()); let config = UserConfig::default(); - match OutboundV1Channel::<&TestKeysInterface>::new(&LowerBoundedFeeEstimator::new(&TestFeeEstimator { fee_est: 253 }), &&keys_provider, &&keys_provider, node_id, &features, 10000000, 100000, 42, &config, 0, 42, None) { + match OutboundV1Channel::<&TestKeysInterface>::new(&LowerBoundedFeeEstimator::new(&TestFeeEstimator { fee_est: 253 }), &&keys_provider, &&keys_provider, node_id, &features, 10000000, 100000, 42, &config, 0, 42, None, &logger) { Err(APIError::IncompatibleShutdownScript { script }) => { assert_eq!(script.into_inner(), non_v0_segwit_shutdown_script.into_inner()); }, @@ -7928,16 +9683,17 @@ use crate::ln::channelmanager::{self, HTLCSource, PaymentId}; let seed = [42; 32]; let network = Network::Testnet; let keys_provider = test_utils::TestKeysInterface::new(&seed, network); + let logger = test_utils::TestLogger::new(); let node_a_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap()); let config = UserConfig::default(); - let node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&bounded_fee_estimator, &&keys_provider, &&keys_provider, node_a_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None).unwrap(); + let node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&bounded_fee_estimator, &&keys_provider, &&keys_provider, node_a_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None, &logger).unwrap(); // Now change the fee so we can check that the fee in the open_channel message is the // same as the old fee. fee_est.fee_est = 500; let open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network)); - assert_eq!(open_channel_msg.feerate_per_kw, original_fee); + assert_eq!(open_channel_msg.common_fields.commitment_feerate_sat_per_1000_weight, original_fee); } #[test] @@ -7958,7 +9714,7 @@ use crate::ln::channelmanager::{self, HTLCSource, PaymentId}; // Create Node A's channel pointing to Node B's pubkey let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap()); let config = UserConfig::default(); - let mut node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None).unwrap(); + let mut node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None, &logger).unwrap(); // Create Node B's channel by receiving Node A's open_channel message // Make sure A's dust limit is as we expect. @@ -7968,21 +9724,22 @@ use crate::ln::channelmanager::{self, HTLCSource, PaymentId}; // Node B --> Node A: accept channel, explicitly setting B's dust limit. 
let mut accept_channel_msg = node_b_chan.accept_inbound_channel(); - accept_channel_msg.dust_limit_satoshis = 546; + accept_channel_msg.common_fields.dust_limit_satoshis = 546; node_a_chan.accept_channel(&accept_channel_msg, &config.channel_handshake_limits, &channelmanager::provided_init_features(&config)).unwrap(); node_a_chan.context.holder_dust_limit_satoshis = 1560; // Node A --> Node B: funding created let output_script = node_a_chan.context.get_funding_redeemscript(); - let tx = Transaction { version: 1, lock_time: LockTime::ZERO, input: Vec::new(), output: vec![TxOut { - value: 10000000, script_pubkey: output_script.clone(), + let tx = Transaction { version: Version::ONE, lock_time: LockTime::ZERO, input: Vec::new(), output: vec![TxOut { + value: Amount::from_sat(10000000), script_pubkey: output_script.clone(), }]}; let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 }; - let (mut node_a_chan, funding_created_msg) = node_a_chan.get_funding_created(tx.clone(), funding_outpoint, false, &&logger).map_err(|_| ()).unwrap(); + let funding_created_msg = node_a_chan.get_funding_created(tx.clone(), funding_outpoint, false, &&logger).map_err(|_| ()).unwrap(); let (_, funding_signed_msg, _) = node_b_chan.funding_created(&funding_created_msg.unwrap(), best_block, &&keys_provider, &&logger).map_err(|_| ()).unwrap(); // Node B --> Node A: funding signed - let _ = node_a_chan.funding_signed(&funding_signed_msg.unwrap(), best_block, &&keys_provider, &&logger).unwrap(); + let res = node_a_chan.funding_signed(&funding_signed_msg.unwrap(), best_block, &&keys_provider, &&logger); + let (mut node_a_chan, _) = if let Ok(res) = res { res } else { panic!(); }; // Put some inbound and outbound HTLCs in A's channel. let htlc_amount_msat = 11_092_000; // put an amount below A's effective dust limit but above B's. 
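The `htlc_amount_msat` chosen above deliberately straddles the two nodes' dust cutoffs. Whether an HTLC is trimmed from a commitment transaction depends on the dust limit plus the fee of the would-be second-stage HTLC claim transaction; the sketch below uses the BOLT 3 non-anchor claim weights, with the surrounding policy simplified relative to LDK:

// BOLT 3 weights for the second-stage HTLC claim transactions (non-anchor).
const HTLC_TIMEOUT_TX_WEIGHT: u64 = 663; // spends an offered HTLC
const HTLC_SUCCESS_TX_WEIGHT: u64 = 703; // spends a received HTLC

// An HTLC is "dust" (omitted from the commitment tx) when its value cannot
// cover both the dust limit and the second-stage claim transaction fee.
fn is_dust(offered: bool, amount_msat: u64, dust_limit_sat: u64, feerate_per_kw: u64) -> bool {
    let claim_weight = if offered { HTLC_TIMEOUT_TX_WEIGHT } else { HTLC_SUCCESS_TX_WEIGHT };
    let claim_fee_sat = feerate_per_kw * claim_weight / 1000;
    amount_msat / 1000 < dust_limit_sat + claim_fee_sat
}

fn main() {
    // 11_092 sat at 15_000 sat/kw: dust against node A's 1_560 sat limit,
    // but economically viable against node B's 546 sat limit.
    assert!(is_dust(true, 11_092_000, 1_560, 15_000));
    assert!(!is_dust(true, 11_092_000, 546, 15_000));
}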
@@ -8007,6 +9764,7 @@ use crate::ln::channelmanager::{self, HTLCSource, PaymentId}; payment_id: PaymentId([42; 32]), }, skimmed_fee_msat: None, + blinding_point: None, }); // Make sure when Node A calculates their local commitment transaction, none of the HTLCs pass @@ -8036,10 +9794,11 @@ use crate::ln::channelmanager::{self, HTLCSource, PaymentId}; let seed = [42; 32]; let network = Network::Testnet; let keys_provider = test_utils::TestKeysInterface::new(&seed, network); + let logger = test_utils::TestLogger::new(); let node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap()); let config = UserConfig::default(); - let mut chan = OutboundV1Channel::<&TestKeysInterface>::new(&fee_est, &&keys_provider, &&keys_provider, node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None).unwrap(); + let mut chan = OutboundV1Channel::<&TestKeysInterface>::new(&fee_est, &&keys_provider, &&keys_provider, node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None, &logger).unwrap(); let commitment_tx_fee_0_htlcs = commit_tx_fee_msat(chan.context.feerate_per_kw, 0, chan.context.get_channel_type()); let commitment_tx_fee_1_htlc = commit_tx_fee_msat(chan.context.feerate_per_kw, 1, chan.context.get_channel_type()); @@ -8088,7 +9847,7 @@ use crate::ln::channelmanager::{self, HTLCSource, PaymentId}; // Create Node A's channel pointing to Node B's pubkey let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap()); let config = UserConfig::default(); - let mut node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None).unwrap(); + let mut node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None, &logger).unwrap(); // Create Node B's channel by receiving Node A's open_channel message let open_channel_msg = node_a_chan.get_open_channel(chain_hash); @@ -8101,15 +9860,16 @@ use crate::ln::channelmanager::{self, HTLCSource, PaymentId}; // Node A --> Node B: funding created let output_script = node_a_chan.context.get_funding_redeemscript(); - let tx = Transaction { version: 1, lock_time: LockTime::ZERO, input: Vec::new(), output: vec![TxOut { - value: 10000000, script_pubkey: output_script.clone(), + let tx = Transaction { version: Version::ONE, lock_time: LockTime::ZERO, input: Vec::new(), output: vec![TxOut { + value: Amount::from_sat(10000000), script_pubkey: output_script.clone(), }]}; let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 }; - let (mut node_a_chan, funding_created_msg) = node_a_chan.get_funding_created(tx.clone(), funding_outpoint, false, &&logger).map_err(|_| ()).unwrap(); + let funding_created_msg = node_a_chan.get_funding_created(tx.clone(), funding_outpoint, false, &&logger).map_err(|_| ()).unwrap(); let (mut node_b_chan, funding_signed_msg, _) = node_b_chan.funding_created(&funding_created_msg.unwrap(), best_block, &&keys_provider, &&logger).map_err(|_| ()).unwrap(); // Node B --> Node A: funding signed - let _ = node_a_chan.funding_signed(&funding_signed_msg.unwrap(), best_block, &&keys_provider, &&logger).unwrap(); + let res = node_a_chan.funding_signed(&funding_signed_msg.unwrap(), best_block, &&keys_provider, &&logger); + 
let (mut node_a_chan, _) = if let Ok(res) = res { res } else { panic!(); }; // Now disconnect the two nodes and check that the commitment point in // Node B's channel_reestablish message is sane. @@ -8151,12 +9911,12 @@ use crate::ln::channelmanager::{self, HTLCSource, PaymentId}; // Test that `OutboundV1Channel::new` creates a channel with the correct value for // `holder_max_htlc_value_in_flight_msat`, when configured with a valid percentage value, // which is set to the lower bound + 1 (2%) of the `channel_value`. - let chan_1 = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_2_percent), 10000000, 100000, 42, &config_2_percent, 0, 42, None).unwrap(); + let chan_1 = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_2_percent), 10000000, 100000, 42, &config_2_percent, 0, 42, None, &logger).unwrap(); let chan_1_value_msat = chan_1.context.channel_value_satoshis * 1000; assert_eq!(chan_1.context.holder_max_htlc_value_in_flight_msat, (chan_1_value_msat as f64 * 0.02) as u64); // Test with the upper bound - 1 of valid values (99%). - let chan_2 = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_99_percent), 10000000, 100000, 42, &config_99_percent, 0, 42, None).unwrap(); + let chan_2 = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_99_percent), 10000000, 100000, 42, &config_99_percent, 0, 42, None, &logger).unwrap(); let chan_2_value_msat = chan_2.context.channel_value_satoshis * 1000; assert_eq!(chan_2.context.holder_max_htlc_value_in_flight_msat, (chan_2_value_msat as f64 * 0.99) as u64); @@ -8176,14 +9936,14 @@ use crate::ln::channelmanager::{self, HTLCSource, PaymentId}; // Test that `OutboundV1Channel::new` uses the lower bound of the configurable percentage values (1%) // if `max_inbound_htlc_value_in_flight_percent_of_channel` is set to a value less than 1. - let chan_5 = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_0_percent), 10000000, 100000, 42, &config_0_percent, 0, 42, None).unwrap(); + let chan_5 = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_0_percent), 10000000, 100000, 42, &config_0_percent, 0, 42, None, &logger).unwrap(); let chan_5_value_msat = chan_5.context.channel_value_satoshis * 1000; assert_eq!(chan_5.context.holder_max_htlc_value_in_flight_msat, (chan_5_value_msat as f64 * 0.01) as u64); // Test that `OutboundV1Channel::new` uses the upper bound of the configurable percentage values // (100%) if `max_inbound_htlc_value_in_flight_percent_of_channel` is set to a larger value // than 100. 
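These cases pin down how the configured in-flight percentage is clamped into [1, 100] before being applied to the channel value; the >100% case continues just below. A tiny sketch of the clamping arithmetic (illustrative only, not LDK's exact code path):

// Clamp a configured percentage to [1, 100] and apply it to the channel value.
fn max_htlc_value_in_flight_msat(channel_value_sat: u64, configured_percent: u8) -> u64 {
    let pct = configured_percent.clamp(1, 100) as u64;
    channel_value_sat * 1000 * pct / 100
}

fn main() {
    let value_sat = 10_000_000;
    assert_eq!(max_htlc_value_in_flight_msat(value_sat, 2), 200_000_000);      // 2%
    assert_eq!(max_htlc_value_in_flight_msat(value_sat, 0), 100_000_000);      // floored to 1%
    assert_eq!(max_htlc_value_in_flight_msat(value_sat, 101), 10_000_000_000); // capped at 100%
}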
- let chan_6 = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_101_percent), 10000000, 100000, 42, &config_101_percent, 0, 42, None).unwrap(); + let chan_6 = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_101_percent), 10000000, 100000, 42, &config_101_percent, 0, 42, None, &logger).unwrap(); let chan_6_value_msat = chan_6.context.channel_value_satoshis * 1000; assert_eq!(chan_6.context.holder_max_htlc_value_in_flight_msat, chan_6_value_msat); @@ -8236,7 +9996,7 @@ use crate::ln::channelmanager::{self, HTLCSource, PaymentId}; let mut outbound_node_config = UserConfig::default(); outbound_node_config.channel_handshake_config.their_channel_reserve_proportional_millionths = (outbound_selected_channel_reserve_perc * 1_000_000.0) as u32; - let chan = OutboundV1Channel::<&TestKeysInterface>::new(&&fee_est, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&outbound_node_config), channel_value_satoshis, 100_000, 42, &outbound_node_config, 0, 42, None).unwrap(); + let chan = OutboundV1Channel::<&TestKeysInterface>::new(&&fee_est, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&outbound_node_config), channel_value_satoshis, 100_000, 42, &outbound_node_config, 0, 42, None, &logger).unwrap(); let expected_outbound_selected_chan_reserve = cmp::max(MIN_THEIR_CHAN_RESERVE_SATOSHIS, (chan.context.channel_value_satoshis as f64 * outbound_selected_channel_reserve_perc) as u64); assert_eq!(chan.context.holder_selected_channel_reserve_satoshis, expected_outbound_selected_chan_reserve); @@ -8273,7 +10033,7 @@ use crate::ln::channelmanager::{self, HTLCSource, PaymentId}; // Create Node A's channel pointing to Node B's pubkey let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap()); let config = UserConfig::default(); - let mut node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None).unwrap(); + let mut node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None, &logger).unwrap(); // Create Node B's channel by receiving Node A's open_channel message // Make sure A's dust limit is as we expect. @@ -8283,21 +10043,22 @@ use crate::ln::channelmanager::{self, HTLCSource, PaymentId}; // Node B --> Node A: accept channel, explicitly setting B's dust limit. 
let mut accept_channel_msg = node_b_chan.accept_inbound_channel(); - accept_channel_msg.dust_limit_satoshis = 546; + accept_channel_msg.common_fields.dust_limit_satoshis = 546; node_a_chan.accept_channel(&accept_channel_msg, &config.channel_handshake_limits, &channelmanager::provided_init_features(&config)).unwrap(); node_a_chan.context.holder_dust_limit_satoshis = 1560; // Node A --> Node B: funding created let output_script = node_a_chan.context.get_funding_redeemscript(); - let tx = Transaction { version: 1, lock_time: LockTime::ZERO, input: Vec::new(), output: vec![TxOut { - value: 10000000, script_pubkey: output_script.clone(), + let tx = Transaction { version: Version::ONE, lock_time: LockTime::ZERO, input: Vec::new(), output: vec![TxOut { + value: Amount::from_sat(10000000), script_pubkey: output_script.clone(), }]}; let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 }; - let (mut node_a_chan, funding_created_msg) = node_a_chan.get_funding_created(tx.clone(), funding_outpoint, false, &&logger).map_err(|_| ()).unwrap(); + let funding_created_msg = node_a_chan.get_funding_created(tx.clone(), funding_outpoint, false, &&logger).map_err(|_| ()).unwrap(); let (_, funding_signed_msg, _) = node_b_chan.funding_created(&funding_created_msg.unwrap(), best_block, &&keys_provider, &&logger).map_err(|_| ()).unwrap(); // Node B --> Node A: funding signed - let _ = node_a_chan.funding_signed(&funding_signed_msg.unwrap(), best_block, &&keys_provider, &&logger).unwrap(); + let res = node_a_chan.funding_signed(&funding_signed_msg.unwrap(), best_block, &&keys_provider, &&logger); + let (mut node_a_chan, _) = if let Ok(res) = res { res } else { panic!(); }; // Make sure that receiving a channel update will update the Channel as expected. let update = ChannelUpdate { @@ -8305,7 +10066,8 @@ use crate::ln::channelmanager::{self, HTLCSource, PaymentId}; chain_hash, short_channel_id: 0, timestamp: 0, - flags: 0, + message_flags: 1, // Only must_be_one + channel_flags: 0, cltv_expiry_delta: 100, htlc_minimum_msat: 5, htlc_maximum_msat: MAX_VALUE_MSAT, @@ -8332,7 +10094,131 @@ use crate::ln::channelmanager::{self, HTLCSource, PaymentId}; assert!(!node_a_chan.channel_update(&update).unwrap()); } - #[cfg(feature = "_test_vectors")] + #[test] + fn blinding_point_skimmed_fee_malformed_ser() { + // Ensure that channel blinding points, skimmed fees, and malformed HTLCs are (de)serialized + // properly. 
+ let logger = test_utils::TestLogger::new(); + let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000}); + let secp_ctx = Secp256k1::new(); + let seed = [42; 32]; + let network = Network::Testnet; + let best_block = BestBlock::from_network(network); + let keys_provider = test_utils::TestKeysInterface::new(&seed, network); + + let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap()); + let config = UserConfig::default(); + let features = channelmanager::provided_init_features(&config); + let mut outbound_chan = OutboundV1Channel::<&TestKeysInterface>::new( + &feeest, &&keys_provider, &&keys_provider, node_b_node_id, &features, 10000000, 100000, 42, &config, 0, 42, None, &logger + ).unwrap(); + let inbound_chan = InboundV1Channel::<&TestKeysInterface>::new( + &feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_channel_type_features(&config), + &features, &outbound_chan.get_open_channel(ChainHash::using_genesis_block(network)), 7, &config, 0, &&logger, false + ).unwrap(); + outbound_chan.accept_channel(&inbound_chan.get_accept_channel_message(), &config.channel_handshake_limits, &features).unwrap(); + let tx = Transaction { version: Version::ONE, lock_time: LockTime::ZERO, input: Vec::new(), output: vec![TxOut { + value: Amount::from_sat(10000000), script_pubkey: outbound_chan.context.get_funding_redeemscript(), + }]}; + let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 }; + let funding_created = outbound_chan.get_funding_created(tx.clone(), funding_outpoint, false, &&logger).map_err(|_| ()).unwrap().unwrap(); + let mut chan = match inbound_chan.funding_created(&funding_created, best_block, &&keys_provider, &&logger) { + Ok((chan, _, _)) => chan, + Err((_, e)) => panic!("{}", e), + }; + + let dummy_htlc_source = HTLCSource::OutboundRoute { + path: Path { + hops: vec![RouteHop { + pubkey: test_utils::pubkey(2), channel_features: ChannelFeatures::empty(), + node_features: NodeFeatures::empty(), short_channel_id: 0, fee_msat: 0, + cltv_expiry_delta: 0, maybe_announced_channel: false, + }], + blinded_tail: None + }, + session_priv: test_utils::privkey(42), + first_hop_htlc_msat: 0, + payment_id: PaymentId([42; 32]), + }; + let dummy_outbound_output = OutboundHTLCOutput { + htlc_id: 0, + amount_msat: 0, + payment_hash: PaymentHash([43; 32]), + cltv_expiry: 0, + state: OutboundHTLCState::Committed, + source: dummy_htlc_source.clone(), + skimmed_fee_msat: None, + blinding_point: None, + }; + let mut pending_outbound_htlcs = vec![dummy_outbound_output.clone(); 10]; + for (idx, htlc) in pending_outbound_htlcs.iter_mut().enumerate() { + if idx % 2 == 0 { + htlc.blinding_point = Some(test_utils::pubkey(42 + idx as u8)); + } + if idx % 3 == 0 { + htlc.skimmed_fee_msat = Some(1); + } + } + chan.context.pending_outbound_htlcs = pending_outbound_htlcs.clone(); + + let dummy_holding_cell_add_htlc = HTLCUpdateAwaitingACK::AddHTLC { + amount_msat: 0, + cltv_expiry: 0, + payment_hash: PaymentHash([43; 32]), + source: dummy_htlc_source.clone(), + onion_routing_packet: msgs::OnionPacket { + version: 0, + public_key: Ok(test_utils::pubkey(1)), + hop_data: [0; 20*65], + hmac: [0; 32] + }, + skimmed_fee_msat: None, + blinding_point: None, + }; + let dummy_holding_cell_claim_htlc = HTLCUpdateAwaitingACK::ClaimHTLC { + payment_preimage: PaymentPreimage([42; 32]), + htlc_id: 0, + }; + let dummy_holding_cell_failed_htlc = |htlc_id| HTLCUpdateAwaitingACK::FailHTLC { + htlc_id, err_packet: msgs::OnionErrorPacket { 
data: vec![42] }
+		};
+		let dummy_holding_cell_malformed_htlc = |htlc_id| HTLCUpdateAwaitingACK::FailMalformedHTLC {
+			htlc_id, failure_code: INVALID_ONION_BLINDING, sha256_of_onion: [0; 32],
+		};
+		let mut holding_cell_htlc_updates = Vec::with_capacity(12);
+		for i in 0..12 {
+			if i % 5 == 0 {
+				holding_cell_htlc_updates.push(dummy_holding_cell_add_htlc.clone());
+			} else if i % 5 == 1 {
+				holding_cell_htlc_updates.push(dummy_holding_cell_claim_htlc.clone());
+			} else if i % 5 == 2 {
+				let mut dummy_add = dummy_holding_cell_add_htlc.clone();
+				if let HTLCUpdateAwaitingACK::AddHTLC {
+					ref mut blinding_point, ref mut skimmed_fee_msat, ..
+				} = &mut dummy_add {
+					*blinding_point = Some(test_utils::pubkey(42 + i));
+					*skimmed_fee_msat = Some(42);
+				} else { panic!() }
+				holding_cell_htlc_updates.push(dummy_add);
+			} else if i % 5 == 3 {
+				holding_cell_htlc_updates.push(dummy_holding_cell_malformed_htlc(i as u64));
+			} else {
+				holding_cell_htlc_updates.push(dummy_holding_cell_failed_htlc(i as u64));
+			}
+		}
+		chan.context.holding_cell_htlc_updates = holding_cell_htlc_updates.clone();
+
+		// Encode and decode the channel and ensure that the HTLCs within are the same.
+		let encoded_chan = chan.encode();
+		let mut s = crate::io::Cursor::new(&encoded_chan);
+		let mut reader = crate::util::ser::FixedLengthReader::new(&mut s, encoded_chan.len() as u64);
+		let features = channelmanager::provided_channel_type_features(&config);
+		let decoded_chan = Channel::read(&mut reader, (&&keys_provider, &&keys_provider, 0, &features)).unwrap();
+		assert_eq!(decoded_chan.context.pending_outbound_htlcs, pending_outbound_htlcs);
+		assert_eq!(decoded_chan.context.holding_cell_htlc_updates, holding_cell_htlc_updates);
+	}
+
+	#[cfg(all(feature = "_test_vectors", not(feature = "grind_signatures")))]
 	#[test]
 	fn outbound_commitment_test() {
 		use bitcoin::sighash;
@@ -8341,7 +10227,7 @@ use crate::ln::channelmanager::{self, HTLCSource, PaymentId};
 		use bitcoin::hashes::hex::FromHex;
 		use bitcoin::hash_types::Txid;
 		use bitcoin::secp256k1::Message;
-		use crate::sign::{ChannelDerivationParameters, HTLCDescriptor, EcdsaChannelSigner};
+		use crate::sign::{ChannelDerivationParameters, HTLCDescriptor, ecdsa::EcdsaChannelSigner};
 		use crate::ln::PaymentPreimage;
 		use crate::ln::channel::{HTLCOutputInCommitment ,TxCreationKeys};
 		use crate::ln::channel_keys::{DelayedPaymentBasepoint, HtlcBasepoint};
@@ -8353,7 +10239,7 @@ use crate::ln::channelmanager::{self, HTLCSource, PaymentId};

 		// Test vectors from BOLT 3 Appendices C and F (anchors):
 		let feeest = TestFeeEstimator{fee_est: 15000};
-		let logger : Arc<Logger> = Arc::new(test_utils::TestLogger::new());
+		let logger : Arc<dyn Logger> = Arc::new(test_utils::TestLogger::new());

 		let secp_ctx = Secp256k1::new();
 		let mut signer = InMemorySigner::new(
@@ -8378,7 +10264,7 @@ use crate::ln::channelmanager::{self, HTLCSource, PaymentId};
 		let counterparty_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
 		let mut config = UserConfig::default();
 		config.channel_handshake_config.announced_channel = false;
-		let mut chan = OutboundV1Channel::<&Keys>::new(&LowerBoundedFeeEstimator::new(&feeest), &&keys_provider, &&keys_provider, counterparty_node_id, &channelmanager::provided_init_features(&config), 10_000_000, 0, 42, &config, 0, 42, None).unwrap(); // Nothing uses their network key in this test
+		let mut chan = OutboundV1Channel::<&Keys>::new(&LowerBoundedFeeEstimator::new(&feeest), &&keys_provider, &&keys_provider, counterparty_node_id, &channelmanager::provided_init_features(&config), 10_000_000, 0, 42, &config, 0, 42, None, &*logger).unwrap(); // Nothing uses their network key in this test
 		chan.context.holder_dust_limit_satoshis = 546;
 		chan.context.counterparty_selected_channel_reserve_satoshis = Some(0); // Filled in in accept_channel

@@ -8489,7 +10375,7 @@ use crate::ln::channelmanager::{self, HTLCSource, PaymentId};
 					&htlc, $opt_anchors, &keys.broadcaster_delayed_payment_key, &keys.revocation_key);
 				let htlc_redeemscript = chan_utils::get_htlc_redeemscript(&htlc, $opt_anchors, &keys);
 				let htlc_sighashtype = if $opt_anchors.supports_anchors_zero_fee_htlc_tx() { EcdsaSighashType::SinglePlusAnyoneCanPay } else { EcdsaSighashType::All };
-				let htlc_sighash = Message::from_slice(&sighash::SighashCache::new(&htlc_tx).segwit_signature_hash(0, &htlc_redeemscript, htlc.amount_msat / 1000, htlc_sighashtype).unwrap()[..]).unwrap();
+				let htlc_sighash = Message::from_digest(sighash::SighashCache::new(&htlc_tx).p2wsh_signature_hash(0, &htlc_redeemscript, htlc.to_bitcoin_amount(), htlc_sighashtype).unwrap().as_raw_hash().to_byte_array());
 				assert!(secp_ctx.verify_ecdsa(&htlc_sighash, &remote_signature, &keys.countersignatory_htlc_key.to_public_key()).is_ok(), "verify counterparty htlc sig");

 				let mut preimage: Option<PaymentPreimage> = None;
@@ -8581,6 +10467,7 @@ use crate::ln::channelmanager::{self, HTLCSource, PaymentId};
 					state: OutboundHTLCState::Committed,
 					source: HTLCSource::dummy(),
 					skimmed_fee_msat: None,
+					blinding_point: None,
 				};
 				out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0202020202020202020202020202020202020202020202020202020202020202").unwrap()).to_byte_array();
 				out
@@ -8594,6 +10481,7 @@ use crate::ln::channelmanager::{self, HTLCSource, PaymentId};
 					state: OutboundHTLCState::Committed,
 					source: HTLCSource::dummy(),
 					skimmed_fee_msat: None,
+					blinding_point: None,
 				};
 				out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0303030303030303030303030303030303030303030303030303030303030303").unwrap()).to_byte_array();
 				out
@@ -9005,6 +10893,7 @@ use crate::ln::channelmanager::{self, HTLCSource, PaymentId};
 					state: OutboundHTLCState::Committed,
 					source: HTLCSource::dummy(),
 					skimmed_fee_msat: None,
+					blinding_point: None,
 				};
 				out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0505050505050505050505050505050505050505050505050505050505050505").unwrap()).to_byte_array();
 				out
@@ -9018,6 +10907,7 @@ use crate::ln::channelmanager::{self, HTLCSource, PaymentId};
 					state: OutboundHTLCState::Committed,
 					source: HTLCSource::dummy(),
 					skimmed_fee_msat: None,
+					blinding_point: None,
 				};
 				out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0505050505050505050505050505050505050505050505050505050505050505").unwrap()).to_byte_array();
 				out
@@ -9084,7 +10974,7 @@ use crate::ln::channelmanager::{self, HTLCSource, PaymentId};
 		assert_eq!(chan_utils::build_commitment_secret(&seed, 1),
 		           <Vec<u8>>::from_hex("915c75942a26bb3a433a8ce2cb0427c29ec6c1775cfc78328b57f6ba7bfeaa9c").unwrap()[..]);
 	}
-
+
 	#[test]
 	fn test_key_derivation() {
 		// Test vectors from BOLT 3 Appendix E:
@@ -9121,13 +11011,13 @@ use crate::ln::channelmanager::{self, HTLCSource, PaymentId};
 		let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
 		let config = UserConfig::default();
 		let node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider,
-			node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None).unwrap();
+			node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000,
42, &config, 0, 42, None, &logger).unwrap(); let mut channel_type_features = ChannelTypeFeatures::only_static_remote_key(); channel_type_features.set_zero_conf_required(); let mut open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network)); - open_channel_msg.channel_type = Some(channel_type_features); + open_channel_msg.common_fields.channel_type = Some(channel_type_features); let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap()); let res = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_channel_type_features(&config), @@ -9156,7 +11046,7 @@ use crate::ln::channelmanager::{self, HTLCSource, PaymentId}; let channel_a = OutboundV1Channel::<&TestKeysInterface>::new( &fee_estimator, &&keys_provider, &&keys_provider, node_id_b, &channelmanager::provided_init_features(&UserConfig::default()), 10000000, 100000, 42, - &config, 0, 42, None + &config, 0, 42, None, &logger ).unwrap(); assert!(!channel_a.context.channel_type.supports_anchors_zero_fee_htlc_tx()); @@ -9167,7 +11057,7 @@ use crate::ln::channelmanager::{self, HTLCSource, PaymentId}; let channel_a = OutboundV1Channel::<&TestKeysInterface>::new( &fee_estimator, &&keys_provider, &&keys_provider, node_id_b, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, - None + None, &logger ).unwrap(); let open_channel_msg = channel_a.get_open_channel(ChainHash::using_genesis_block(network)); @@ -9205,12 +11095,12 @@ use crate::ln::channelmanager::{self, HTLCSource, PaymentId}; let channel_a = OutboundV1Channel::<&TestKeysInterface>::new( &fee_estimator, &&keys_provider, &&keys_provider, node_id_b, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, - None + None, &logger ).unwrap(); // Set `channel_type` to `None` to force the implicit feature negotiation. let mut open_channel_msg = channel_a.get_open_channel(ChainHash::using_genesis_block(network)); - open_channel_msg.channel_type = None; + open_channel_msg.common_fields.channel_type = None; // Since A supports both `static_remote_key` and `option_anchors`, but B only accepts // `static_remote_key`, it will fail the channel. @@ -9252,11 +11142,11 @@ use crate::ln::channelmanager::{self, HTLCSource, PaymentId}; let channel_a = OutboundV1Channel::<&TestKeysInterface>::new( &fee_estimator, &&keys_provider, &&keys_provider, node_id_b, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, - None + None, &logger ).unwrap(); let mut open_channel_msg = channel_a.get_open_channel(ChainHash::using_genesis_block(network)); - open_channel_msg.channel_type = Some(simple_anchors_channel_type.clone()); + open_channel_msg.common_fields.channel_type = Some(simple_anchors_channel_type.clone()); let res = InboundV1Channel::<&TestKeysInterface>::new( &fee_estimator, &&keys_provider, &&keys_provider, node_id_a, @@ -9271,7 +11161,7 @@ use crate::ln::channelmanager::{self, HTLCSource, PaymentId}; // LDK. 
let mut channel_a = OutboundV1Channel::<&TestKeysInterface>::new( &fee_estimator, &&keys_provider, &&keys_provider, node_id_b, &simple_anchors_init, - 10000000, 100000, 42, &config, 0, 42, None + 10000000, 100000, 42, &config, 0, 42, None, &logger ).unwrap(); let open_channel_msg = channel_a.get_open_channel(ChainHash::using_genesis_block(network)); @@ -9283,7 +11173,7 @@ use crate::ln::channelmanager::{self, HTLCSource, PaymentId}; ).unwrap(); let mut accept_channel_msg = channel_b.get_accept_channel_message(); - accept_channel_msg.channel_type = Some(simple_anchors_channel_type.clone()); + accept_channel_msg.common_fields.channel_type = Some(simple_anchors_channel_type.clone()); let res = channel_a.accept_channel( &accept_channel_msg, &config.channel_handshake_limits, &simple_anchors_init @@ -9321,7 +11211,8 @@ use crate::ln::channelmanager::{self, HTLCSource, PaymentId}; &config, 0, 42, - None + None, + &logger ).unwrap(); let open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network)); @@ -9351,23 +11242,20 @@ use crate::ln::channelmanager::{self, HTLCSource, PaymentId}; // Fund the channel with a batch funding transaction. let output_script = node_a_chan.context.get_funding_redeemscript(); let tx = Transaction { - version: 1, + version: Version::ONE, lock_time: LockTime::ZERO, input: Vec::new(), output: vec![ TxOut { - value: 10000000, script_pubkey: output_script.clone(), + value: Amount::from_sat(10000000), script_pubkey: output_script.clone(), }, TxOut { - value: 10000000, script_pubkey: Builder::new().into_script(), + value: Amount::from_sat(10000000), script_pubkey: Builder::new().into_script(), }, ]}; let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 }; - let (mut node_a_chan, funding_created_msg) = node_a_chan.get_funding_created( - tx.clone(), - funding_outpoint, - true, - &&logger, + let funding_created_msg = node_a_chan.get_funding_created( + tx.clone(), funding_outpoint, true, &&logger, ).map_err(|_| ()).unwrap(); let (mut node_b_chan, funding_signed_msg, _) = node_b_chan.funding_created( &funding_created_msg.unwrap(), @@ -9385,12 +11273,10 @@ use crate::ln::channelmanager::{self, HTLCSource, PaymentId}; // Receive funding_signed, but the channel will be configured to hold sending channel_ready and // broadcasting the funding transaction until the batch is ready. - let _ = node_a_chan.funding_signed( - &funding_signed_msg.unwrap(), - best_block, - &&keys_provider, - &&logger, - ).unwrap(); + let res = node_a_chan.funding_signed( + &funding_signed_msg.unwrap(), best_block, &&keys_provider, &&logger, + ); + let (mut node_a_chan, _) = if let Ok(res) = res { res } else { panic!(); }; let node_a_updates = node_a_chan.monitor_updating_restored( &&logger, &&keys_provider, @@ -9402,11 +11288,7 @@ use crate::ln::channelmanager::{self, HTLCSource, PaymentId}; // as the funding transaction depends on all channels in the batch becoming ready. assert!(node_a_updates.channel_ready.is_none()); assert!(node_a_updates.funding_broadcastable.is_none()); - assert_eq!( - node_a_chan.context.channel_state, - ChannelState::FundingSent as u32 | - ChannelState::WaitingForBatch as u32, - ); + assert_eq!(node_a_chan.context.channel_state, ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::WAITING_FOR_BATCH)); // It is possible to receive a 0conf channel_ready from the remote node. 
node_a_chan.channel_ready( @@ -9419,18 +11301,12 @@ use crate::ln::channelmanager::{self, HTLCSource, PaymentId}; ).unwrap(); assert_eq!( node_a_chan.context.channel_state, - ChannelState::FundingSent as u32 | - ChannelState::WaitingForBatch as u32 | - ChannelState::TheirChannelReady as u32, + ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::WAITING_FOR_BATCH | AwaitingChannelReadyFlags::THEIR_CHANNEL_READY) ); // Clear the ChannelState::WaitingForBatch only when called by ChannelManager. node_a_chan.set_batch_ready(); - assert_eq!( - node_a_chan.context.channel_state, - ChannelState::FundingSent as u32 | - ChannelState::TheirChannelReady as u32, - ); - assert!(node_a_chan.check_get_channel_ready(0).is_some()); + assert_eq!(node_a_chan.context.channel_state, ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::THEIR_CHANNEL_READY)); + assert!(node_a_chan.check_get_channel_ready(0, &&logger).is_some()); } }
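The assertions above lean on sub-state flags carried inside the `AwaitingChannelReady` variant, with `set_batch_ready()` clearing `WAITING_FOR_BATCH` once the whole batch can proceed. A minimal sketch of flag-carrying state (the bit layout is invented for illustration; LDK's real flag types differ):

// Illustrative bit-flag sub-state inside a single channel state variant.
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
struct AwaitingReadyFlags(u32);

impl AwaitingReadyFlags {
    const WAITING_FOR_BATCH: AwaitingReadyFlags = AwaitingReadyFlags(1 << 0);
    const THEIR_CHANNEL_READY: AwaitingReadyFlags = AwaitingReadyFlags(1 << 1);

    fn union(self, other: AwaitingReadyFlags) -> AwaitingReadyFlags { AwaitingReadyFlags(self.0 | other.0) }
    fn clear(&mut self, other: AwaitingReadyFlags) { self.0 &= !other.0; }
}

fn main() {
    // channel_ready received while the batch is still pending...
    let mut flags = AwaitingReadyFlags::WAITING_FOR_BATCH.union(AwaitingReadyFlags::THEIR_CHANNEL_READY);
    // ...then the ChannelManager signals that the whole batch is ready.
    flags.clear(AwaitingReadyFlags::WAITING_FOR_BATCH); // i.e. set_batch_ready()
    assert_eq!(flags, AwaitingReadyFlags::THEIR_CHANNEL_READY);
}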