use crate::ln::onion_utils::HTLCFailReason;
use crate::chain::BestBlock;
use crate::chain::chaininterface::{FeeEstimator, ConfirmationTarget, LowerBoundedFeeEstimator};
-use crate::chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, ChannelMonitorUpdateStep, LATENCY_GRACE_PERIOD_BLOCKS, CLOSED_CHANNEL_UPDATE_ID};
+use crate::chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, ChannelMonitorUpdateStep, WithChannelMonitor, LATENCY_GRACE_PERIOD_BLOCKS, CLOSED_CHANNEL_UPDATE_ID};
use crate::chain::transaction::{OutPoint, TransactionData};
-use crate::sign::{EcdsaChannelSigner, WriteableEcdsaChannelSigner, EntropySource, ChannelSigner, SignerProvider, NodeSigner, Recipient};
+use crate::sign::ecdsa::{EcdsaChannelSigner, WriteableEcdsaChannelSigner};
+use crate::sign::{EntropySource, ChannelSigner, SignerProvider, NodeSigner, Recipient};
use crate::events::ClosureReason;
use crate::routing::gossip::NodeId;
use crate::util::ser::{Readable, ReadableArgs, Writeable, Writer};
-use crate::util::logger::Logger;
+use crate::util::logger::{Logger, Record, WithContext};
use crate::util::errors::APIError;
use crate::util::config::{UserConfig, ChannelConfig, LegacyChannelConfig, ChannelHandshakeConfig, ChannelHandshakeLimits, MaxDustHTLCExposure};
use crate::util::scid_utils::scid_from_parts;
use crate::sync::Mutex;
use crate::sign::type_resolver::ChannelSignerType;
+use super::channel_keys::{DelayedPaymentBasepoint, HtlcBasepoint, RevocationBasepoint};
+
#[cfg(test)]
pub struct ChannelValueStat {
pub value_to_self_msat: u64,
state: InboundHTLCState,
}
+#[cfg_attr(test, derive(Clone, Debug, PartialEq))]
enum OutboundHTLCState {
/// Added by us and included in a commitment_signed (if we were AwaitingRemoteRevoke when we
/// created it we would have put it in the holding cell instead). When they next revoke_and_ack
}
#[derive(Clone)]
+#[cfg_attr(test, derive(Debug, PartialEq))]
enum OutboundHTLCOutcome {
/// LDK version 0.0.105+ will always fill in the preimage here.
Success(Option<PaymentPreimage>),
}
}
+#[cfg_attr(test, derive(Clone, Debug, PartialEq))]
struct OutboundHTLCOutput {
htlc_id: u64,
amount_msat: u64,
payment_hash: PaymentHash,
state: OutboundHTLCState,
source: HTLCSource,
+ blinding_point: Option<PublicKey>,
skimmed_fee_msat: Option<u64>,
}
/// See AwaitingRemoteRevoke ChannelState for more info
+#[cfg_attr(test, derive(Clone, Debug, PartialEq))]
enum HTLCUpdateAwaitingACK {
AddHTLC { // TODO: Time out if we're getting close to cltv_expiry
// always outbound
onion_routing_packet: msgs::OnionPacket,
// The extra fee we're skimming off the top of this HTLC.
skimmed_fee_msat: Option<u64>,
+ blinding_point: Option<PublicKey>,
},
ClaimHTLC {
payment_preimage: PaymentPreimage,
},
}
-/// There are a few "states" and then a number of flags which can be applied:
-/// We first move through init with `OurInitSent` -> `TheirInitSent` -> `FundingCreated` -> `FundingSent`.
-/// `TheirChannelReady` and `OurChannelReady` then get set on `FundingSent`, and when both are set we
-/// move on to `ChannelReady`.
-/// Note that `PeerDisconnected` can be set on both `ChannelReady` and `FundingSent`.
-/// `ChannelReady` can then get all remaining flags set on it, until we finish shutdown, then we
-/// move on to `ShutdownComplete`, at which point most calls into this channel are disallowed.
+macro_rules! define_state_flags {
+ ($flag_type_doc: expr, $flag_type: ident, [$(($flag_doc: expr, $flag: ident, $value: expr)),+], $extra_flags: expr) => {
+ #[doc = $flag_type_doc]
+ #[derive(Copy, Clone, Debug, PartialEq, PartialOrd, Eq)]
+ struct $flag_type(u32);
+
+ impl $flag_type {
+ $(
+ #[doc = $flag_doc]
+ const $flag: $flag_type = $flag_type($value);
+ )*
+
+ /// All flags that apply to the specified [`ChannelState`] variant.
+ #[allow(unused)]
+ const ALL: $flag_type = Self($(Self::$flag.0 | )* $extra_flags);
+
+ #[allow(unused)]
+ fn new() -> Self { Self(0) }
+
+ #[allow(unused)]
+ fn from_u32(flags: u32) -> Result<Self, ()> {
+ if flags & !Self::ALL.0 != 0 {
+ Err(())
+ } else {
+ Ok($flag_type(flags))
+ }
+ }
+
+ #[allow(unused)]
+ fn is_empty(&self) -> bool { self.0 == 0 }
+
+ #[allow(unused)]
+ fn is_set(&self, flag: Self) -> bool { *self & flag == flag }
+ }
+
+ impl core::ops::Not for $flag_type {
+ type Output = Self;
+ fn not(self) -> Self::Output { Self(!self.0) }
+ }
+ impl core::ops::BitOr for $flag_type {
+ type Output = Self;
+ fn bitor(self, rhs: Self) -> Self::Output { Self(self.0 | rhs.0) }
+ }
+ impl core::ops::BitOrAssign for $flag_type {
+ fn bitor_assign(&mut self, rhs: Self) { self.0 |= rhs.0; }
+ }
+ impl core::ops::BitAnd for $flag_type {
+ type Output = Self;
+ fn bitand(self, rhs: Self) -> Self::Output { Self(self.0 & rhs.0) }
+ }
+ impl core::ops::BitAndAssign for $flag_type {
+ fn bitand_assign(&mut self, rhs: Self) { self.0 &= rhs.0; }
+ }
+ };
+ ($flag_type_doc: expr, $flag_type: ident, $flags: tt) => {
+ define_state_flags!($flag_type_doc, $flag_type, $flags, 0);
+ };
+ ($flag_type_doc: expr, FUNDED_STATE, $flag_type: ident, $flags: tt) => {
+ define_state_flags!($flag_type_doc, $flag_type, $flags, FundedStateFlags::ALL.0);
+ impl core::ops::BitOr<FundedStateFlags> for $flag_type {
+ type Output = Self;
+ fn bitor(self, rhs: FundedStateFlags) -> Self::Output { Self(self.0 | rhs.0) }
+ }
+ impl core::ops::BitOrAssign<FundedStateFlags> for $flag_type {
+ fn bitor_assign(&mut self, rhs: FundedStateFlags) { self.0 |= rhs.0; }
+ }
+ impl core::ops::BitAnd<FundedStateFlags> for $flag_type {
+ type Output = Self;
+ fn bitand(self, rhs: FundedStateFlags) -> Self::Output { Self(self.0 & rhs.0) }
+ }
+ impl core::ops::BitAndAssign<FundedStateFlags> for $flag_type {
+ fn bitand_assign(&mut self, rhs: FundedStateFlags) { self.0 &= rhs.0; }
+ }
+ impl PartialEq<FundedStateFlags> for $flag_type {
+ fn eq(&self, other: &FundedStateFlags) -> bool { self.0 == other.0 }
+ }
+ impl From<FundedStateFlags> for $flag_type {
+ fn from(flags: FundedStateFlags) -> Self { Self(flags.0) }
+ }
+ };
+}
+
+/// We declare all the states/flags here together to help determine which bits are still available
+/// to choose.
+mod state_flags {
+ pub const OUR_INIT_SENT: u32 = 1 << 0;
+ pub const THEIR_INIT_SENT: u32 = 1 << 1;
+ pub const FUNDING_CREATED: u32 = 1 << 2;
+ pub const FUNDING_SENT: u32 = 1 << 3;
+ pub const THEIR_CHANNEL_READY: u32 = 1 << 4;
+ pub const OUR_CHANNEL_READY: u32 = 1 << 5;
+ pub const CHANNEL_READY: u32 = 1 << 6;
+ pub const PEER_DISCONNECTED: u32 = 1 << 7;
+ pub const MONITOR_UPDATE_IN_PROGRESS: u32 = 1 << 8;
+ pub const AWAITING_REMOTE_REVOKE: u32 = 1 << 9;
+ pub const REMOTE_SHUTDOWN_SENT: u32 = 1 << 10;
+ pub const LOCAL_SHUTDOWN_SENT: u32 = 1 << 11;
+ pub const SHUTDOWN_COMPLETE: u32 = 1 << 12;
+ pub const WAITING_FOR_BATCH: u32 = 1 << 13;
+}
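// Illustrative sketch (not part of the patch): every constant above is a
// distinct power of two, so ORing any subset of them never aliases another
// flag. A quick self-check under that assumption:
#[cfg(test)]
fn _state_flag_bits_are_disjoint() {
	use self::state_flags::*;
	let bits = [
		OUR_INIT_SENT, THEIR_INIT_SENT, FUNDING_CREATED, FUNDING_SENT,
		THEIR_CHANNEL_READY, OUR_CHANNEL_READY, CHANNEL_READY, PEER_DISCONNECTED,
		MONITOR_UPDATE_IN_PROGRESS, AWAITING_REMOTE_REVOKE, REMOTE_SHUTDOWN_SENT,
		LOCAL_SHUTDOWN_SENT, SHUTDOWN_COMPLETE, WAITING_FOR_BATCH,
	];
	let mut seen = 0u32;
	for &bit in bits.iter() {
		assert_eq!(seen & bit, 0, "state flag bits must not overlap");
		seen |= bit;
	}
}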
+
+define_state_flags!(
+ "Flags that apply to all [`ChannelState`] variants in which the channel is funded.",
+ FundedStateFlags, [
+ ("Indicates the remote side is considered \"disconnected\" and no updates are allowed \
+ until after we've done a `channel_reestablish` dance.", PEER_DISCONNECTED, state_flags::PEER_DISCONNECTED),
+ ("Indicates the user has told us a `ChannelMonitor` update is pending async persistence \
+ somewhere and we should pause sending any outbound messages until they've managed to \
+ complete it.", MONITOR_UPDATE_IN_PROGRESS, state_flags::MONITOR_UPDATE_IN_PROGRESS),
+ ("Indicates we received a `shutdown` message from the remote end. If set, they may not add \
+ any new HTLCs to the channel, and we are expected to respond with our own `shutdown` \
+ message when possible.", REMOTE_SHUTDOWN_SENT, state_flags::REMOTE_SHUTDOWN_SENT),
+ ("Indicates we sent a `shutdown` message. At this point, we may not add any new HTLCs to \
+ the channel.", LOCAL_SHUTDOWN_SENT, state_flags::LOCAL_SHUTDOWN_SENT)
+ ]
+);
+
+define_state_flags!(
+ "Flags that only apply to [`ChannelState::NegotiatingFunding`].",
+ NegotiatingFundingFlags, [
+ ("Indicates we have (or are prepared to) send our `open_channel`/`accept_channel` message.",
+ OUR_INIT_SENT, state_flags::OUR_INIT_SENT),
+ ("Indicates we have received their `open_channel`/`accept_channel` message.",
+ THEIR_INIT_SENT, state_flags::THEIR_INIT_SENT)
+ ]
+);
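// Illustrative sketch (not part of the patch): the macro-generated API in
// action for the negotiating phase. `from_u32` rejects any bit outside the
// variant's mask, and the bitwise-assign ops mutate in place.
#[cfg(test)]
fn _negotiating_funding_flags_example() {
	let mut flags = NegotiatingFundingFlags::new();
	assert!(flags.is_empty());
	flags |= NegotiatingFundingFlags::OUR_INIT_SENT;
	assert!(flags.is_set(NegotiatingFundingFlags::OUR_INIT_SENT));
	// FUNDING_CREATED is not a negotiating-phase flag, so parsing it fails.
	assert!(NegotiatingFundingFlags::from_u32(state_flags::FUNDING_CREATED).is_err());
}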
+
+define_state_flags!(
+ "Flags that only apply to [`ChannelState::FundingSent`].",
+ FUNDED_STATE, FundingSentFlags, [
+ ("Indicates they sent us a `channel_ready` message. Once both `THEIR_CHANNEL_READY` and \
+ `OUR_CHANNEL_READY` are set, our state moves on to `ChannelReady`.",
+ THEIR_CHANNEL_READY, state_flags::THEIR_CHANNEL_READY),
+ ("Indicates we sent them a `channel_ready` message. Once both `THEIR_CHANNEL_READY` and \
+ `OUR_CHANNEL_READY` are set, our state moves on to `ChannelReady`.",
+ OUR_CHANNEL_READY, state_flags::OUR_CHANNEL_READY),
+ ("Indicates the channel was funded in a batch and the broadcast of the funding transaction \
+ is being held until all channels in the batch have received `funding_signed` and have \
+ their monitors persisted.", WAITING_FOR_BATCH, state_flags::WAITING_FOR_BATCH)
+ ]
+);
+
+define_state_flags!(
+ "Flags that only apply to [`ChannelState::ChannelReady`].",
+ FUNDED_STATE, ChannelReadyFlags, [
+ ("Indicates that we have sent a `commitment_signed` but are awaiting the responding \
+ `revoke_and_ack` message. During this period, we can't generate new `commitment_signed` \
+ messages as we'd be unable to determine which HTLCs they included in their `revoke_and_ack` \
+ implicit ACK, so instead we have to hold them away temporarily to be sent later.",
+ AWAITING_REMOTE_REVOKE, state_flags::AWAITING_REMOTE_REVOKE)
+ ]
+);
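// Illustrative sketch (not part of the patch): the FUNDED_STATE variants of
// the macro additionally implement the bitwise ops against
// `FundedStateFlags`, so channel-wide and variant-specific flags compose
// directly.
#[cfg(test)]
fn _funded_flags_compose_example() {
	let mut flags = ChannelReadyFlags::AWAITING_REMOTE_REVOKE;
	flags |= FundedStateFlags::PEER_DISCONNECTED;
	assert!(flags.is_set(ChannelReadyFlags::AWAITING_REMOTE_REVOKE));
	assert!(flags.is_set(FundedStateFlags::PEER_DISCONNECTED.into()));
}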
+
+#[derive(Copy, Clone, Debug, PartialEq, PartialOrd, Eq)]
enum ChannelState {
- /// Implies we have (or are prepared to) send our open_channel/accept_channel message
- OurInitSent = 1 << 0,
- /// Implies we have received their `open_channel`/`accept_channel` message
- TheirInitSent = 1 << 1,
- /// We have sent `funding_created` and are awaiting a `funding_signed` to advance to `FundingSent`.
- /// Note that this is nonsense for an inbound channel as we immediately generate `funding_signed`
- /// upon receipt of `funding_created`, so simply skip this state.
- FundingCreated = 4,
- /// Set when we have received/sent `funding_created` and `funding_signed` and are thus now waiting
- /// on the funding transaction to confirm. The `ChannelReady` flags are set to indicate when we
- /// and our counterparty consider the funding transaction confirmed.
- FundingSent = 8,
- /// Flag which can be set on `FundingSent` to indicate they sent us a `channel_ready` message.
- /// Once both `TheirChannelReady` and `OurChannelReady` are set, state moves on to `ChannelReady`.
- TheirChannelReady = 1 << 4,
- /// Flag which can be set on `FundingSent` to indicate we sent them a `channel_ready` message.
- /// Once both `TheirChannelReady` and `OurChannelReady` are set, state moves on to `ChannelReady`.
- OurChannelReady = 1 << 5,
- ChannelReady = 64,
- /// Flag which is set on `ChannelReady` and `FundingSent` indicating remote side is considered
- /// "disconnected" and no updates are allowed until after we've done a `channel_reestablish`
- /// dance.
- PeerDisconnected = 1 << 7,
- /// Flag which is set on `ChannelReady`, FundingCreated, and `FundingSent` indicating the user has
- /// told us a `ChannelMonitor` update is pending async persistence somewhere and we should pause
- /// sending any outbound messages until they've managed to finish.
- MonitorUpdateInProgress = 1 << 8,
- /// Flag which implies that we have sent a commitment_signed but are awaiting the responding
- /// revoke_and_ack message. During this time period, we can't generate new commitment_signed
- /// messages as then we will be unable to determine which HTLCs they included in their
- /// revoke_and_ack implicit ACK, so instead we have to hold them away temporarily to be sent
- /// later.
- /// Flag is set on `ChannelReady`.
- AwaitingRemoteRevoke = 1 << 9,
- /// Flag which is set on `ChannelReady` or `FundingSent` after receiving a shutdown message from
- /// the remote end. If set, they may not add any new HTLCs to the channel, and we are expected
- /// to respond with our own shutdown message when possible.
- RemoteShutdownSent = 1 << 10,
- /// Flag which is set on `ChannelReady` or `FundingSent` after sending a shutdown message. At this
- /// point, we may not add any new HTLCs to the channel.
- LocalShutdownSent = 1 << 11,
- /// We've successfully negotiated a closing_signed dance. At this point ChannelManager is about
- /// to drop us, but we store this anyway.
- ShutdownComplete = 4096,
- /// Flag which is set on `FundingSent` to indicate this channel is funded in a batch and the
- /// broadcasting of the funding transaction is being held until all channels in the batch
- /// have received funding_signed and have their monitors persisted.
- WaitingForBatch = 1 << 13,
+ /// We are negotiating the parameters required for the channel prior to funding it.
+ NegotiatingFunding(NegotiatingFundingFlags),
+ /// We have sent `funding_created` and are awaiting a `funding_signed` to advance to
+ /// `FundingSent`. Note that this is nonsense for an inbound channel as we immediately generate
+ /// `funding_signed` upon receipt of `funding_created`, so simply skip this state.
+ FundingCreated,
+ /// We've received/sent `funding_created` and `funding_signed` and are thus now waiting on the
+ /// funding transaction to confirm.
+ FundingSent(FundingSentFlags),
+ /// Both we and our counterparty consider the funding transaction confirmed and the channel is
+ /// now operational.
+ ChannelReady(ChannelReadyFlags),
+ /// We've successfully negotiated a `closing_signed` dance. At this point, the `ChannelManager`
+ /// is about to drop us, but we store this anyway.
+ ShutdownComplete,
+}
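// Illustrative note (not part of the patch): the happy-path lifecycle now
// reads as explicit variant transitions rather than bit arithmetic:
//   NegotiatingFunding(OUR_INIT_SENT | THEIR_INIT_SENT)
//     -> FundingCreated
//     -> FundingSent(THEIR_CHANNEL_READY and/or OUR_CHANNEL_READY)
//     -> ChannelReady(ChannelReadyFlags::new())
//     -> ShutdownComplete
// with the `FundedStateFlags` overlayable on the two funded states throughout.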
+
+macro_rules! impl_state_flag {
+ ($get: ident, $set: ident, $clear: ident, $state_flag: expr, [$($state: ident),+]) => {
+ #[allow(unused)]
+ fn $get(&self) -> bool {
+ match self {
+ $(
+ ChannelState::$state(flags) => flags.is_set($state_flag.into()),
+ )*
+ _ => false,
+ }
+ }
+ #[allow(unused)]
+ fn $set(&mut self) {
+ match self {
+ $(
+ ChannelState::$state(flags) => *flags |= $state_flag,
+ )*
+ _ => debug_assert!(false, "Attempted to set flag on unexpected ChannelState"),
+ }
+ }
+ #[allow(unused)]
+ fn $clear(&mut self) {
+ match self {
+ $(
+ ChannelState::$state(flags) => *flags &= !($state_flag),
+ )*
+ _ => debug_assert!(false, "Attempted to clear flag on unexpected ChannelState"),
+ }
+ }
+ };
+ ($get: ident, $set: ident, $clear: ident, $state_flag: expr, FUNDED_STATES) => {
+ impl_state_flag!($get, $set, $clear, $state_flag, [FundingSent, ChannelReady]);
+ };
+ ($get: ident, $set: ident, $clear: ident, $state_flag: expr, $state: ident) => {
+ impl_state_flag!($get, $set, $clear, $state_flag, [$state]);
+ };
+}
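// Illustrative note (not part of the patch): each `impl_state_flag!`
// invocation below expands to three accessors, e.g. `is_peer_disconnected`,
// `set_peer_disconnected` and `clear_peer_disconnected`, which only act on the
// `ChannelState` variants named in the invocation; the getter returns false
// for other variants while the set/clear methods debug-assert.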
+
+impl ChannelState {
+ fn from_u32(state: u32) -> Result<Self, ()> {
+ match state {
+ state_flags::FUNDING_CREATED => Ok(ChannelState::FundingCreated),
+ state_flags::SHUTDOWN_COMPLETE => Ok(ChannelState::ShutdownComplete),
+ val => {
+ if val & state_flags::FUNDING_SENT == state_flags::FUNDING_SENT {
+ FundingSentFlags::from_u32(val & !state_flags::FUNDING_SENT)
+ .map(|flags| ChannelState::FundingSent(flags))
+ } else if val & state_flags::CHANNEL_READY == state_flags::CHANNEL_READY {
+ ChannelReadyFlags::from_u32(val & !state_flags::CHANNEL_READY)
+ .map(|flags| ChannelState::ChannelReady(flags))
+ } else if let Ok(flags) = NegotiatingFundingFlags::from_u32(val) {
+ Ok(ChannelState::NegotiatingFunding(flags))
+ } else {
+ Err(())
+ }
+ },
+ }
+ }
+
+ fn to_u32(&self) -> u32 {
+ match self {
+ ChannelState::NegotiatingFunding(flags) => flags.0,
+ ChannelState::FundingCreated => state_flags::FUNDING_CREATED,
+ ChannelState::FundingSent(flags) => state_flags::FUNDING_SENT | flags.0,
+ ChannelState::ChannelReady(flags) => state_flags::CHANNEL_READY | flags.0,
+ ChannelState::ShutdownComplete => state_flags::SHUTDOWN_COMPLETE,
+ }
+ }
+
+ fn is_pre_funded_state(&self) -> bool {
+ matches!(self, ChannelState::NegotiatingFunding(_)|ChannelState::FundingCreated)
+ }
+
+ fn is_both_sides_shutdown(&self) -> bool {
+ self.is_local_shutdown_sent() && self.is_remote_shutdown_sent()
+ }
+
+ fn with_funded_state_flags_mask(&self) -> FundedStateFlags {
+ match self {
+ ChannelState::FundingSent(flags) => FundedStateFlags((*flags & FundedStateFlags::ALL).0),
+ ChannelState::ChannelReady(flags) => FundedStateFlags((*flags & FundedStateFlags::ALL).0),
+ _ => FundedStateFlags::new(),
+ }
+ }
+
+ fn should_force_holding_cell(&self) -> bool {
+ match self {
+ ChannelState::ChannelReady(flags) =>
+ flags.is_set(ChannelReadyFlags::AWAITING_REMOTE_REVOKE) ||
+ flags.is_set(FundedStateFlags::MONITOR_UPDATE_IN_PROGRESS.into()) ||
+ flags.is_set(FundedStateFlags::PEER_DISCONNECTED.into()),
+ _ => {
+ debug_assert!(false, "The holding cell is only valid within ChannelReady");
+ false
+ },
+ }
+ }
+
+ impl_state_flag!(is_peer_disconnected, set_peer_disconnected, clear_peer_disconnected,
+ FundedStateFlags::PEER_DISCONNECTED, FUNDED_STATES);
+ impl_state_flag!(is_monitor_update_in_progress, set_monitor_update_in_progress, clear_monitor_update_in_progress,
+ FundedStateFlags::MONITOR_UPDATE_IN_PROGRESS, FUNDED_STATES);
+ impl_state_flag!(is_local_shutdown_sent, set_local_shutdown_sent, clear_local_shutdown_sent,
+ FundedStateFlags::LOCAL_SHUTDOWN_SENT, FUNDED_STATES);
+ impl_state_flag!(is_remote_shutdown_sent, set_remote_shutdown_sent, clear_remote_shutdown_sent,
+ FundedStateFlags::REMOTE_SHUTDOWN_SENT, FUNDED_STATES);
+ impl_state_flag!(is_our_channel_ready, set_our_channel_ready, clear_our_channel_ready,
+ FundingSentFlags::OUR_CHANNEL_READY, FundingSent);
+ impl_state_flag!(is_their_channel_ready, set_their_channel_ready, clear_their_channel_ready,
+ FundingSentFlags::THEIR_CHANNEL_READY, FundingSent);
+ impl_state_flag!(is_waiting_for_batch, set_waiting_for_batch, clear_waiting_for_batch,
+ FundingSentFlags::WAITING_FOR_BATCH, FundingSent);
+ impl_state_flag!(is_awaiting_remote_revoke, set_awaiting_remote_revoke, clear_awaiting_remote_revoke,
+ ChannelReadyFlags::AWAITING_REMOTE_REVOKE, ChannelReady);
}
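// Illustrative sketch (not part of the patch): `to_u32`/`from_u32` preserve
// the legacy bit layout, keeping `channel_state` (de)serialization compatible
// with the old plain-`u32` representation.
#[cfg(test)]
fn _channel_state_roundtrip_example() {
	let state = ChannelState::FundingSent(FundingSentFlags::WAITING_FOR_BATCH);
	let bits = state.to_u32();
	assert_eq!(bits, state_flags::FUNDING_SENT | state_flags::WAITING_FOR_BATCH);
	assert_eq!(ChannelState::from_u32(bits), Ok(state));
}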
-const BOTH_SIDES_SHUTDOWN_MASK: u32 =
- ChannelState::LocalShutdownSent as u32 |
- ChannelState::RemoteShutdownSent as u32;
-const MULTI_STATE_FLAGS: u32 =
- BOTH_SIDES_SHUTDOWN_MASK |
- ChannelState::PeerDisconnected as u32 |
- ChannelState::MonitorUpdateInProgress as u32;
-const STATE_FLAGS: u32 =
- MULTI_STATE_FLAGS |
- ChannelState::TheirChannelReady as u32 |
- ChannelState::OurChannelReady as u32 |
- ChannelState::AwaitingRemoteRevoke as u32 |
- ChannelState::WaitingForBatch as u32;
pub const INITIAL_COMMITMENT_NUMBER: u64 = (1 << 48) - 1;
}
}
+pub(super) struct WithChannelContext<'a, L: Deref> where L::Target: Logger {
+ pub logger: &'a L,
+ pub peer_id: Option<PublicKey>,
+ pub channel_id: Option<ChannelId>,
+}
+
+impl<'a, L: Deref> Logger for WithChannelContext<'a, L> where L::Target: Logger {
+ fn log(&self, mut record: Record) {
+ record.peer_id = self.peer_id;
+ record.channel_id = self.channel_id;
+ self.logger.log(record)
+ }
+}
+
+impl<'a, 'b, L: Deref> WithChannelContext<'a, L>
+where L::Target: Logger {
+ pub(super) fn from<S: Deref>(logger: &'a L, context: &'b ChannelContext<S>) -> Self
+ where S::Target: SignerProvider
+ {
+ WithChannelContext {
+ logger,
+ peer_id: Some(context.counterparty_node_id),
+ channel_id: Some(context.channel_id),
+ }
+ }
+}
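// Illustrative sketch (not part of the patch): call sites wrap their logger
// once and every `Record` logged through it then carries the channel's peer
// and channel ids, e.g. (hypothetical usage):
//
//   let logger = WithChannelContext::from(&self.logger, &chan.context);
//   log_trace!(logger, "processing update");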
+
macro_rules! secp_check {
($res: expr, $err: expr) => {
match $res {
htlcs_included: Vec<(HTLCOutputInCommitment, Option<&'a HTLCSource>)>, // the list of HTLCs (dust HTLCs *included*) which were not ignored when building the transaction
local_balance_msat: u64, // local balance before fees but considering dust limits
remote_balance_msat: u64, // remote balance before fees but considering dust limits
- preimages: Vec<PaymentPreimage>, // preimages for successful offered HTLCs since last commitment
+ outbound_htlc_preimages: Vec<PaymentPreimage>, // preimages for successful offered HTLCs since last commitment
+ inbound_htlc_preimages: Vec<PaymentPreimage>, // preimages for successful received HTLCs since last commitment
}
/// Used when calculating whether we or the remote can afford an additional HTLC.
/// An unbroadcasted batch funding transaction id. The closure of this channel should be
/// propagated to the remainder of the batch.
pub(crate) unbroadcasted_batch_funding_txid: Option<Txid>,
+ pub(crate) channel_id: ChannelId,
+ pub(crate) counterparty_node_id: PublicKey,
}
/// If the majority of the channel's funds are to the fundee and the initiator holds only just
impl<'a, SP: Deref> ChannelPhase<SP> where
SP::Target: SignerProvider,
- <SP::Target as SignerProvider>::Signer: ChannelSigner,
+ <SP::Target as SignerProvider>::EcdsaSigner: ChannelSigner,
{
pub fn context(&'a self) -> &'a ChannelContext<SP> {
match self {
/// The temporary channel ID used during channel setup. Value kept even after transitioning to a final channel ID.
/// Will be `None` for channels created prior to 0.0.115.
temporary_channel_id: Option<ChannelId>,
- channel_state: u32,
+ channel_state: ChannelState,
// When we reach max(6 blocks, minimum_depth), we need to send an AnnouncementSigs message to
// our peer. However, we want to make sure they received it, or else rebroadcast it when we
latest_monitor_update_id: u64,
- holder_signer: ChannelSignerType<<SP::Target as SignerProvider>::Signer>,
+ holder_signer: ChannelSignerType<SP>,
shutdown_scriptpubkey: Option<ShutdownScript>,
destination_script: ScriptBuf,
/// Returns true if we've ever received a message from the remote end for this Channel
pub fn have_received_message(&self) -> bool {
- self.channel_state & !STATE_FLAGS > (ChannelState::OurInitSent as u32)
+ self.channel_state > ChannelState::NegotiatingFunding(NegotiatingFundingFlags::OUR_INIT_SENT)
}
/// Returns true if this channel is fully established and not known to be closing.
/// Allowed in any state (including after shutdown)
pub fn is_usable(&self) -> bool {
- let mask = ChannelState::ChannelReady as u32 | BOTH_SIDES_SHUTDOWN_MASK;
- (self.channel_state & mask) == (ChannelState::ChannelReady as u32) && !self.monitor_pending_channel_ready
+ matches!(self.channel_state, ChannelState::ChannelReady(_)) &&
+ !self.channel_state.is_local_shutdown_sent() &&
+ !self.channel_state.is_remote_shutdown_sent() &&
+ !self.monitor_pending_channel_ready
}
/// Returns the current stage of the channel's shutdown process.
pub fn shutdown_state(&self) -> ChannelShutdownState {
- if self.channel_state & (ChannelState::ShutdownComplete as u32) != 0 {
- return ChannelShutdownState::ShutdownComplete;
- }
- if self.channel_state & (ChannelState::LocalShutdownSent as u32) != 0 && self.channel_state & (ChannelState::RemoteShutdownSent as u32) == 0 {
- return ChannelShutdownState::ShutdownInitiated;
- }
- if (self.channel_state & BOTH_SIDES_SHUTDOWN_MASK != 0) && !self.closing_negotiation_ready() {
- return ChannelShutdownState::ResolvingHTLCs;
- }
- if (self.channel_state & BOTH_SIDES_SHUTDOWN_MASK != 0) && self.closing_negotiation_ready() {
- return ChannelShutdownState::NegotiatingClosingFee;
+ match self.channel_state {
+ ChannelState::FundingSent(_)|ChannelState::ChannelReady(_) =>
+ if self.channel_state.is_local_shutdown_sent() && !self.channel_state.is_remote_shutdown_sent() {
+ ChannelShutdownState::ShutdownInitiated
+ } else if (self.channel_state.is_local_shutdown_sent() || self.channel_state.is_remote_shutdown_sent()) && !self.closing_negotiation_ready() {
+ ChannelShutdownState::ResolvingHTLCs
+ } else if (self.channel_state.is_local_shutdown_sent() || self.channel_state.is_remote_shutdown_sent()) && self.closing_negotiation_ready() {
+ ChannelShutdownState::NegotiatingClosingFee
+ } else {
+ ChannelShutdownState::NotShuttingDown
+ },
+ ChannelState::ShutdownComplete => ChannelShutdownState::ShutdownComplete,
+ _ => ChannelShutdownState::NotShuttingDown,
}
- return ChannelShutdownState::NotShuttingDown;
}
fn closing_negotiation_ready(&self) -> bool {
+ let is_ready_to_close = match self.channel_state {
+ ChannelState::FundingSent(flags) =>
+ flags & FundedStateFlags::ALL == FundedStateFlags::LOCAL_SHUTDOWN_SENT | FundedStateFlags::REMOTE_SHUTDOWN_SENT,
+ ChannelState::ChannelReady(flags) =>
+ flags == FundedStateFlags::LOCAL_SHUTDOWN_SENT | FundedStateFlags::REMOTE_SHUTDOWN_SENT,
+ _ => false,
+ };
self.pending_inbound_htlcs.is_empty() &&
- self.pending_outbound_htlcs.is_empty() &&
- self.pending_update_fee.is_none() &&
- self.channel_state &
- (BOTH_SIDES_SHUTDOWN_MASK |
- ChannelState::AwaitingRemoteRevoke as u32 |
- ChannelState::PeerDisconnected as u32 |
- ChannelState::MonitorUpdateInProgress as u32) == BOTH_SIDES_SHUTDOWN_MASK
+ self.pending_outbound_htlcs.is_empty() &&
+ self.pending_update_fee.is_none() &&
+ is_ready_to_close
}
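	// Worked example (illustrative): with exactly LOCAL_SHUTDOWN_SENT (1 << 11)
	// and REMOTE_SHUTDOWN_SENT (1 << 10) set, the masked flags equal the
	// right-hand side and `is_ready_to_close` is true; a lingering
	// PEER_DISCONNECTED or MONITOR_UPDATE_IN_PROGRESS bit (or, for
	// `ChannelReady`, AWAITING_REMOTE_REVOKE) makes the comparison fail and
	// defers fee negotiation.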
/// Returns true if this channel is currently available for use. This is a stricter check
/// than is_usable(): it additionally requires that the peer is not currently disconnected.
/// Allowed in any state (including after shutdown)
pub fn is_live(&self) -> bool {
- self.is_usable() && (self.channel_state & (ChannelState::PeerDisconnected as u32) == 0)
+ self.is_usable() && !self.channel_state.is_peer_disconnected()
}
// Public utilities:
/// Returns the holder signer for this channel.
#[cfg(test)]
- pub fn get_signer(&self) -> &ChannelSignerType<<SP::Target as SignerProvider>::Signer> {
+ pub fn get_signer(&self) -> &ChannelSignerType<SP> {
return &self.holder_signer
}
/// Returns true if funding_signed was sent/received and the
/// funding transaction has been broadcast if necessary.
pub fn is_funding_broadcast(&self) -> bool {
- self.channel_state & !STATE_FLAGS >= ChannelState::FundingSent as u32 &&
- self.channel_state & ChannelState::WaitingForBatch as u32 == 0
+ !self.channel_state.is_pre_funded_state() &&
+ !matches!(self.channel_state, ChannelState::FundingSent(flags) if flags.is_set(FundingSentFlags::WAITING_FOR_BATCH))
}
/// Transaction nomenclature is somewhat confusing here as there are many different cases - a
}
}
+ let mut inbound_htlc_preimages: Vec<PaymentPreimage> = Vec::new();
+
for ref htlc in self.pending_inbound_htlcs.iter() {
let (include, state_name) = match htlc.state {
InboundHTLCState::RemoteAnnounced(_) => (!generated_by_local, "RemoteAnnounced"),
match &htlc.state {
&InboundHTLCState::LocalRemoved(ref reason) => {
if generated_by_local {
- if let &InboundHTLCRemovalReason::Fulfill(_) = reason {
+ if let &InboundHTLCRemovalReason::Fulfill(preimage) = reason {
+ inbound_htlc_preimages.push(preimage);
value_to_self_msat_offset += htlc.amount_msat as i64;
}
}
}
}
- let mut preimages: Vec<PaymentPreimage> = Vec::new();
+
+ let mut outbound_htlc_preimages: Vec<PaymentPreimage> = Vec::new();
for ref htlc in self.pending_outbound_htlcs.iter() {
let (include, state_name) = match htlc.state {
};
if let Some(preimage) = preimage_opt {
- preimages.push(preimage);
+ outbound_htlc_preimages.push(preimage);
}
if include {
htlcs_included,
local_balance_msat: value_to_self_msat as u64,
remote_balance_msat: value_to_remote_msat as u64,
- preimages
+ inbound_htlc_preimages,
+ outbound_htlc_preimages,
}
}
fn if_unbroadcasted_funding<F, O>(&self, f: F) -> Option<O>
where F: Fn() -> Option<O> {
- if self.channel_state & ChannelState::FundingCreated as u32 != 0 ||
- self.channel_state & ChannelState::WaitingForBatch as u32 != 0 {
- f()
- } else {
- None
+ match self.channel_state {
+ ChannelState::FundingCreated => f(),
+ ChannelState::FundingSent(flags) => if flags.is_set(FundingSentFlags::WAITING_FOR_BATCH) {
+ f()
+ } else {
+ None
+ },
+ _ => None,
}
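	// Illustrative note (not part of the patch): `f` only runs while the funding
	// transaction is known but not yet broadcast, i.e. before `funding_signed`
	// (`FundingCreated`) or while a batch holds the broadcast
	// (`FundingSent(WAITING_FOR_BATCH)`).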
}
// called during initialization prior to the chain_monitor in the encompassing ChannelManager
// being fully configured in some cases. Thus, it's likely any monitor events we generate will
// be delayed in being processed! See the docs for `ChannelManagerReadArgs` for more.
- assert!(self.channel_state != ChannelState::ShutdownComplete as u32);
+ assert!(!matches!(self.channel_state, ChannelState::ShutdownComplete));
// We go ahead and "free" any holding cell HTLCs or HTLCs we haven't yet committed to and
// return them to fail the payment.
// funding transaction, don't return a funding txo (which prevents providing the
// monitor update to the user, even if we return one).
// See test_duplicate_chan_id and test_pre_lockin_no_chan_closed_update for more.
- if self.channel_state & (ChannelState::FundingSent as u32 | ChannelState::ChannelReady as u32 | ChannelState::ShutdownComplete as u32) != 0 {
+ let generate_monitor_update = match self.channel_state {
+ ChannelState::FundingSent(_)|ChannelState::ChannelReady(_)|ChannelState::ShutdownComplete => true,
+ _ => false,
+ };
+ if generate_monitor_update {
self.latest_monitor_update_id = CLOSED_CHANNEL_UPDATE_ID;
Some((self.get_counterparty_node_id(), funding_txo, ChannelMonitorUpdate {
update_id: self.latest_monitor_update_id,
} else { None };
let unbroadcasted_batch_funding_txid = self.unbroadcasted_batch_funding_txid();
- self.channel_state = ChannelState::ShutdownComplete as u32;
+ self.channel_state = ChannelState::ShutdownComplete;
self.update_time_counter += 1;
ShutdownResult {
monitor_update,
dropped_outbound_htlcs,
unbroadcasted_batch_funding_txid,
+ channel_id: self.channel_id,
+ counterparty_node_id: self.counterparty_node_id,
}
}
let signature = match &self.holder_signer {
// TODO (taproot|arik): move match into calling method for Taproot
ChannelSignerType::Ecdsa(ecdsa) => {
- ecdsa.sign_counterparty_commitment(&counterparty_initial_commitment_tx, Vec::new(), &self.secp_ctx)
+ ecdsa.sign_counterparty_commitment(&counterparty_initial_commitment_tx, Vec::new(), Vec::new(), &self.secp_ctx)
.map(|(sig, _)| sig).ok()?
- }
+ },
+ // TODO (taproot|arik)
+ #[cfg(taproot)]
+ _ => todo!()
};
if self.signer_pending_funding {
match &self.holder_signer {
// TODO (arik): move match into calling method for Taproot
ChannelSignerType::Ecdsa(ecdsa) => {
- let funding_signed = ecdsa.sign_counterparty_commitment(&counterparty_initial_commitment_tx, Vec::new(), &self.secp_ctx)
+ let funding_signed = ecdsa.sign_counterparty_commitment(&counterparty_initial_commitment_tx, Vec::new(), Vec::new(), &self.secp_ctx)
.map(|(signature, _)| msgs::FundingSigned {
channel_id: self.channel_id(),
signature,
// We sign "counterparty" commitment transaction, allowing them to broadcast the tx if they wish.
(counterparty_initial_commitment_tx, funding_signed)
- }
+ },
+ // TODO (taproot|arik)
+ #[cfg(taproot)]
+ _ => todo!()
}
}
}
impl<SP: Deref> Channel<SP> where
SP::Target: SignerProvider,
- <SP::Target as SignerProvider>::Signer: WriteableEcdsaChannelSigner
+ <SP::Target as SignerProvider>::EcdsaSigner: WriteableEcdsaChannelSigner
{
fn check_remote_fee<F: Deref, L: Deref>(
channel_type: &ChannelTypeFeatures, fee_estimator: &LowerBoundedFeeEstimator<F>,
where L::Target: Logger {
// Assert that we'll add the HTLC claim to the holding cell in `get_update_fulfill_htlc`
// (see equivalent if condition there).
- assert!(self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32) != 0);
+ assert!(self.context.channel_state.should_force_holding_cell());
let mon_update_id = self.context.latest_monitor_update_id; // Forget the ChannelMonitor update
let fulfill_resp = self.get_update_fulfill_htlc(htlc_id_arg, payment_preimage_arg, logger);
self.context.latest_monitor_update_id = mon_update_id;
// caller thought we could have something claimed (because we wouldn't have accepted an
// incoming HTLC anyway). If we got to ShutdownComplete, callers aren't allowed to call us,
// either.
- if (self.context.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) {
+ if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
panic!("Was asked to fulfill an HTLC when channel was not in an operational state");
}
- assert_eq!(self.context.channel_state & ChannelState::ShutdownComplete as u32, 0);
// ChannelManager may generate duplicate claims/fails due to HTLC update events from
// on-chain ChannelMonitors during block rescan. Ideally we'd figure out a way to drop
}],
};
- if (self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32)) != 0 {
+ if self.context.channel_state.should_force_holding_cell() {
// Note that this condition is the same as the assertion in
// `claim_htlc_while_disconnected_dropping_mon_update` and must match exactly -
// `claim_htlc_while_disconnected_dropping_mon_update` would not work correctly if we
_ => {}
}
}
- log_trace!(logger, "Adding HTLC claim to holding_cell in channel {}! Current state: {}", &self.context.channel_id(), self.context.channel_state);
+ log_trace!(logger, "Adding HTLC claim to holding_cell in channel {}! Current state: {}", &self.context.channel_id(), self.context.channel_state.to_u32());
self.context.holding_cell_htlc_updates.push(HTLCUpdateAwaitingACK::ClaimHTLC {
payment_preimage: payment_preimage_arg, htlc_id: htlc_id_arg,
});
/// [`ChannelError::Ignore`].
fn fail_htlc<L: Deref>(&mut self, htlc_id_arg: u64, err_packet: msgs::OnionErrorPacket, mut force_holding_cell: bool, logger: &L)
-> Result<Option<msgs::UpdateFailHTLC>, ChannelError> where L::Target: Logger {
- if (self.context.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) {
+ if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
panic!("Was asked to fail an HTLC when channel was not in an operational state");
}
- assert_eq!(self.context.channel_state & ChannelState::ShutdownComplete as u32, 0);
// ChannelManager may generate duplicate claims/fails due to HTLC update events from
// on-chain ChannelMonitors during block rescan. Ideally we'd figure out a way to drop
return Ok(None);
}
- if (self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32)) != 0 {
+ if self.context.channel_state.should_force_holding_cell() {
debug_assert!(force_holding_cell, "!force_holding_cell is only called when emptying the holding cell, so we shouldn't end up back in it!");
force_holding_cell = true;
}
/// If this call is successful, broadcast the funding transaction (and not before!)
pub fn funding_signed<L: Deref>(
&mut self, msg: &msgs::FundingSigned, best_block: BestBlock, signer_provider: &SP, logger: &L
- ) -> Result<ChannelMonitor<<SP::Target as SignerProvider>::Signer>, ChannelError>
+ ) -> Result<ChannelMonitor<<SP::Target as SignerProvider>::EcdsaSigner>, ChannelError>
where
L::Target: Logger
{
if !self.context.is_outbound() {
return Err(ChannelError::Close("Received funding_signed for an inbound channel?".to_owned()));
}
- if self.context.channel_state & !(ChannelState::MonitorUpdateInProgress as u32) != ChannelState::FundingCreated as u32 {
+ if !matches!(self.context.channel_state, ChannelState::FundingCreated) {
return Err(ChannelError::Close("Received funding_signed in strange state!".to_owned()));
}
if self.context.commitment_secrets.get_min_seen_secret() != (1 << 48) ||
funding_redeemscript.clone(), self.context.channel_value_satoshis,
obscure_factor,
holder_commitment_tx, best_block, self.context.counterparty_node_id);
-
+ let logger_with_chan_monitor = WithChannelMonitor::from(logger, &channel_monitor);
channel_monitor.provide_initial_counterparty_commitment_tx(
counterparty_initial_bitcoin_tx.txid, Vec::new(),
self.context.cur_counterparty_commitment_transaction_number,
self.context.counterparty_cur_commitment_point.unwrap(),
counterparty_initial_commitment_tx.feerate_per_kw(),
counterparty_initial_commitment_tx.to_broadcaster_value_sat(),
- counterparty_initial_commitment_tx.to_countersignatory_value_sat(), logger);
+ counterparty_initial_commitment_tx.to_countersignatory_value_sat(), &&logger_with_chan_monitor);
- assert_eq!(self.context.channel_state & (ChannelState::MonitorUpdateInProgress as u32), 0); // We have no had any monitor(s) yet to fail update!
+ assert!(!self.context.channel_state.is_monitor_update_in_progress()); // We have not had any monitors yet to fail an update!
if self.context.is_batch_funding() {
- self.context.channel_state = ChannelState::FundingSent as u32 | ChannelState::WaitingForBatch as u32;
+ self.context.channel_state = ChannelState::FundingSent(FundingSentFlags::WAITING_FOR_BATCH);
} else {
- self.context.channel_state = ChannelState::FundingSent as u32;
+ self.context.channel_state = ChannelState::FundingSent(FundingSentFlags::new());
}
self.context.cur_holder_commitment_transaction_number -= 1;
self.context.cur_counterparty_commitment_transaction_number -= 1;
/// treated as a non-batch channel going forward.
pub fn set_batch_ready(&mut self) {
self.context.is_batch_funding = None;
- self.context.channel_state &= !(ChannelState::WaitingForBatch as u32);
+ self.context.channel_state.clear_waiting_for_batch();
}
/// Handles a channel_ready message from our peer. If we've already sent our channel_ready
NS::Target: NodeSigner,
L::Target: Logger
{
- if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
+ if self.context.channel_state.is_peer_disconnected() {
self.context.workaround_lnd_bug_4006 = Some(msg.clone());
return Err(ChannelError::Ignore("Peer sent channel_ready when we needed a channel_reestablish. The peer is likely lnd, see https://github.com/lightningnetwork/lnd/issues/4006".to_owned()));
}
}
}
- let non_shutdown_state = self.context.channel_state & (!MULTI_STATE_FLAGS);
-
// Our channel_ready shouldn't have been sent if we are waiting for other channels in the
// batch, but we can receive channel_ready messages.
- debug_assert!(
- non_shutdown_state & ChannelState::OurChannelReady as u32 == 0 ||
- non_shutdown_state & ChannelState::WaitingForBatch as u32 == 0
- );
- if non_shutdown_state & !(ChannelState::WaitingForBatch as u32) == ChannelState::FundingSent as u32 {
- self.context.channel_state |= ChannelState::TheirChannelReady as u32;
- } else if non_shutdown_state == (ChannelState::FundingSent as u32 | ChannelState::OurChannelReady as u32) {
- self.context.channel_state = ChannelState::ChannelReady as u32 | (self.context.channel_state & MULTI_STATE_FLAGS);
- self.context.update_time_counter += 1;
- } else if self.context.channel_state & (ChannelState::ChannelReady as u32) != 0 ||
- // If we reconnected before sending our `channel_ready` they may still resend theirs:
- (self.context.channel_state & (ChannelState::FundingSent as u32 | ChannelState::TheirChannelReady as u32) ==
- (ChannelState::FundingSent as u32 | ChannelState::TheirChannelReady as u32))
- {
+ let mut check_reconnection = false;
+ match &self.context.channel_state {
+ ChannelState::FundingSent(flags) => {
+ let flags = *flags & !FundedStateFlags::ALL;
+ debug_assert!(!flags.is_set(FundingSentFlags::OUR_CHANNEL_READY) || !flags.is_set(FundingSentFlags::WAITING_FOR_BATCH));
+ if flags & !FundingSentFlags::WAITING_FOR_BATCH == FundingSentFlags::THEIR_CHANNEL_READY {
+ // If we reconnected before sending our `channel_ready` they may still resend theirs.
+ check_reconnection = true;
+ } else if (flags & !FundingSentFlags::WAITING_FOR_BATCH).is_empty() {
+ self.context.channel_state.set_their_channel_ready();
+ } else if flags == FundingSentFlags::OUR_CHANNEL_READY {
+ self.context.channel_state = ChannelState::ChannelReady(self.context.channel_state.with_funded_state_flags_mask().into());
+ self.context.update_time_counter += 1;
+ } else {
+ // We're in `WAITING_FOR_BATCH`, so we should wait until we're ready.
+ debug_assert!(flags.is_set(FundingSentFlags::WAITING_FOR_BATCH));
+ }
+ }
+ // If we reconnected before sending our `channel_ready` they may still resend theirs.
+ ChannelState::ChannelReady(_) => check_reconnection = true,
+ _ => return Err(ChannelError::Close("Peer sent a channel_ready at a strange time".to_owned())),
+ }
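		// Worked example (illustrative) for the `FundingSent` arm above, with the
		// channel-wide flags masked out:
		//   THEIR_CHANNEL_READY already set -> duplicate channel_ready, treat as reconnect
		//   no flags (batch aside)          -> record THEIR_CHANNEL_READY, keep waiting
		//   OUR_CHANNEL_READY               -> both sides ready, move to ChannelReady
		//   WAITING_FOR_BATCH only          -> hold until the rest of the batch is ready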
+ if check_reconnection {
// They probably disconnected/reconnected and re-sent the channel_ready, which is
// required, or they're sending a fresh SCID alias.
let expected_point =
return Err(ChannelError::Close("Peer sent a reconnect channel_ready with a different point".to_owned()));
}
return Ok(None);
- } else {
- return Err(ChannelError::Close("Peer sent a channel_ready at a strange time".to_owned()));
}
self.context.counterparty_prev_commitment_point = self.context.counterparty_cur_commitment_point;
where F: for<'a> Fn(&'a Self, PendingHTLCStatus, u16) -> PendingHTLCStatus,
FE::Target: FeeEstimator, L::Target: Logger,
{
+ if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
+ return Err(ChannelError::Close("Got add HTLC message when channel was not in an operational state".to_owned()));
+ }
// We can't accept HTLCs sent after we've sent a shutdown.
- let local_sent_shutdown = (self.context.channel_state & (ChannelState::ChannelReady as u32 | ChannelState::LocalShutdownSent as u32)) != (ChannelState::ChannelReady as u32);
- if local_sent_shutdown {
+ if self.context.channel_state.is_local_shutdown_sent() {
pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x4000|8);
}
// If the remote has sent a shutdown prior to adding this HTLC, then they are in violation of the spec.
- let remote_sent_shutdown = (self.context.channel_state & (ChannelState::ChannelReady as u32 | ChannelState::RemoteShutdownSent as u32)) != (ChannelState::ChannelReady as u32);
- if remote_sent_shutdown {
+ if self.context.channel_state.is_remote_shutdown_sent() {
return Err(ChannelError::Close("Got add HTLC message when channel was not in an operational state".to_owned()));
}
- if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
+ if self.context.channel_state.is_peer_disconnected() {
return Err(ChannelError::Close("Peer sent update_add_htlc when we needed a channel_reestablish".to_owned()));
}
if msg.amount_msat > self.context.channel_value_satoshis * 1000 {
return Err(ChannelError::Close("Remote provided CLTV expiry in seconds instead of block height".to_owned()));
}
- if self.context.channel_state & ChannelState::LocalShutdownSent as u32 != 0 {
+ if self.context.channel_state.is_local_shutdown_sent() {
if let PendingHTLCStatus::Forward(_) = pending_forward_status {
panic!("ChannelManager shouldn't be trying to add a forwardable HTLC after we've started closing");
}
}
pub fn update_fulfill_htlc(&mut self, msg: &msgs::UpdateFulfillHTLC) -> Result<(HTLCSource, u64), ChannelError> {
- if (self.context.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) {
+ if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
return Err(ChannelError::Close("Got fulfill HTLC message when channel was not in an operational state".to_owned()));
}
- if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
+ if self.context.channel_state.is_peer_disconnected() {
return Err(ChannelError::Close("Peer sent update_fulfill_htlc when we needed a channel_reestablish".to_owned()));
}
}
pub fn update_fail_htlc(&mut self, msg: &msgs::UpdateFailHTLC, fail_reason: HTLCFailReason) -> Result<(), ChannelError> {
- if (self.context.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) {
+ if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
return Err(ChannelError::Close("Got fail HTLC message when channel was not in an operational state".to_owned()));
}
- if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
+ if self.context.channel_state.is_peer_disconnected() {
return Err(ChannelError::Close("Peer sent update_fail_htlc when we needed a channel_reestablish".to_owned()));
}
}
pub fn update_fail_malformed_htlc(&mut self, msg: &msgs::UpdateFailMalformedHTLC, fail_reason: HTLCFailReason) -> Result<(), ChannelError> {
- if (self.context.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) {
+ if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
return Err(ChannelError::Close("Got fail malformed HTLC message when channel was not in an operational state".to_owned()));
}
- if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
+ if self.context.channel_state.is_peer_disconnected() {
return Err(ChannelError::Close("Peer sent update_fail_malformed_htlc when we needed a channel_reestablish".to_owned()));
}
pub fn commitment_signed<L: Deref>(&mut self, msg: &msgs::CommitmentSigned, logger: &L) -> Result<Option<ChannelMonitorUpdate>, ChannelError>
where L::Target: Logger
{
- if (self.context.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) {
+ if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
return Err(ChannelError::Close("Got commitment signed message when channel was not in an operational state".to_owned()));
}
- if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
+ if self.context.channel_state.is_peer_disconnected() {
return Err(ChannelError::Close("Peer sent commitment_signed when we needed a channel_reestablish".to_owned()));
}
- if self.context.channel_state & BOTH_SIDES_SHUTDOWN_MASK == BOTH_SIDES_SHUTDOWN_MASK && self.context.last_sent_closing_fee.is_some() {
+ if self.context.channel_state.is_both_sides_shutdown() && self.context.last_sent_closing_fee.is_some() {
return Err(ChannelError::Close("Peer sent commitment_signed after we'd started exchanging closing_signeds".to_owned()));
}
let htlc_sighashtype = if self.context.channel_type.supports_anchors_zero_fee_htlc_tx() { EcdsaSighashType::SinglePlusAnyoneCanPay } else { EcdsaSighashType::All };
let htlc_sighash = hash_to_message!(&sighash::SighashCache::new(&htlc_tx).segwit_signature_hash(0, &htlc_redeemscript, htlc.amount_msat / 1000, htlc_sighashtype).unwrap()[..]);
log_trace!(logger, "Checking HTLC tx signature {} by key {} against tx {} (sighash {}) with redeemscript {} in channel {}.",
- log_bytes!(msg.htlc_signatures[idx].serialize_compact()[..]), log_bytes!(keys.countersignatory_htlc_key.serialize()),
+ log_bytes!(msg.htlc_signatures[idx].serialize_compact()[..]), log_bytes!(keys.countersignatory_htlc_key.to_public_key().serialize()),
encode::serialize_hex(&htlc_tx), log_bytes!(htlc_sighash[..]), encode::serialize_hex(&htlc_redeemscript), &self.context.channel_id());
- if let Err(_) = self.context.secp_ctx.verify_ecdsa(&htlc_sighash, &msg.htlc_signatures[idx], &keys.countersignatory_htlc_key) {
+ if let Err(_) = self.context.secp_ctx.verify_ecdsa(&htlc_sighash, &msg.htlc_signatures[idx], &keys.countersignatory_htlc_key.to_public_key()) {
return Err(ChannelError::Close("Invalid HTLC tx signature from peer".to_owned()));
}
if !separate_nondust_htlc_sources {
self.context.counterparty_funding_pubkey()
);
- self.context.holder_signer.as_ref().validate_holder_commitment(&holder_commitment_tx, commitment_stats.preimages)
+ self.context.holder_signer.as_ref().validate_holder_commitment(&holder_commitment_tx, commitment_stats.outbound_htlc_preimages)
.map_err(|_| ChannelError::Close("Failed to validate our commitment".to_owned()))?;
// Update state now that we've passed all the can-fail calls...
// build_commitment_no_status_check() next which will reset this to RAAFirst.
self.context.resend_order = RAACommitmentOrder::CommitmentFirst;
- if (self.context.channel_state & ChannelState::MonitorUpdateInProgress as u32) != 0 {
+ if self.context.channel_state.is_monitor_update_in_progress() {
// In case we initially failed monitor updating without requiring a response, we need
// to make sure the RAA gets sent first.
self.context.monitor_pending_revoke_and_ack = true;
- if need_commitment && (self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32)) == 0 {
+ if need_commitment && !self.context.channel_state.is_awaiting_remote_revoke() {
// If we were going to send a commitment_signed after the RAA, go ahead and do all
// the corresponding HTLC status updates so that
// get_last_commitment_update_for_send includes the right HTLCs.
return Ok(self.push_ret_blockable_mon_update(monitor_update));
}
- let need_commitment_signed = if need_commitment && (self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32)) == 0 {
+ let need_commitment_signed = if need_commitment && !self.context.channel_state.is_awaiting_remote_revoke() {
// If we're AwaitingRemoteRevoke we can't send a new commitment here, but that's ok -
// we'll send one right away when we get the revoke_and_ack when we
// free_holding_cell_htlcs().
) -> (Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>)
where F::Target: FeeEstimator, L::Target: Logger
{
- if self.context.channel_state & !STATE_FLAGS >= ChannelState::ChannelReady as u32 &&
- (self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32)) == 0 {
+ if matches!(self.context.channel_state, ChannelState::ChannelReady(_)) && !self.context.channel_state.should_force_holding_cell() {
self.free_holding_cell_htlcs(fee_estimator, logger)
} else { (None, Vec::new()) }
}
) -> (Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>)
where F::Target: FeeEstimator, L::Target: Logger
{
- assert_eq!(self.context.channel_state & ChannelState::MonitorUpdateInProgress as u32, 0);
+ assert!(!self.context.channel_state.is_monitor_update_in_progress());
if self.context.holding_cell_htlc_updates.len() != 0 || self.context.holding_cell_update_fee.is_some() {
log_trace!(logger, "Freeing holding cell with {} HTLC updates{} in channel {}", self.context.holding_cell_htlc_updates.len(),
if self.context.holding_cell_update_fee.is_some() { " and a fee update" } else { "" }, &self.context.channel_id());
match &htlc_update {
&HTLCUpdateAwaitingACK::AddHTLC {
amount_msat, cltv_expiry, ref payment_hash, ref source, ref onion_routing_packet,
- skimmed_fee_msat, ..
+ skimmed_fee_msat, blinding_point, ..
} => {
- match self.send_htlc(amount_msat, *payment_hash, cltv_expiry, source.clone(),
- onion_routing_packet.clone(), false, skimmed_fee_msat, fee_estimator, logger)
- {
+ match self.send_htlc(
+ amount_msat, *payment_hash, cltv_expiry, source.clone(), onion_routing_packet.clone(),
+ false, skimmed_fee_msat, blinding_point, fee_estimator, logger
+ ) {
Ok(_) => update_add_count += 1,
Err(e) => {
match e {
) -> Result<(Vec<(HTLCSource, PaymentHash)>, Option<ChannelMonitorUpdate>), ChannelError>
where F::Target: FeeEstimator, L::Target: Logger,
{
- if (self.context.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) {
+ if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
return Err(ChannelError::Close("Got revoke/ACK message when channel was not in an operational state".to_owned()));
}
- if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
+ if self.context.channel_state.is_peer_disconnected() {
return Err(ChannelError::Close("Peer sent revoke_and_ack when we needed a channel_reestablish".to_owned()));
}
- if self.context.channel_state & BOTH_SIDES_SHUTDOWN_MASK == BOTH_SIDES_SHUTDOWN_MASK && self.context.last_sent_closing_fee.is_some() {
+ if self.context.channel_state.is_both_sides_shutdown() && self.context.last_sent_closing_fee.is_some() {
return Err(ChannelError::Close("Peer sent revoke_and_ack after we'd started exchanging closing_signeds".to_owned()));
}
}
}
- if self.context.channel_state & ChannelState::AwaitingRemoteRevoke as u32 == 0 {
+ if !self.context.channel_state.is_awaiting_remote_revoke() {
// Our counterparty seems to have burned their coins to us (by revoking a state when we
// haven't given them a new commitment transaction to broadcast). We should probably
// take advantage of this by updating our channel monitor, sending them an error, and
self.context.cur_counterparty_commitment_transaction_number + 1,
&secret
).map_err(|_| ChannelError::Close("Failed to validate revocation from peer".to_owned()))?;
- }
+ },
+ // TODO (taproot|arik)
+ #[cfg(taproot)]
+ _ => todo!()
};
self.context.commitment_secrets.provide_secret(self.context.cur_counterparty_commitment_transaction_number + 1, msg.per_commitment_secret)
// (note that we may still fail to generate the new commitment_signed message, but that's
// OK, we step the channel here and *then* if the new generation fails we can fail the
// channel based on that, but stepping stuff here should be safe either way.
- self.context.channel_state &= !(ChannelState::AwaitingRemoteRevoke as u32);
+ self.context.channel_state.clear_awaiting_remote_revoke();
self.context.sent_message_awaiting_response = None;
self.context.counterparty_prev_commitment_point = self.context.counterparty_cur_commitment_point;
self.context.counterparty_cur_commitment_point = Some(msg.next_per_commitment_point);
}
}
- if (self.context.channel_state & ChannelState::MonitorUpdateInProgress as u32) == ChannelState::MonitorUpdateInProgress as u32 {
+ if self.context.channel_state.is_monitor_update_in_progress() {
// We can't actually generate a new commitment transaction (incl by freeing holding
// cells) while we can't update the monitor, so we just return what we have.
if require_commitment {
return None;
}
- if (self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::MonitorUpdateInProgress as u32)) != 0 {
+ if self.context.channel_state.is_awaiting_remote_revoke() || self.context.channel_state.is_monitor_update_in_progress() {
force_holding_cell = true;
}
/// completed.
/// May return `Err(())`, which implies [`ChannelContext::force_shutdown`] should be called immediately.
pub fn remove_uncommitted_htlcs_and_mark_paused<L: Deref>(&mut self, logger: &L) -> Result<(), ()> where L::Target: Logger {
- assert_eq!(self.context.channel_state & ChannelState::ShutdownComplete as u32, 0);
- if self.context.channel_state & !STATE_FLAGS < ChannelState::FundingSent as u32 {
- return Err(());
+ assert!(!matches!(self.context.channel_state, ChannelState::ShutdownComplete));
+ if self.context.channel_state.is_pre_funded_state() {
+ return Err(())
}
- if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == (ChannelState::PeerDisconnected as u32) {
+ if self.context.channel_state.is_peer_disconnected() {
// While the below code should be idempotent, it's simpler to just return early, as
// redundant disconnect events can fire, though they should be rare.
return Ok(());
self.context.sent_message_awaiting_response = None;
- self.context.channel_state |= ChannelState::PeerDisconnected as u32;
+ self.context.channel_state.set_peer_disconnected();
log_trace!(logger, "Peer disconnection resulted in {} remote-announced HTLC drops on channel {}", inbound_drop_count, &self.context.channel_id());
Ok(())
}
self.context.monitor_pending_forwards.append(&mut pending_forwards);
self.context.monitor_pending_failures.append(&mut pending_fails);
self.context.monitor_pending_finalized_fulfills.append(&mut pending_finalized_claimed_htlcs);
- self.context.channel_state |= ChannelState::MonitorUpdateInProgress as u32;
+ self.context.channel_state.set_monitor_update_in_progress();
}
/// Indicates that the latest ChannelMonitor update has been committed by the client
L::Target: Logger,
NS::Target: NodeSigner
{
- assert_eq!(self.context.channel_state & ChannelState::MonitorUpdateInProgress as u32, ChannelState::MonitorUpdateInProgress as u32);
- self.context.channel_state &= !(ChannelState::MonitorUpdateInProgress as u32);
+ assert!(self.context.channel_state.is_monitor_update_in_progress());
+ self.context.channel_state.clear_monitor_update_in_progress();
// If we're past (or at) the FundingSent stage on an outbound channel, try to
// (re-)broadcast the funding transaction as we may have declined to broadcast it when we
// first received the funding_signed.
let mut funding_broadcastable =
- if self.context.is_outbound() && self.context.channel_state & !STATE_FLAGS >= ChannelState::FundingSent as u32 && self.context.channel_state & ChannelState::WaitingForBatch as u32 == 0 {
+ if self.context.is_outbound() &&
+ (matches!(self.context.channel_state, ChannelState::FundingSent(flags) if !flags.is_set(FundingSentFlags::WAITING_FOR_BATCH)) ||
+ matches!(self.context.channel_state, ChannelState::ChannelReady(_)))
+ {
self.context.funding_transaction.take()
} else { None };
// That said, if the funding transaction is already confirmed (ie we're active with a
// minimum_depth over 0) don't bother re-broadcasting the confirmed funding tx.
- if self.context.channel_state & !STATE_FLAGS >= ChannelState::ChannelReady as u32 && self.context.minimum_depth != Some(0) {
+ if matches!(self.context.channel_state, ChannelState::ChannelReady(_)) && self.context.minimum_depth != Some(0) {
funding_broadcastable = None;
}
let mut finalized_claimed_htlcs = Vec::new();
mem::swap(&mut finalized_claimed_htlcs, &mut self.context.monitor_pending_finalized_fulfills);
- if self.context.channel_state & (ChannelState::PeerDisconnected as u32) != 0 {
+ if self.context.channel_state.is_peer_disconnected() {
self.context.monitor_pending_revoke_and_ack = false;
self.context.monitor_pending_commitment_signed = false;
return MonitorRestoreUpdates {
if self.context.is_outbound() {
return Err(ChannelError::Close("Non-funding remote tried to update channel fee".to_owned()));
}
- if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
+ if self.context.channel_state.is_peer_disconnected() {
return Err(ChannelError::Close("Peer sent update_fee when we needed a channel_reestablish".to_owned()));
}
Channel::<SP>::check_remote_fee(&self.context.channel_type, fee_estimator, msg.feerate_per_kw, Some(self.context.feerate_per_kw), logger)?;
cltv_expiry: htlc.cltv_expiry,
onion_routing_packet: (**onion_packet).clone(),
skimmed_fee_msat: htlc.skimmed_fee_msat,
+ blinding_point: htlc.blinding_point,
});
}
}
/// Gets the `Shutdown` message we should send our peer on reconnect, if any.
pub fn get_outbound_shutdown(&self) -> Option<msgs::Shutdown> {
- if self.context.channel_state & (ChannelState::LocalShutdownSent as u32) != 0 {
+ if self.context.channel_state.is_local_shutdown_sent() {
assert!(self.context.shutdown_scriptpubkey.is_some());
Some(msgs::Shutdown {
channel_id: self.context.channel_id,
L::Target: Logger,
NS::Target: NodeSigner
{
- if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == 0 {
+ if !self.context.channel_state.is_peer_disconnected() {
// While BOLT 2 doesn't indicate explicitly we should error this channel here, it
// almost certainly indicates we are going to end up out-of-sync in some way, so we
// just close here instead of trying to recover.
return Err(ChannelError::Close("Peer sent an invalid channel_reestablish to force close in a non-standard way".to_owned()));
}
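+ // Our commitment numbers count down from INITIAL_COMMITMENT_NUMBER, so convert our
+ // current holder commitment number to the counting-up form used on the wire.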
+ let our_commitment_transaction = INITIAL_COMMITMENT_NUMBER - self.context.cur_holder_commitment_transaction_number - 1;
if msg.next_remote_commitment_number > 0 {
let expected_point = self.context.holder_signer.as_ref().get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - msg.next_remote_commitment_number + 1, &self.context.secp_ctx);
let given_secret = SecretKey::from_slice(&msg.your_last_per_commitment_secret)
if expected_point != PublicKey::from_secret_key(&self.context.secp_ctx, &given_secret) {
return Err(ChannelError::Close("Peer sent a garbage channel_reestablish with secret key not matching the commitment height provided".to_owned()));
}
- if msg.next_remote_commitment_number > INITIAL_COMMITMENT_NUMBER - self.context.cur_holder_commitment_transaction_number {
+ if msg.next_remote_commitment_number > our_commitment_transaction {
macro_rules! log_and_panic {
($err_msg: expr) => {
log_error!(logger, $err_msg, &self.context.channel_id, log_pubkey!(self.context.counterparty_node_id));
// Before we change the state of the channel, we check if the peer is sending a very old
// commitment transaction number, if yes we send a warning message.
- let our_commitment_transaction = INITIAL_COMMITMENT_NUMBER - self.context.cur_holder_commitment_transaction_number - 1;
- if msg.next_remote_commitment_number + 1 < our_commitment_transaction {
- return Err(
- ChannelError::Warn(format!("Peer attempted to reestablish channel with a very old local commitment transaction: {} (received) vs {} (expected)", msg.next_remote_commitment_number, our_commitment_transaction))
- );
+ if msg.next_remote_commitment_number + 1 < our_commitment_transaction {
+ return Err(ChannelError::Warn(format!(
+ "Peer attempted to reestablish channel with a very old local commitment transaction: {} (received) vs {} (expected)",
+ msg.next_remote_commitment_number,
+ our_commitment_transaction
+ )));
}
// Go ahead and unmark PeerDisconnected as various calls we may make check for it (and all
// remaining cases either succeed or ErrorMessage-fail).
- self.context.channel_state &= !(ChannelState::PeerDisconnected as u32);
+ self.context.channel_state.clear_peer_disconnected();
self.context.sent_message_awaiting_response = None;
let shutdown_msg = self.get_outbound_shutdown();
let announcement_sigs = self.get_announcement_sigs(node_signer, chain_hash, user_config, best_block.height(), logger);
- if self.context.channel_state & (ChannelState::FundingSent as u32) == ChannelState::FundingSent as u32 {
+ if matches!(self.context.channel_state, ChannelState::FundingSent(_)) {
// If we're waiting on a monitor update, we shouldn't re-send any channel_ready's.
- if self.context.channel_state & (ChannelState::OurChannelReady as u32) == 0 ||
- self.context.channel_state & (ChannelState::MonitorUpdateInProgress as u32) != 0 {
+ if !self.context.channel_state.is_our_channel_ready() ||
+ self.context.channel_state.is_monitor_update_in_progress() {
if msg.next_remote_commitment_number != 0 {
return Err(ChannelError::Close("Peer claimed they saw a revoke_and_ack but we haven't sent channel_ready yet".to_owned()));
}
});
}
- let required_revoke = if msg.next_remote_commitment_number + 1 == INITIAL_COMMITMENT_NUMBER - self.context.cur_holder_commitment_transaction_number {
+ let required_revoke = if msg.next_remote_commitment_number == our_commitment_transaction {
// Remote isn't waiting on any RevokeAndACK from us!
// Note that if we need to repeat our ChannelReady we'll do that in the next if block.
None
- } else if msg.next_remote_commitment_number + 1 == (INITIAL_COMMITMENT_NUMBER - 1) - self.context.cur_holder_commitment_transaction_number {
- if self.context.channel_state & (ChannelState::MonitorUpdateInProgress as u32) != 0 {
+ } else if msg.next_remote_commitment_number + 1 == our_commitment_transaction {
+ if self.context.channel_state.is_monitor_update_in_progress() {
self.context.monitor_pending_revoke_and_ack = true;
None
} else {
Some(self.get_last_revoke_and_ack())
}
} else {
- return Err(ChannelError::Close("Peer attempted to reestablish channel with a very old local commitment transaction".to_owned()));
+ debug_assert!(false, "All values should have been handled in the four cases above");
+ return Err(ChannelError::Close(format!(
+ "Peer attempted to reestablish channel expecting a future local commitment transaction: {} (received) vs {} (expected)",
+ msg.next_remote_commitment_number,
+ our_commitment_transaction
+ )));
};
// We increment cur_counterparty_commitment_transaction_number only upon receipt of
// revoke_and_ack, not on sending commitment_signed, so we add one if we have
// AwaitingRemoteRevoke set, which indicates we sent a commitment_signed but haven't gotten
// the corresponding revoke_and_ack back yet.
- let is_awaiting_remote_revoke = self.context.channel_state & ChannelState::AwaitingRemoteRevoke as u32 != 0;
+ let is_awaiting_remote_revoke = self.context.channel_state.is_awaiting_remote_revoke();
if is_awaiting_remote_revoke && !self.is_awaiting_monitor_update() {
self.mark_awaiting_response();
}
log_debug!(logger, "Reconnected channel {} with only lost remote commitment tx", &self.context.channel_id());
}
- if self.context.channel_state & (ChannelState::MonitorUpdateInProgress as u32) != 0 {
+ if self.context.channel_state.is_monitor_update_in_progress() {
self.context.monitor_pending_commitment_signed = true;
Ok(ReestablishResponses {
channel_ready, shutdown_msg, announcement_sigs,
order: self.context.resend_order.clone(),
})
}
+ } else if msg.next_local_commitment_number < next_counterparty_commitment_number {
+ Err(ChannelError::Close(format!(
+ "Peer attempted to reestablish channel with a very old remote commitment transaction: {} (received) vs {} (expected)",
+ msg.next_local_commitment_number,
+ next_counterparty_commitment_number,
+ )))
} else {
- Err(ChannelError::Close("Peer attempted to reestablish channel with a very old remote commitment transaction".to_owned()))
+ Err(ChannelError::Close(format!(
+ "Peer attempted to reestablish channel with a future remote commitment transaction: {} (received) vs {} (expected)",
+ msg.next_local_commitment_number,
+ next_counterparty_commitment_number,
+ )))
}
}
max_fee_satoshis: our_max_fee,
}),
}), None, None))
- }
+ },
+ // TODO (taproot|arik)
+ #[cfg(taproot)]
+ _ => todo!()
}
}
&mut self, signer_provider: &SP, their_features: &InitFeatures, msg: &msgs::Shutdown
) -> Result<(Option<msgs::Shutdown>, Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>), ChannelError>
{
- if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
+ if self.context.channel_state.is_peer_disconnected() {
return Err(ChannelError::Close("Peer sent shutdown when we needed a channel_reestablish".to_owned()));
}
- if self.context.channel_state & !STATE_FLAGS < ChannelState::FundingSent as u32 {
+ if self.context.channel_state.is_pre_funded_state() {
// Spec says we should fail the connection, not the channel, but that's nonsense, there
// are plenty of reasons you may want to fail a channel pre-funding, and spec says you
// can do that via error message without getting a connection fail anyway...
return Err(ChannelError::Close("Got shutdown with remote pending HTLCs".to_owned()));
}
}
- assert_eq!(self.context.channel_state & ChannelState::ShutdownComplete as u32, 0);
+ assert!(!matches!(self.context.channel_state, ChannelState::ShutdownComplete));
if !script::is_bolt2_compliant(&msg.scriptpubkey, their_features) {
return Err(ChannelError::Warn(format!("Got a nonstandard scriptpubkey ({}) from remote peer", msg.scriptpubkey.to_hex_string())));
// If we have any LocalAnnounced updates we'll probably just get back an update_fail_htlc
// immediately after the commitment dance, but we can send a Shutdown because we won't send
// any further commitment updates after we set LocalShutdownSent.
- let send_shutdown = (self.context.channel_state & ChannelState::LocalShutdownSent as u32) != ChannelState::LocalShutdownSent as u32;
+ let send_shutdown = !self.context.channel_state.is_local_shutdown_sent();
let update_shutdown_script = match self.context.shutdown_scriptpubkey {
Some(_) => false,
// From here on out, we may not fail!
- self.context.channel_state |= ChannelState::RemoteShutdownSent as u32;
+ self.context.channel_state.set_remote_shutdown_sent();
self.context.update_time_counter += 1;
let monitor_update = if update_shutdown_script {
}
});
- self.context.channel_state |= ChannelState::LocalShutdownSent as u32;
+ self.context.channel_state.set_local_shutdown_sent();
self.context.update_time_counter += 1;
Ok((shutdown, monitor_update, dropped_outbound_htlcs))
-> Result<(Option<msgs::ClosingSigned>, Option<Transaction>, Option<ShutdownResult>), ChannelError>
where F::Target: FeeEstimator
{
- if self.context.channel_state & BOTH_SIDES_SHUTDOWN_MASK != BOTH_SIDES_SHUTDOWN_MASK {
+ if !self.context.channel_state.is_both_sides_shutdown() {
return Err(ChannelError::Close("Remote end sent us a closing_signed before both sides provided a shutdown".to_owned()));
}
- if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
+ if self.context.channel_state.is_peer_disconnected() {
return Err(ChannelError::Close("Peer sent closing_signed when we needed a channel_reestablish".to_owned()));
}
if !self.context.pending_inbound_htlcs.is_empty() || !self.context.pending_outbound_htlcs.is_empty() {
return Err(ChannelError::Close("Remote tried to send a closing_signed when we were supposed to propose the first one".to_owned()));
}
- if self.context.channel_state & ChannelState::MonitorUpdateInProgress as u32 != 0 {
+ if self.context.channel_state.is_monitor_update_in_progress() {
self.context.pending_counterparty_closing_signed = Some(msg.clone());
return Ok((None, None, None));
}
monitor_update: None,
dropped_outbound_htlcs: Vec::new(),
unbroadcasted_batch_funding_txid: self.context.unbroadcasted_batch_funding_txid(),
+ channel_id: self.context.channel_id,
+ counterparty_node_id: self.context.counterparty_node_id,
};
let tx = self.build_signed_closing_transaction(&mut closing_tx, &msg.signature, &sig);
- self.context.channel_state = ChannelState::ShutdownComplete as u32;
+ self.context.channel_state = ChannelState::ShutdownComplete;
self.context.update_time_counter += 1;
return Ok((None, Some(tx), Some(shutdown_result)));
}
monitor_update: None,
dropped_outbound_htlcs: Vec::new(),
unbroadcasted_batch_funding_txid: self.context.unbroadcasted_batch_funding_txid(),
+ channel_id: self.context.channel_id,
+ counterparty_node_id: self.context.counterparty_node_id,
};
- self.context.channel_state = ChannelState::ShutdownComplete as u32;
+ self.context.channel_state = ChannelState::ShutdownComplete;
self.context.update_time_counter += 1;
let tx = self.build_signed_closing_transaction(&closing_tx, &msg.signature, &sig);
(Some(tx), Some(shutdown_result))
max_fee_satoshis: our_max_fee,
}),
}), signed_tx, shutdown_result))
- }
+ },
+ // TODO (taproot|arik)
+ #[cfg(taproot)]
+ _ => todo!()
}
}
}
}
pub fn get_cur_counterparty_commitment_transaction_number(&self) -> u64 {
- self.context.cur_counterparty_commitment_transaction_number + 1 - if self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32) != 0 { 1 } else { 0 }
+ self.context.cur_counterparty_commitment_transaction_number + 1 - if self.context.channel_state.is_awaiting_remote_revoke() { 1 } else { 0 }
}
pub fn get_revoked_counterparty_commitment_transaction_number(&self) -> u64 {
}
#[cfg(test)]
- pub fn get_signer(&self) -> &ChannelSignerType<<SP::Target as SignerProvider>::Signer> {
+ pub fn get_signer(&self) -> &ChannelSignerType<SP> {
&self.context.holder_signer
}
/// Returns true if this channel has been marked as awaiting a monitor update to move forward.
/// Allowed in any state (including after shutdown)
pub fn is_awaiting_monitor_update(&self) -> bool {
- (self.context.channel_state & ChannelState::MonitorUpdateInProgress as u32) != 0
+ self.context.channel_state.is_monitor_update_in_progress()
}
/// Gets the latest [`ChannelMonitorUpdate`] ID which has been released and is in-flight.
/// advanced state.
pub fn is_awaiting_initial_mon_persist(&self) -> bool {
if !self.is_awaiting_monitor_update() { return false; }
- if self.context.channel_state &
- !(ChannelState::TheirChannelReady as u32 | ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32 | ChannelState::WaitingForBatch as u32)
- == ChannelState::FundingSent as u32 {
+ if matches!(
+ self.context.channel_state, ChannelState::FundingSent(flags)
+ if (flags & !(FundingSentFlags::THEIR_CHANNEL_READY | FundedStateFlags::PEER_DISCONNECTED | FundedStateFlags::MONITOR_UPDATE_IN_PROGRESS | FundingSentFlags::WAITING_FOR_BATCH)).is_empty()
+ ) {
// If we're not a 0conf channel, we'll be waiting on a monitor update with only
// FundingSent set, though our peer could have sent their channel_ready.
debug_assert!(self.context.minimum_depth.unwrap_or(1) > 0);
/// Returns true if our channel_ready has been sent
pub fn is_our_channel_ready(&self) -> bool {
- (self.context.channel_state & ChannelState::OurChannelReady as u32) != 0 || self.context.channel_state & !STATE_FLAGS >= ChannelState::ChannelReady as u32
+ matches!(self.context.channel_state, ChannelState::FundingSent(flags) if flags.is_set(FundingSentFlags::OUR_CHANNEL_READY)) ||
+ matches!(self.context.channel_state, ChannelState::ChannelReady(_))
}
/// Returns true if our peer has either initiated or agreed to shut down the channel.
pub fn received_shutdown(&self) -> bool {
- (self.context.channel_state & ChannelState::RemoteShutdownSent as u32) != 0
+ self.context.channel_state.is_remote_shutdown_sent()
}
/// Returns true if we either initiated or agreed to shut down the channel.
pub fn sent_shutdown(&self) -> bool {
- (self.context.channel_state & ChannelState::LocalShutdownSent as u32) != 0
+ self.context.channel_state.is_local_shutdown_sent()
}
/// Returns true if this channel is fully shut down. True here implies that no further actions
/// may/will be taken on this channel, and thus this object should be freed. Any future changes
/// will be handled appropriately by the chain monitor.
pub fn is_shutdown(&self) -> bool {
- if (self.context.channel_state & ChannelState::ShutdownComplete as u32) == ChannelState::ShutdownComplete as u32 {
- assert!(self.context.channel_state == ChannelState::ShutdownComplete as u32);
- true
- } else { false }
+ matches!(self.context.channel_state, ChannelState::ShutdownComplete)
}
pub fn channel_update_status(&self) -> ChannelUpdateStatus {
// Note that we don't include ChannelState::WaitingForBatch as we don't want to send
// channel_ready until the entire batch is ready.
- let non_shutdown_state = self.context.channel_state & (!MULTI_STATE_FLAGS);
- let need_commitment_update = if non_shutdown_state == ChannelState::FundingSent as u32 {
- self.context.channel_state |= ChannelState::OurChannelReady as u32;
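+ // A bare FundingSent (ignoring the flags shared by all funded states) means we just
+ // reached the required confirmation count, so mark our channel_ready as sent; if the
+ // peer's channel_ready already arrived we can move straight to ChannelReady.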
+ let need_commitment_update = if matches!(self.context.channel_state, ChannelState::FundingSent(f) if (f & !FundedStateFlags::ALL).is_empty()) {
+ self.context.channel_state.set_our_channel_ready();
true
- } else if non_shutdown_state == (ChannelState::FundingSent as u32 | ChannelState::TheirChannelReady as u32) {
- self.context.channel_state = ChannelState::ChannelReady as u32 | (self.context.channel_state & MULTI_STATE_FLAGS);
+ } else if matches!(self.context.channel_state, ChannelState::FundingSent(f) if f & !FundedStateFlags::ALL == FundingSentFlags::THEIR_CHANNEL_READY) {
+ self.context.channel_state = ChannelState::ChannelReady(self.context.channel_state.with_funded_state_flags_mask().into());
self.context.update_time_counter += 1;
true
- } else if non_shutdown_state == (ChannelState::FundingSent as u32 | ChannelState::OurChannelReady as u32) {
+ } else if matches!(self.context.channel_state, ChannelState::FundingSent(f) if f & !FundedStateFlags::ALL == FundingSentFlags::OUR_CHANNEL_READY) {
// We got a reorg but not enough to trigger a force close, just ignore.
false
} else {
- if self.context.funding_tx_confirmation_height != 0 && self.context.channel_state & !STATE_FLAGS < ChannelState::ChannelReady as u32 {
+ if self.context.funding_tx_confirmation_height != 0 &&
+ self.context.channel_state < ChannelState::ChannelReady(ChannelReadyFlags::new())
+ {
// We should never see a funding transaction on-chain until we've received
// funding_signed (if we're an outbound channel), or seen funding_generated (if we're
// an inbound channel - before that we have no known funding TXID). The fuzzer,
#[cfg(not(fuzzing))]
panic!("Started confirming a channel in a state pre-FundingSent: {}.\n\
Do NOT broadcast a funding transaction manually - let LDK do it for you!",
- self.context.channel_state);
+ self.context.channel_state.to_u32());
}
// We got a reorg but not enough to trigger a force close, just ignore.
false
};
if need_commitment_update {
- if self.context.channel_state & (ChannelState::MonitorUpdateInProgress as u32) == 0 {
- if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == 0 {
+ if !self.context.channel_state.is_monitor_update_in_progress() {
+ if !self.context.channel_state.is_peer_disconnected() {
let next_per_commitment_point =
self.context.holder_signer.as_ref().get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - 1, &self.context.secp_ctx);
return Some(msgs::ChannelReady {
return Ok((Some(channel_ready), timed_out_htlcs, announcement_sigs));
}
- let non_shutdown_state = self.context.channel_state & (!MULTI_STATE_FLAGS);
- if non_shutdown_state & !STATE_FLAGS >= ChannelState::ChannelReady as u32 ||
- (non_shutdown_state & ChannelState::OurChannelReady as u32) == ChannelState::OurChannelReady as u32 {
+ if matches!(self.context.channel_state, ChannelState::ChannelReady(_)) ||
+ self.context.channel_state.is_our_channel_ready() {
let mut funding_tx_confirmations = height as i64 - self.context.funding_tx_confirmation_height as i64 + 1;
if self.context.funding_tx_confirmation_height == 0 {
// Note that check_get_channel_ready may reset funding_tx_confirmation_height to
height >= self.context.channel_creation_height + FUNDING_CONF_DEADLINE_BLOCKS {
log_info!(logger, "Closing channel {} due to funding timeout", &self.context.channel_id);
// If funding_tx_confirmed_in is unset, the channel must not be active
- assert!(non_shutdown_state & !STATE_FLAGS <= ChannelState::ChannelReady as u32);
- assert_eq!(non_shutdown_state & ChannelState::OurChannelReady as u32, 0);
+ assert!(self.context.channel_state <= ChannelState::ChannelReady(ChannelReadyFlags::new()));
+ assert!(!self.context.channel_state.is_our_channel_ready());
return Err(ClosureReason::FundingTimedOut);
}
return None;
}
- if self.context.channel_state & ChannelState::PeerDisconnected as u32 != 0 {
+ if self.context.channel_state.is_peer_disconnected() {
log_trace!(logger, "Cannot create an announcement_signatures as our peer is disconnected");
return None;
}
node_signature: our_node_sig,
bitcoin_signature: our_bitcoin_sig,
})
- }
+ },
+ // TODO (taproot|arik)
+ #[cfg(taproot)]
+ _ => todo!()
}
}
bitcoin_signature_2: if were_node_one { their_bitcoin_sig } else { our_bitcoin_sig },
contents: announcement,
})
- }
+ },
+ // TODO (taproot|arik)
+ #[cfg(taproot)]
+ _ => todo!()
}
} else {
Err(ChannelError::Ignore("Attempted to sign channel announcement before we'd received announcement_signatures".to_string()))
/// May panic if called on a channel that wasn't immediately-previously
/// self.remove_uncommitted_htlcs_and_mark_paused()'d
pub fn get_channel_reestablish<L: Deref>(&mut self, logger: &L) -> msgs::ChannelReestablish where L::Target: Logger {
- assert_eq!(self.context.channel_state & ChannelState::PeerDisconnected as u32, ChannelState::PeerDisconnected as u32);
+ assert!(self.context.channel_state.is_peer_disconnected());
assert_ne!(self.context.cur_counterparty_commitment_transaction_number, INITIAL_COMMITMENT_NUMBER);
// Prior to static_remotekey, my_current_per_commitment_point was critical to claiming
// current to_remote balances. However, it no longer has any use, and thus is now simply
pub fn queue_add_htlc<F: Deref, L: Deref>(
&mut self, amount_msat: u64, payment_hash: PaymentHash, cltv_expiry: u32, source: HTLCSource,
onion_routing_packet: msgs::OnionPacket, skimmed_fee_msat: Option<u64>,
- fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
+ blinding_point: Option<PublicKey>, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
) -> Result<(), ChannelError>
where F::Target: FeeEstimator, L::Target: Logger
{
self
.send_htlc(amount_msat, payment_hash, cltv_expiry, source, onion_routing_packet, true,
- skimmed_fee_msat, fee_estimator, logger)
+ skimmed_fee_msat, blinding_point, fee_estimator, logger)
.map(|msg_opt| assert!(msg_opt.is_none(), "We forced holding cell?"))
.map_err(|err| {
if let ChannelError::Ignore(_) = err { /* fine */ }
fn send_htlc<F: Deref, L: Deref>(
&mut self, amount_msat: u64, payment_hash: PaymentHash, cltv_expiry: u32, source: HTLCSource,
onion_routing_packet: msgs::OnionPacket, mut force_holding_cell: bool,
- skimmed_fee_msat: Option<u64>, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
+ skimmed_fee_msat: Option<u64>, blinding_point: Option<PublicKey>,
+ fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
) -> Result<Option<msgs::UpdateAddHTLC>, ChannelError>
where F::Target: FeeEstimator, L::Target: Logger
{
- if (self.context.channel_state & (ChannelState::ChannelReady as u32 | BOTH_SIDES_SHUTDOWN_MASK)) != (ChannelState::ChannelReady as u32) {
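+ // HTLCs may only be offered once the channel is fully established and neither side
+ // has sent shutdown.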
+ if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) ||
+ self.context.channel_state.is_local_shutdown_sent() ||
+ self.context.channel_state.is_remote_shutdown_sent()
+ {
return Err(ChannelError::Ignore("Cannot send HTLC until channel is fully established and we haven't started shutting down".to_owned()));
}
let channel_total_msat = self.context.channel_value_satoshis * 1000;
available_balances.next_outbound_htlc_limit_msat)));
}
- if (self.context.channel_state & (ChannelState::PeerDisconnected as u32)) != 0 {
+ if self.context.channel_state.is_peer_disconnected() {
// Note that this should never really happen: if we're !is_live() on receipt of an
// incoming HTLC for relay, we will reject the HTLC, and we won't allow
// the user to send directly into a !is_live() channel. However, if we
return Err(ChannelError::Ignore("Cannot send an HTLC while disconnected from channel counterparty".to_owned()));
}
- let need_holding_cell = (self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::MonitorUpdateInProgress as u32)) != 0;
+ let need_holding_cell = self.context.channel_state.should_force_holding_cell();
log_debug!(logger, "Pushing new outbound HTLC with hash {} for {} msat {}",
payment_hash, amount_msat,
if force_holding_cell { "into holding cell" }
source,
onion_routing_packet,
skimmed_fee_msat,
+ blinding_point,
});
return Ok(None);
}
cltv_expiry,
state: OutboundHTLCState::LocalAnnounced(Box::new(onion_routing_packet.clone())),
source,
+ blinding_point,
skimmed_fee_msat,
});
cltv_expiry,
onion_routing_packet,
skimmed_fee_msat,
+ blinding_point,
};
self.context.next_holder_htlc_id += 1;
to_countersignatory_value_sat: Some(counterparty_commitment_tx.to_countersignatory_value_sat()),
}]
};
- self.context.channel_state |= ChannelState::AwaitingRemoteRevoke as u32;
+ self.context.channel_state.set_awaiting_remote_revoke();
monitor_update
}
htlcs.push(htlc);
}
- let res = ecdsa.sign_counterparty_commitment(&commitment_stats.tx, commitment_stats.preimages, &self.context.secp_ctx)
- .map_err(|_| ChannelError::Ignore("Failed to get signatures for new commitment_signed".to_owned()))?;
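+ // Hand the signer the preimages we know for both inbound and outbound HTLCs in this
+ // commitment.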
+ let res = ecdsa.sign_counterparty_commitment(
+ &commitment_stats.tx,
+ commitment_stats.inbound_htlc_preimages,
+ commitment_stats.outbound_htlc_preimages,
+ &self.context.secp_ctx,
+ ).map_err(|_| ChannelError::Ignore("Failed to get signatures for new commitment_signed".to_owned()))?;
signature = res.0;
htlc_signatures = res.1;
log_trace!(logger, "Signed remote HTLC tx {} with redeemscript {} with pubkey {} -> {} in channel {}",
encode::serialize_hex(&chan_utils::build_htlc_transaction(&counterparty_commitment_txid, commitment_stats.feerate_per_kw, self.context.get_holder_selected_contest_delay(), htlc, &self.context.channel_type, &counterparty_keys.broadcaster_delayed_payment_key, &counterparty_keys.revocation_key)),
encode::serialize_hex(&chan_utils::get_htlc_redeemscript(&htlc, &self.context.channel_type, &counterparty_keys)),
- log_bytes!(counterparty_keys.broadcaster_htlc_key.serialize()),
+ log_bytes!(counterparty_keys.broadcaster_htlc_key.to_public_key().serialize()),
log_bytes!(htlc_sig.serialize_compact()[..]), &self.context.channel_id());
}
}
#[cfg(taproot)]
partial_signature_with_nonce: None,
}, (counterparty_commitment_txid, commitment_stats.htlcs_included)))
- }
+ },
+ // TODO (taproot|arik)
+ #[cfg(taproot)]
+ _ => todo!()
}
}
where F::Target: FeeEstimator, L::Target: Logger
{
let send_res = self.send_htlc(amount_msat, payment_hash, cltv_expiry, source,
- onion_routing_packet, false, skimmed_fee_msat, fee_estimator, logger);
+ onion_routing_packet, false, skimmed_fee_msat, None, fee_estimator, logger);
if let Err(e) = &send_res { if let ChannelError::Ignore(_) = e {} else { debug_assert!(false, "Sending cannot trigger channel failure"); } }
match send_res? {
Some(_) => {
return Err(APIError::APIMisuseError{err: "Cannot begin shutdown with pending HTLCs. Process pending events first".to_owned()});
}
}
- if self.context.channel_state & BOTH_SIDES_SHUTDOWN_MASK != 0 {
- if (self.context.channel_state & ChannelState::LocalShutdownSent as u32) == ChannelState::LocalShutdownSent as u32 {
- return Err(APIError::APIMisuseError{err: "Shutdown already in progress".to_owned()});
- }
- else if (self.context.channel_state & ChannelState::RemoteShutdownSent as u32) == ChannelState::RemoteShutdownSent as u32 {
- return Err(APIError::ChannelUnavailable{err: "Shutdown already in progress by remote".to_owned()});
- }
+ if self.context.channel_state.is_local_shutdown_sent() {
+ return Err(APIError::APIMisuseError{err: "Shutdown already in progress".to_owned()});
+ }
+ else if self.context.channel_state.is_remote_shutdown_sent() {
+ return Err(APIError::ChannelUnavailable{err: "Shutdown already in progress by remote".to_owned()});
}
if self.context.shutdown_scriptpubkey.is_some() && override_shutdown_script.is_some() {
return Err(APIError::APIMisuseError{err: "Cannot override shutdown script for a channel with one already set".to_owned()});
}
- assert_eq!(self.context.channel_state & ChannelState::ShutdownComplete as u32, 0);
- if self.context.channel_state & (ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32) != 0 {
+ assert!(!matches!(self.context.channel_state, ChannelState::ShutdownComplete));
+ if self.context.channel_state.is_peer_disconnected() || self.context.channel_state.is_monitor_update_in_progress() {
return Err(APIError::ChannelUnavailable{err: "Cannot begin shutdown while peer is disconnected or we're waiting on a monitor update, maybe force-close instead?".to_owned()});
}
// If we haven't funded the channel yet, we don't need to bother ensuring the shutdown
// script is set, we just force-close and call it a day.
let mut chan_closed = false;
- if self.context.channel_state & !STATE_FLAGS < ChannelState::FundingSent as u32 {
+ if self.context.channel_state.is_pre_funded_state() {
chan_closed = true;
}
// From here on out, we may not fail!
self.context.target_closing_feerate_sats_per_kw = target_feerate_sats_per_kw;
- let shutdown_result = if self.context.channel_state & !STATE_FLAGS < ChannelState::FundingSent as u32 {
+ let shutdown_result = if self.context.channel_state.is_pre_funded_state() {
let shutdown_result = ShutdownResult {
monitor_update: None,
dropped_outbound_htlcs: Vec::new(),
unbroadcasted_batch_funding_txid: self.context.unbroadcasted_batch_funding_txid(),
+ channel_id: self.context.channel_id,
+ counterparty_node_id: self.context.counterparty_node_id,
};
- self.context.channel_state = ChannelState::ShutdownComplete as u32;
+ self.context.channel_state = ChannelState::ShutdownComplete;
Some(shutdown_result)
} else {
- self.context.channel_state |= ChannelState::LocalShutdownSent as u32;
+ self.context.channel_state.set_local_shutdown_sent();
None
};
self.context.update_time_counter += 1;
}
}
- let destination_script = match signer_provider.get_destination_script() {
+ let destination_script = match signer_provider.get_destination_script(channel_keys_id) {
Ok(script) => script,
Err(_) => return Err(APIError::ChannelUnavailable { err: "Failed to get destination script".to_owned()}),
};
channel_id: temporary_channel_id,
temporary_channel_id: Some(temporary_channel_id),
- channel_state: ChannelState::OurInitSent as u32,
+ channel_state: ChannelState::NegotiatingFunding(NegotiatingFundingFlags::OUR_INIT_SENT),
announcement_sigs_state: AnnouncementSigsState::NotSent,
secp_ctx,
channel_value_satoshis,
if !self.context.is_outbound() {
panic!("Tried to create outbound funding_created message on an inbound channel!");
}
- if self.context.channel_state != (ChannelState::OurInitSent as u32 | ChannelState::TheirInitSent as u32) {
+ if !matches!(
+ self.context.channel_state, ChannelState::NegotiatingFunding(flags)
+ if flags == (NegotiatingFundingFlags::OUR_INIT_SENT | NegotiatingFundingFlags::THEIR_INIT_SENT)
+ ) {
panic!("Tried to get a funding_created messsage at a time other than immediately after initial handshake completion (or tried to get funding_created twice)");
}
if self.context.commitment_secrets.get_min_seen_secret() != (1 << 48) ||
// Now that we're past error-generating stuff, update our local state:
- self.context.channel_state = ChannelState::FundingCreated as u32;
+ self.context.channel_state = ChannelState::FundingCreated;
self.context.channel_id = funding_txo.to_channel_id();
// If the funding transaction is a coinbase transaction, we need to set the minimum depth to 100.
where
F::Target: FeeEstimator
{
- if !self.context.is_outbound() || self.context.channel_state != ChannelState::OurInitSent as u32 { return Err(()); }
+ if !self.context.is_outbound() ||
+ !matches!(
+ self.context.channel_state, ChannelState::NegotiatingFunding(flags)
+ if flags == NegotiatingFundingFlags::OUR_INIT_SENT
+ )
+ {
+ return Err(());
+ }
if self.context.channel_type == ChannelTypeFeatures::only_static_remote_key() {
// We've exhausted our options
return Err(());
if !self.context.is_outbound() {
panic!("Tried to open a channel for an inbound channel?");
}
- if self.context.channel_state != ChannelState::OurInitSent as u32 {
+ if self.context.have_received_message() {
panic!("Cannot generate an open_channel after we've moved forward");
}
to_self_delay: self.context.get_holder_selected_contest_delay(),
max_accepted_htlcs: self.context.holder_max_accepted_htlcs,
funding_pubkey: keys.funding_pubkey,
- revocation_basepoint: keys.revocation_basepoint,
+ revocation_basepoint: keys.revocation_basepoint.to_public_key(),
payment_point: keys.payment_point,
- delayed_payment_basepoint: keys.delayed_payment_basepoint,
- htlc_basepoint: keys.htlc_basepoint,
+ delayed_payment_basepoint: keys.delayed_payment_basepoint.to_public_key(),
+ htlc_basepoint: keys.htlc_basepoint.to_public_key(),
first_per_commitment_point,
channel_flags: if self.context.config.announced_channel {1} else {0},
shutdown_scriptpubkey: Some(match &self.context.shutdown_scriptpubkey {
if !self.context.is_outbound() {
return Err(ChannelError::Close("Got an accept_channel message from an inbound peer".to_owned()));
}
- if self.context.channel_state != ChannelState::OurInitSent as u32 {
+ if !matches!(self.context.channel_state, ChannelState::NegotiatingFunding(flags) if flags == NegotiatingFundingFlags::OUR_INIT_SENT) {
return Err(ChannelError::Close("Got an accept_channel message at a strange time".to_owned()));
}
if msg.dust_limit_satoshis > 21000000 * 100000000 {
let counterparty_pubkeys = ChannelPublicKeys {
funding_pubkey: msg.funding_pubkey,
- revocation_basepoint: msg.revocation_basepoint,
+ revocation_basepoint: RevocationBasepoint::from(msg.revocation_basepoint),
payment_point: msg.payment_point,
- delayed_payment_basepoint: msg.delayed_payment_basepoint,
- htlc_basepoint: msg.htlc_basepoint
+ delayed_payment_basepoint: DelayedPaymentBasepoint::from(msg.delayed_payment_basepoint),
+ htlc_basepoint: HtlcBasepoint::from(msg.htlc_basepoint)
};
self.context.channel_transaction_parameters.counterparty_parameters = Some(CounterpartyChannelTransactionParameters {
self.context.counterparty_cur_commitment_point = Some(msg.first_per_commitment_point);
self.context.counterparty_shutdown_scriptpubkey = counterparty_shutdown_scriptpubkey;
- self.context.channel_state = ChannelState::OurInitSent as u32 | ChannelState::TheirInitSent as u32;
+ self.context.channel_state = ChannelState::NegotiatingFunding(
+ NegotiatingFundingFlags::OUR_INIT_SENT | NegotiatingFundingFlags::THEIR_INIT_SENT
+ );
self.context.inbound_handshake_limits_override = None; // We're done enforcing limits on our peer's handshake now.
Ok(())
F::Target: FeeEstimator,
L::Target: Logger,
{
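+ // Tag log records in this scope with the counterparty node id and the temporary
+ // channel id.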
+ let logger = WithContext::from(logger, Some(counterparty_node_id), Some(msg.temporary_channel_id));
let announced_channel = (msg.channel_flags & 1) == 1;
// First check the channel type is known, failing before we do anything else if we don't
let pubkeys = holder_signer.pubkeys().clone();
let counterparty_pubkeys = ChannelPublicKeys {
funding_pubkey: msg.funding_pubkey,
- revocation_basepoint: msg.revocation_basepoint,
+ revocation_basepoint: RevocationBasepoint::from(msg.revocation_basepoint),
payment_point: msg.payment_point,
- delayed_payment_basepoint: msg.delayed_payment_basepoint,
- htlc_basepoint: msg.htlc_basepoint
+ delayed_payment_basepoint: DelayedPaymentBasepoint::from(msg.delayed_payment_basepoint),
+ htlc_basepoint: HtlcBasepoint::from(msg.htlc_basepoint)
};
if config.channel_handshake_config.our_to_self_delay < BREAKDOWN_TIMEOUT {
if msg.htlc_minimum_msat >= full_channel_value_msat {
return Err(ChannelError::Close(format!("Minimum htlc value ({}) was larger than full channel value ({})", msg.htlc_minimum_msat, full_channel_value_msat)));
}
- Channel::<SP>::check_remote_fee(&channel_type, fee_estimator, msg.feerate_per_kw, None, logger)?;
+ Channel::<SP>::check_remote_fee(&channel_type, fee_estimator, msg.feerate_per_kw, None, &&logger)?;
let max_counterparty_selected_contest_delay = u16::min(config.channel_handshake_limits.their_to_self_delay, MAX_LOCAL_BREAKDOWN_TIMEOUT);
if msg.to_self_delay > max_counterparty_selected_contest_delay {
}
}
- let destination_script = match signer_provider.get_destination_script() {
+ let destination_script = match signer_provider.get_destination_script(channel_keys_id) {
Ok(script) => script,
Err(_) => return Err(ChannelError::Close("Failed to get destination script".to_owned())),
};
temporary_channel_id: Some(msg.temporary_channel_id),
channel_id: msg.temporary_channel_id,
- channel_state: (ChannelState::OurInitSent as u32) | (ChannelState::TheirInitSent as u32),
+ channel_state: ChannelState::NegotiatingFunding(
+ NegotiatingFundingFlags::OUR_INIT_SENT | NegotiatingFundingFlags::THEIR_INIT_SENT
+ ),
announcement_sigs_state: AnnouncementSigsState::NotSent,
secp_ctx,
if self.context.is_outbound() {
panic!("Tried to send accept_channel for an outbound channel?");
}
- if self.context.channel_state != (ChannelState::OurInitSent as u32) | (ChannelState::TheirInitSent as u32) {
+ if !matches!(
+ self.context.channel_state, ChannelState::NegotiatingFunding(flags)
+ if flags == (NegotiatingFundingFlags::OUR_INIT_SENT | NegotiatingFundingFlags::THEIR_INIT_SENT)
+ ) {
panic!("Tried to send accept_channel after channel had moved forward");
}
if self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
to_self_delay: self.context.get_holder_selected_contest_delay(),
max_accepted_htlcs: self.context.holder_max_accepted_htlcs,
funding_pubkey: keys.funding_pubkey,
- revocation_basepoint: keys.revocation_basepoint,
+ revocation_basepoint: keys.revocation_basepoint.to_public_key(),
payment_point: keys.payment_point,
- delayed_payment_basepoint: keys.delayed_payment_basepoint,
- htlc_basepoint: keys.htlc_basepoint,
+ delayed_payment_basepoint: keys.delayed_payment_basepoint.to_public_key(),
+ htlc_basepoint: keys.htlc_basepoint.to_public_key(),
first_per_commitment_point,
shutdown_scriptpubkey: Some(match &self.context.shutdown_scriptpubkey {
Some(script) => script.clone().into_inner(),
pub fn funding_created<L: Deref>(
mut self, msg: &msgs::FundingCreated, best_block: BestBlock, signer_provider: &SP, logger: &L
- ) -> Result<(Channel<SP>, Option<msgs::FundingSigned>, ChannelMonitor<<SP::Target as SignerProvider>::Signer>), (Self, ChannelError)>
+ ) -> Result<(Channel<SP>, Option<msgs::FundingSigned>, ChannelMonitor<<SP::Target as SignerProvider>::EcdsaSigner>), (Self, ChannelError)>
where
L::Target: Logger
{
if self.context.is_outbound() {
return Err((self, ChannelError::Close("Received funding_created for an outbound channel?".to_owned())));
}
- if self.context.channel_state != (ChannelState::OurInitSent as u32 | ChannelState::TheirInitSent as u32) {
+ if !matches!(
+ self.context.channel_state, ChannelState::NegotiatingFunding(flags)
+ if flags == (NegotiatingFundingFlags::OUR_INIT_SENT | NegotiatingFundingFlags::THEIR_INIT_SENT)
+ ) {
// BOLT 2 says that if we disconnect before we send funding_signed we SHOULD NOT
// remember the channel, so it's safe to just send an error_message here and drop the
// channel.
// Now that we're past error-generating stuff, update our local state:
- self.context.channel_state = ChannelState::FundingSent as u32;
+ self.context.channel_state = ChannelState::FundingSent(FundingSentFlags::new());
self.context.channel_id = funding_txo.to_channel_id();
self.context.cur_counterparty_commitment_transaction_number -= 1;
self.context.cur_holder_commitment_transaction_number -= 1;
funding_redeemscript.clone(), self.context.channel_value_satoshis,
obscure_factor,
holder_commitment_tx, best_block, self.context.counterparty_node_id);
-
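+ // Wrap the logger so records from the initial-commitment call below carry the new
+ // monitor's channel context.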
+ let logger_with_chan_monitor = WithChannelMonitor::from(logger, &channel_monitor);
channel_monitor.provide_initial_counterparty_commitment_tx(
counterparty_initial_commitment_tx.trust().txid(), Vec::new(),
self.context.cur_counterparty_commitment_transaction_number + 1,
self.context.counterparty_cur_commitment_point.unwrap(), self.context.feerate_per_kw,
counterparty_initial_commitment_tx.to_broadcaster_value_sat(),
- counterparty_initial_commitment_tx.to_countersignatory_value_sat(), logger);
+ counterparty_initial_commitment_tx.to_countersignatory_value_sat(), &&logger_with_chan_monitor);
log_info!(logger, "{} funding_signed for peer for channel {}",
if funding_signed.is_some() { "Generated" } else { "Waiting for signature on" }, &self.context.channel_id());
writer.write_all(&[0; 8])?;
self.context.channel_id.write(writer)?;
- (self.context.channel_state | ChannelState::PeerDisconnected as u32).write(writer)?;
+ {
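+ // A channel reloaded from disk always starts out peer-disconnected, but only the
+ // funded states carry a PeerDisconnected flag, so set it for those states alone.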
+ let mut channel_state = self.context.channel_state;
+ if matches!(channel_state, ChannelState::FundingSent(_) | ChannelState::ChannelReady(_)) {
+ channel_state.set_peer_disconnected();
+ }
+ channel_state.to_u32().write(writer)?;
+ }
self.context.channel_value_satoshis.write(writer)?;
self.context.latest_monitor_update_id.write(writer)?;
let mut preimages: Vec<&Option<PaymentPreimage>> = vec![];
let mut pending_outbound_skimmed_fees: Vec<Option<u64>> = Vec::new();
+ let mut pending_outbound_blinding_points: Vec<Option<PublicKey>> = Vec::new();
(self.context.pending_outbound_htlcs.len() as u64).write(writer)?;
for (idx, htlc) in self.context.pending_outbound_htlcs.iter().enumerate() {
} else if !pending_outbound_skimmed_fees.is_empty() {
pending_outbound_skimmed_fees.push(None);
}
+ pending_outbound_blinding_points.push(htlc.blinding_point);
}
let mut holding_cell_skimmed_fees: Vec<Option<u64>> = Vec::new();
+ let mut holding_cell_blinding_points: Vec<Option<PublicKey>> = Vec::new();
(self.context.holding_cell_htlc_updates.len() as u64).write(writer)?;
for (idx, update) in self.context.holding_cell_htlc_updates.iter().enumerate() {
match update {
&HTLCUpdateAwaitingACK::AddHTLC {
ref amount_msat, ref cltv_expiry, ref payment_hash, ref source, ref onion_routing_packet,
- skimmed_fee_msat,
+ blinding_point, skimmed_fee_msat,
} => {
0u8.write(writer)?;
amount_msat.write(writer)?;
}
holding_cell_skimmed_fees.push(Some(skimmed_fee));
} else if !holding_cell_skimmed_fees.is_empty() { holding_cell_skimmed_fees.push(None); }
+
+ holding_cell_blinding_points.push(blinding_point);
},
&HTLCUpdateAwaitingACK::ClaimHTLC { ref payment_preimage, ref htlc_id } => {
1u8.write(writer)?;
(35, pending_outbound_skimmed_fees, optional_vec),
(37, holding_cell_skimmed_fees, optional_vec),
(38, self.context.is_batch_funding, option),
+ (39, pending_outbound_blinding_points, optional_vec),
+ (41, holding_cell_blinding_points, optional_vec),
});
Ok(())
}
let channel_id = Readable::read(reader)?;
- let channel_state = Readable::read(reader)?;
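+ // Reject serialized state bits that don't decode to a known ChannelState.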
+ let channel_state = ChannelState::from_u32(Readable::read(reader)?).map_err(|_| DecodeError::InvalidValue)?;
let channel_value_satoshis = Readable::read(reader)?;
let latest_monitor_update_id = Readable::read(reader)?;
_ => return Err(DecodeError::InvalidValue),
},
skimmed_fee_msat: None,
+ blinding_point: None,
});
}
source: Readable::read(reader)?,
onion_routing_packet: Readable::read(reader)?,
skimmed_fee_msat: None,
+ blinding_point: None,
},
1 => HTLCUpdateAwaitingACK::ClaimHTLC {
payment_preimage: Readable::read(reader)?,
let mut is_batch_funding: Option<()> = None;
+ let mut pending_outbound_blinding_points_opt: Option<Vec<Option<PublicKey>>> = None;
+ let mut holding_cell_blinding_points_opt: Option<Vec<Option<PublicKey>>> = None;
+
read_tlv_fields!(reader, {
(0, announcement_sigs, option),
(1, minimum_depth, option),
(35, pending_outbound_skimmed_fees_opt, optional_vec),
(37, holding_cell_skimmed_fees_opt, optional_vec),
(38, is_batch_funding, option),
+ (39, pending_outbound_blinding_points_opt, optional_vec),
+ (41, holding_cell_blinding_points_opt, optional_vec),
});
let (channel_keys_id, holder_signer) = if let Some(channel_keys_id) = channel_keys_id {
let mut holder_signer = signer_provider.derive_channel_signer(channel_value_satoshis, channel_keys_id);
// If we've gotten to the funding stage of the channel, populate the signer with its
// required channel parameters.
- let non_shutdown_state = channel_state & (!MULTI_STATE_FLAGS);
- if non_shutdown_state & !STATE_FLAGS >= (ChannelState::FundingCreated as u32) {
+ if channel_state >= ChannelState::FundingCreated {
holder_signer.provide_channel_parameters(&channel_parameters);
}
(channel_keys_id, holder_signer)
// We expect all skimmed fees to be consumed above
if iter.next().is_some() { return Err(DecodeError::InvalidValue) }
}
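+ // Blinding points were serialized in order (one per pending outbound HTLC, and one
+ // per holding-cell AddHTLC), so walk the lists in lockstep to re-attach them.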
+ if let Some(blinding_pts) = pending_outbound_blinding_points_opt {
+ let mut iter = blinding_pts.into_iter();
+ for htlc in pending_outbound_htlcs.iter_mut() {
+ htlc.blinding_point = iter.next().ok_or(DecodeError::InvalidValue)?;
+ }
+ // We expect all blinding points to be consumed above
+ if iter.next().is_some() { return Err(DecodeError::InvalidValue) }
+ }
+ if let Some(blinding_pts) = holding_cell_blinding_points_opt {
+ let mut iter = blinding_pts.into_iter();
+ for htlc in holding_cell_htlc_updates.iter_mut() {
+ if let HTLCUpdateAwaitingACK::AddHTLC { ref mut blinding_point, .. } = htlc {
+ *blinding_point = iter.next().ok_or(DecodeError::InvalidValue)?;
+ }
+ }
+ // We expect all blinding points to be consumed above
+ if iter.next().is_some() { return Err(DecodeError::InvalidValue) }
+ }
Ok(Channel {
context: ChannelContext {
use bitcoin::blockdata::transaction::{Transaction, TxOut};
use bitcoin::blockdata::opcodes;
use bitcoin::network::constants::Network;
- use crate::ln::PaymentHash;
+ use crate::ln::{PaymentHash, PaymentPreimage};
+ use crate::ln::channel_keys::{RevocationKey, RevocationBasepoint};
use crate::ln::channelmanager::{self, HTLCSource, PaymentId};
use crate::ln::channel::InitFeatures;
- use crate::ln::channel::{ChannelState, InboundHTLCOutput, OutboundV1Channel, InboundV1Channel, OutboundHTLCOutput, InboundHTLCState, OutboundHTLCState, HTLCCandidate, HTLCInitiator, commit_tx_fee_msat};
+ use crate::ln::channel::{FundingSentFlags, Channel, ChannelState, InboundHTLCOutput, OutboundV1Channel, InboundV1Channel, OutboundHTLCOutput, InboundHTLCState, OutboundHTLCState, HTLCCandidate, HTLCInitiator, HTLCUpdateAwaitingACK, commit_tx_fee_msat};
use crate::ln::channel::{MAX_FUNDING_SATOSHIS_NO_WUMBO, TOTAL_BITCOIN_SUPPLY_SATOSHIS, MIN_THEIR_CHAN_RESERVE_SATOSHIS};
- use crate::ln::features::ChannelTypeFeatures;
+ use crate::ln::features::{ChannelFeatures, ChannelTypeFeatures, NodeFeatures};
+ use crate::ln::msgs;
use crate::ln::msgs::{ChannelUpdate, DecodeError, UnsignedChannelUpdate, MAX_VALUE_MSAT};
use crate::ln::script::ShutdownScript;
- use crate::ln::chan_utils;
- use crate::ln::chan_utils::{htlc_success_tx_weight, htlc_timeout_tx_weight};
+ use crate::ln::chan_utils::{self, htlc_success_tx_weight, htlc_timeout_tx_weight};
use crate::chain::BestBlock;
use crate::chain::chaininterface::{FeeEstimator, LowerBoundedFeeEstimator, ConfirmationTarget};
use crate::sign::{ChannelSigner, InMemorySigner, EntropySource, SignerProvider};
use crate::chain::transaction::OutPoint;
- use crate::routing::router::Path;
+ use crate::routing::router::{Path, RouteHop};
use crate::util::config::UserConfig;
use crate::util::errors::APIError;
+ use crate::util::ser::{ReadableArgs, Writeable};
use crate::util::test_utils;
use crate::util::test_utils::{OnGetShutdownScriptpubkey, TestKeysInterface};
use bitcoin::secp256k1::{Secp256k1, ecdsa::Signature};
}
impl SignerProvider for Keys {
- type Signer = InMemorySigner;
+ type EcdsaSigner = InMemorySigner;
+ #[cfg(taproot)]
+ type TaprootSigner = InMemorySigner;
fn generate_channel_keys_id(&self, _inbound: bool, _channel_value_satoshis: u64, _user_channel_id: u128) -> [u8; 32] {
self.signer.channel_keys_id()
}
- fn derive_channel_signer(&self, _channel_value_satoshis: u64, _channel_keys_id: [u8; 32]) -> Self::Signer {
+ fn derive_channel_signer(&self, _channel_value_satoshis: u64, _channel_keys_id: [u8; 32]) -> Self::EcdsaSigner {
self.signer.clone()
}
- fn read_chan_signer(&self, _data: &[u8]) -> Result<Self::Signer, DecodeError> { panic!(); }
+ fn read_chan_signer(&self, _data: &[u8]) -> Result<Self::EcdsaSigner, DecodeError> { panic!(); }
- fn get_destination_script(&self) -> Result<ScriptBuf, ()> {
+ fn get_destination_script(&self, _channel_keys_id: [u8; 32]) -> Result<ScriptBuf, ()> {
let secp_ctx = Secp256k1::signing_only();
let channel_monitor_claim_key = SecretKey::from_slice(&<Vec<u8>>::from_hex("0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap()[..]).unwrap();
let channel_monitor_claim_key_hash = WPubkeyHash::hash(&PublicKey::from_secret_key(&secp_ctx, &channel_monitor_claim_key).serialize());
payment_id: PaymentId([42; 32]),
},
skimmed_fee_msat: None,
+ blinding_point: None,
});
// Make sure when Node A calculates their local commitment transaction, none of the HTLCs pass
assert!(!node_a_chan.channel_update(&update).unwrap());
}
+ #[test]
+ fn blinding_point_ser() {
+ // Ensure that channel blinding points are (de)serialized properly.
+ let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
+ let secp_ctx = Secp256k1::new();
+ let seed = [42; 32];
+ let network = Network::Testnet;
+ let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
+
+ let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
+ let config = UserConfig::default();
+ let features = channelmanager::provided_init_features(&config);
+ let outbound_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &features, 10000000, 100000, 42, &config, 0, 42, None).unwrap();
+ let mut chan = Channel { context: outbound_chan.context };
+
+ let dummy_htlc_source = HTLCSource::OutboundRoute {
+ path: Path {
+ hops: vec![RouteHop {
+ pubkey: test_utils::pubkey(2), channel_features: ChannelFeatures::empty(),
+ node_features: NodeFeatures::empty(), short_channel_id: 0, fee_msat: 0,
+ cltv_expiry_delta: 0, maybe_announced_channel: false,
+ }],
+ blinded_tail: None
+ },
+ session_priv: test_utils::privkey(42),
+ first_hop_htlc_msat: 0,
+ payment_id: PaymentId([42; 32]),
+ };
+ let dummy_outbound_output = OutboundHTLCOutput {
+ htlc_id: 0,
+ amount_msat: 0,
+ payment_hash: PaymentHash([43; 32]),
+ cltv_expiry: 0,
+ state: OutboundHTLCState::Committed,
+ source: dummy_htlc_source.clone(),
+ skimmed_fee_msat: None,
+ blinding_point: None,
+ };
+ let mut pending_outbound_htlcs = vec![dummy_outbound_output.clone(); 10];
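+ // Give every other HTLC a blinding point so both Some and None values round-trip.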
+ for (idx, htlc) in pending_outbound_htlcs.iter_mut().enumerate() {
+ if idx % 2 == 0 {
+ htlc.blinding_point = Some(test_utils::pubkey(42 + idx as u8));
+ }
+ }
+ chan.context.pending_outbound_htlcs = pending_outbound_htlcs.clone();
+
+ let dummy_holding_cell_add_htlc = HTLCUpdateAwaitingACK::AddHTLC {
+ amount_msat: 0,
+ cltv_expiry: 0,
+ payment_hash: PaymentHash([43; 32]),
+ source: dummy_htlc_source.clone(),
+ onion_routing_packet: msgs::OnionPacket {
+ version: 0,
+ public_key: Ok(test_utils::pubkey(1)),
+ hop_data: [0; 20*65],
+ hmac: [0; 32]
+ },
+ skimmed_fee_msat: None,
+ blinding_point: None,
+ };
+ let dummy_holding_cell_claim_htlc = HTLCUpdateAwaitingACK::ClaimHTLC {
+ payment_preimage: PaymentPreimage([42; 32]),
+ htlc_id: 0,
+ };
+ let mut holding_cell_htlc_updates = Vec::with_capacity(10);
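+ // Interleave adds without blinding points, claims, and adds with blinding points.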
+ for i in 0..10 {
+ if i % 3 == 0 {
+ holding_cell_htlc_updates.push(dummy_holding_cell_add_htlc.clone());
+ } else if i % 3 == 1 {
+ holding_cell_htlc_updates.push(dummy_holding_cell_claim_htlc.clone());
+ } else {
+ let mut dummy_add = dummy_holding_cell_add_htlc.clone();
+ if let HTLCUpdateAwaitingACK::AddHTLC { ref mut blinding_point, .. } = &mut dummy_add {
+ *blinding_point = Some(test_utils::pubkey(42 + i));
+ } else { panic!() }
+ holding_cell_htlc_updates.push(dummy_add);
+ }
+ }
+ chan.context.holding_cell_htlc_updates = holding_cell_htlc_updates.clone();
+
+ // Encode and decode the channel and ensure that the HTLCs within are the same.
+ let encoded_chan = chan.encode();
+ let mut s = crate::io::Cursor::new(&encoded_chan);
+ let mut reader = crate::util::ser::FixedLengthReader::new(&mut s, encoded_chan.len() as u64);
+ let features = channelmanager::provided_channel_type_features(&config);
+ let decoded_chan = Channel::read(&mut reader, (&&keys_provider, &&keys_provider, 0, &features)).unwrap();
+ assert_eq!(decoded_chan.context.pending_outbound_htlcs, pending_outbound_htlcs);
+ assert_eq!(decoded_chan.context.holding_cell_htlc_updates, holding_cell_htlc_updates);
+ }
+
#[cfg(feature = "_test_vectors")]
#[test]
fn outbound_commitment_test() {
use bitcoin::hashes::hex::FromHex;
use bitcoin::hash_types::Txid;
use bitcoin::secp256k1::Message;
- use crate::sign::{ChannelDerivationParameters, HTLCDescriptor, EcdsaChannelSigner};
+ use crate::sign::{ChannelDerivationParameters, HTLCDescriptor, ecdsa::EcdsaChannelSigner};
use crate::ln::PaymentPreimage;
use crate::ln::channel::{HTLCOutputInCommitment ,TxCreationKeys};
+ use crate::ln::channel_keys::{DelayedPaymentBasepoint, HtlcBasepoint};
use crate::ln::chan_utils::{ChannelPublicKeys, HolderCommitmentTransaction, CounterpartyChannelTransactionParameters};
use crate::util::logger::Logger;
use crate::sync::Arc;
let counterparty_pubkeys = ChannelPublicKeys {
funding_pubkey: public_from_secret_hex(&secp_ctx, "1552dfba4f6cf29a62a0af13c8d6981d36d0ef8d61ba10fb0fe90da7634d7e13"),
- revocation_basepoint: PublicKey::from_slice(&<Vec<u8>>::from_hex("02466d7fcae563e5cb09a0d1870bb580344804617879a14949cf22285f1bae3f27").unwrap()[..]).unwrap(),
+ revocation_basepoint: RevocationBasepoint::from(PublicKey::from_slice(&<Vec<u8>>::from_hex("02466d7fcae563e5cb09a0d1870bb580344804617879a14949cf22285f1bae3f27").unwrap()[..]).unwrap()),
payment_point: public_from_secret_hex(&secp_ctx, "4444444444444444444444444444444444444444444444444444444444444444"),
- delayed_payment_basepoint: public_from_secret_hex(&secp_ctx, "1552dfba4f6cf29a62a0af13c8d6981d36d0ef8d61ba10fb0fe90da7634d7e13"),
- htlc_basepoint: public_from_secret_hex(&secp_ctx, "4444444444444444444444444444444444444444444444444444444444444444")
+ delayed_payment_basepoint: DelayedPaymentBasepoint::from(public_from_secret_hex(&secp_ctx, "1552dfba4f6cf29a62a0af13c8d6981d36d0ef8d61ba10fb0fe90da7634d7e13")),
+ htlc_basepoint: HtlcBasepoint::from(public_from_secret_hex(&secp_ctx, "4444444444444444444444444444444444444444444444444444444444444444"))
};
chan.context.channel_transaction_parameters.counterparty_parameters = Some(
CounterpartyChannelTransactionParameters {
assert_eq!(counterparty_pubkeys.funding_pubkey.serialize()[..],
<Vec<u8>>::from_hex("030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c1").unwrap()[..]);
- assert_eq!(counterparty_pubkeys.htlc_basepoint.serialize()[..],
+ assert_eq!(counterparty_pubkeys.htlc_basepoint.to_public_key().serialize()[..],
<Vec<u8>>::from_hex("032c0b7cf95324a07d05398b240174dc0c2be444d96b159aa6c7f7b1e668680991").unwrap()[..]);
// We can't just use build_holder_transaction_keys here as the per_commitment_secret is not
let htlc_redeemscript = chan_utils::get_htlc_redeemscript(&htlc, $opt_anchors, &keys);
let htlc_sighashtype = if $opt_anchors.supports_anchors_zero_fee_htlc_tx() { EcdsaSighashType::SinglePlusAnyoneCanPay } else { EcdsaSighashType::All };
let htlc_sighash = Message::from_slice(&sighash::SighashCache::new(&htlc_tx).segwit_signature_hash(0, &htlc_redeemscript, htlc.amount_msat / 1000, htlc_sighashtype).unwrap()[..]).unwrap();
- assert!(secp_ctx.verify_ecdsa(&htlc_sighash, &remote_signature, &keys.countersignatory_htlc_key).is_ok(), "verify counterparty htlc sig");
+ assert!(secp_ctx.verify_ecdsa(&htlc_sighash, &remote_signature, &keys.countersignatory_htlc_key.to_public_key()).is_ok(), "verify counterparty htlc sig");
let mut preimage: Option<PaymentPreimage> = None;
if !htlc.offered {
state: OutboundHTLCState::Committed,
source: HTLCSource::dummy(),
skimmed_fee_msat: None,
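+ // New per-HTLC blinding point used for route blinding; `None` here since
+ // these BOLT 3 test vectors only cover unblinded HTLCs.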
+ blinding_point: None,
};
out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0202020202020202020202020202020202020202020202020202020202020202").unwrap()).to_byte_array();
out
state: OutboundHTLCState::Committed,
source: HTLCSource::dummy(),
skimmed_fee_msat: None,
+ blinding_point: None,
};
out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0303030303030303030303030303030303030303030303030303030303030303").unwrap()).to_byte_array();
out
state: OutboundHTLCState::Committed,
source: HTLCSource::dummy(),
skimmed_fee_msat: None,
+ blinding_point: None,
};
out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0505050505050505050505050505050505050505050505050505050505050505").unwrap()).to_byte_array();
out
state: OutboundHTLCState::Committed,
source: HTLCSource::dummy(),
skimmed_fee_msat: None,
+ blinding_point: None,
};
out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0505050505050505050505050505050505050505050505050505050505050505").unwrap()).to_byte_array();
out
let per_commitment_point = PublicKey::from_secret_key(&secp_ctx, &per_commitment_secret);
assert_eq!(per_commitment_point.serialize()[..], <Vec<u8>>::from_hex("025f7117a78150fe2ef97db7cfc83bd57b2e2c0d0dd25eaf467a4a1c2a45ce1486").unwrap()[..]);
- assert_eq!(chan_utils::derive_public_key(&secp_ctx, &per_commitment_point, &base_point).serialize()[..],
- <Vec<u8>>::from_hex("0235f2dbfaa89b57ec7b055afe29849ef7ddfeb1cefdb9ebdc43f5494984db29e5").unwrap()[..]);
-
assert_eq!(chan_utils::derive_private_key(&secp_ctx, &per_commitment_point, &base_secret),
SecretKey::from_slice(&<Vec<u8>>::from_hex("cbced912d3b21bf196a766651e436aff192362621ce317704ea2f75d87e7be0f").unwrap()[..]).unwrap());
- assert_eq!(chan_utils::derive_public_revocation_key(&secp_ctx, &per_commitment_point, &base_point).serialize()[..],
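+ // Public revocation keys are now derived via the typed
+ // `RevocationKey::from_basepoint` constructor (basepoint first, then the
+ // per-commitment point) rather than `chan_utils::derive_public_revocation_key`.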
+ assert_eq!(RevocationKey::from_basepoint(&secp_ctx, &RevocationBasepoint::from(base_point), &per_commitment_point).to_public_key().serialize()[..],
<Vec<u8>>::from_hex("02916e326636d19c33f13e8c0c3a03dd157f332f3e99c317c141dd865eb01f8ff0").unwrap()[..]);
assert_eq!(chan_utils::derive_private_revocation_key(&secp_ctx, &per_commitment_secret, &base_secret),
// as the funding transaction depends on all channels in the batch becoming ready.
assert!(node_a_updates.channel_ready.is_none());
assert!(node_a_updates.funding_broadcastable.is_none());
- assert_eq!(
- node_a_chan.context.channel_state,
- ChannelState::FundingSent as u32 |
- ChannelState::WaitingForBatch as u32,
- );
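+ // After the state-machine refactor, `channel_state` is an enum whose variants
+ // carry typed flags, so we match on `ChannelState::FundingSent(..)` with
+ // `FundingSentFlags` instead of OR-ing raw u32 bits.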
+ assert_eq!(node_a_chan.context.channel_state, ChannelState::FundingSent(FundingSentFlags::WAITING_FOR_BATCH));
// It is possible to receive a 0conf channel_ready from the remote node.
node_a_chan.channel_ready(
).unwrap();
assert_eq!(
node_a_chan.context.channel_state,
- ChannelState::FundingSent as u32 |
- ChannelState::WaitingForBatch as u32 |
- ChannelState::TheirChannelReady as u32,
+ ChannelState::FundingSent(FundingSentFlags::WAITING_FOR_BATCH | FundingSentFlags::THEIR_CHANNEL_READY)
);
- // Clear the ChannelState::WaitingForBatch only when called by ChannelManager.
+ // Clear the WAITING_FOR_BATCH flag only when called by the ChannelManager.
node_a_chan.set_batch_ready();
- assert_eq!(
- node_a_chan.context.channel_state,
- ChannelState::FundingSent as u32 |
- ChannelState::TheirChannelReady as u32,
- );
+ assert_eq!(node_a_chan.context.channel_state, ChannelState::FundingSent(FundingSentFlags::THEIR_CHANNEL_READY));
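+ // With WAITING_FOR_BATCH cleared, the channel may now report channel_ready.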
assert!(node_a_chan.check_get_channel_ready(0).is_some());
}
}