use crate::events::ClosureReason;
use crate::routing::gossip::NodeId;
use crate::util::ser::{Readable, ReadableArgs, Writeable, Writer};
-use crate::util::logger::{Logger, WithContext};
+use crate::util::logger::{Logger, Record, WithContext};
use crate::util::errors::APIError;
use crate::util::config::{UserConfig, ChannelConfig, LegacyChannelConfig, ChannelHandshakeConfig, ChannelHandshakeLimits, MaxDustHTLCExposure};
use crate::util::scid_utils::scid_from_parts;
htlc_id: u64,
err_packet: msgs::OnionErrorPacket,
},
+ FailMalformedHTLC {
+ htlc_id: u64,
+ failure_code: u16,
+ sha256_of_onion: [u8; 32],
+ },
+}
+
+macro_rules! define_state_flags {
+ ($flag_type_doc: expr, $flag_type: ident, [$(($flag_doc: expr, $flag: ident, $value: expr, $get: ident, $set: ident, $clear: ident)),+], $extra_flags: expr) => {
+ #[doc = $flag_type_doc]
+ #[derive(Copy, Clone, Debug, PartialEq, PartialOrd, Eq)]
+ struct $flag_type(u32);
+
+ impl $flag_type {
+ $(
+ #[doc = $flag_doc]
+ const $flag: $flag_type = $flag_type($value);
+ )*
+
+ /// All flags that apply to the specified [`ChannelState`] variant.
+ #[allow(unused)]
+ const ALL: $flag_type = Self($(Self::$flag.0 | )* $extra_flags);
+
+ #[allow(unused)]
+ fn new() -> Self { Self(0) }
+
+ #[allow(unused)]
+ fn from_u32(flags: u32) -> Result<Self, ()> {
+ if flags & !Self::ALL.0 != 0 {
+ Err(())
+ } else {
+ Ok($flag_type(flags))
+ }
+ }
+
+ #[allow(unused)]
+ fn is_empty(&self) -> bool { self.0 == 0 }
+ #[allow(unused)]
+ fn is_set(&self, flag: Self) -> bool { *self & flag == flag }
+ #[allow(unused)]
+ fn set(&mut self, flag: Self) { *self |= flag }
+ #[allow(unused)]
+ fn clear(&mut self, flag: Self) -> Self { self.0 &= !flag.0; *self }
+ }
+
+ $(
+ define_state_flags!($flag_type, Self::$flag, $get, $set, $clear);
+ )*
+
+ impl core::ops::BitOr for $flag_type {
+ type Output = Self;
+ fn bitor(self, rhs: Self) -> Self::Output { Self(self.0 | rhs.0) }
+ }
+ impl core::ops::BitOrAssign for $flag_type {
+ fn bitor_assign(&mut self, rhs: Self) { self.0 |= rhs.0; }
+ }
+ impl core::ops::BitAnd for $flag_type {
+ type Output = Self;
+ fn bitand(self, rhs: Self) -> Self::Output { Self(self.0 & rhs.0) }
+ }
+ impl core::ops::BitAndAssign for $flag_type {
+ fn bitand_assign(&mut self, rhs: Self) { self.0 &= rhs.0; }
+ }
+ };
+ ($flag_type_doc: expr, $flag_type: ident, $flags: tt) => {
+ define_state_flags!($flag_type_doc, $flag_type, $flags, 0);
+ };
+ ($flag_type: ident, $flag: expr, $get: ident, $set: ident, $clear: ident) => {
+ impl $flag_type {
+ #[allow(unused)]
+ fn $get(&self) -> bool { self.is_set($flag_type::new() | $flag) }
+ #[allow(unused)]
+ fn $set(&mut self) { self.set($flag_type::new() | $flag) }
+ #[allow(unused)]
+ fn $clear(&mut self) -> Self { self.clear($flag_type::new() | $flag) }
+ }
+ };
+ ($flag_type_doc: expr, FUNDED_STATE, $flag_type: ident, $flags: tt) => {
+ define_state_flags!($flag_type_doc, $flag_type, $flags, FundedStateFlags::ALL.0);
+
+ define_state_flags!($flag_type, FundedStateFlags::PEER_DISCONNECTED,
+ is_peer_disconnected, set_peer_disconnected, clear_peer_disconnected);
+ define_state_flags!($flag_type, FundedStateFlags::MONITOR_UPDATE_IN_PROGRESS,
+ is_monitor_update_in_progress, set_monitor_update_in_progress, clear_monitor_update_in_progress);
+ define_state_flags!($flag_type, FundedStateFlags::REMOTE_SHUTDOWN_SENT,
+ is_remote_shutdown_sent, set_remote_shutdown_sent, clear_remote_shutdown_sent);
+ define_state_flags!($flag_type, FundedStateFlags::LOCAL_SHUTDOWN_SENT,
+ is_local_shutdown_sent, set_local_shutdown_sent, clear_local_shutdown_sent);
+
+ impl core::ops::BitOr<FundedStateFlags> for $flag_type {
+ type Output = Self;
+ fn bitor(self, rhs: FundedStateFlags) -> Self::Output { Self(self.0 | rhs.0) }
+ }
+ impl core::ops::BitOrAssign<FundedStateFlags> for $flag_type {
+ fn bitor_assign(&mut self, rhs: FundedStateFlags) { self.0 |= rhs.0; }
+ }
+ impl core::ops::BitAnd<FundedStateFlags> for $flag_type {
+ type Output = Self;
+ fn bitand(self, rhs: FundedStateFlags) -> Self::Output { Self(self.0 & rhs.0) }
+ }
+ impl core::ops::BitAndAssign<FundedStateFlags> for $flag_type {
+ fn bitand_assign(&mut self, rhs: FundedStateFlags) { self.0 &= rhs.0; }
+ }
+ impl PartialEq<FundedStateFlags> for $flag_type {
+ fn eq(&self, other: &FundedStateFlags) -> bool { self.0 == other.0 }
+ }
+ impl From<FundedStateFlags> for $flag_type {
+ fn from(flags: FundedStateFlags) -> Self { Self(flags.0) }
+ }
+ };
+}
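+
+// For each `(doc, FLAG, value, get, set, clear)` tuple, the macro above emits a `u32`
+// newtype with a `FLAG` constant, an `ALL` mask, `new`/`from_u32`/`is_empty`/`is_set`/
+// `set`/`clear` helpers, per-flag getter/setter/clear methods, and bitwise-op impls; the
+// `FUNDED_STATE` arm additionally mixes in the shared `FundedStateFlags` helpers and ops.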
+
+/// We declare all the states/flags here together to help determine which bits are still available
+/// to choose.
+mod state_flags {
+ pub const OUR_INIT_SENT: u32 = 1 << 0;
+ pub const THEIR_INIT_SENT: u32 = 1 << 1;
+ pub const FUNDING_NEGOTIATED: u32 = 1 << 2;
+ pub const AWAITING_CHANNEL_READY: u32 = 1 << 3;
+ pub const THEIR_CHANNEL_READY: u32 = 1 << 4;
+ pub const OUR_CHANNEL_READY: u32 = 1 << 5;
+ pub const CHANNEL_READY: u32 = 1 << 6;
+ pub const PEER_DISCONNECTED: u32 = 1 << 7;
+ pub const MONITOR_UPDATE_IN_PROGRESS: u32 = 1 << 8;
+ pub const AWAITING_REMOTE_REVOKE: u32 = 1 << 9;
+ pub const REMOTE_SHUTDOWN_SENT: u32 = 1 << 10;
+ pub const LOCAL_SHUTDOWN_SENT: u32 = 1 << 11;
+ pub const SHUTDOWN_COMPLETE: u32 = 1 << 12;
+ pub const WAITING_FOR_BATCH: u32 = 1 << 13;
}
-/// There are a few "states" and then a number of flags which can be applied:
-/// We first move through init with `OurInitSent` -> `TheirInitSent` -> `FundingCreated` -> `FundingSent`.
-/// `TheirChannelReady` and `OurChannelReady` then get set on `FundingSent`, and when both are set we
-/// move on to `ChannelReady`.
-/// Note that `PeerDisconnected` can be set on both `ChannelReady` and `FundingSent`.
-/// `ChannelReady` can then get all remaining flags set on it, until we finish shutdown, then we
-/// move on to `ShutdownComplete`, at which point most calls into this channel are disallowed.
+define_state_flags!(
+ "Flags that apply to all [`ChannelState`] variants in which the channel is funded.",
+ FundedStateFlags, [
+ ("Indicates the remote side is considered \"disconnected\" and no updates are allowed \
+ until after we've done a `channel_reestablish` dance.", PEER_DISCONNECTED, state_flags::PEER_DISCONNECTED,
+ is_peer_disconnected, set_peer_disconnected, clear_peer_disconnected),
+ ("Indicates the user has told us a `ChannelMonitor` update is pending async persistence \
+ somewhere and we should pause sending any outbound messages until they've managed to \
+ complete it.", MONITOR_UPDATE_IN_PROGRESS, state_flags::MONITOR_UPDATE_IN_PROGRESS,
+ is_monitor_update_in_progress, set_monitor_update_in_progress, clear_monitor_update_in_progress),
+ ("Indicates we received a `shutdown` message from the remote end. If set, they may not add \
+ any new HTLCs to the channel, and we are expected to respond with our own `shutdown` \
+ message when possible.", REMOTE_SHUTDOWN_SENT, state_flags::REMOTE_SHUTDOWN_SENT,
+ is_remote_shutdown_sent, set_remote_shutdown_sent, clear_remote_shutdown_sent),
+ ("Indicates we sent a `shutdown` message. At this point, we may not add any new HTLCs to \
+ the channel.", LOCAL_SHUTDOWN_SENT, state_flags::LOCAL_SHUTDOWN_SENT,
+ is_local_shutdown_sent, set_local_shutdown_sent, clear_local_shutdown_sent)
+ ]
+);
+
+define_state_flags!(
+ "Flags that only apply to [`ChannelState::NegotiatingFunding`].",
+ NegotiatingFundingFlags, [
+ ("Indicates we have (or are prepared to) send our `open_channel`/`accept_channel` message.",
+ OUR_INIT_SENT, state_flags::OUR_INIT_SENT, is_our_init_sent, set_our_init_sent, clear_our_init_sent),
+ ("Indicates we have received their `open_channel`/`accept_channel` message.",
+ THEIR_INIT_SENT, state_flags::THEIR_INIT_SENT, is_their_init_sent, set_their_init_sent, clear_their_init_sent)
+ ]
+);
+
+define_state_flags!(
+ "Flags that only apply to [`ChannelState::AwaitingChannelReady`].",
+ FUNDED_STATE, AwaitingChannelReadyFlags, [
+ ("Indicates they sent us a `channel_ready` message. Once both `THEIR_CHANNEL_READY` and \
+ `OUR_CHANNEL_READY` are set, our state moves on to `ChannelReady`.",
+ THEIR_CHANNEL_READY, state_flags::THEIR_CHANNEL_READY,
+ is_their_channel_ready, set_their_channel_ready, clear_their_channel_ready),
+ ("Indicates we sent them a `channel_ready` message. Once both `THEIR_CHANNEL_READY` and \
+ `OUR_CHANNEL_READY` are set, our state moves on to `ChannelReady`.",
+ OUR_CHANNEL_READY, state_flags::OUR_CHANNEL_READY,
+ is_our_channel_ready, set_our_channel_ready, clear_our_channel_ready),
+ ("Indicates the channel was funded in a batch and the broadcast of the funding transaction \
+ is being held until all channels in the batch have received `funding_signed` and have \
+ their monitors persisted.", WAITING_FOR_BATCH, state_flags::WAITING_FOR_BATCH,
+ is_waiting_for_batch, set_waiting_for_batch, clear_waiting_for_batch)
+ ]
+);
+
+define_state_flags!(
+ "Flags that only apply to [`ChannelState::ChannelReady`].",
+ FUNDED_STATE, ChannelReadyFlags, [
+ ("Indicates that we have sent a `commitment_signed` but are awaiting the responding \
+ `revoke_and_ack` message. During this period, we can't generate new `commitment_signed` \
+ messages as we'd be unable to determine which HTLCs they included in their `revoke_and_ack` \
+ implicit ACK, so instead we have to hold them away temporarily to be sent later.",
+ AWAITING_REMOTE_REVOKE, state_flags::AWAITING_REMOTE_REVOKE,
+ is_awaiting_remote_revoke, set_awaiting_remote_revoke, clear_awaiting_remote_revoke)
+ ]
+);
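+
+// A minimal sketch (not part of this patch) of the API the invocations above generate,
+// assuming the flag definitions in this file:
+#[cfg(test)]
+mod generated_flag_api_sketch {
+  use super::*;
+
+  #[test]
+  fn set_check_and_clear_flags() {
+    // Start with no flags set for the `AwaitingChannelReady` state.
+    let mut flags = AwaitingChannelReadyFlags::new();
+    assert!(flags.is_empty());
+
+    // Per-flag helpers are generated by `define_state_flags!`.
+    flags.set_their_channel_ready();
+    assert!(flags.is_their_channel_ready());
+    assert!(flags.is_set(AwaitingChannelReadyFlags::THEIR_CHANNEL_READY));
+
+    // `FUNDED_STATE` invocations also mix in the shared `FundedStateFlags` ops.
+    flags |= FundedStateFlags::PEER_DISCONNECTED;
+    assert!(flags.is_peer_disconnected());
+
+    flags.clear_their_channel_ready();
+    flags.clear_peer_disconnected();
+    assert!(flags.is_empty());
+  }
+}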
+
+// Note that the order of this enum is implicitly defined by where each variant is placed. Take this
+// into account when introducing new states and update `test_channel_state_order` accordingly.
+#[derive(Copy, Clone, Debug, PartialEq, PartialOrd, Eq)]
enum ChannelState {
- /// Implies we have (or are prepared to) send our open_channel/accept_channel message
- OurInitSent = 1 << 0,
- /// Implies we have received their `open_channel`/`accept_channel` message
- TheirInitSent = 1 << 1,
- /// We have sent `funding_created` and are awaiting a `funding_signed` to advance to `FundingSent`.
- /// Note that this is nonsense for an inbound channel as we immediately generate `funding_signed`
- /// upon receipt of `funding_created`, so simply skip this state.
- FundingCreated = 4,
- /// Set when we have received/sent `funding_created` and `funding_signed` and are thus now waiting
- /// on the funding transaction to confirm. The `ChannelReady` flags are set to indicate when we
- /// and our counterparty consider the funding transaction confirmed.
- FundingSent = 8,
- /// Flag which can be set on `FundingSent` to indicate they sent us a `channel_ready` message.
- /// Once both `TheirChannelReady` and `OurChannelReady` are set, state moves on to `ChannelReady`.
- TheirChannelReady = 1 << 4,
- /// Flag which can be set on `FundingSent` to indicate we sent them a `channel_ready` message.
- /// Once both `TheirChannelReady` and `OurChannelReady` are set, state moves on to `ChannelReady`.
- OurChannelReady = 1 << 5,
- ChannelReady = 64,
- /// Flag which is set on `ChannelReady` and `FundingSent` indicating remote side is considered
- /// "disconnected" and no updates are allowed until after we've done a `channel_reestablish`
- /// dance.
- PeerDisconnected = 1 << 7,
- /// Flag which is set on `ChannelReady`, FundingCreated, and `FundingSent` indicating the user has
- /// told us a `ChannelMonitor` update is pending async persistence somewhere and we should pause
- /// sending any outbound messages until they've managed to finish.
- MonitorUpdateInProgress = 1 << 8,
- /// Flag which implies that we have sent a commitment_signed but are awaiting the responding
- /// revoke_and_ack message. During this time period, we can't generate new commitment_signed
- /// messages as then we will be unable to determine which HTLCs they included in their
- /// revoke_and_ack implicit ACK, so instead we have to hold them away temporarily to be sent
- /// later.
- /// Flag is set on `ChannelReady`.
- AwaitingRemoteRevoke = 1 << 9,
- /// Flag which is set on `ChannelReady` or `FundingSent` after receiving a shutdown message from
- /// the remote end. If set, they may not add any new HTLCs to the channel, and we are expected
- /// to respond with our own shutdown message when possible.
- RemoteShutdownSent = 1 << 10,
- /// Flag which is set on `ChannelReady` or `FundingSent` after sending a shutdown message. At this
- /// point, we may not add any new HTLCs to the channel.
- LocalShutdownSent = 1 << 11,
- /// We've successfully negotiated a closing_signed dance. At this point ChannelManager is about
- /// to drop us, but we store this anyway.
- ShutdownComplete = 4096,
- /// Flag which is set on `FundingSent` to indicate this channel is funded in a batch and the
- /// broadcasting of the funding transaction is being held until all channels in the batch
- /// have received funding_signed and have their monitors persisted.
- WaitingForBatch = 1 << 13,
+ /// We are negotiating the parameters required for the channel prior to funding it.
+ NegotiatingFunding(NegotiatingFundingFlags),
+ /// We have sent `funding_created` and are awaiting a `funding_signed` to advance to
+ /// `AwaitingChannelReady`. Note that this is nonsense for an inbound channel as we immediately generate
+ /// `funding_signed` upon receipt of `funding_created`, so simply skip this state.
+ FundingNegotiated,
+ /// We've received/sent `funding_created` and `funding_signed` and are thus now waiting on the
+ /// funding transaction to confirm.
+ AwaitingChannelReady(AwaitingChannelReadyFlags),
+ /// Both we and our counterparty consider the funding transaction confirmed and the channel is
+ /// now operational.
+ ChannelReady(ChannelReadyFlags),
+ /// We've successfully negotiated a `closing_signed` dance. At this point, the `ChannelManager`
+ /// is about to drop us, but we store this anyway.
+ ShutdownComplete,
+}
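+
+// Sketch of the typical lifecycle under this representation (not part of this patch):
+// NegotiatingFunding(OUR_INIT_SENT | THEIR_INIT_SENT) -> FundingNegotiated
+//  -> AwaitingChannelReady(THEIR_CHANNEL_READY | OUR_CHANNEL_READY) -> ChannelReady(_)
+//  -> ShutdownComplete, after which most calls into the channel are disallowed.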
+
+macro_rules! impl_state_flag {
+ ($get: ident, $set: ident, $clear: ident, [$($state: ident),+]) => {
+ #[allow(unused)]
+ fn $get(&self) -> bool {
+ match self {
+ $(
+ ChannelState::$state(flags) => flags.$get(),
+ )*
+ _ => false,
+ }
+ }
+ #[allow(unused)]
+ fn $set(&mut self) {
+ match self {
+ $(
+ ChannelState::$state(flags) => flags.$set(),
+ )*
+ _ => debug_assert!(false, "Attempted to set flag on unexpected ChannelState"),
+ }
+ }
+ #[allow(unused)]
+ fn $clear(&mut self) {
+ match self {
+ $(
+ ChannelState::$state(flags) => { let _ = flags.$clear(); },
+ )*
+ _ => debug_assert!(false, "Attempted to clear flag on unexpected ChannelState"),
+ }
+ }
+ };
+ ($get: ident, $set: ident, $clear: ident, FUNDED_STATES) => {
+ impl_state_flag!($get, $set, $clear, [AwaitingChannelReady, ChannelReady]);
+ };
+ ($get: ident, $set: ident, $clear: ident, $state: ident) => {
+ impl_state_flag!($get, $set, $clear, [$state]);
+ };
+}
+
+impl ChannelState {
+ fn from_u32(state: u32) -> Result<Self, ()> {
+ match state {
+ state_flags::FUNDING_NEGOTIATED => Ok(ChannelState::FundingNegotiated),
+ state_flags::SHUTDOWN_COMPLETE => Ok(ChannelState::ShutdownComplete),
+ val => {
+ if val & state_flags::AWAITING_CHANNEL_READY == state_flags::AWAITING_CHANNEL_READY {
+ AwaitingChannelReadyFlags::from_u32(val & !state_flags::AWAITING_CHANNEL_READY)
+ .map(|flags| ChannelState::AwaitingChannelReady(flags))
+ } else if val & state_flags::CHANNEL_READY == state_flags::CHANNEL_READY {
+ ChannelReadyFlags::from_u32(val & !state_flags::CHANNEL_READY)
+ .map(|flags| ChannelState::ChannelReady(flags))
+ } else if let Ok(flags) = NegotiatingFundingFlags::from_u32(val) {
+ Ok(ChannelState::NegotiatingFunding(flags))
+ } else {
+ Err(())
+ }
+ },
+ }
+ }
+
+ fn to_u32(&self) -> u32 {
+ match self {
+ ChannelState::NegotiatingFunding(flags) => flags.0,
+ ChannelState::FundingNegotiated => state_flags::FUNDING_NEGOTIATED,
+ ChannelState::AwaitingChannelReady(flags) => state_flags::AWAITING_CHANNEL_READY | flags.0,
+ ChannelState::ChannelReady(flags) => state_flags::CHANNEL_READY | flags.0,
+ ChannelState::ShutdownComplete => state_flags::SHUTDOWN_COMPLETE,
+ }
+ }
+
+ fn is_pre_funded_state(&self) -> bool {
+ matches!(self, ChannelState::NegotiatingFunding(_)|ChannelState::FundingNegotiated)
+ }
+
+ fn is_both_sides_shutdown(&self) -> bool {
+ self.is_local_shutdown_sent() && self.is_remote_shutdown_sent()
+ }
+
+ fn with_funded_state_flags_mask(&self) -> FundedStateFlags {
+ match self {
+ ChannelState::AwaitingChannelReady(flags) => FundedStateFlags((*flags & FundedStateFlags::ALL).0),
+ ChannelState::ChannelReady(flags) => FundedStateFlags((*flags & FundedStateFlags::ALL).0),
+ _ => FundedStateFlags::new(),
+ }
+ }
+
+ fn can_generate_new_commitment(&self) -> bool {
+ match self {
+ ChannelState::ChannelReady(flags) =>
+ !flags.is_set(ChannelReadyFlags::AWAITING_REMOTE_REVOKE) &&
+ !flags.is_set(FundedStateFlags::MONITOR_UPDATE_IN_PROGRESS.into()) &&
+ !flags.is_set(FundedStateFlags::PEER_DISCONNECTED.into()),
+ _ => {
+ debug_assert!(false, "Can only generate new commitment within ChannelReady");
+ false
+ },
+ }
+ }
+
+ impl_state_flag!(is_peer_disconnected, set_peer_disconnected, clear_peer_disconnected, FUNDED_STATES);
+ impl_state_flag!(is_monitor_update_in_progress, set_monitor_update_in_progress, clear_monitor_update_in_progress, FUNDED_STATES);
+ impl_state_flag!(is_local_shutdown_sent, set_local_shutdown_sent, clear_local_shutdown_sent, FUNDED_STATES);
+ impl_state_flag!(is_remote_shutdown_sent, set_remote_shutdown_sent, clear_remote_shutdown_sent, FUNDED_STATES);
+ impl_state_flag!(is_our_channel_ready, set_our_channel_ready, clear_our_channel_ready, AwaitingChannelReady);
+ impl_state_flag!(is_their_channel_ready, set_their_channel_ready, clear_their_channel_ready, AwaitingChannelReady);
+ impl_state_flag!(is_waiting_for_batch, set_waiting_for_batch, clear_waiting_for_batch, AwaitingChannelReady);
+ impl_state_flag!(is_awaiting_remote_revoke, set_awaiting_remote_revoke, clear_awaiting_remote_revoke, ChannelReady);
}
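+
+// A minimal sketch (not part of this patch): states round-trip through their `u32`
+// encoding, and the derived `PartialOrd` follows variant declaration order, which
+// `test_channel_state_order` relies on.
+#[cfg(test)]
+mod channel_state_sketch {
+  use super::*;
+
+  #[test]
+  fn round_trip_and_order() {
+    let state = ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::OUR_CHANNEL_READY);
+    assert_eq!(ChannelState::from_u32(state.to_u32()), Ok(state));
+
+    // NegotiatingFunding < FundingNegotiated < AwaitingChannelReady < ChannelReady < ShutdownComplete.
+    assert!(ChannelState::FundingNegotiated < state);
+    assert!(state < ChannelState::ShutdownComplete);
+  }
+}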
-const BOTH_SIDES_SHUTDOWN_MASK: u32 =
- ChannelState::LocalShutdownSent as u32 |
- ChannelState::RemoteShutdownSent as u32;
-const MULTI_STATE_FLAGS: u32 =
- BOTH_SIDES_SHUTDOWN_MASK |
- ChannelState::PeerDisconnected as u32 |
- ChannelState::MonitorUpdateInProgress as u32;
-const STATE_FLAGS: u32 =
- MULTI_STATE_FLAGS |
- ChannelState::TheirChannelReady as u32 |
- ChannelState::OurChannelReady as u32 |
- ChannelState::AwaitingRemoteRevoke as u32 |
- ChannelState::WaitingForBatch as u32;
pub const INITIAL_COMMITMENT_NUMBER: u64 = (1 << 48) - 1;
}
}
+pub(super) struct WithChannelContext<'a, L: Deref> where L::Target: Logger {
+ pub logger: &'a L,
+ pub peer_id: Option<PublicKey>,
+ pub channel_id: Option<ChannelId>,
+}
+
+impl<'a, L: Deref> Logger for WithChannelContext<'a, L> where L::Target: Logger {
+ fn log(&self, mut record: Record) {
+ record.peer_id = self.peer_id;
+ record.channel_id = self.channel_id;
+ self.logger.log(record)
+ }
+}
+
+impl<'a, 'b, L: Deref> WithChannelContext<'a, L>
+where L::Target: Logger {
+ pub(super) fn from<S: Deref>(logger: &'a L, context: &'b ChannelContext<S>) -> Self
+ where S::Target: SignerProvider
+ {
+ WithChannelContext {
+ logger,
+ peer_id: Some(context.counterparty_node_id),
+ channel_id: Some(context.channel_id),
+ }
+ }
+}
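+
+// Usage sketch (not part of this patch, receiver names hypothetical): wrap the logger once
+// per channel so every record is tagged with the peer and channel id, e.g.
+// `let logger = WithChannelContext::from(&self.logger, &chan.context);`
+// and then log through it with the usual `log_trace!`/`log_info!` macros.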
+
macro_rules! secp_check {
($res: expr, $err: expr) => {
match $res {
total_fee_sat: u64, // the total fee included in the transaction
num_nondust_htlcs: usize, // the number of HTLC outputs (dust HTLCs *non*-included)
htlcs_included: Vec<(HTLCOutputInCommitment, Option<&'a HTLCSource>)>, // the list of HTLCs (dust HTLCs *included*) which were not ignored when building the transaction
- local_balance_msat: u64, // local balance before fees but considering dust limits
- remote_balance_msat: u64, // remote balance before fees but considering dust limits
- preimages: Vec<PaymentPreimage>, // preimages for successful offered HTLCs since last commitment
+ local_balance_msat: u64, // local balance before fees *not* considering dust limits
+ remote_balance_msat: u64, // remote balance before fees *not* considering dust limits
+ outbound_htlc_preimages: Vec<PaymentPreimage>, // preimages for successful offered HTLCs since last commitment
+ inbound_htlc_preimages: Vec<PaymentPreimage>, // preimages for successful received HTLCs since last commitment
}
/// Used when calculating whether we or the remote can afford an additional HTLC.
pub(super) struct SignerResumeUpdates {
pub commitment_update: Option<msgs::CommitmentUpdate>,
pub funding_signed: Option<msgs::FundingSigned>,
- pub funding_created: Option<msgs::FundingCreated>,
pub channel_ready: Option<msgs::ChannelReady>,
}
/// The result of a shutdown that should be handled.
#[must_use]
pub(crate) struct ShutdownResult {
+ pub(crate) closure_reason: ClosureReason,
/// A channel monitor update to apply.
- pub(crate) monitor_update: Option<(PublicKey, OutPoint, ChannelMonitorUpdate)>,
+ pub(crate) monitor_update: Option<(PublicKey, OutPoint, ChannelId, ChannelMonitorUpdate)>,
/// A list of dropped outbound HTLCs that can safely be failed backwards immediately.
pub(crate) dropped_outbound_htlcs: Vec<(HTLCSource, PaymentHash, PublicKey, ChannelId)>,
/// An unbroadcasted batch funding transaction id. The closure of this channel should be
/// propagated to the remainder of the batch.
pub(crate) unbroadcasted_batch_funding_txid: Option<Txid>,
+ pub(crate) channel_id: ChannelId,
+ pub(crate) user_channel_id: u128,
+ pub(crate) channel_capacity_satoshis: u64,
+ pub(crate) counterparty_node_id: PublicKey,
+ pub(crate) unbroadcasted_funding_tx: Option<Transaction>,
+ pub(crate) channel_funding_txo: Option<OutPoint>,
}
/// If the majority of the channel's funds are to the fundee and the initiator holds only just
/// The temporary channel ID used during channel setup. Value kept even after transitioning to a final channel ID.
/// Will be `None` for channels created prior to 0.0.115.
temporary_channel_id: Option<ChannelId>,
- channel_state: u32,
+ channel_state: ChannelState,
// When we reach max(6 blocks, minimum_depth), we need to send an AnnouncementSigs message to
// our peer. However, we want to make sure they received it, or else rebroadcast it when we
/// Returns true if we've ever received a message from the remote end for this Channel
pub fn have_received_message(&self) -> bool {
- self.channel_state & !STATE_FLAGS > (ChannelState::OurInitSent as u32)
+ self.channel_state > ChannelState::NegotiatingFunding(NegotiatingFundingFlags::OUR_INIT_SENT)
}
/// Returns true if this channel is fully established and not known to be closing.
/// Allowed in any state (including after shutdown)
pub fn is_usable(&self) -> bool {
- let mask = ChannelState::ChannelReady as u32 | BOTH_SIDES_SHUTDOWN_MASK;
- (self.channel_state & mask) == (ChannelState::ChannelReady as u32) && !self.monitor_pending_channel_ready
+ matches!(self.channel_state, ChannelState::ChannelReady(_)) &&
+ !self.channel_state.is_local_shutdown_sent() &&
+ !self.channel_state.is_remote_shutdown_sent() &&
+ !self.monitor_pending_channel_ready
}
/// Returns the state of the channel as it moves through the various stages of shutdown.
pub fn shutdown_state(&self) -> ChannelShutdownState {
- if self.channel_state & (ChannelState::ShutdownComplete as u32) != 0 {
- return ChannelShutdownState::ShutdownComplete;
- }
- if self.channel_state & (ChannelState::LocalShutdownSent as u32) != 0 && self.channel_state & (ChannelState::RemoteShutdownSent as u32) == 0 {
- return ChannelShutdownState::ShutdownInitiated;
- }
- if (self.channel_state & BOTH_SIDES_SHUTDOWN_MASK != 0) && !self.closing_negotiation_ready() {
- return ChannelShutdownState::ResolvingHTLCs;
- }
- if (self.channel_state & BOTH_SIDES_SHUTDOWN_MASK != 0) && self.closing_negotiation_ready() {
- return ChannelShutdownState::NegotiatingClosingFee;
+ match self.channel_state {
+ ChannelState::AwaitingChannelReady(_)|ChannelState::ChannelReady(_) =>
+ if self.channel_state.is_local_shutdown_sent() && !self.channel_state.is_remote_shutdown_sent() {
+ ChannelShutdownState::ShutdownInitiated
+ } else if (self.channel_state.is_local_shutdown_sent() || self.channel_state.is_remote_shutdown_sent()) && !self.closing_negotiation_ready() {
+ ChannelShutdownState::ResolvingHTLCs
+ } else if (self.channel_state.is_local_shutdown_sent() || self.channel_state.is_remote_shutdown_sent()) && self.closing_negotiation_ready() {
+ ChannelShutdownState::NegotiatingClosingFee
+ } else {
+ ChannelShutdownState::NotShuttingDown
+ },
+ ChannelState::ShutdownComplete => ChannelShutdownState::ShutdownComplete,
+ _ => ChannelShutdownState::NotShuttingDown,
}
- return ChannelShutdownState::NotShuttingDown;
}
fn closing_negotiation_ready(&self) -> bool {
+ let is_ready_to_close = match self.channel_state {
+ ChannelState::AwaitingChannelReady(flags) =>
+ flags & FundedStateFlags::ALL == FundedStateFlags::LOCAL_SHUTDOWN_SENT | FundedStateFlags::REMOTE_SHUTDOWN_SENT,
+ ChannelState::ChannelReady(flags) =>
+ flags == FundedStateFlags::LOCAL_SHUTDOWN_SENT | FundedStateFlags::REMOTE_SHUTDOWN_SENT,
+ _ => false,
+ };
self.pending_inbound_htlcs.is_empty() &&
- self.pending_outbound_htlcs.is_empty() &&
- self.pending_update_fee.is_none() &&
- self.channel_state &
- (BOTH_SIDES_SHUTDOWN_MASK |
- ChannelState::AwaitingRemoteRevoke as u32 |
- ChannelState::PeerDisconnected as u32 |
- ChannelState::MonitorUpdateInProgress as u32) == BOTH_SIDES_SHUTDOWN_MASK
+ self.pending_outbound_htlcs.is_empty() &&
+ self.pending_update_fee.is_none() &&
+ is_ready_to_close
}
/// Returns true if this channel is currently available for use. This is a superset of
/// is_usable() and considers things like the channel being temporarily disabled.
/// Allowed in any state (including after shutdown)
pub fn is_live(&self) -> bool {
- self.is_usable() && (self.channel_state & (ChannelState::PeerDisconnected as u32) == 0)
+ self.is_usable() && !self.channel_state.is_peer_disconnected()
}
// Public utilities:
/// Returns true if funding_signed was sent/received and the
/// funding transaction has been broadcast if necessary.
pub fn is_funding_broadcast(&self) -> bool {
- self.channel_state & !STATE_FLAGS >= ChannelState::FundingSent as u32 &&
- self.channel_state & ChannelState::WaitingForBatch as u32 == 0
+ !self.channel_state.is_pre_funded_state() &&
+ !matches!(self.channel_state, ChannelState::AwaitingChannelReady(flags) if flags.is_set(AwaitingChannelReadyFlags::WAITING_FOR_BATCH))
}
/// Transaction nomenclature is somewhat confusing here as there are many different cases - a
}
}
+ let mut inbound_htlc_preimages: Vec<PaymentPreimage> = Vec::new();
+
for ref htlc in self.pending_inbound_htlcs.iter() {
let (include, state_name) = match htlc.state {
InboundHTLCState::RemoteAnnounced(_) => (!generated_by_local, "RemoteAnnounced"),
match &htlc.state {
&InboundHTLCState::LocalRemoved(ref reason) => {
if generated_by_local {
- if let &InboundHTLCRemovalReason::Fulfill(_) = reason {
+ if let &InboundHTLCRemovalReason::Fulfill(preimage) = reason {
+ inbound_htlc_preimages.push(preimage);
value_to_self_msat_offset += htlc.amount_msat as i64;
}
}
}
}
- let mut preimages: Vec<PaymentPreimage> = Vec::new();
+
+ let mut outbound_htlc_preimages: Vec<PaymentPreimage> = Vec::new();
for ref htlc in self.pending_outbound_htlcs.iter() {
let (include, state_name) = match htlc.state {
};
if let Some(preimage) = preimage_opt {
- preimages.push(preimage);
+ outbound_htlc_preimages.push(preimage);
}
if include {
}
}
- let mut value_to_self_msat: i64 = (self.value_to_self_msat - local_htlc_total_msat) as i64 + value_to_self_msat_offset;
+ let value_to_self_msat: i64 = (self.value_to_self_msat - local_htlc_total_msat) as i64 + value_to_self_msat_offset;
assert!(value_to_self_msat >= 0);
// Note that in case they have several just-awaiting-last-RAA fulfills in-progress (ie
// AwaitingRemoteRevokeToRemove or AwaitingRemovedRemoteRevoke) we may have allowed them to
// "violate" their reserve value by couting those against it. Thus, we have to convert
// everything to i64 before subtracting as otherwise we can overflow.
- let mut value_to_remote_msat: i64 = (self.channel_value_satoshis * 1000) as i64 - (self.value_to_self_msat as i64) - (remote_htlc_total_msat as i64) - value_to_self_msat_offset;
+ let value_to_remote_msat: i64 = (self.channel_value_satoshis * 1000) as i64 - (self.value_to_self_msat as i64) - (remote_htlc_total_msat as i64) - value_to_self_msat_offset;
assert!(value_to_remote_msat >= 0);
#[cfg(debug_assertions)]
htlcs_included.sort_unstable_by_key(|h| h.0.transaction_output_index.unwrap());
htlcs_included.append(&mut included_dust_htlcs);
- // For the stats, trimmed-to-0 the value in msats accordingly
- value_to_self_msat = if (value_to_self_msat * 1000) < broadcaster_dust_limit_satoshis as i64 { 0 } else { value_to_self_msat };
- value_to_remote_msat = if (value_to_remote_msat * 1000) < broadcaster_dust_limit_satoshis as i64 { 0 } else { value_to_remote_msat };
-
CommitmentStats {
tx,
feerate_per_kw,
htlcs_included,
local_balance_msat: value_to_self_msat as u64,
remote_balance_msat: value_to_remote_msat as u64,
- preimages
+ inbound_htlc_preimages,
+ outbound_htlc_preimages,
}
}
/// will sign and send to our counterparty.
/// If an Err is returned, it is a ChannelError::Close (for get_funding_created)
fn build_remote_transaction_keys(&self) -> TxCreationKeys {
- //TODO: Ensure that the payment_key derived here ends up in the library users' wallet as we
- //may see payments to it!
let revocation_basepoint = &self.get_holder_pubkeys().revocation_basepoint;
let htlc_basepoint = &self.get_holder_pubkeys().htlc_basepoint;
let counterparty_pubkeys = self.get_counterparty_pubkeys();
if let Some(feerate) = outbound_feerate_update {
feerate_per_kw = cmp::max(feerate_per_kw, feerate);
}
- cmp::max(2530, feerate_per_kw * 1250 / 1000)
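+ // E.g. a feerate_per_kw of 10_000 becomes max(2530, 12_500) = 12_500; `checked_mul`
+ // avoids the overflow a plain `feerate_per_kw * 1250` could hit near `u32::MAX`.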
+ let feerate_plus_quarter = feerate_per_kw.checked_mul(1250).map(|v| v / 1000);
+ cmp::max(2530, feerate_plus_quarter.unwrap_or(u32::max_value()))
}
/// Get forwarding information for the counterparty.
res
}
- fn if_unbroadcasted_funding<F, O>(&self, f: F) -> Option<O>
- where F: Fn() -> Option<O> {
- if self.channel_state & ChannelState::FundingCreated as u32 != 0 ||
- self.channel_state & ChannelState::WaitingForBatch as u32 != 0 {
- f()
- } else {
- None
+ fn if_unbroadcasted_funding<F, O>(&self, f: F) -> Option<O> where F: Fn() -> Option<O> {
+ match self.channel_state {
+ ChannelState::FundingNegotiated => f(),
+ ChannelState::AwaitingChannelReady(flags) =>
+   if flags.is_set(AwaitingChannelReadyFlags::WAITING_FOR_BATCH) {
+     f()
+   } else {
+     None
+   },
+ _ => None,
}
}
/// those explicitly stated to be allowed after shutdown completes, eg some simple getters).
/// Also returns the list of payment_hashes for channels which we can safely fail backwards
/// immediately (others we will have to allow to time out).
- pub fn force_shutdown(&mut self, should_broadcast: bool) -> ShutdownResult {
+ pub fn force_shutdown(&mut self, should_broadcast: bool, closure_reason: ClosureReason) -> ShutdownResult {
// Note that we MUST only generate a monitor update that indicates force-closure - we're
// called during initialization prior to the chain_monitor in the encompassing ChannelManager
// being fully configured in some cases. Thus, it's likely any monitor events we generate will
// be delayed in being processed! See the docs for `ChannelManagerReadArgs` for more.
- assert!(self.channel_state != ChannelState::ShutdownComplete as u32);
+ assert!(!matches!(self.channel_state, ChannelState::ShutdownComplete));
// We go ahead and "free" any holding cell HTLCs or HTLCs we haven't yet committed to and
// return them to fail the payment.
}
}
let monitor_update = if let Some(funding_txo) = self.get_funding_txo() {
- // If we haven't yet exchanged funding signatures (ie channel_state < FundingSent),
+ // If we haven't yet exchanged funding signatures (ie channel_state < AwaitingChannelReady),
// returning a channel monitor update here would imply a channel monitor update before
// we even registered the channel monitor to begin with, which is invalid.
// Thus, if we aren't actually at a point where we could conceivably broadcast the
// funding transaction, don't return a funding txo (which prevents providing the
// monitor update to the user, even if we return one).
// See test_duplicate_chan_id and test_pre_lockin_no_chan_closed_update for more.
- if self.channel_state & (ChannelState::FundingSent as u32 | ChannelState::ChannelReady as u32 | ChannelState::ShutdownComplete as u32) != 0 {
+ if !self.channel_state.is_pre_funded_state() {
self.latest_monitor_update_id = CLOSED_CHANNEL_UPDATE_ID;
- Some((self.get_counterparty_node_id(), funding_txo, ChannelMonitorUpdate {
+ Some((self.get_counterparty_node_id(), funding_txo, self.channel_id(), ChannelMonitorUpdate {
update_id: self.latest_monitor_update_id,
+ counterparty_node_id: Some(self.counterparty_node_id),
updates: vec![ChannelMonitorUpdateStep::ChannelForceClosed { should_broadcast }],
+ channel_id: Some(self.channel_id()),
}))
} else { None }
} else { None };
let unbroadcasted_batch_funding_txid = self.unbroadcasted_batch_funding_txid();
+ let unbroadcasted_funding_tx = self.unbroadcasted_funding();
- self.channel_state = ChannelState::ShutdownComplete as u32;
+ self.channel_state = ChannelState::ShutdownComplete;
self.update_time_counter += 1;
ShutdownResult {
+ closure_reason,
monitor_update,
dropped_outbound_htlcs,
unbroadcasted_batch_funding_txid,
+ channel_id: self.channel_id,
+ user_channel_id: self.user_id,
+ channel_capacity_satoshis: self.channel_value_satoshis,
+ counterparty_node_id: self.counterparty_node_id,
+ unbroadcasted_funding_tx,
+ channel_funding_txo: self.get_funding_txo(),
}
}
- /// Only allowed after [`Self::channel_transaction_parameters`] is set.
- fn get_funding_created_msg<L: Deref>(&mut self, logger: &L) -> Option<msgs::FundingCreated> where L::Target: Logger {
- let counterparty_keys = self.build_remote_transaction_keys();
- let counterparty_initial_commitment_tx = self.build_commitment_transaction(self.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, false, logger).tx;
- let signature = match &self.holder_signer {
- // TODO (taproot|arik): move match into calling method for Taproot
- ChannelSignerType::Ecdsa(ecdsa) => {
- ecdsa.sign_counterparty_commitment(&counterparty_initial_commitment_tx, Vec::new(), &self.secp_ctx)
- .map(|(sig, _)| sig).ok()?
- },
- // TODO (taproot|arik)
- #[cfg(taproot)]
- _ => todo!()
- };
-
- if self.signer_pending_funding {
- log_trace!(logger, "Counterparty commitment signature ready for funding_created message: clearing signer_pending_funding");
- self.signer_pending_funding = false;
- }
-
- Some(msgs::FundingCreated {
- temporary_channel_id: self.temporary_channel_id.unwrap(),
- funding_txid: self.channel_transaction_parameters.funding_outpoint.as_ref().unwrap().txid,
- funding_output_index: self.channel_transaction_parameters.funding_outpoint.as_ref().unwrap().index,
- signature,
- #[cfg(taproot)]
- partial_signature_with_nonce: None,
- #[cfg(taproot)]
- next_local_nonce: None,
- })
- }
-
/// Only allowed after [`Self::channel_transaction_parameters`] is set.
fn get_funding_signed_msg<L: Deref>(&mut self, logger: &L) -> (CommitmentTransaction, Option<msgs::FundingSigned>) where L::Target: Logger {
let counterparty_keys = self.build_remote_transaction_keys();
match &self.holder_signer {
// TODO (arik): move match into calling method for Taproot
ChannelSignerType::Ecdsa(ecdsa) => {
- let funding_signed = ecdsa.sign_counterparty_commitment(&counterparty_initial_commitment_tx, Vec::new(), &self.secp_ctx)
+ let funding_signed = ecdsa.sign_counterparty_commitment(&counterparty_initial_commitment_tx, Vec::new(), Vec::new(), &self.secp_ctx)
.map(|(signature, _)| msgs::FundingSigned {
channel_id: self.channel_id(),
signature,
.ok();
if funding_signed.is_none() {
- log_trace!(logger, "Counterparty commitment signature not available for funding_signed message; setting signer_pending_funding");
- self.signer_pending_funding = true;
+ #[cfg(not(async_signing))] {
+ panic!("Failed to get signature for funding_signed");
+ }
+ #[cfg(async_signing)] {
+ log_trace!(logger, "Counterparty commitment signature not available for funding_signed message; setting signer_pending_funding");
+ self.signer_pending_funding = true;
+ }
} else if self.signer_pending_funding {
log_trace!(logger, "Counterparty commitment signature available for funding_signed message; clearing signer_pending_funding");
self.signer_pending_funding = false;
feerate: u32,
}
+/// Contents of a wire message that fails an HTLC backwards. Useful for [`Channel::fail_htlc`] to
+/// fail with either [`msgs::UpdateFailMalformedHTLC`] or [`msgs::UpdateFailHTLC`] as needed.
+trait FailHTLCContents {
+ type Message: FailHTLCMessageName;
+ fn to_message(self, htlc_id: u64, channel_id: ChannelId) -> Self::Message;
+ fn to_inbound_htlc_state(self) -> InboundHTLCState;
+ fn to_htlc_update_awaiting_ack(self, htlc_id: u64) -> HTLCUpdateAwaitingACK;
+}
+impl FailHTLCContents for msgs::OnionErrorPacket {
+ type Message = msgs::UpdateFailHTLC;
+ fn to_message(self, htlc_id: u64, channel_id: ChannelId) -> Self::Message {
+ msgs::UpdateFailHTLC { htlc_id, channel_id, reason: self }
+ }
+ fn to_inbound_htlc_state(self) -> InboundHTLCState {
+ InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailRelay(self))
+ }
+ fn to_htlc_update_awaiting_ack(self, htlc_id: u64) -> HTLCUpdateAwaitingACK {
+ HTLCUpdateAwaitingACK::FailHTLC { htlc_id, err_packet: self }
+ }
+}
+impl FailHTLCContents for ([u8; 32], u16) {
+ type Message = msgs::UpdateFailMalformedHTLC;
+ fn to_message(self, htlc_id: u64, channel_id: ChannelId) -> Self::Message {
+ msgs::UpdateFailMalformedHTLC {
+ htlc_id,
+ channel_id,
+ sha256_of_onion: self.0,
+ failure_code: self.1
+ }
+ }
+ fn to_inbound_htlc_state(self) -> InboundHTLCState {
+ InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailMalformed(self))
+ }
+ fn to_htlc_update_awaiting_ack(self, htlc_id: u64) -> HTLCUpdateAwaitingACK {
+ HTLCUpdateAwaitingACK::FailMalformedHTLC {
+ htlc_id,
+ sha256_of_onion: self.0,
+ failure_code: self.1
+ }
+ }
+}
+
+trait FailHTLCMessageName {
+ fn name() -> &'static str;
+}
+impl FailHTLCMessageName for msgs::UpdateFailHTLC {
+ fn name() -> &'static str {
+ "update_fail_htlc"
+ }
+}
+impl FailHTLCMessageName for msgs::UpdateFailMalformedHTLC {
+ fn name() -> &'static str {
+ "update_fail_malformed_htlc"
+ }
+}
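+
+// A minimal sketch (not part of this patch): `fail_htlc` is generic over
+// `FailHTLCContents`, so the same path emits either wire message; the logged message
+// name tracks the contents type:
+#[cfg(test)]
+mod fail_htlc_message_name_sketch {
+  use super::*;
+
+  #[test]
+  fn names_track_message_type() {
+    assert_eq!(<msgs::UpdateFailHTLC as FailHTLCMessageName>::name(), "update_fail_htlc");
+    assert_eq!(<msgs::UpdateFailMalformedHTLC as FailHTLCMessageName>::name(), "update_fail_malformed_htlc");
+  }
+}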
+
impl<SP: Deref> Channel<SP> where
SP::Target: SignerProvider,
<SP::Target as SignerProvider>::EcdsaSigner: WriteableEcdsaChannelSigner
where L::Target: Logger {
// Assert that we'll add the HTLC claim to the holding cell in `get_update_fulfill_htlc`
// (see equivalent if condition there).
- assert!(self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32) != 0);
+ assert!(!self.context.channel_state.can_generate_new_commitment());
let mon_update_id = self.context.latest_monitor_update_id; // Forget the ChannelMonitor update
let fulfill_resp = self.get_update_fulfill_htlc(htlc_id_arg, payment_preimage_arg, logger);
self.context.latest_monitor_update_id = mon_update_id;
// caller thought we could have something claimed (cause we wouldn't have accepted in an
// incoming HTLC anyway). If we got to ShutdownComplete, callers aren't allowed to call us,
// either.
- if (self.context.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) {
+ if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
panic!("Was asked to fulfill an HTLC when channel was not in an operational state");
}
- assert_eq!(self.context.channel_state & ChannelState::ShutdownComplete as u32, 0);
// ChannelManager may generate duplicate claims/fails due to HTLC update events from
// on-chain ChannelsMonitors during block rescan. Ideally we'd figure out a way to drop
self.context.latest_monitor_update_id += 1;
let monitor_update = ChannelMonitorUpdate {
update_id: self.context.latest_monitor_update_id,
+ counterparty_node_id: Some(self.context.counterparty_node_id),
updates: vec![ChannelMonitorUpdateStep::PaymentPreimage {
payment_preimage: payment_preimage_arg.clone(),
}],
+ channel_id: Some(self.context.channel_id()),
};
- if (self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32)) != 0 {
+ if !self.context.channel_state.can_generate_new_commitment() {
// Note that this condition is the same as the assertion in
// `claim_htlc_while_disconnected_dropping_mon_update` and must match exactly -
// `claim_htlc_while_disconnected_dropping_mon_update` would not work correctly if we
return UpdateFulfillFetch::DuplicateClaim {};
}
},
- &HTLCUpdateAwaitingACK::FailHTLC { htlc_id, .. } => {
+ &HTLCUpdateAwaitingACK::FailHTLC { htlc_id, .. } |
+ &HTLCUpdateAwaitingACK::FailMalformedHTLC { htlc_id, .. } =>
+ {
if htlc_id_arg == htlc_id {
log_warn!(logger, "Have preimage and want to fulfill HTLC with pending failure against channel {}", &self.context.channel_id());
// TODO: We may actually be able to switch to a fulfill here, though its
_ => {}
}
}
- log_trace!(logger, "Adding HTLC claim to holding_cell in channel {}! Current state: {}", &self.context.channel_id(), self.context.channel_state);
+ log_trace!(logger, "Adding HTLC claim to holding_cell in channel {}! Current state: {}", &self.context.channel_id(), self.context.channel_state.to_u32());
self.context.holding_cell_htlc_updates.push(HTLCUpdateAwaitingACK::ClaimHTLC {
payment_preimage: payment_preimage_arg, htlc_id: htlc_id_arg,
});
.map(|msg_opt| assert!(msg_opt.is_none(), "We forced holding cell?"))
}
+ /// Used for failing back with [`msgs::UpdateFailMalformedHTLC`]. For now, this is used when we
+ /// want to fail blinded HTLCs where we are not the intro node.
+ ///
+ /// See [`Self::queue_fail_htlc`] for more info.
+ pub fn queue_fail_malformed_htlc<L: Deref>(
+ &mut self, htlc_id_arg: u64, failure_code: u16, sha256_of_onion: [u8; 32], logger: &L
+ ) -> Result<(), ChannelError> where L::Target: Logger {
+ self.fail_htlc(htlc_id_arg, (sha256_of_onion, failure_code), true, logger)
+ .map(|msg_opt| assert!(msg_opt.is_none(), "We forced holding cell?"))
+ }
+
/// We can only have one resolution per HTLC. In some cases around reconnect, we may fulfill
/// an HTLC more than once or fulfill once and then attempt to fail after reconnect. We cannot,
/// however, fail more than once as we wait for an upstream failure to be irrevocably committed
/// If we do fail twice, we `debug_assert!(false)` and return `Ok(None)`. Thus, this will always
/// return `Ok(_)` if preconditions are met. In any case, `Err`s will only be
/// [`ChannelError::Ignore`].
- fn fail_htlc<L: Deref>(&mut self, htlc_id_arg: u64, err_packet: msgs::OnionErrorPacket, mut force_holding_cell: bool, logger: &L)
- -> Result<Option<msgs::UpdateFailHTLC>, ChannelError> where L::Target: Logger {
- if (self.context.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) {
+ fn fail_htlc<L: Deref, E: FailHTLCContents + Clone>(
+ &mut self, htlc_id_arg: u64, err_contents: E, mut force_holding_cell: bool,
+ logger: &L
+ ) -> Result<Option<E::Message>, ChannelError> where L::Target: Logger {
+ if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
panic!("Was asked to fail an HTLC when channel was not in an operational state");
}
- assert_eq!(self.context.channel_state & ChannelState::ShutdownComplete as u32, 0);
// ChannelManager may generate duplicate claims/fails due to HTLC update events from
// on-chain ChannelsMonitors during block rescan. Ideally we'd figure out a way to drop
return Ok(None);
}
- if (self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32)) != 0 {
+ if !self.context.channel_state.can_generate_new_commitment() {
debug_assert!(force_holding_cell, "!force_holding_cell is only called when emptying the holding cell, so we shouldn't end up back in it!");
force_holding_cell = true;
}
return Ok(None);
}
},
- &HTLCUpdateAwaitingACK::FailHTLC { htlc_id, .. } => {
+ &HTLCUpdateAwaitingACK::FailHTLC { htlc_id, .. } |
+ &HTLCUpdateAwaitingACK::FailMalformedHTLC { htlc_id, .. } =>
+ {
if htlc_id_arg == htlc_id {
debug_assert!(false, "Tried to fail an HTLC that was already failed");
return Err(ChannelError::Ignore("Unable to find a pending HTLC which matched the given HTLC ID".to_owned()));
}
}
log_trace!(logger, "Placing failure for HTLC ID {} in holding cell in channel {}.", htlc_id_arg, &self.context.channel_id());
- self.context.holding_cell_htlc_updates.push(HTLCUpdateAwaitingACK::FailHTLC {
- htlc_id: htlc_id_arg,
- err_packet,
- });
+ self.context.holding_cell_htlc_updates.push(err_contents.to_htlc_update_awaiting_ack(htlc_id_arg));
return Ok(None);
}
- log_trace!(logger, "Failing HTLC ID {} back with a update_fail_htlc message in channel {}.", htlc_id_arg, &self.context.channel_id());
+ log_trace!(logger, "Failing HTLC ID {} back with {} message in channel {}.", htlc_id_arg,
+ E::Message::name(), &self.context.channel_id());
{
let htlc = &mut self.context.pending_inbound_htlcs[pending_idx];
- htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailRelay(err_packet.clone()));
+ htlc.state = err_contents.clone().to_inbound_htlc_state();
}
- Ok(Some(msgs::UpdateFailHTLC {
- channel_id: self.context.channel_id(),
- htlc_id: htlc_id_arg,
- reason: err_packet
- }))
+ Ok(Some(err_contents.to_message(htlc_id_arg, self.context.channel_id())))
}
// Message handlers:
-
- /// Handles a funding_signed message from the remote end.
- /// If this call is successful, broadcast the funding transaction (and not before!)
- pub fn funding_signed<L: Deref>(
- &mut self, msg: &msgs::FundingSigned, best_block: BestBlock, signer_provider: &SP, logger: &L
- ) -> Result<ChannelMonitor<<SP::Target as SignerProvider>::EcdsaSigner>, ChannelError>
- where
- L::Target: Logger
- {
- if !self.context.is_outbound() {
- return Err(ChannelError::Close("Received funding_signed for an inbound channel?".to_owned()));
- }
- if self.context.channel_state & !(ChannelState::MonitorUpdateInProgress as u32) != ChannelState::FundingCreated as u32 {
- return Err(ChannelError::Close("Received funding_signed in strange state!".to_owned()));
- }
- if self.context.commitment_secrets.get_min_seen_secret() != (1 << 48) ||
- self.context.cur_counterparty_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER ||
- self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
- panic!("Should not have advanced channel commitment tx numbers prior to funding_created");
- }
-
- let funding_script = self.context.get_funding_redeemscript();
-
- let counterparty_keys = self.context.build_remote_transaction_keys();
- let counterparty_initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, false, logger).tx;
- let counterparty_trusted_tx = counterparty_initial_commitment_tx.trust();
- let counterparty_initial_bitcoin_tx = counterparty_trusted_tx.built_transaction();
-
- log_trace!(logger, "Initial counterparty tx for channel {} is: txid {} tx {}",
- &self.context.channel_id(), counterparty_initial_bitcoin_tx.txid, encode::serialize_hex(&counterparty_initial_bitcoin_tx.transaction));
-
- let holder_signer = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number);
- let initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &holder_signer, true, false, logger).tx;
- {
- let trusted_tx = initial_commitment_tx.trust();
- let initial_commitment_bitcoin_tx = trusted_tx.built_transaction();
- let sighash = initial_commitment_bitcoin_tx.get_sighash_all(&funding_script, self.context.channel_value_satoshis);
- // They sign our commitment transaction, allowing us to broadcast the tx if we wish.
- if let Err(_) = self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, &self.context.get_counterparty_pubkeys().funding_pubkey) {
- return Err(ChannelError::Close("Invalid funding_signed signature from peer".to_owned()));
- }
- }
-
- let holder_commitment_tx = HolderCommitmentTransaction::new(
- initial_commitment_tx,
- msg.signature,
- Vec::new(),
- &self.context.get_holder_pubkeys().funding_pubkey,
- self.context.counterparty_funding_pubkey()
- );
-
- self.context.holder_signer.as_ref().validate_holder_commitment(&holder_commitment_tx, Vec::new())
- .map_err(|_| ChannelError::Close("Failed to validate our commitment".to_owned()))?;
-
-
- let funding_redeemscript = self.context.get_funding_redeemscript();
- let funding_txo = self.context.get_funding_txo().unwrap();
- let funding_txo_script = funding_redeemscript.to_v0_p2wsh();
- let obscure_factor = get_commitment_transaction_number_obscure_factor(&self.context.get_holder_pubkeys().payment_point, &self.context.get_counterparty_pubkeys().payment_point, self.context.is_outbound());
- let shutdown_script = self.context.shutdown_scriptpubkey.clone().map(|script| script.into_inner());
- let mut monitor_signer = signer_provider.derive_channel_signer(self.context.channel_value_satoshis, self.context.channel_keys_id);
- monitor_signer.provide_channel_parameters(&self.context.channel_transaction_parameters);
- let channel_monitor = ChannelMonitor::new(self.context.secp_ctx.clone(), monitor_signer,
- shutdown_script, self.context.get_holder_selected_contest_delay(),
- &self.context.destination_script, (funding_txo, funding_txo_script),
- &self.context.channel_transaction_parameters,
- funding_redeemscript.clone(), self.context.channel_value_satoshis,
- obscure_factor,
- holder_commitment_tx, best_block, self.context.counterparty_node_id);
-
- channel_monitor.provide_initial_counterparty_commitment_tx(
- counterparty_initial_bitcoin_tx.txid, Vec::new(),
- self.context.cur_counterparty_commitment_transaction_number,
- self.context.counterparty_cur_commitment_point.unwrap(),
- counterparty_initial_commitment_tx.feerate_per_kw(),
- counterparty_initial_commitment_tx.to_broadcaster_value_sat(),
- counterparty_initial_commitment_tx.to_countersignatory_value_sat(), logger);
-
- assert_eq!(self.context.channel_state & (ChannelState::MonitorUpdateInProgress as u32), 0); // We have no had any monitor(s) yet to fail update!
- if self.context.is_batch_funding() {
- self.context.channel_state = ChannelState::FundingSent as u32 | ChannelState::WaitingForBatch as u32;
- } else {
- self.context.channel_state = ChannelState::FundingSent as u32;
- }
- self.context.cur_holder_commitment_transaction_number -= 1;
- self.context.cur_counterparty_commitment_transaction_number -= 1;
-
- log_info!(logger, "Received funding_signed from peer for channel {}", &self.context.channel_id());
-
- let need_channel_ready = self.check_get_channel_ready(0).is_some();
- self.monitor_updating_paused(false, false, need_channel_ready, Vec::new(), Vec::new(), Vec::new());
- Ok(channel_monitor)
- }
-
/// Updates the state of the channel to indicate that all channels in the batch have received
/// funding_signed and persisted their monitors.
/// The funding transaction is consequently allowed to be broadcast, and the channel can be
/// treated as a non-batch channel going forward.
pub fn set_batch_ready(&mut self) {
self.context.is_batch_funding = None;
- self.context.channel_state &= !(ChannelState::WaitingForBatch as u32);
+ self.context.channel_state.clear_waiting_for_batch();
+ }
+
+ /// Unsets the existing funding information.
+ ///
+ /// This must only be used if the channel has not yet completed funding and has not been used.
+ ///
+ /// Further, the channel must be immediately shut down after this with a call to
+ /// [`ChannelContext::force_shutdown`].
+ pub fn unset_funding_info(&mut self, temporary_channel_id: ChannelId) {
+ debug_assert!(matches!(
+ self.context.channel_state, ChannelState::AwaitingChannelReady(_)
+ ));
+ self.context.channel_transaction_parameters.funding_outpoint = None;
+ self.context.channel_id = temporary_channel_id;
}
/// Handles a channel_ready message from our peer. If we've already sent our channel_ready
NS::Target: NodeSigner,
L::Target: Logger
{
- if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
+ if self.context.channel_state.is_peer_disconnected() {
self.context.workaround_lnd_bug_4006 = Some(msg.clone());
return Err(ChannelError::Ignore("Peer sent channel_ready when we needed a channel_reestablish. The peer is likely lnd, see https://github.com/lightningnetwork/lnd/issues/4006".to_owned()));
}
}
}
- let non_shutdown_state = self.context.channel_state & (!MULTI_STATE_FLAGS);
-
// Our channel_ready shouldn't have been sent if we are waiting for other channels in the
// batch, but we can receive channel_ready messages.
- debug_assert!(
- non_shutdown_state & ChannelState::OurChannelReady as u32 == 0 ||
- non_shutdown_state & ChannelState::WaitingForBatch as u32 == 0
- );
- if non_shutdown_state & !(ChannelState::WaitingForBatch as u32) == ChannelState::FundingSent as u32 {
- self.context.channel_state |= ChannelState::TheirChannelReady as u32;
- } else if non_shutdown_state == (ChannelState::FundingSent as u32 | ChannelState::OurChannelReady as u32) {
- self.context.channel_state = ChannelState::ChannelReady as u32 | (self.context.channel_state & MULTI_STATE_FLAGS);
- self.context.update_time_counter += 1;
- } else if self.context.channel_state & (ChannelState::ChannelReady as u32) != 0 ||
- // If we reconnected before sending our `channel_ready` they may still resend theirs:
- (self.context.channel_state & (ChannelState::FundingSent as u32 | ChannelState::TheirChannelReady as u32) ==
- (ChannelState::FundingSent as u32 | ChannelState::TheirChannelReady as u32))
- {
+ let mut check_reconnection = false;
+ match &self.context.channel_state {
+ ChannelState::AwaitingChannelReady(flags) => {
+ let flags = flags.clone().clear(FundedStateFlags::ALL.into());
+ debug_assert!(!flags.is_set(AwaitingChannelReadyFlags::OUR_CHANNEL_READY) || !flags.is_set(AwaitingChannelReadyFlags::WAITING_FOR_BATCH));
+ if flags.clone().clear(AwaitingChannelReadyFlags::WAITING_FOR_BATCH) == AwaitingChannelReadyFlags::THEIR_CHANNEL_READY {
+ // If we reconnected before sending our `channel_ready` they may still resend theirs.
+ check_reconnection = true;
+ } else if flags.clone().clear(AwaitingChannelReadyFlags::WAITING_FOR_BATCH).is_empty() {
+ self.context.channel_state.set_their_channel_ready();
+ } else if flags == AwaitingChannelReadyFlags::OUR_CHANNEL_READY {
+ self.context.channel_state = ChannelState::ChannelReady(self.context.channel_state.with_funded_state_flags_mask().into());
+ self.context.update_time_counter += 1;
+ } else {
+ // We're in `WAITING_FOR_BATCH`, so we should wait until we're ready.
+ debug_assert!(flags.is_set(AwaitingChannelReadyFlags::WAITING_FOR_BATCH));
+ }
+ }
+ // If we reconnected before sending our `channel_ready` they may still resend theirs.
+ ChannelState::ChannelReady(_) => check_reconnection = true,
+ _ => return Err(ChannelError::Close("Peer sent a channel_ready at a strange time".to_owned())),
+ }
+ if check_reconnection {
// They probably disconnected/reconnected and re-sent the channel_ready, which is
// required, or they're sending a fresh SCID alias.
let expected_point =
return Err(ChannelError::Close("Peer sent a reconnect channel_ready with a different point".to_owned()));
}
return Ok(None);
- } else {
- return Err(ChannelError::Close("Peer sent a channel_ready at a strange time".to_owned()));
}
self.context.counterparty_prev_commitment_point = self.context.counterparty_cur_commitment_point;
where F: for<'a> Fn(&'a Self, PendingHTLCStatus, u16) -> PendingHTLCStatus,
FE::Target: FeeEstimator, L::Target: Logger,
{
+ if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
+ return Err(ChannelError::Close("Got add HTLC message when channel was not in an operational state".to_owned()));
+ }
// We can't accept HTLCs sent after we've sent a shutdown.
- let local_sent_shutdown = (self.context.channel_state & (ChannelState::ChannelReady as u32 | ChannelState::LocalShutdownSent as u32)) != (ChannelState::ChannelReady as u32);
- if local_sent_shutdown {
+ if self.context.channel_state.is_local_shutdown_sent() {
pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x4000|8);
}
// If the remote has sent a shutdown prior to adding this HTLC, then they are in violation of the spec.
- let remote_sent_shutdown = (self.context.channel_state & (ChannelState::ChannelReady as u32 | ChannelState::RemoteShutdownSent as u32)) != (ChannelState::ChannelReady as u32);
- if remote_sent_shutdown {
+ if self.context.channel_state.is_remote_shutdown_sent() {
return Err(ChannelError::Close("Got add HTLC message when channel was not in an operational state".to_owned()));
}
- if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
+ if self.context.channel_state.is_peer_disconnected() {
return Err(ChannelError::Close("Peer sent update_add_htlc when we needed a channel_reestablish".to_owned()));
}
if msg.amount_msat > self.context.channel_value_satoshis * 1000 {
return Err(ChannelError::Close("Remote provided CLTV expiry in seconds instead of block height".to_owned()));
}
- if self.context.channel_state & ChannelState::LocalShutdownSent as u32 != 0 {
+ if self.context.channel_state.is_local_shutdown_sent() {
if let PendingHTLCStatus::Forward(_) = pending_forward_status {
panic!("ChannelManager shouldn't be trying to add a forwardable HTLC after we've started closing");
}
Err(ChannelError::Close("Remote tried to fulfill/fail an HTLC we couldn't find".to_owned()))
}
- pub fn update_fulfill_htlc(&mut self, msg: &msgs::UpdateFulfillHTLC) -> Result<(HTLCSource, u64), ChannelError> {
- if (self.context.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) {
+ pub fn update_fulfill_htlc(&mut self, msg: &msgs::UpdateFulfillHTLC) -> Result<(HTLCSource, u64, Option<u64>), ChannelError> {
+ if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
return Err(ChannelError::Close("Got fulfill HTLC message when channel was not in an operational state".to_owned()));
}
- if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
+ if self.context.channel_state.is_peer_disconnected() {
return Err(ChannelError::Close("Peer sent update_fulfill_htlc when we needed a channel_reestablish".to_owned()));
}
- self.mark_outbound_htlc_removed(msg.htlc_id, Some(msg.payment_preimage), None).map(|htlc| (htlc.source.clone(), htlc.amount_msat))
+ self.mark_outbound_htlc_removed(msg.htlc_id, Some(msg.payment_preimage), None).map(|htlc| (htlc.source.clone(), htlc.amount_msat, htlc.skimmed_fee_msat))
}
pub fn update_fail_htlc(&mut self, msg: &msgs::UpdateFailHTLC, fail_reason: HTLCFailReason) -> Result<(), ChannelError> {
- if (self.context.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) {
+ if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
return Err(ChannelError::Close("Got fail HTLC message when channel was not in an operational state".to_owned()));
}
- if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
+ if self.context.channel_state.is_peer_disconnected() {
return Err(ChannelError::Close("Peer sent update_fail_htlc when we needed a channel_reestablish".to_owned()));
}
}
pub fn update_fail_malformed_htlc(&mut self, msg: &msgs::UpdateFailMalformedHTLC, fail_reason: HTLCFailReason) -> Result<(), ChannelError> {
- if (self.context.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) {
+ if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
return Err(ChannelError::Close("Got fail malformed HTLC message when channel was not in an operational state".to_owned()));
}
- if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
+ if self.context.channel_state.is_peer_disconnected() {
return Err(ChannelError::Close("Peer sent update_fail_malformed_htlc when we needed a channel_reestablish".to_owned()));
}
pub fn commitment_signed<L: Deref>(&mut self, msg: &msgs::CommitmentSigned, logger: &L) -> Result<Option<ChannelMonitorUpdate>, ChannelError>
where L::Target: Logger
{
- if (self.context.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) {
+ if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
return Err(ChannelError::Close("Got commitment signed message when channel was not in an operational state".to_owned()));
}
- if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
+ if self.context.channel_state.is_peer_disconnected() {
return Err(ChannelError::Close("Peer sent commitment_signed when we needed a channel_reestablish".to_owned()));
}
- if self.context.channel_state & BOTH_SIDES_SHUTDOWN_MASK == BOTH_SIDES_SHUTDOWN_MASK && self.context.last_sent_closing_fee.is_some() {
+ if self.context.channel_state.is_both_sides_shutdown() && self.context.last_sent_closing_fee.is_some() {
return Err(ChannelError::Close("Peer sent commitment_signed after we'd started exchanging closing_signeds".to_owned()));
}
self.context.counterparty_funding_pubkey()
);
- self.context.holder_signer.as_ref().validate_holder_commitment(&holder_commitment_tx, commitment_stats.preimages)
+ self.context.holder_signer.as_ref().validate_holder_commitment(&holder_commitment_tx, commitment_stats.outbound_htlc_preimages)
.map_err(|_| ChannelError::Close("Failed to validate our commitment".to_owned()))?;
// Update state now that we've passed all the can-fail calls...
self.context.latest_monitor_update_id += 1;
let mut monitor_update = ChannelMonitorUpdate {
update_id: self.context.latest_monitor_update_id,
+ counterparty_node_id: Some(self.context.counterparty_node_id),
updates: vec![ChannelMonitorUpdateStep::LatestHolderCommitmentTXInfo {
commitment_tx: holder_commitment_tx,
htlc_outputs: htlcs_and_sigs,
claimed_htlcs,
nondust_htlc_sources,
- }]
+ }],
+ channel_id: Some(self.context.channel_id()),
};
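+ // Hedged note: `counterparty_node_id` and `channel_id` are carried as `Option`s here,
+ // presumably so that `ChannelMonitorUpdate`s serialized by versions predating these
+ // fields still deserialize cleanly; the exact compatibility rules live in the struct's
+ // ser/de code, not in this hunk.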
self.context.cur_holder_commitment_transaction_number -= 1;
// build_commitment_no_status_check() next which will reset this to RAAFirst.
self.context.resend_order = RAACommitmentOrder::CommitmentFirst;
- if (self.context.channel_state & ChannelState::MonitorUpdateInProgress as u32) != 0 {
+ if self.context.channel_state.is_monitor_update_in_progress() {
// In case we initially failed monitor updating without requiring a response, we need
// to make sure the RAA gets sent first.
self.context.monitor_pending_revoke_and_ack = true;
- if need_commitment && (self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32)) == 0 {
+ if need_commitment && !self.context.channel_state.is_awaiting_remote_revoke() {
// If we were going to send a commitment_signed after the RAA, go ahead and do all
// the corresponding HTLC status updates so that
// get_last_commitment_update_for_send includes the right HTLCs.
return Ok(self.push_ret_blockable_mon_update(monitor_update));
}
- let need_commitment_signed = if need_commitment && (self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32)) == 0 {
+ let need_commitment_signed = if need_commitment && !self.context.channel_state.is_awaiting_remote_revoke() {
// If we're AwaitingRemoteRevoke we can't send a new commitment here, but that's ok -
// we'll send one right away when we get the revoke_and_ack and
// free_holding_cell_htlcs().
) -> (Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>)
where F::Target: FeeEstimator, L::Target: Logger
{
- if self.context.channel_state & !STATE_FLAGS >= ChannelState::ChannelReady as u32 &&
- (self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32)) == 0 {
+ if matches!(self.context.channel_state, ChannelState::ChannelReady(_)) && self.context.channel_state.can_generate_new_commitment() {
self.free_holding_cell_htlcs(fee_estimator, logger)
} else { (None, Vec::new()) }
}
) -> (Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>)
where F::Target: FeeEstimator, L::Target: Logger
{
- assert_eq!(self.context.channel_state & ChannelState::MonitorUpdateInProgress as u32, 0);
+ assert!(!self.context.channel_state.is_monitor_update_in_progress());
if self.context.holding_cell_htlc_updates.len() != 0 || self.context.holding_cell_update_fee.is_some() {
log_trace!(logger, "Freeing holding cell with {} HTLC updates{} in channel {}", self.context.holding_cell_htlc_updates.len(),
if self.context.holding_cell_update_fee.is_some() { " and a fee update" } else { "" }, &self.context.channel_id());
let mut monitor_update = ChannelMonitorUpdate {
update_id: self.context.latest_monitor_update_id + 1, // We don't increment this yet!
+ counterparty_node_id: Some(self.context.counterparty_node_id),
updates: Vec::new(),
+ channel_id: Some(self.context.channel_id()),
};
let mut htlc_updates = Vec::new();
// the limit. In case it's less rare than I anticipate, we may want to revisit
// handling this case better and maybe fulfilling some of the HTLCs while attempting
// to rebalance channels.
- match &htlc_update {
+ let fail_htlc_res = match &htlc_update {
&HTLCUpdateAwaitingACK::AddHTLC {
amount_msat, cltv_expiry, ref payment_hash, ref source, ref onion_routing_packet,
skimmed_fee_msat, blinding_point, ..
}
}
}
+ None
},
&HTLCUpdateAwaitingACK::ClaimHTLC { ref payment_preimage, htlc_id, .. } => {
// If an HTLC claim was previously added to the holding cell (via
{ monitor_update } else { unreachable!() };
update_fulfill_count += 1;
monitor_update.updates.append(&mut additional_monitor_update.updates);
+ None
},
&HTLCUpdateAwaitingACK::FailHTLC { htlc_id, ref err_packet } => {
- match self.fail_htlc(htlc_id, err_packet.clone(), false, logger) {
- Ok(update_fail_msg_option) => {
- // If an HTLC failure was previously added to the holding cell (via
- // `queue_fail_htlc`) then generating the fail message itself must
- // not fail - we should never end up in a state where we double-fail
- // an HTLC or fail-then-claim an HTLC as it indicates we didn't wait
- // for a full revocation before failing.
- debug_assert!(update_fail_msg_option.is_some());
- update_fail_count += 1;
- },
- Err(e) => {
- if let ChannelError::Ignore(_) = e {}
- else {
- panic!("Got a non-IgnoreError action trying to fail holding cell HTLC");
- }
- }
- }
+ Some(self.fail_htlc(htlc_id, err_packet.clone(), false, logger)
+ .map(|fail_msg_opt| fail_msg_opt.map(|_| ())))
},
+ &HTLCUpdateAwaitingACK::FailMalformedHTLC { htlc_id, failure_code, sha256_of_onion } => {
+ Some(self.fail_htlc(htlc_id, (sha256_of_onion, failure_code), false, logger)
+ .map(|fail_msg_opt| fail_msg_opt.map(|_| ())))
+ }
+ };
+ if let Some(res) = fail_htlc_res {
+ match res {
+ Ok(fail_msg_opt) => {
+ // If an HTLC failure was previously added to the holding cell (via
+ // `queue_fail_{malformed_}htlc`) then generating the fail message itself must
+ // not fail - we should never end up in a state where we double-fail
+ // an HTLC or fail-then-claim an HTLC as it indicates we didn't wait
+ // for a full revocation before failing.
+ debug_assert!(fail_msg_opt.is_some());
+ update_fail_count += 1;
+ },
+ Err(ChannelError::Ignore(_)) => {},
+ Err(_) => {
+ panic!("Got a non-IgnoreError action trying to fail holding cell HTLC");
+ },
+ }
}
}
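+ // Both fail arms above funnel into the same `self.fail_htlc(..)` call with different
+ // payloads (an onion error packet vs. a `(sha256_of_onion, failure_code)` pair), which
+ // suggests `fail_htlc` is generic over the failure contents and builds either an
+ // `update_fail_htlc` or an `update_fail_malformed_htlc` message from them. The
+ // `.map(|_| ())` erases the concrete message type so both arms share one
+ // `fail_htlc_res` result type.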
if update_add_count == 0 && update_fulfill_count == 0 && update_fail_count == 0 && self.context.holding_cell_update_fee.is_none() {
) -> Result<(Vec<(HTLCSource, PaymentHash)>, Option<ChannelMonitorUpdate>), ChannelError>
where F::Target: FeeEstimator, L::Target: Logger,
{
- if (self.context.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) {
+ if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
return Err(ChannelError::Close("Got revoke/ACK message when channel was not in an operational state".to_owned()));
}
- if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
+ if self.context.channel_state.is_peer_disconnected() {
return Err(ChannelError::Close("Peer sent revoke_and_ack when we needed a channel_reestablish".to_owned()));
}
- if self.context.channel_state & BOTH_SIDES_SHUTDOWN_MASK == BOTH_SIDES_SHUTDOWN_MASK && self.context.last_sent_closing_fee.is_some() {
+ if self.context.channel_state.is_both_sides_shutdown() && self.context.last_sent_closing_fee.is_some() {
return Err(ChannelError::Close("Peer sent revoke_and_ack after we'd started exchanging closing_signeds".to_owned()));
}
}
}
- if self.context.channel_state & ChannelState::AwaitingRemoteRevoke as u32 == 0 {
+ if !self.context.channel_state.is_awaiting_remote_revoke() {
// Our counterparty seems to have burned their coins to us (by revoking a state when we
// haven't given them a new commitment transaction to broadcast). We should probably
// take advantage of this by updating our channel monitor, sending them an error, and
self.context.latest_monitor_update_id += 1;
let mut monitor_update = ChannelMonitorUpdate {
update_id: self.context.latest_monitor_update_id,
+ counterparty_node_id: Some(self.context.counterparty_node_id),
updates: vec![ChannelMonitorUpdateStep::CommitmentSecret {
idx: self.context.cur_counterparty_commitment_transaction_number + 1,
secret: msg.per_commitment_secret,
}],
+ channel_id: Some(self.context.channel_id()),
};
// Update state now that we've passed all the can-fail calls...
// (note that we may still fail to generate the new commitment_signed message, but that's
// OK, we step the channel here and *then* if the new generation fails we can fail the
// channel based on that, but stepping stuff here should be safe either way.)
- self.context.channel_state &= !(ChannelState::AwaitingRemoteRevoke as u32);
+ self.context.channel_state.clear_awaiting_remote_revoke();
self.context.sent_message_awaiting_response = None;
self.context.counterparty_prev_commitment_point = self.context.counterparty_cur_commitment_point;
self.context.counterparty_cur_commitment_point = Some(msg.next_per_commitment_point);
}
}
- if (self.context.channel_state & ChannelState::MonitorUpdateInProgress as u32) == ChannelState::MonitorUpdateInProgress as u32 {
+ if self.context.channel_state.is_monitor_update_in_progress() {
// We can't actually generate a new commitment transaction (incl by freeing holding
// cells) while we can't update the monitor, so we just return what we have.
if require_commitment {
return None;
}
- if (self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::MonitorUpdateInProgress as u32)) != 0 {
+ if self.context.channel_state.is_awaiting_remote_revoke() || self.context.channel_state.is_monitor_update_in_progress() {
force_holding_cell = true;
}
/// completed.
/// May return `Err(())`, which implies [`ChannelContext::force_shutdown`] should be called immediately.
pub fn remove_uncommitted_htlcs_and_mark_paused<L: Deref>(&mut self, logger: &L) -> Result<(), ()> where L::Target: Logger {
- assert_eq!(self.context.channel_state & ChannelState::ShutdownComplete as u32, 0);
- if self.context.channel_state & !STATE_FLAGS < ChannelState::FundingSent as u32 {
- return Err(());
+ assert!(!matches!(self.context.channel_state, ChannelState::ShutdownComplete));
+ if self.context.channel_state.is_pre_funded_state() {
+ return Err(())
}
- if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == (ChannelState::PeerDisconnected as u32) {
+ if self.context.channel_state.is_peer_disconnected() {
// While the below code should be idempotent, it's simpler to just return early, as
// redundant disconnect events can fire, though they should be rare.
return Ok(());
self.context.sent_message_awaiting_response = None;
- self.context.channel_state |= ChannelState::PeerDisconnected as u32;
+ self.context.channel_state.set_peer_disconnected();
log_trace!(logger, "Peer disconnection resulted in {} remote-announced HTLC drops on channel {}", inbound_drop_count, &self.context.channel_id());
Ok(())
}
self.context.monitor_pending_forwards.append(&mut pending_forwards);
self.context.monitor_pending_failures.append(&mut pending_fails);
self.context.monitor_pending_finalized_fulfills.append(&mut pending_finalized_claimed_htlcs);
- self.context.channel_state |= ChannelState::MonitorUpdateInProgress as u32;
+ self.context.channel_state.set_monitor_update_in_progress();
}
/// Indicates that the latest ChannelMonitor update has been committed by the client
L::Target: Logger,
NS::Target: NodeSigner
{
- assert_eq!(self.context.channel_state & ChannelState::MonitorUpdateInProgress as u32, ChannelState::MonitorUpdateInProgress as u32);
- self.context.channel_state &= !(ChannelState::MonitorUpdateInProgress as u32);
+ assert!(self.context.channel_state.is_monitor_update_in_progress());
+ self.context.channel_state.clear_monitor_update_in_progress();
- // If we're past (or at) the FundingSent stage on an outbound channel, try to
+ // If we're past (or at) the AwaitingChannelReady stage on an outbound channel, try to
// (re-)broadcast the funding transaction as we may have declined to broadcast it when we
// first received the funding_signed.
let mut funding_broadcastable =
- if self.context.is_outbound() && self.context.channel_state & !STATE_FLAGS >= ChannelState::FundingSent as u32 && self.context.channel_state & ChannelState::WaitingForBatch as u32 == 0 {
+ if self.context.is_outbound() &&
+ (matches!(self.context.channel_state, ChannelState::AwaitingChannelReady(flags) if !flags.is_set(AwaitingChannelReadyFlags::WAITING_FOR_BATCH)) ||
+ matches!(self.context.channel_state, ChannelState::ChannelReady(_)))
+ {
self.context.funding_transaction.take()
} else { None };
// That said, if the funding transaction is already confirmed (ie we're active with a
// minimum_depth over 0) don't bother re-broadcasting the confirmed funding tx.
- if self.context.channel_state & !STATE_FLAGS >= ChannelState::ChannelReady as u32 && self.context.minimum_depth != Some(0) {
+ if matches!(self.context.channel_state, ChannelState::ChannelReady(_)) && self.context.minimum_depth != Some(0) {
funding_broadcastable = None;
}
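+ // Sketch of the pattern used above: `matches!` with a binding plus a guard tests the
+ // variant and its flags in one expression, e.g.
+ //   matches!(state, ChannelState::AwaitingChannelReady(flags)
+ //       if !flags.is_set(AwaitingChannelReadyFlags::WAITING_FOR_BATCH))
+ // replaces the old `state & !STATE_FLAGS >= FundingSent && state & WaitingForBatch == 0`
+ // bit arithmetic, and rejects pre-funding states by construction.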
let mut finalized_claimed_htlcs = Vec::new();
mem::swap(&mut finalized_claimed_htlcs, &mut self.context.monitor_pending_finalized_fulfills);
- if self.context.channel_state & (ChannelState::PeerDisconnected as u32) != 0 {
+ if self.context.channel_state.is_peer_disconnected() {
self.context.monitor_pending_revoke_and_ack = false;
self.context.monitor_pending_commitment_signed = false;
return MonitorRestoreUpdates {
if self.context.is_outbound() {
return Err(ChannelError::Close("Non-funding remote tried to update channel fee".to_owned()));
}
- if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
+ if self.context.channel_state.is_peer_disconnected() {
return Err(ChannelError::Close("Peer sent update_fee when we needed a channel_reestablish".to_owned()));
}
Channel::<SP>::check_remote_fee(&self.context.channel_type, fee_estimator, msg.feerate_per_kw, Some(self.context.feerate_per_kw), logger)?;
/// Indicates that the signer may have some signatures for us, so we should retry if we're
/// blocked.
- #[allow(unused)]
+ #[cfg(async_signing)]
pub fn signer_maybe_unblocked<L: Deref>(&mut self, logger: &L) -> SignerResumeUpdates where L::Target: Logger {
let commitment_update = if self.context.signer_pending_commitment_update {
self.get_last_commitment_update_for_send(logger).ok()
let channel_ready = if funding_signed.is_some() {
self.check_get_channel_ready(0)
} else { None };
- let funding_created = if self.context.signer_pending_funding && self.context.is_outbound() {
- self.context.get_funding_created_msg(logger)
- } else { None };
- log_trace!(logger, "Signer unblocked with {} commitment_update, {} funding_signed, {} funding_created, and {} channel_ready",
+ log_trace!(logger, "Signer unblocked with {} commitment_update, {} funding_signed and {} channel_ready",
if commitment_update.is_some() { "a" } else { "no" },
if funding_signed.is_some() { "a" } else { "no" },
- if funding_created.is_some() { "a" } else { "no" },
if channel_ready.is_some() { "a" } else { "no" });
SignerResumeUpdates {
commitment_update,
funding_signed,
- funding_created,
channel_ready,
}
}
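+ // Note: gating on `#[cfg(async_signing)]` rather than `#[allow(unused)]` makes
+ // asynchronous-signer support a build-time opt-in; assuming the usual cfg-flag
+ // convention this would be enabled with something like `RUSTFLAGS="--cfg async_signing"`,
+ // though the flag wiring itself is outside this hunk.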
}
update
} else {
- if !self.context.signer_pending_commitment_update {
- log_trace!(logger, "Commitment update awaiting signer: setting signer_pending_commitment_update");
- self.context.signer_pending_commitment_update = true;
+ #[cfg(not(async_signing))] {
+ panic!("Failed to get signature for new commitment state");
+ }
+ #[cfg(async_signing)] {
+ if !self.context.signer_pending_commitment_update {
+ log_trace!(logger, "Commitment update awaiting signer: setting signer_pending_commitment_update");
+ self.context.signer_pending_commitment_update = true;
+ }
+ return Err(());
}
- return Err(());
};
Ok(msgs::CommitmentUpdate {
update_add_htlcs, update_fulfill_htlcs, update_fail_htlcs, update_fail_malformed_htlcs, update_fee,
/// Gets the `Shutdown` message we should send our peer on reconnect, if any.
pub fn get_outbound_shutdown(&self) -> Option<msgs::Shutdown> {
- if self.context.channel_state & (ChannelState::LocalShutdownSent as u32) != 0 {
+ if self.context.channel_state.is_local_shutdown_sent() {
assert!(self.context.shutdown_scriptpubkey.is_some());
Some(msgs::Shutdown {
channel_id: self.context.channel_id,
L::Target: Logger,
NS::Target: NodeSigner
{
- if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == 0 {
+ if !self.context.channel_state.is_peer_disconnected() {
// While BOLT 2 doesn't indicate explicitly we should error this channel here, it
// almost certainly indicates we are going to end up out-of-sync in some way, so we
// just close here instead of trying to recover.
// Go ahead and unmark PeerDisconnected as various calls we may make check for it (and all
// remaining cases either succeed or ErrorMessage-fail).
- self.context.channel_state &= !(ChannelState::PeerDisconnected as u32);
+ self.context.channel_state.clear_peer_disconnected();
self.context.sent_message_awaiting_response = None;
let shutdown_msg = self.get_outbound_shutdown();
let announcement_sigs = self.get_announcement_sigs(node_signer, chain_hash, user_config, best_block.height(), logger);
- if self.context.channel_state & (ChannelState::FundingSent as u32) == ChannelState::FundingSent as u32 {
+ if matches!(self.context.channel_state, ChannelState::AwaitingChannelReady(_)) {
// If we're waiting on a monitor update, we shouldn't re-send any channel_ready's.
- if self.context.channel_state & (ChannelState::OurChannelReady as u32) == 0 ||
- self.context.channel_state & (ChannelState::MonitorUpdateInProgress as u32) != 0 {
+ if !self.context.channel_state.is_our_channel_ready() ||
+ self.context.channel_state.is_monitor_update_in_progress() {
if msg.next_remote_commitment_number != 0 {
return Err(ChannelError::Close("Peer claimed they saw a revoke_and_ack but we haven't sent channel_ready yet".to_owned()));
}
// Note that if we need to repeat our ChannelReady we'll do that in the next if block.
None
} else if msg.next_remote_commitment_number + 1 == our_commitment_transaction {
- if self.context.channel_state & (ChannelState::MonitorUpdateInProgress as u32) != 0 {
+ if self.context.channel_state.is_monitor_update_in_progress() {
self.context.monitor_pending_revoke_and_ack = true;
None
} else {
// revoke_and_ack, not on sending commitment_signed, so we add one if we have
// AwaitingRemoteRevoke set, which indicates we sent a commitment_signed but haven't gotten
// the corresponding revoke_and_ack back yet.
- let is_awaiting_remote_revoke = self.context.channel_state & ChannelState::AwaitingRemoteRevoke as u32 != 0;
+ let is_awaiting_remote_revoke = self.context.channel_state.is_awaiting_remote_revoke();
if is_awaiting_remote_revoke && !self.is_awaiting_monitor_update() {
self.mark_awaiting_response();
}
log_debug!(logger, "Reconnected channel {} with only lost remote commitment tx", &self.context.channel_id());
}
- if self.context.channel_state & (ChannelState::MonitorUpdateInProgress as u32) != 0 {
+ if self.context.channel_state.is_monitor_update_in_progress() {
self.context.monitor_pending_commitment_signed = true;
Ok(ReestablishResponses {
channel_ready, shutdown_msg, announcement_sigs,
&mut self, signer_provider: &SP, their_features: &InitFeatures, msg: &msgs::Shutdown
) -> Result<(Option<msgs::Shutdown>, Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>), ChannelError>
{
- if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
+ if self.context.channel_state.is_peer_disconnected() {
return Err(ChannelError::Close("Peer sent shutdown when we needed a channel_reestablish".to_owned()));
}
- if self.context.channel_state & !STATE_FLAGS < ChannelState::FundingSent as u32 {
+ if self.context.channel_state.is_pre_funded_state() {
// Spec says we should fail the connection, not the channel, but that's nonsense, there
// are plenty of reasons you may want to fail a channel pre-funding, and spec says you
// can do that via error message without getting a connection fail anyway...
return Err(ChannelError::Close("Got shutdown with remote pending HTLCs".to_owned()));
}
}
- assert_eq!(self.context.channel_state & ChannelState::ShutdownComplete as u32, 0);
+ assert!(!matches!(self.context.channel_state, ChannelState::ShutdownComplete));
if !script::is_bolt2_compliant(&msg.scriptpubkey, their_features) {
return Err(ChannelError::Warn(format!("Got a nonstandard scriptpubkey ({}) from remote peer", msg.scriptpubkey.to_hex_string())));
// If we have any LocalAnnounced updates we'll probably just get back an update_fail_htlc
// immediately after the commitment dance, but we can send a Shutdown because we won't send
// any further commitment updates after we set LocalShutdownSent.
- let send_shutdown = (self.context.channel_state & ChannelState::LocalShutdownSent as u32) != ChannelState::LocalShutdownSent as u32;
+ let send_shutdown = !self.context.channel_state.is_local_shutdown_sent();
let update_shutdown_script = match self.context.shutdown_scriptpubkey {
Some(_) => false,
// From here on out, we may not fail!
- self.context.channel_state |= ChannelState::RemoteShutdownSent as u32;
+ self.context.channel_state.set_remote_shutdown_sent();
self.context.update_time_counter += 1;
let monitor_update = if update_shutdown_script {
self.context.latest_monitor_update_id += 1;
let monitor_update = ChannelMonitorUpdate {
update_id: self.context.latest_monitor_update_id,
+ counterparty_node_id: Some(self.context.counterparty_node_id),
updates: vec![ChannelMonitorUpdateStep::ShutdownScript {
scriptpubkey: self.get_closing_scriptpubkey(),
}],
+ channel_id: Some(self.context.channel_id()),
};
self.monitor_updating_paused(false, false, false, Vec::new(), Vec::new(), Vec::new());
self.push_ret_blockable_mon_update(monitor_update)
}
});
- self.context.channel_state |= ChannelState::LocalShutdownSent as u32;
+ self.context.channel_state.set_local_shutdown_sent();
self.context.update_time_counter += 1;
Ok((shutdown, monitor_update, dropped_outbound_htlcs))
-> Result<(Option<msgs::ClosingSigned>, Option<Transaction>, Option<ShutdownResult>), ChannelError>
where F::Target: FeeEstimator
{
- if self.context.channel_state & BOTH_SIDES_SHUTDOWN_MASK != BOTH_SIDES_SHUTDOWN_MASK {
+ if !self.context.channel_state.is_both_sides_shutdown() {
return Err(ChannelError::Close("Remote end sent us a closing_signed before both sides provided a shutdown".to_owned()));
}
- if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
+ if self.context.channel_state.is_peer_disconnected() {
return Err(ChannelError::Close("Peer sent closing_signed when we needed a channel_reestablish".to_owned()));
}
if !self.context.pending_inbound_htlcs.is_empty() || !self.context.pending_outbound_htlcs.is_empty() {
return Err(ChannelError::Close("Remote tried to send a closing_signed when we were supposed to propose the first one".to_owned()));
}
- if self.context.channel_state & ChannelState::MonitorUpdateInProgress as u32 != 0 {
+ if self.context.channel_state.is_monitor_update_in_progress() {
self.context.pending_counterparty_closing_signed = Some(msg.clone());
return Ok((None, None, None));
}
if let Some((last_fee, sig)) = self.context.last_sent_closing_fee {
if last_fee == msg.fee_satoshis {
let shutdown_result = ShutdownResult {
+ closure_reason: ClosureReason::CooperativeClosure,
monitor_update: None,
dropped_outbound_htlcs: Vec::new(),
unbroadcasted_batch_funding_txid: self.context.unbroadcasted_batch_funding_txid(),
+ channel_id: self.context.channel_id,
+ user_channel_id: self.context.user_id,
+ channel_capacity_satoshis: self.context.channel_value_satoshis,
+ counterparty_node_id: self.context.counterparty_node_id,
+ unbroadcasted_funding_tx: self.context.unbroadcasted_funding(),
+ channel_funding_txo: self.context.get_funding_txo(),
};
let tx = self.build_signed_closing_transaction(&mut closing_tx, &msg.signature, &sig);
- self.context.channel_state = ChannelState::ShutdownComplete as u32;
+ self.context.channel_state = ChannelState::ShutdownComplete;
self.context.update_time_counter += 1;
return Ok((None, Some(tx), Some(shutdown_result)));
}
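+ // Hedged note: the fields populated above (closure_reason, channel_id, user_channel_id,
+ // capacity, counterparty, funding txo/tx) make `ShutdownResult` self-describing, which
+ // would let the caller emit its channel-closed event without needing the channel
+ // itself; the same fields are filled in the macro arm below.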
.map_err(|_| ChannelError::Close("External signer refused to sign closing transaction".to_owned()))?;
let (signed_tx, shutdown_result) = if $new_fee == msg.fee_satoshis {
let shutdown_result = ShutdownResult {
+ closure_reason: ClosureReason::CooperativeClosure,
monitor_update: None,
dropped_outbound_htlcs: Vec::new(),
unbroadcasted_batch_funding_txid: self.context.unbroadcasted_batch_funding_txid(),
+ channel_id: self.context.channel_id,
+ user_channel_id: self.context.user_id,
+ channel_capacity_satoshis: self.context.channel_value_satoshis,
+ counterparty_node_id: self.context.counterparty_node_id,
+ unbroadcasted_funding_tx: self.context.unbroadcasted_funding(),
+ channel_funding_txo: self.context.get_funding_txo(),
};
- self.context.channel_state = ChannelState::ShutdownComplete as u32;
+ self.context.channel_state = ChannelState::ShutdownComplete;
self.context.update_time_counter += 1;
let tx = self.build_signed_closing_transaction(&closing_tx, &msg.signature, &sig);
(Some(tx), Some(shutdown_result))
}
pub fn get_cur_counterparty_commitment_transaction_number(&self) -> u64 {
- self.context.cur_counterparty_commitment_transaction_number + 1 - if self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32) != 0 { 1 } else { 0 }
+ self.context.cur_counterparty_commitment_transaction_number + 1 - if self.context.channel_state.is_awaiting_remote_revoke() { 1 } else { 0 }
}
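+ // Commitment numbers count down from INITIAL_COMMITMENT_NUMBER as commitments are
+ // exchanged, and `cur_counterparty_commitment_transaction_number` tracks the next one
+ // to build; the extra `- 1` while `is_awaiting_remote_revoke()` accounts for the
+ // commitment_signed we have already sent but which the counterparty has not yet
+ // answered with a revoke_and_ack (a hedged reading of the formula above).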
pub fn get_revoked_counterparty_commitment_transaction_number(&self) -> u64 {
/// Returns true if this channel has been marked as awaiting a monitor update to move forward.
/// Allowed in any state (including after shutdown)
pub fn is_awaiting_monitor_update(&self) -> bool {
- (self.context.channel_state & ChannelState::MonitorUpdateInProgress as u32) != 0
+ self.context.channel_state.is_monitor_update_in_progress()
}
/// Gets the latest [`ChannelMonitorUpdate`] ID which has been released and is in-flight.
/// advanced state.
pub fn is_awaiting_initial_mon_persist(&self) -> bool {
if !self.is_awaiting_monitor_update() { return false; }
- if self.context.channel_state &
- !(ChannelState::TheirChannelReady as u32 | ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32 | ChannelState::WaitingForBatch as u32)
- == ChannelState::FundingSent as u32 {
+ if matches!(
+ self.context.channel_state, ChannelState::AwaitingChannelReady(flags)
+ if flags.clone().clear(AwaitingChannelReadyFlags::THEIR_CHANNEL_READY | FundedStateFlags::PEER_DISCONNECTED.into() | FundedStateFlags::MONITOR_UPDATE_IN_PROGRESS.into() | AwaitingChannelReadyFlags::WAITING_FOR_BATCH).is_empty()
+ ) {
// If we're not a 0conf channel, we'll be waiting on a monitor update with only
- // FundingSent set, though our peer could have sent their channel_ready.
+ // AwaitingChannelReady set, though our peer could have sent their channel_ready.
debug_assert!(self.context.minimum_depth.unwrap_or(1) > 0);
return true;
}
if self.context.cur_holder_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 1 &&
self.context.cur_counterparty_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 1 {
- // If we're a 0-conf channel, we'll move beyond FundingSent immediately even while
+ // If we're a 0-conf channel, we'll move beyond AwaitingChannelReady immediately even while
// waiting for the initial monitor persistence. Thus, we check if our commitment
// transaction numbers have both been iterated only exactly once (for the
// funding_signed), and we're awaiting monitor update.
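+ // Concretely: both counters start at INITIAL_COMMITMENT_NUMBER (2^48 - 1) and are
+ // decremented once when the initial commitment transaction is exchanged at funding,
+ // so "both equal INITIAL_COMMITMENT_NUMBER - 1" identifies a channel that has signed
+ // exactly the funding-time commitment and nothing since.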
/// Returns true if our channel_ready has been sent
pub fn is_our_channel_ready(&self) -> bool {
- (self.context.channel_state & ChannelState::OurChannelReady as u32) != 0 || self.context.channel_state & !STATE_FLAGS >= ChannelState::ChannelReady as u32
+ matches!(self.context.channel_state, ChannelState::AwaitingChannelReady(flags) if flags.is_set(AwaitingChannelReadyFlags::OUR_CHANNEL_READY)) ||
+ matches!(self.context.channel_state, ChannelState::ChannelReady(_))
}
/// Returns true if our peer has either initiated or agreed to shut down the channel.
pub fn received_shutdown(&self) -> bool {
- (self.context.channel_state & ChannelState::RemoteShutdownSent as u32) != 0
+ self.context.channel_state.is_remote_shutdown_sent()
}
/// Returns true if we either initiated or agreed to shut down the channel.
pub fn sent_shutdown(&self) -> bool {
- (self.context.channel_state & ChannelState::LocalShutdownSent as u32) != 0
+ self.context.channel_state.is_local_shutdown_sent()
}
/// Returns true if this channel is fully shut down. True here implies that no further actions
/// may/will be taken on this channel, and thus this object should be freed. Any future changes
/// will be handled appropriately by the chain monitor.
pub fn is_shutdown(&self) -> bool {
- if (self.context.channel_state & ChannelState::ShutdownComplete as u32) == ChannelState::ShutdownComplete as u32 {
- assert!(self.context.channel_state == ChannelState::ShutdownComplete as u32);
- true
- } else { false }
+ matches!(self.context.channel_state, ChannelState::ShutdownComplete)
}
pub fn channel_update_status(&self) -> ChannelUpdateStatus {
// Note that we don't include the WAITING_FOR_BATCH flag here, as we don't want to send
// channel_ready until the entire batch is ready.
- let non_shutdown_state = self.context.channel_state & (!MULTI_STATE_FLAGS);
- let need_commitment_update = if non_shutdown_state == ChannelState::FundingSent as u32 {
- self.context.channel_state |= ChannelState::OurChannelReady as u32;
+ let need_commitment_update = if matches!(self.context.channel_state, ChannelState::AwaitingChannelReady(f) if f.clone().clear(FundedStateFlags::ALL.into()).is_empty()) {
+ self.context.channel_state.set_our_channel_ready();
true
- } else if non_shutdown_state == (ChannelState::FundingSent as u32 | ChannelState::TheirChannelReady as u32) {
- self.context.channel_state = ChannelState::ChannelReady as u32 | (self.context.channel_state & MULTI_STATE_FLAGS);
+ } else if matches!(self.context.channel_state, ChannelState::AwaitingChannelReady(f) if f.clone().clear(FundedStateFlags::ALL.into()) == AwaitingChannelReadyFlags::THEIR_CHANNEL_READY) {
+ self.context.channel_state = ChannelState::ChannelReady(self.context.channel_state.with_funded_state_flags_mask().into());
self.context.update_time_counter += 1;
true
- } else if non_shutdown_state == (ChannelState::FundingSent as u32 | ChannelState::OurChannelReady as u32) {
+ } else if matches!(self.context.channel_state, ChannelState::AwaitingChannelReady(f) if f.clone().clear(FundedStateFlags::ALL.into()) == AwaitingChannelReadyFlags::OUR_CHANNEL_READY) {
// We got a reorg but not enough to trigger a force close, just ignore.
false
} else {
- if self.context.funding_tx_confirmation_height != 0 && self.context.channel_state & !STATE_FLAGS < ChannelState::ChannelReady as u32 {
+ if self.context.funding_tx_confirmation_height != 0 &&
+ self.context.channel_state < ChannelState::ChannelReady(ChannelReadyFlags::new())
+ {
// We should never see a funding transaction on-chain until we've received
// funding_signed (if we're an outbound channel), or seen funding_generated (if we're
// an inbound channel - before that we have no known funding TXID). The fuzzer,
// however, may do this and we shouldn't treat it as a bug.
#[cfg(not(fuzzing))]
- panic!("Started confirming a channel in a state pre-FundingSent: {}.\n\
+ panic!("Started confirming a channel in a state pre-AwaitingChannelReady: {}.\n\
Do NOT broadcast a funding transaction manually - let LDK do it for you!",
- self.context.channel_state);
+ self.context.channel_state.to_u32());
}
// We got a reorg but not enough to trigger a force close, just ignore.
false
};
if need_commitment_update {
- if self.context.channel_state & (ChannelState::MonitorUpdateInProgress as u32) == 0 {
- if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == 0 {
+ if !self.context.channel_state.is_monitor_update_in_progress() {
+ if !self.context.channel_state.is_peer_disconnected() {
let next_per_commitment_point =
self.context.holder_signer.as_ref().get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - 1, &self.context.secp_ctx);
return Some(msgs::ChannelReady {
return Ok((Some(channel_ready), timed_out_htlcs, announcement_sigs));
}
- let non_shutdown_state = self.context.channel_state & (!MULTI_STATE_FLAGS);
- if non_shutdown_state & !STATE_FLAGS >= ChannelState::ChannelReady as u32 ||
- (non_shutdown_state & ChannelState::OurChannelReady as u32) == ChannelState::OurChannelReady as u32 {
+ if matches!(self.context.channel_state, ChannelState::ChannelReady(_)) ||
+ self.context.channel_state.is_our_channel_ready() {
let mut funding_tx_confirmations = height as i64 - self.context.funding_tx_confirmation_height as i64 + 1;
if self.context.funding_tx_confirmation_height == 0 {
// Note that check_get_channel_ready may reset funding_tx_confirmation_height to
height >= self.context.channel_creation_height + FUNDING_CONF_DEADLINE_BLOCKS {
log_info!(logger, "Closing channel {} due to funding timeout", &self.context.channel_id);
// If funding_tx_confirmed_in is unset, the channel must not be active
- assert!(non_shutdown_state & !STATE_FLAGS <= ChannelState::ChannelReady as u32);
- assert_eq!(non_shutdown_state & ChannelState::OurChannelReady as u32, 0);
+ assert!(self.context.channel_state <= ChannelState::ChannelReady(ChannelReadyFlags::new()));
+ assert!(!self.context.channel_state.is_our_channel_ready());
return Err(ClosureReason::FundingTimedOut);
}
// larger. If we don't know that time has moved forward, we can just set it to the last
// time we saw and it will be ignored.
let best_time = self.context.update_time_counter;
- match self.do_best_block_updated(reorg_height, best_time, None::<(ChainHash, &&NodeSigner, &UserConfig)>, logger) {
+ match self.do_best_block_updated(reorg_height, best_time, None::<(ChainHash, &&dyn NodeSigner, &UserConfig)>, logger) {
Ok((channel_ready, timed_out_htlcs, announcement_sigs)) => {
assert!(channel_ready.is_none(), "We can't generate a funding with 0 confirmations?");
assert!(timed_out_htlcs.is_empty(), "We can't have accepted HTLCs with a timeout before our funding confirmation?");
return None;
}
- if self.context.channel_state & ChannelState::PeerDisconnected as u32 != 0 {
+ if self.context.channel_state.is_peer_disconnected() {
log_trace!(logger, "Cannot create an announcement_signatures as our peer is disconnected");
return None;
}
/// May panic if called on a channel that wasn't immediately-previously
/// self.remove_uncommitted_htlcs_and_mark_paused()'d
pub fn get_channel_reestablish<L: Deref>(&mut self, logger: &L) -> msgs::ChannelReestablish where L::Target: Logger {
- assert_eq!(self.context.channel_state & ChannelState::PeerDisconnected as u32, ChannelState::PeerDisconnected as u32);
+ assert!(self.context.channel_state.is_peer_disconnected());
assert_ne!(self.context.cur_counterparty_commitment_transaction_number, INITIAL_COMMITMENT_NUMBER);
// Prior to static_remotekey, my_current_per_commitment_point was critical to claiming
// current to_remote balances. However, it no longer has any use, and thus is now simply
// (which is one further, as they always revoke previous commitment transaction, not
// the one we send) so we have to decrement by 1. Note that if
// cur_counterparty_commitment_transaction_number is INITIAL_COMMITMENT_NUMBER we will have
- // dropped this channel on disconnect as it hasn't yet reached FundingSent so we can't
+ // dropped this channel on disconnect as it hasn't yet reached AwaitingChannelReady so we can't
// overflow here.
next_remote_commitment_number: INITIAL_COMMITMENT_NUMBER - self.context.cur_counterparty_commitment_transaction_number - 1,
your_last_per_commitment_secret: remote_last_secret,
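+ // Worked example of the subtraction above: immediately after funding,
+ // cur_counterparty_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 1, so
+ // we send INITIAL_COMMITMENT_NUMBER - (INITIAL_COMMITMENT_NUMBER - 1) - 1 = 0, i.e.
+ // "no revocation received yet"; and since a channel still at INITIAL_COMMITMENT_NUMBER
+ // would have been dropped on disconnect, the `- 1` cannot underflow.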
) -> Result<Option<msgs::UpdateAddHTLC>, ChannelError>
where F::Target: FeeEstimator, L::Target: Logger
{
- if (self.context.channel_state & (ChannelState::ChannelReady as u32 | BOTH_SIDES_SHUTDOWN_MASK)) != (ChannelState::ChannelReady as u32) {
+ if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) ||
+ self.context.channel_state.is_local_shutdown_sent() ||
+ self.context.channel_state.is_remote_shutdown_sent()
+ {
return Err(ChannelError::Ignore("Cannot send HTLC until channel is fully established and we haven't started shutting down".to_owned()));
}
let channel_total_msat = self.context.channel_value_satoshis * 1000;
available_balances.next_outbound_htlc_limit_msat)));
}
- if (self.context.channel_state & (ChannelState::PeerDisconnected as u32)) != 0 {
+ if self.context.channel_state.is_peer_disconnected() {
// Note that this should never really happen, if we're !is_live() on receipt of an
// incoming HTLC for relay will result in us rejecting the HTLC and we won't allow
// the user to send directly into a !is_live() channel. However, if we
return Err(ChannelError::Ignore("Cannot send an HTLC while disconnected from channel counterparty".to_owned()));
}
- let need_holding_cell = (self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::MonitorUpdateInProgress as u32)) != 0;
+ let need_holding_cell = !self.context.channel_state.can_generate_new_commitment();
log_debug!(logger, "Pushing new outbound HTLC with hash {} for {} msat {}",
payment_hash, amount_msat,
if force_holding_cell { "into holding cell" }
self.context.latest_monitor_update_id += 1;
let monitor_update = ChannelMonitorUpdate {
update_id: self.context.latest_monitor_update_id,
+ counterparty_node_id: Some(self.context.counterparty_node_id),
updates: vec![ChannelMonitorUpdateStep::LatestCounterpartyCommitmentTXInfo {
commitment_txid: counterparty_commitment_txid,
htlc_outputs: htlcs.clone(),
feerate_per_kw: Some(counterparty_commitment_tx.feerate_per_kw()),
to_broadcaster_value_sat: Some(counterparty_commitment_tx.to_broadcaster_value_sat()),
to_countersignatory_value_sat: Some(counterparty_commitment_tx.to_countersignatory_value_sat()),
- }]
+ }],
+ channel_id: Some(self.context.channel_id()),
};
- self.context.channel_state |= ChannelState::AwaitingRemoteRevoke as u32;
+ self.context.channel_state.set_awaiting_remote_revoke();
monitor_update
}
htlcs.push(htlc);
}
- let res = ecdsa.sign_counterparty_commitment(&commitment_stats.tx, commitment_stats.preimages, &self.context.secp_ctx)
- .map_err(|_| ChannelError::Ignore("Failed to get signatures for new commitment_signed".to_owned()))?;
+ let res = ecdsa.sign_counterparty_commitment(
+ &commitment_stats.tx,
+ commitment_stats.inbound_htlc_preimages,
+ commitment_stats.outbound_htlc_preimages,
+ &self.context.secp_ctx,
+ ).map_err(|_| ChannelError::Ignore("Failed to get signatures for new commitment_signed".to_owned()))?;
signature = res.0;
htlc_signatures = res.1;
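+ // Hedged note: the signer now receives the inbound and outbound HTLC preimage sets
+ // separately (previously a single combined `preimages` list), presumably so a
+ // validating signer can distinguish preimages that justify claiming inbound HTLCs from
+ // those attached to outbound ones; only the call site is visible in this hunk.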
/// Begins the shutdown process, getting a message for the remote peer and returning all
/// holding cell HTLCs for payment failure.
- ///
- /// May jump to the channel being fully shutdown (see [`Self::is_shutdown`]) in which case no
- /// [`ChannelMonitorUpdate`] will be returned).
pub fn get_shutdown(&mut self, signer_provider: &SP, their_features: &InitFeatures,
target_feerate_sats_per_kw: Option<u32>, override_shutdown_script: Option<ShutdownScript>)
- -> Result<(msgs::Shutdown, Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>, Option<ShutdownResult>), APIError>
+ -> Result<(msgs::Shutdown, Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>), APIError>
{
for htlc in self.context.pending_outbound_htlcs.iter() {
if let OutboundHTLCState::LocalAnnounced(_) = htlc.state {
return Err(APIError::APIMisuseError{err: "Cannot begin shutdown with pending HTLCs. Process pending events first".to_owned()});
}
}
- if self.context.channel_state & BOTH_SIDES_SHUTDOWN_MASK != 0 {
- if (self.context.channel_state & ChannelState::LocalShutdownSent as u32) == ChannelState::LocalShutdownSent as u32 {
- return Err(APIError::APIMisuseError{err: "Shutdown already in progress".to_owned()});
- }
- else if (self.context.channel_state & ChannelState::RemoteShutdownSent as u32) == ChannelState::RemoteShutdownSent as u32 {
- return Err(APIError::ChannelUnavailable{err: "Shutdown already in progress by remote".to_owned()});
- }
+ if self.context.channel_state.is_local_shutdown_sent() {
+ return Err(APIError::APIMisuseError{err: "Shutdown already in progress".to_owned()});
+ }
+ else if self.context.channel_state.is_remote_shutdown_sent() {
+ return Err(APIError::ChannelUnavailable{err: "Shutdown already in progress by remote".to_owned()});
}
if self.context.shutdown_scriptpubkey.is_some() && override_shutdown_script.is_some() {
return Err(APIError::APIMisuseError{err: "Cannot override shutdown script for a channel with one already set".to_owned()});
}
- assert_eq!(self.context.channel_state & ChannelState::ShutdownComplete as u32, 0);
- if self.context.channel_state & (ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32) != 0 {
+ assert!(!matches!(self.context.channel_state, ChannelState::ShutdownComplete));
+ if self.context.channel_state.is_peer_disconnected() || self.context.channel_state.is_monitor_update_in_progress() {
return Err(APIError::ChannelUnavailable{err: "Cannot begin shutdown while peer is disconnected or we're waiting on a monitor update, maybe force-close instead?".to_owned()});
}
- // If we haven't funded the channel yet, we don't need to bother ensuring the shutdown
- // script is set, we just force-close and call it a day.
- let mut chan_closed = false;
- if self.context.channel_state & !STATE_FLAGS < ChannelState::FundingSent as u32 {
- chan_closed = true;
- }
-
let update_shutdown_script = match self.context.shutdown_scriptpubkey {
Some(_) => false,
- None if !chan_closed => {
+ None => {
// use override shutdown script if provided
let shutdown_scriptpubkey = match override_shutdown_script {
Some(script) => script,
self.context.shutdown_scriptpubkey = Some(shutdown_scriptpubkey);
true
},
- None => false,
};
// From here on out, we may not fail!
self.context.target_closing_feerate_sats_per_kw = target_feerate_sats_per_kw;
- let shutdown_result = if self.context.channel_state & !STATE_FLAGS < ChannelState::FundingSent as u32 {
- let shutdown_result = ShutdownResult {
- monitor_update: None,
- dropped_outbound_htlcs: Vec::new(),
- unbroadcasted_batch_funding_txid: self.context.unbroadcasted_batch_funding_txid(),
- };
- self.context.channel_state = ChannelState::ShutdownComplete as u32;
- Some(shutdown_result)
- } else {
- self.context.channel_state |= ChannelState::LocalShutdownSent as u32;
- None
- };
+ self.context.channel_state.set_local_shutdown_sent();
self.context.update_time_counter += 1;
let monitor_update = if update_shutdown_script {
self.context.latest_monitor_update_id += 1;
let monitor_update = ChannelMonitorUpdate {
update_id: self.context.latest_monitor_update_id,
+ counterparty_node_id: Some(self.context.counterparty_node_id),
updates: vec![ChannelMonitorUpdateStep::ShutdownScript {
scriptpubkey: self.get_closing_scriptpubkey(),
}],
+ channel_id: Some(self.context.channel_id()),
};
self.monitor_updating_paused(false, false, false, Vec::new(), Vec::new(), Vec::new());
self.push_ret_blockable_mon_update(monitor_update)
debug_assert!(!self.is_shutdown() || monitor_update.is_none(),
"we can't both complete shutdown and return a monitor update");
- Ok((shutdown, monitor_update, dropped_outbound_htlcs, shutdown_result))
+ Ok((shutdown, monitor_update, dropped_outbound_htlcs))
}
pub fn inflight_htlc_sources(&self) -> impl Iterator<Item=(&HTLCSource, &PaymentHash)> {
channel_id: temporary_channel_id,
temporary_channel_id: Some(temporary_channel_id),
- channel_state: ChannelState::OurInitSent as u32,
+ channel_state: ChannelState::NegotiatingFunding(NegotiatingFundingFlags::OUR_INIT_SENT),
announcement_sigs_state: AnnouncementSigsState::NotSent,
secp_ctx,
channel_value_satoshis,
})
}
+ /// Only allowed after [`ChannelContext::channel_transaction_parameters`] is set.
+ fn get_funding_created_msg<L: Deref>(&mut self, logger: &L) -> Option<msgs::FundingCreated> where L::Target: Logger {
+ let counterparty_keys = self.context.build_remote_transaction_keys();
+ let counterparty_initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, false, logger).tx;
+ let signature = match &self.context.holder_signer {
+ // TODO (taproot|arik): move match into calling method for Taproot
+ ChannelSignerType::Ecdsa(ecdsa) => {
+ ecdsa.sign_counterparty_commitment(&counterparty_initial_commitment_tx, Vec::new(), Vec::new(), &self.context.secp_ctx)
+ .map(|(sig, _)| sig).ok()?
+ },
+ // TODO (taproot|arik)
+ #[cfg(taproot)]
+ _ => todo!()
+ };
+
+ if self.context.signer_pending_funding {
+ log_trace!(logger, "Counterparty commitment signature ready for funding_created message: clearing signer_pending_funding");
+ self.context.signer_pending_funding = false;
+ }
+
+ Some(msgs::FundingCreated {
+ temporary_channel_id: self.context.temporary_channel_id.unwrap(),
+ funding_txid: self.context.channel_transaction_parameters.funding_outpoint.as_ref().unwrap().txid,
+ funding_output_index: self.context.channel_transaction_parameters.funding_outpoint.as_ref().unwrap().index,
+ signature,
+ #[cfg(taproot)]
+ partial_signature_with_nonce: None,
+ #[cfg(taproot)]
+ next_local_nonce: None,
+ })
+ }
+
/// Updates channel state with knowledge of the funding transaction's txid/index, and generates
/// a funding_created message for the remote peer.
/// Panics if called at some time other than immediately after initial handshake, if called twice,
/// Note that channel_id changes during this call!
/// Do NOT broadcast the funding transaction until after a successful funding_signed call!
/// If an Err is returned, it is a ChannelError::Close.
- pub fn get_funding_created<L: Deref>(mut self, funding_transaction: Transaction, funding_txo: OutPoint, is_batch_funding: bool, logger: &L)
- -> Result<(Channel<SP>, Option<msgs::FundingCreated>), (Self, ChannelError)> where L::Target: Logger {
+ pub fn get_funding_created<L: Deref>(&mut self, funding_transaction: Transaction, funding_txo: OutPoint, is_batch_funding: bool, logger: &L)
+ -> Result<Option<msgs::FundingCreated>, (Self, ChannelError)> where L::Target: Logger {
if !self.context.is_outbound() {
panic!("Tried to create outbound funding_created message on an inbound channel!");
}
- if self.context.channel_state != (ChannelState::OurInitSent as u32 | ChannelState::TheirInitSent as u32) {
+ if !matches!(
+ self.context.channel_state, ChannelState::NegotiatingFunding(flags)
+ if flags == (NegotiatingFundingFlags::OUR_INIT_SENT | NegotiatingFundingFlags::THEIR_INIT_SENT)
+ ) {
panic!("Tried to get a funding_created messsage at a time other than immediately after initial handshake completion (or tried to get funding_created twice)");
}
if self.context.commitment_secrets.get_min_seen_secret() != (1 << 48) ||
// Now that we're past error-generating stuff, update our local state:
- self.context.channel_state = ChannelState::FundingCreated as u32;
- self.context.channel_id = funding_txo.to_channel_id();
+ self.context.channel_state = ChannelState::FundingNegotiated;
+ self.context.channel_id = ChannelId::v1_from_funding_outpoint(funding_txo);
// If the funding transaction is a coinbase transaction, we need to set the minimum depth to 100.
// We can skip this if it is a zero-conf channel.
self.context.funding_transaction = Some(funding_transaction);
self.context.is_batch_funding = Some(()).filter(|_| is_batch_funding);
- let funding_created = self.context.get_funding_created_msg(logger);
+ let funding_created = self.get_funding_created_msg(logger);
if funding_created.is_none() {
- if !self.context.signer_pending_funding {
- log_trace!(logger, "funding_created awaiting signer; setting signer_pending_funding");
- self.context.signer_pending_funding = true;
+ #[cfg(not(async_signing))] {
+ panic!("Failed to get signature for new funding creation");
+ }
+ #[cfg(async_signing)] {
+ if !self.context.signer_pending_funding {
+ log_trace!(logger, "funding_created awaiting signer; setting signer_pending_funding");
+ self.context.signer_pending_funding = true;
+ }
}
}
- let channel = Channel {
- context: self.context,
- };
-
- Ok((channel, funding_created))
+ Ok(funding_created)
}
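+ // Hedged usage sketch (hypothetical caller, not taken from this diff):
+ //   match chan.get_funding_created(tx, funding_txo, false, &logger) {
+ //       Ok(Some(msg)) => { /* send funding_created to the peer */ },
+ //       Ok(None) => { /* signer pending; retry once it is unblocked */ },
+ //       Err((chan, e)) => { /* surface the ChannelError */ },
+ //   }
+ // Taking `&mut self` rather than consuming `self` is what makes the retry branch
+ // possible when the signature is not yet available.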
fn get_initial_channel_type(config: &UserConfig, their_features: &InitFeatures) -> ChannelTypeFeatures {
where
F::Target: FeeEstimator
{
- if !self.context.is_outbound() || self.context.channel_state != ChannelState::OurInitSent as u32 { return Err(()); }
+ if !self.context.is_outbound() ||
+ !matches!(
+ self.context.channel_state, ChannelState::NegotiatingFunding(flags)
+ if flags == NegotiatingFundingFlags::OUR_INIT_SENT
+ )
+ {
+ return Err(());
+ }
if self.context.channel_type == ChannelTypeFeatures::only_static_remote_key() {
// We've exhausted our options
return Err(());
if !self.context.is_outbound() {
panic!("Tried to open a channel for an inbound channel?");
}
- if self.context.channel_state != ChannelState::OurInitSent as u32 {
+ if self.context.have_received_message() {
panic!("Cannot generate an open_channel after we've moved forward");
}
if !self.context.is_outbound() {
return Err(ChannelError::Close("Got an accept_channel message from an inbound peer".to_owned()));
}
- if self.context.channel_state != ChannelState::OurInitSent as u32 {
+ if !matches!(self.context.channel_state, ChannelState::NegotiatingFunding(flags) if flags == NegotiatingFundingFlags::OUR_INIT_SENT) {
return Err(ChannelError::Close("Got an accept_channel message at a strange time".to_owned()));
}
if msg.dust_limit_satoshis > 21000000 * 100000000 {
self.context.counterparty_cur_commitment_point = Some(msg.first_per_commitment_point);
self.context.counterparty_shutdown_scriptpubkey = counterparty_shutdown_scriptpubkey;
- self.context.channel_state = ChannelState::OurInitSent as u32 | ChannelState::TheirInitSent as u32;
+ self.context.channel_state = ChannelState::NegotiatingFunding(
+ NegotiatingFundingFlags::OUR_INIT_SENT | NegotiatingFundingFlags::THEIR_INIT_SENT
+ );
self.context.inbound_handshake_limits_override = None; // We're done enforcing limits on our peer's handshake now.
Ok(())
}
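+ // Handshake progress is now a flag set on a single `NegotiatingFunding` variant rather
+ // than two separate bit states, so "both init messages exchanged" is the union built
+ // above:
+ //   NegotiatingFundingFlags::OUR_INIT_SENT | NegotiatingFundingFlags::THEIR_INIT_SENT
+ // which is exactly what the `matches!` guards earlier in this diff test for.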
+
+ /// Handles a funding_signed message from the remote end.
+ /// If this call is successful, broadcast the funding transaction (and not before!)
+ pub fn funding_signed<L: Deref>(
+ mut self, msg: &msgs::FundingSigned, best_block: BestBlock, signer_provider: &SP, logger: &L
+ ) -> Result<(Channel<SP>, ChannelMonitor<<SP::Target as SignerProvider>::EcdsaSigner>), (OutboundV1Channel<SP>, ChannelError)>
+ where
+ L::Target: Logger
+ {
+ if !self.context.is_outbound() {
+ return Err((self, ChannelError::Close("Received funding_signed for an inbound channel?".to_owned())));
+ }
+ if !matches!(self.context.channel_state, ChannelState::FundingNegotiated) {
+ return Err((self, ChannelError::Close("Received funding_signed in strange state!".to_owned())));
+ }
+ if self.context.commitment_secrets.get_min_seen_secret() != (1 << 48) ||
+ self.context.cur_counterparty_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER ||
+ self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
+ panic!("Should not have advanced channel commitment tx numbers prior to funding_created");
+ }
+
+ let funding_script = self.context.get_funding_redeemscript();
+
+ let counterparty_keys = self.context.build_remote_transaction_keys();
+ let counterparty_initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, false, logger).tx;
+ let counterparty_trusted_tx = counterparty_initial_commitment_tx.trust();
+ let counterparty_initial_bitcoin_tx = counterparty_trusted_tx.built_transaction();
+
+ log_trace!(logger, "Initial counterparty tx for channel {} is: txid {} tx {}",
+ &self.context.channel_id(), counterparty_initial_bitcoin_tx.txid, encode::serialize_hex(&counterparty_initial_bitcoin_tx.transaction));
+
+ let holder_signer = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number);
+ let initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &holder_signer, true, false, logger).tx;
+ {
+ let trusted_tx = initial_commitment_tx.trust();
+ let initial_commitment_bitcoin_tx = trusted_tx.built_transaction();
+ let sighash = initial_commitment_bitcoin_tx.get_sighash_all(&funding_script, self.context.channel_value_satoshis);
+ // They sign our commitment transaction, allowing us to broadcast the tx if we wish.
+ if let Err(_) = self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, &self.context.get_counterparty_pubkeys().funding_pubkey) {
+ return Err((self, ChannelError::Close("Invalid funding_signed signature from peer".to_owned())));
+ }
+ }
+
+ let holder_commitment_tx = HolderCommitmentTransaction::new(
+ initial_commitment_tx,
+ msg.signature,
+ Vec::new(),
+ &self.context.get_holder_pubkeys().funding_pubkey,
+ self.context.counterparty_funding_pubkey()
+ );
+
+ let validated =
+ self.context.holder_signer.as_ref().validate_holder_commitment(&holder_commitment_tx, Vec::new());
+ if validated.is_err() {
+ return Err((self, ChannelError::Close("Failed to validate our commitment".to_owned())));
+ }
+
+ let funding_redeemscript = self.context.get_funding_redeemscript();
+ let funding_txo = self.context.get_funding_txo().unwrap();
+ let funding_txo_script = funding_redeemscript.to_v0_p2wsh();
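+ // Per BOLT 3, commitment numbers are obscured by XORing their lower 48 bits with
+ // SHA256(open_channel payment_basepoint || accept_channel payment_basepoint); the
+ // monitor needs the same factor to recognize commitment transactions on-chain.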
+ let obscure_factor = get_commitment_transaction_number_obscure_factor(&self.context.get_holder_pubkeys().payment_point, &self.context.get_counterparty_pubkeys().payment_point, self.context.is_outbound());
+ let shutdown_script = self.context.shutdown_scriptpubkey.clone().map(|script| script.into_inner());
+ let mut monitor_signer = signer_provider.derive_channel_signer(self.context.channel_value_satoshis, self.context.channel_keys_id);
+ monitor_signer.provide_channel_parameters(&self.context.channel_transaction_parameters);
+ let channel_monitor = ChannelMonitor::new(self.context.secp_ctx.clone(), monitor_signer,
+ shutdown_script, self.context.get_holder_selected_contest_delay(),
+ &self.context.destination_script, (funding_txo, funding_txo_script),
+ &self.context.channel_transaction_parameters,
+ funding_redeemscript.clone(), self.context.channel_value_satoshis,
+ obscure_factor,
+ holder_commitment_tx, best_block, self.context.counterparty_node_id, self.context.channel_id());
+ channel_monitor.provide_initial_counterparty_commitment_tx(
+ counterparty_initial_bitcoin_tx.txid, Vec::new(),
+ self.context.cur_counterparty_commitment_transaction_number,
+ self.context.counterparty_cur_commitment_point.unwrap(),
+ counterparty_initial_commitment_tx.feerate_per_kw(),
+ counterparty_initial_commitment_tx.to_broadcaster_value_sat(),
+ counterparty_initial_commitment_tx.to_countersignatory_value_sat(), logger);
+
+ assert!(!self.context.channel_state.is_monitor_update_in_progress()); // We have not had any monitor(s) yet to fail an update on!
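+ // If this channel is part of a batch funding transaction, hold off on sending
+ // channel_ready and broadcasting the funding transaction until the whole batch is ready.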
+ if self.context.is_batch_funding() {
+ self.context.channel_state = ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::WAITING_FOR_BATCH);
+ } else {
+ self.context.channel_state = ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::new());
+ }
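+ // Commitment transaction numbers count down from INITIAL_COMMITMENT_NUMBER, so moving
+ // past the initial commitments means decrementing both counters.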
+ self.context.cur_holder_commitment_transaction_number -= 1;
+ self.context.cur_counterparty_commitment_transaction_number -= 1;
+
+ log_info!(logger, "Received funding_signed from peer for channel {}", &self.context.channel_id());
+
+ let mut channel = Channel { context: self.context };
+
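+ // With a minimum_depth of zero (0conf), the channel may already be usable, in which
+ // case channel_ready should go out as soon as the monitor update completes.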
+ let need_channel_ready = channel.check_get_channel_ready(0).is_some();
+ channel.monitor_updating_paused(false, false, need_channel_ready, Vec::new(), Vec::new(), Vec::new());
+ Ok((channel, channel_monitor))
+ }
+
+ /// Indicates that the signer may have some signatures for us, so we should retry if we're
+ /// blocked.
+ #[cfg(async_signing)]
+ pub fn signer_maybe_unblocked<L: Deref>(&mut self, logger: &L) -> Option<msgs::FundingCreated> where L::Target: Logger {
+ if self.context.signer_pending_funding && self.context.is_outbound() {
+ log_trace!(logger, "Signer unblocked a funding_created");
+ self.get_funding_created_msg(logger)
+ } else { None }
+ }
}
/// A not-yet-funded inbound (from counterparty) channel using V1 channel establishment.
pub unfunded_context: UnfundedChannelContext,
}
+/// Fetches the [`ChannelTypeFeatures`] that will be used for a channel built from a given
+/// [`msgs::OpenChannel`].
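+/// Errors with [`ChannelError::Close`] if the requested channel type is unsupported or is
+/// inconsistent with the announced-channel bit in `channel_flags`.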
+pub(super) fn channel_type_from_open_channel(
+ msg: &msgs::OpenChannel, their_features: &InitFeatures,
+ our_supported_features: &ChannelTypeFeatures
+) -> Result<ChannelTypeFeatures, ChannelError> {
+ if let Some(channel_type) = &msg.channel_type {
+ if channel_type.supports_any_optional_bits() {
+ return Err(ChannelError::Close("Channel Type field contained optional bits - this is not allowed".to_owned()));
+ }
+
+ // We only support the channel types defined by the `ChannelManager` in
+ // `provided_channel_type_features`. The channel type must always support
+ // `static_remote_key`.
+ if !channel_type.requires_static_remote_key() {
+ return Err(ChannelError::Close("Channel Type was not understood - we require static remote key".to_owned()));
+ }
+ // Make sure we support all of the features behind the channel type.
+ if !channel_type.is_subset(our_supported_features) {
+ return Err(ChannelError::Close("Channel Type contains unsupported features".to_owned()));
+ }
+ let announced_channel = (msg.channel_flags & 1) == 1;
+ if channel_type.requires_scid_privacy() && announced_channel {
+ return Err(ChannelError::Close("SCID Alias/Privacy Channel Type cannot be set on a public channel".to_owned()));
+ }
+ Ok(channel_type.clone())
+ } else {
+ let channel_type = ChannelTypeFeatures::from_init(&their_features);
+ if channel_type != ChannelTypeFeatures::only_static_remote_key() {
+ return Err(ChannelError::Close("Only static_remote_key is supported for non-negotiated channel types".to_owned()));
+ }
+ Ok(channel_type)
+ }
+}
+
impl<SP: Deref> InboundV1Channel<SP> where SP::Target: SignerProvider {
/// Creates a new channel from a remote side's request for one.
/// Assumes chain_hash has already been checked and corresponds with what we expect!
// First check the channel type is known, failing before we do anything else if we don't
// support this channel type.
- let channel_type = if let Some(channel_type) = &msg.channel_type {
- if channel_type.supports_any_optional_bits() {
- return Err(ChannelError::Close("Channel Type field contained optional bits - this is not allowed".to_owned()));
- }
-
- // We only support the channel types defined by the `ChannelManager` in
- // `provided_channel_type_features`. The channel type must always support
- // `static_remote_key`.
- if !channel_type.requires_static_remote_key() {
- return Err(ChannelError::Close("Channel Type was not understood - we require static remote key".to_owned()));
- }
- // Make sure we support all of the features behind the channel type.
- if !channel_type.is_subset(our_supported_features) {
- return Err(ChannelError::Close("Channel Type contains unsupported features".to_owned()));
- }
- if channel_type.requires_scid_privacy() && announced_channel {
- return Err(ChannelError::Close("SCID Alias/Privacy Channel Type cannot be set on a public channel".to_owned()));
- }
- channel_type.clone()
- } else {
- let channel_type = ChannelTypeFeatures::from_init(&their_features);
- if channel_type != ChannelTypeFeatures::only_static_remote_key() {
- return Err(ChannelError::Close("Only static_remote_key is supported for non-negotiated channel types".to_owned()));
- }
- channel_type
- };
+ let channel_type = channel_type_from_open_channel(msg, their_features, our_supported_features)?;
let channel_keys_id = signer_provider.generate_channel_keys_id(true, msg.funding_satoshis, user_id);
let holder_signer = signer_provider.derive_channel_signer(msg.funding_satoshis, channel_keys_id);
temporary_channel_id: Some(msg.temporary_channel_id),
channel_id: msg.temporary_channel_id,
- channel_state: (ChannelState::OurInitSent as u32) | (ChannelState::TheirInitSent as u32),
+ channel_state: ChannelState::NegotiatingFunding(
+ NegotiatingFundingFlags::OUR_INIT_SENT | NegotiatingFundingFlags::THEIR_INIT_SENT
+ ),
announcement_sigs_state: AnnouncementSigsState::NotSent,
secp_ctx,
if self.context.is_outbound() {
panic!("Tried to send accept_channel for an outbound channel?");
}
- if self.context.channel_state != (ChannelState::OurInitSent as u32) | (ChannelState::TheirInitSent as u32) {
+ if !matches!(
+ self.context.channel_state, ChannelState::NegotiatingFunding(flags)
+ if flags == (NegotiatingFundingFlags::OUR_INIT_SENT | NegotiatingFundingFlags::THEIR_INIT_SENT)
+ ) {
panic!("Tried to send accept_channel after channel had moved forward");
}
if self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
if self.context.is_outbound() {
return Err((self, ChannelError::Close("Received funding_created for an outbound channel?".to_owned())));
}
- if self.context.channel_state != (ChannelState::OurInitSent as u32 | ChannelState::TheirInitSent as u32) {
+ if !matches!(
+ self.context.channel_state, ChannelState::NegotiatingFunding(flags)
+ if flags == (NegotiatingFundingFlags::OUR_INIT_SENT | NegotiatingFundingFlags::THEIR_INIT_SENT)
+ ) {
// BOLT 2 says that if we disconnect before we send funding_signed we SHOULD NOT
// remember the channel, so it's safe to just send an error_message here and drop the
// channel.
// Now that we're past error-generating stuff, update our local state:
- self.context.channel_state = ChannelState::FundingSent as u32;
- self.context.channel_id = funding_txo.to_channel_id();
+ self.context.channel_state = ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::new());
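+ // The v1 channel_id is the funding txid with the funding output index XORed into its
+ // last two bytes (per BOLT 2).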
+ self.context.channel_id = ChannelId::v1_from_funding_outpoint(funding_txo);
self.context.cur_counterparty_commitment_transaction_number -= 1;
self.context.cur_holder_commitment_transaction_number -= 1;
&self.context.channel_transaction_parameters,
funding_redeemscript.clone(), self.context.channel_value_satoshis,
obscure_factor,
- holder_commitment_tx, best_block, self.context.counterparty_node_id);
-
+ holder_commitment_tx, best_block, self.context.counterparty_node_id, self.context.channel_id());
channel_monitor.provide_initial_counterparty_commitment_tx(
counterparty_initial_commitment_tx.trust().txid(), Vec::new(),
self.context.cur_counterparty_commitment_transaction_number + 1,
writer.write_all(&[0; 8])?;
self.context.channel_id.write(writer)?;
- (self.context.channel_state | ChannelState::PeerDisconnected as u32).write(writer)?;
+ {
+ let mut channel_state = self.context.channel_state;
+ if matches!(channel_state, ChannelState::AwaitingChannelReady(_)|ChannelState::ChannelReady(_)) {
+ channel_state.set_peer_disconnected();
+ } else {
+ debug_assert!(false, "Pre-funded/shutdown channels should not be written");
+ }
+ channel_state.to_u32().write(writer)?;
+ }
self.context.channel_value_satoshis.write(writer)?;
self.context.latest_monitor_update_id.write(writer)?;
let mut pending_outbound_blinding_points: Vec<Option<PublicKey>> = Vec::new();
(self.context.pending_outbound_htlcs.len() as u64).write(writer)?;
- for (idx, htlc) in self.context.pending_outbound_htlcs.iter().enumerate() {
+ for htlc in self.context.pending_outbound_htlcs.iter() {
htlc.htlc_id.write(writer)?;
htlc.amount_msat.write(writer)?;
htlc.cltv_expiry.write(writer)?;
reason.write(writer)?;
}
}
- if let Some(skimmed_fee) = htlc.skimmed_fee_msat {
- if pending_outbound_skimmed_fees.is_empty() {
- for _ in 0..idx { pending_outbound_skimmed_fees.push(None); }
- }
- pending_outbound_skimmed_fees.push(Some(skimmed_fee));
- } else if !pending_outbound_skimmed_fees.is_empty() {
- pending_outbound_skimmed_fees.push(None);
- }
+ pending_outbound_skimmed_fees.push(htlc.skimmed_fee_msat);
pending_outbound_blinding_points.push(htlc.blinding_point);
}
let mut holding_cell_skimmed_fees: Vec<Option<u64>> = Vec::new();
let mut holding_cell_blinding_points: Vec<Option<PublicKey>> = Vec::new();
+ // Vec of (htlc_id, failure_code, sha256_of_onion)
+ let mut malformed_htlcs: Vec<(u64, u16, [u8; 32])> = Vec::new();
(self.context.holding_cell_htlc_updates.len() as u64).write(writer)?;
- for (idx, update) in self.context.holding_cell_htlc_updates.iter().enumerate() {
+ for update in self.context.holding_cell_htlc_updates.iter() {
match update {
&HTLCUpdateAwaitingACK::AddHTLC {
ref amount_msat, ref cltv_expiry, ref payment_hash, ref source, ref onion_routing_packet,
source.write(writer)?;
onion_routing_packet.write(writer)?;
- if let Some(skimmed_fee) = skimmed_fee_msat {
- if holding_cell_skimmed_fees.is_empty() {
- for _ in 0..idx { holding_cell_skimmed_fees.push(None); }
- }
- holding_cell_skimmed_fees.push(Some(skimmed_fee));
- } else if !holding_cell_skimmed_fees.is_empty() { holding_cell_skimmed_fees.push(None); }
-
+ holding_cell_skimmed_fees.push(skimmed_fee_msat);
holding_cell_blinding_points.push(blinding_point);
},
&HTLCUpdateAwaitingACK::ClaimHTLC { ref payment_preimage, ref htlc_id } => {
htlc_id.write(writer)?;
err_packet.write(writer)?;
}
+ &HTLCUpdateAwaitingACK::FailMalformedHTLC {
+ htlc_id, failure_code, sha256_of_onion
+ } => {
+ // We don't want to break downgrading by adding a new variant, so serialize a dummy
+ // `::FailHTLC` entry here and carry the real malformed-HTLC error in an optional TLV.
+ malformed_htlcs.push((htlc_id, failure_code, sha256_of_onion));
+
+ let dummy_err_packet = msgs::OnionErrorPacket { data: Vec::new() };
+ 2u8.write(writer)?;
+ htlc_id.write(writer)?;
+ dummy_err_packet.write(writer)?;
+ }
}
}
(38, self.context.is_batch_funding, option),
(39, pending_outbound_blinding_points, optional_vec),
(41, holding_cell_blinding_points, optional_vec),
+ (43, malformed_htlcs, optional_vec), // Added in 0.0.119
});
Ok(())
}
let channel_id = Readable::read(reader)?;
- let channel_state = Readable::read(reader)?;
+ let channel_state = ChannelState::from_u32(Readable::read(reader)?).map_err(|_| DecodeError::InvalidValue)?;
let channel_value_satoshis = Readable::read(reader)?;
let latest_monitor_update_id = Readable::read(reader)?;
let mut pending_outbound_blinding_points_opt: Option<Vec<Option<PublicKey>>> = None;
let mut holding_cell_blinding_points_opt: Option<Vec<Option<PublicKey>>> = None;
+ let mut malformed_htlcs: Option<Vec<(u64, u16, [u8; 32])>> = None;
+
read_tlv_fields!(reader, {
(0, announcement_sigs, option),
(1, minimum_depth, option),
(38, is_batch_funding, option),
(39, pending_outbound_blinding_points_opt, optional_vec),
(41, holding_cell_blinding_points_opt, optional_vec),
+ (43, malformed_htlcs, optional_vec), // Added in 0.0.119
});
let (channel_keys_id, holder_signer) = if let Some(channel_keys_id) = channel_keys_id {
let mut holder_signer = signer_provider.derive_channel_signer(channel_value_satoshis, channel_keys_id);
// If we've gotten to the funding stage of the channel, populate the signer with its
// required channel parameters.
- let non_shutdown_state = channel_state & (!MULTI_STATE_FLAGS);
- if non_shutdown_state & !STATE_FLAGS >= (ChannelState::FundingCreated as u32) {
+ if channel_state >= ChannelState::FundingNegotiated {
holder_signer.provide_channel_parameters(&channel_parameters);
}
(channel_keys_id, holder_signer)
if iter.next().is_some() { return Err(DecodeError::InvalidValue) }
}
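+ // Any malformed-HTLC failures were written as dummy `FailHTLC` holding cell entries,
+ // with the real data carried in TLV 43; restore the `FailMalformedHTLC` variant here.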
+ if let Some(malformed_htlcs) = malformed_htlcs {
+ for (malformed_htlc_id, failure_code, sha256_of_onion) in malformed_htlcs {
+ let htlc_idx = holding_cell_htlc_updates.iter().position(|htlc| {
+ if let HTLCUpdateAwaitingACK::FailHTLC { htlc_id, err_packet } = htlc {
+ let matches = *htlc_id == malformed_htlc_id;
+ if matches { debug_assert!(err_packet.data.is_empty()) }
+ matches
+ } else { false }
+ }).ok_or(DecodeError::InvalidValue)?;
+ let malformed_htlc = HTLCUpdateAwaitingACK::FailMalformedHTLC {
+ htlc_id: malformed_htlc_id, failure_code, sha256_of_onion
+ };
+ let _ = core::mem::replace(&mut holding_cell_htlc_updates[htlc_idx], malformed_htlc);
+ }
+ }
+
Ok(Channel {
context: ChannelContext {
user_id,
use bitcoin::blockdata::transaction::{Transaction, TxOut};
use bitcoin::blockdata::opcodes;
use bitcoin::network::constants::Network;
+ use crate::ln::onion_utils::INVALID_ONION_BLINDING;
use crate::ln::{PaymentHash, PaymentPreimage};
use crate::ln::channel_keys::{RevocationKey, RevocationBasepoint};
use crate::ln::channelmanager::{self, HTLCSource, PaymentId};
use crate::ln::channel::InitFeatures;
- use crate::ln::channel::{Channel, ChannelState, InboundHTLCOutput, OutboundV1Channel, InboundV1Channel, OutboundHTLCOutput, InboundHTLCState, OutboundHTLCState, HTLCCandidate, HTLCInitiator, HTLCUpdateAwaitingACK, commit_tx_fee_msat};
+ use crate::ln::channel::{AwaitingChannelReadyFlags, Channel, ChannelState, InboundHTLCOutput, OutboundV1Channel, InboundV1Channel, OutboundHTLCOutput, InboundHTLCState, OutboundHTLCState, HTLCCandidate, HTLCInitiator, HTLCUpdateAwaitingACK, commit_tx_fee_msat};
use crate::ln::channel::{MAX_FUNDING_SATOSHIS_NO_WUMBO, TOTAL_BITCOIN_SUPPLY_SATOSHIS, MIN_THEIR_CHAN_RESERVE_SATOSHIS};
use crate::ln::features::{ChannelFeatures, ChannelTypeFeatures, NodeFeatures};
use crate::ln::msgs;
use bitcoin::address::{WitnessProgram, WitnessVersion};
use crate::prelude::*;
+ #[test]
+ fn test_channel_state_order() {
+ use crate::ln::channel::NegotiatingFundingFlags;
+ use crate::ln::channel::AwaitingChannelReadyFlags;
+ use crate::ln::channel::ChannelReadyFlags;
+
+ assert!(ChannelState::NegotiatingFunding(NegotiatingFundingFlags::new()) < ChannelState::FundingNegotiated);
+ assert!(ChannelState::FundingNegotiated < ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::new()));
+ assert!(ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::new()) < ChannelState::ChannelReady(ChannelReadyFlags::new()));
+ assert!(ChannelState::ChannelReady(ChannelReadyFlags::new()) < ChannelState::ShutdownComplete);
+ }
+
struct TestFeeEstimator {
fee_est: u32
}
value: 10000000, script_pubkey: output_script.clone(),
}]};
let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 };
- let (mut node_a_chan, funding_created_msg) = node_a_chan.get_funding_created(tx.clone(), funding_outpoint, false, &&logger).map_err(|_| ()).unwrap();
+ let funding_created_msg = node_a_chan.get_funding_created(tx.clone(), funding_outpoint, false, &&logger).map_err(|_| ()).unwrap();
let (_, funding_signed_msg, _) = node_b_chan.funding_created(&funding_created_msg.unwrap(), best_block, &&keys_provider, &&logger).map_err(|_| ()).unwrap();
// Node B --> Node A: funding signed
- let _ = node_a_chan.funding_signed(&funding_signed_msg.unwrap(), best_block, &&keys_provider, &&logger).unwrap();
+ let res = node_a_chan.funding_signed(&funding_signed_msg.unwrap(), best_block, &&keys_provider, &&logger);
+ let (mut node_a_chan, _) = if let Ok(res) = res { res } else { panic!(); };
// Put some inbound and outbound HTLCs in A's channel.
let htlc_amount_msat = 11_092_000; // put an amount below A's effective dust limit but above B's.
value: 10000000, script_pubkey: output_script.clone(),
}]};
let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 };
- let (mut node_a_chan, funding_created_msg) = node_a_chan.get_funding_created(tx.clone(), funding_outpoint, false, &&logger).map_err(|_| ()).unwrap();
+ let funding_created_msg = node_a_chan.get_funding_created(tx.clone(), funding_outpoint, false, &&logger).map_err(|_| ()).unwrap();
let (mut node_b_chan, funding_signed_msg, _) = node_b_chan.funding_created(&funding_created_msg.unwrap(), best_block, &&keys_provider, &&logger).map_err(|_| ()).unwrap();
// Node B --> Node A: funding signed
- let _ = node_a_chan.funding_signed(&funding_signed_msg.unwrap(), best_block, &&keys_provider, &&logger).unwrap();
+ let res = node_a_chan.funding_signed(&funding_signed_msg.unwrap(), best_block, &&keys_provider, &&logger);
+ let (mut node_a_chan, _) = if let Ok(res) = res { res } else { panic!(); };
// Now disconnect the two nodes and check that the commitment point in
// Node B's channel_reestablish message is sane.
value: 10000000, script_pubkey: output_script.clone(),
}]};
let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 };
- let (mut node_a_chan, funding_created_msg) = node_a_chan.get_funding_created(tx.clone(), funding_outpoint, false, &&logger).map_err(|_| ()).unwrap();
+ let funding_created_msg = node_a_chan.get_funding_created(tx.clone(), funding_outpoint, false, &&logger).map_err(|_| ()).unwrap();
let (_, funding_signed_msg, _) = node_b_chan.funding_created(&funding_created_msg.unwrap(), best_block, &&keys_provider, &&logger).map_err(|_| ()).unwrap();
// Node B --> Node A: funding signed
- let _ = node_a_chan.funding_signed(&funding_signed_msg.unwrap(), best_block, &&keys_provider, &&logger).unwrap();
+ let res = node_a_chan.funding_signed(&funding_signed_msg.unwrap(), best_block, &&keys_provider, &&logger);
+ let (mut node_a_chan, _) = if let Ok(res) = res { res } else { panic!(); };
// Make sure that receiving a channel update will update the Channel as expected.
let update = ChannelUpdate {
}
#[test]
- fn blinding_point_ser() {
- // Ensure that channel blinding points are (de)serialized properly.
+ fn blinding_point_skimmed_fee_malformed_ser() {
+ // Ensure that channel blinding points, skimmed fees, and malformed HTLCs are (de)serialized
+ // properly.
+ let logger = test_utils::TestLogger::new();
let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
let secp_ctx = Secp256k1::new();
let seed = [42; 32];
let network = Network::Testnet;
+ let best_block = BestBlock::from_network(network);
let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
let config = UserConfig::default();
let features = channelmanager::provided_init_features(&config);
- let outbound_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &features, 10000000, 100000, 42, &config, 0, 42, None).unwrap();
- let mut chan = Channel { context: outbound_chan.context };
+ let mut outbound_chan = OutboundV1Channel::<&TestKeysInterface>::new(
+ &feeest, &&keys_provider, &&keys_provider, node_b_node_id, &features, 10000000, 100000, 42, &config, 0, 42, None
+ ).unwrap();
+ let inbound_chan = InboundV1Channel::<&TestKeysInterface>::new(
+ &feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_channel_type_features(&config),
+ &features, &outbound_chan.get_open_channel(ChainHash::using_genesis_block(network)), 7, &config, 0, &&logger, false
+ ).unwrap();
+ outbound_chan.accept_channel(&inbound_chan.get_accept_channel_message(), &config.channel_handshake_limits, &features).unwrap();
+ let tx = Transaction { version: 1, lock_time: LockTime::ZERO, input: Vec::new(), output: vec![TxOut {
+ value: 10000000, script_pubkey: outbound_chan.context.get_funding_redeemscript(),
+ }]};
+ let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 };
+ let funding_created = outbound_chan.get_funding_created(tx.clone(), funding_outpoint, false, &&logger).map_err(|_| ()).unwrap().unwrap();
+ let mut chan = match inbound_chan.funding_created(&funding_created, best_block, &&keys_provider, &&logger) {
+ Ok((chan, _, _)) => chan,
+ Err((_, e)) => panic!("{}", e),
+ };
let dummy_htlc_source = HTLCSource::OutboundRoute {
path: Path {
if idx % 2 == 0 {
htlc.blinding_point = Some(test_utils::pubkey(42 + idx as u8));
}
+ if idx % 3 == 0 {
+ htlc.skimmed_fee_msat = Some(1);
+ }
}
chan.context.pending_outbound_htlcs = pending_outbound_htlcs.clone();
payment_preimage: PaymentPreimage([42; 32]),
htlc_id: 0,
};
- let mut holding_cell_htlc_updates = Vec::with_capacity(10);
- for i in 0..10 {
- if i % 3 == 0 {
+ let dummy_holding_cell_failed_htlc = |htlc_id| HTLCUpdateAwaitingACK::FailHTLC {
+ htlc_id, err_packet: msgs::OnionErrorPacket { data: vec![42] }
+ };
+ let dummy_holding_cell_malformed_htlc = |htlc_id| HTLCUpdateAwaitingACK::FailMalformedHTLC {
+ htlc_id, failure_code: INVALID_ONION_BLINDING, sha256_of_onion: [0; 32],
+ };
+ let mut holding_cell_htlc_updates = Vec::with_capacity(12);
+ for i in 0..12 {
+ if i % 5 == 0 {
holding_cell_htlc_updates.push(dummy_holding_cell_add_htlc.clone());
- } else if i % 3 == 1 {
+ } else if i % 5 == 1 {
holding_cell_htlc_updates.push(dummy_holding_cell_claim_htlc.clone());
- } else {
+ } else if i % 5 == 2 {
let mut dummy_add = dummy_holding_cell_add_htlc.clone();
- if let HTLCUpdateAwaitingACK::AddHTLC { ref mut blinding_point, .. } = &mut dummy_add {
+ if let HTLCUpdateAwaitingACK::AddHTLC {
+ ref mut blinding_point, ref mut skimmed_fee_msat, ..
+ } = &mut dummy_add {
*blinding_point = Some(test_utils::pubkey(42 + i));
+ *skimmed_fee_msat = Some(42);
} else { panic!() }
holding_cell_htlc_updates.push(dummy_add);
+ } else if i % 5 == 3 {
+ holding_cell_htlc_updates.push(dummy_holding_cell_malformed_htlc(i as u64));
+ } else {
+ holding_cell_htlc_updates.push(dummy_holding_cell_failed_htlc(i as u64));
}
}
chan.context.holding_cell_htlc_updates = holding_cell_htlc_updates.clone();
assert_eq!(decoded_chan.context.holding_cell_htlc_updates, holding_cell_htlc_updates);
}
- #[cfg(feature = "_test_vectors")]
+ #[cfg(all(feature = "_test_vectors", not(feature = "grind_signatures")))]
#[test]
fn outbound_commitment_test() {
use bitcoin::sighash;
// Test vectors from BOLT 3 Appendices C and F (anchors):
let feeest = TestFeeEstimator{fee_est: 15000};
- let logger : Arc<Logger> = Arc::new(test_utils::TestLogger::new());
+ let logger : Arc<dyn Logger> = Arc::new(test_utils::TestLogger::new());
let secp_ctx = Secp256k1::new();
let mut signer = InMemorySigner::new(
},
]};
let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 };
- let (mut node_a_chan, funding_created_msg) = node_a_chan.get_funding_created(
- tx.clone(),
- funding_outpoint,
- true,
- &&logger,
+ let funding_created_msg = node_a_chan.get_funding_created(
+ tx.clone(), funding_outpoint, true, &&logger,
).map_err(|_| ()).unwrap();
let (mut node_b_chan, funding_signed_msg, _) = node_b_chan.funding_created(
&funding_created_msg.unwrap(),
// Receive funding_signed, but the channel will be configured to hold sending channel_ready and
// broadcasting the funding transaction until the batch is ready.
- let _ = node_a_chan.funding_signed(
- &funding_signed_msg.unwrap(),
- best_block,
- &&keys_provider,
- &&logger,
- ).unwrap();
+ let res = node_a_chan.funding_signed(
+ &funding_signed_msg.unwrap(), best_block, &&keys_provider, &&logger,
+ );
+ let (mut node_a_chan, _) = if let Ok(res) = res { res } else { panic!(); };
let node_a_updates = node_a_chan.monitor_updating_restored(
&&logger,
&&keys_provider,
// as the funding transaction depends on all channels in the batch becoming ready.
assert!(node_a_updates.channel_ready.is_none());
assert!(node_a_updates.funding_broadcastable.is_none());
- assert_eq!(
- node_a_chan.context.channel_state,
- ChannelState::FundingSent as u32 |
- ChannelState::WaitingForBatch as u32,
- );
+ assert_eq!(node_a_chan.context.channel_state, ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::WAITING_FOR_BATCH));
// It is possible to receive a 0conf channel_ready from the remote node.
node_a_chan.channel_ready(
).unwrap();
assert_eq!(
node_a_chan.context.channel_state,
- ChannelState::FundingSent as u32 |
- ChannelState::WaitingForBatch as u32 |
- ChannelState::TheirChannelReady as u32,
+ ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::WAITING_FOR_BATCH | AwaitingChannelReadyFlags::THEIR_CHANNEL_READY)
);
- // Clear the ChannelState::WaitingForBatch only when called by ChannelManager.
+ // Clear AwaitingChannelReadyFlags::WAITING_FOR_BATCH only when called by the ChannelManager.
node_a_chan.set_batch_ready();
- assert_eq!(
- node_a_chan.context.channel_state,
- ChannelState::FundingSent as u32 |
- ChannelState::TheirChannelReady as u32,
- );
+ assert_eq!(node_a_chan.context.channel_state, ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::THEIR_CHANNEL_READY));
assert!(node_a_chan.check_get_channel_ready(0).is_some());
}
}