X-Git-Url: http://git.bitcoin.ninja/index.cgi?a=blobdiff_plain;f=lightning%2Fsrc%2Fln%2Fchannel.rs;h=050585ef2673f81a6f9caf8484de05d2fa2bf258;hb=59778dac488cff735004671cdefb3f4ac1f920fd;hp=1b5d6cfa7bfe575a7c7ea75a8bd59193ca97ce08;hpb=6e40e5f18a8b6cde673e1ea80c2a2113d5c80483;p=rust-lightning diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index 1b5d6cfa..58d35a1a 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -8,9 +8,10 @@ // licenses. use bitcoin::blockdata::constants::ChainHash; -use bitcoin::blockdata::script::{Script,Builder}; -use bitcoin::blockdata::transaction::{Transaction, EcdsaSighashType}; -use bitcoin::util::sighash; +use bitcoin::blockdata::script::{Script, ScriptBuf, Builder}; +use bitcoin::blockdata::transaction::Transaction; +use bitcoin::sighash; +use bitcoin::sighash::EcdsaSighashType; use bitcoin::consensus::encode; use bitcoin::hashes::Hash; @@ -36,11 +37,12 @@ use crate::chain::BestBlock; use crate::chain::chaininterface::{FeeEstimator, ConfirmationTarget, LowerBoundedFeeEstimator}; use crate::chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, ChannelMonitorUpdateStep, LATENCY_GRACE_PERIOD_BLOCKS, CLOSED_CHANNEL_UPDATE_ID}; use crate::chain::transaction::{OutPoint, TransactionData}; -use crate::sign::{EcdsaChannelSigner, WriteableEcdsaChannelSigner, EntropySource, ChannelSigner, SignerProvider, NodeSigner, Recipient}; +use crate::sign::ecdsa::{EcdsaChannelSigner, WriteableEcdsaChannelSigner}; +use crate::sign::{EntropySource, ChannelSigner, SignerProvider, NodeSigner, Recipient}; use crate::events::ClosureReason; use crate::routing::gossip::NodeId; -use crate::util::ser::{Readable, ReadableArgs, Writeable, Writer, VecWriter}; -use crate::util::logger::Logger; +use crate::util::ser::{Readable, ReadableArgs, Writeable, Writer}; +use crate::util::logger::{Logger, Record, WithContext}; use crate::util::errors::APIError; use crate::util::config::{UserConfig, ChannelConfig, LegacyChannelConfig, ChannelHandshakeConfig, ChannelHandshakeLimits, MaxDustHTLCExposure}; use crate::util::scid_utils::scid_from_parts; @@ -51,9 +53,10 @@ use core::{cmp,mem,fmt}; use core::ops::Deref; #[cfg(any(test, fuzzing, debug_assertions))] use crate::sync::Mutex; -use bitcoin::hashes::hex::ToHex; use crate::sign::type_resolver::ChannelSignerType; +use super::channel_keys::{DelayedPaymentBasepoint, HtlcBasepoint, RevocationBasepoint}; + #[cfg(test)] pub struct ChannelValueStat { pub value_to_self_msat: u64, @@ -100,10 +103,38 @@ enum InboundHTLCRemovalReason { Fulfill(PaymentPreimage), } +/// Represents the resolution status of an inbound HTLC. +#[derive(Clone)] +enum InboundHTLCResolution { + /// Resolved implies the action we must take with the inbound HTLC has already been determined, + /// i.e., we already know whether it must be failed back or forwarded. + // + // TODO: Once this variant is removed, we should also clean up + // [`MonitorRestoreUpdates::accepted_htlcs`] as the path will be unreachable. + Resolved { + pending_htlc_status: PendingHTLCStatus, + }, + /// Pending implies we will attempt to resolve the inbound HTLC once it has been fully committed + /// to by both sides of the channel, i.e., once a `revoke_and_ack` has been processed by both + /// nodes for the state update in which it was proposed. 
+ Pending { + update_add_htlc: msgs::UpdateAddHTLC, + }, +} + +impl_writeable_tlv_based_enum!(InboundHTLCResolution, + (0, Resolved) => { + (0, pending_htlc_status, required), + }, + (2, Pending) => { + (0, update_add_htlc, required), + }; +); + enum InboundHTLCState { /// Offered by remote, to be included in next local commitment tx. I.e., the remote sent an /// update_add_htlc message for this HTLC. - RemoteAnnounced(PendingHTLCStatus), + RemoteAnnounced(InboundHTLCResolution), /// Included in a received commitment_signed message (implying we've /// revoke_and_ack'd it), but the remote hasn't yet revoked their previous /// state (see the example below). We have not yet included this HTLC in a @@ -133,13 +164,13 @@ enum InboundHTLCState { /// Implies AwaitingRemoteRevoke. /// /// [BOLT #2]: https://github.com/lightning/bolts/blob/master/02-peer-protocol.md - AwaitingRemoteRevokeToAnnounce(PendingHTLCStatus), + AwaitingRemoteRevokeToAnnounce(InboundHTLCResolution), /// Included in a received commitment_signed message (implying we've revoke_and_ack'd it). /// We have also included this HTLC in our latest commitment_signed and are now just waiting /// on the remote's revoke_and_ack to make this HTLC an irrevocable part of the state of the /// channel (before it can then get forwarded and/or removed). /// Implies AwaitingRemoteRevoke. - AwaitingAnnouncedRemoteRevoke(PendingHTLCStatus), + AwaitingAnnouncedRemoteRevoke(InboundHTLCResolution), Committed, /// Removed by us and a new commitment_signed was sent (if we were AwaitingRemoteRevoke when we /// created it we would have put it in the holding cell instead). When they next revoke_and_ack @@ -154,6 +185,72 @@ enum InboundHTLCState { LocalRemoved(InboundHTLCRemovalReason), } +/// Exposes the state of pending inbound HTLCs. +/// +/// At a high level, an HTLC being forwarded from one Lightning node to another Lightning node goes +/// through the following states in the state machine: +/// - Announced for addition by the originating node through the update_add_htlc message. +/// - Added to the commitment transaction of the receiving node and originating node in turn +/// through the exchange of commitment_signed and revoke_and_ack messages. +/// - Announced for resolution (fulfillment or failure) by the receiving node through either one of +/// the update_fulfill_htlc, update_fail_htlc, and update_fail_malformed_htlc messages. +/// - Removed from the commitment transaction of the originating node and receiving node in turn +/// through the exchange of commitment_signed and revoke_and_ack messages. +/// +/// This can be used to inspect what next message an HTLC is waiting for to advance its state. +#[derive(Clone, Debug, PartialEq)] +pub enum InboundHTLCStateDetails { + /// We have added this HTLC in our commitment transaction by receiving commitment_signed and + /// returning revoke_and_ack. We are awaiting the appropriate revoke_and_ack's from the remote + /// before this HTLC is included on the remote commitment transaction. + AwaitingRemoteRevokeToAdd, + /// This HTLC has been included in the commitment_signed and revoke_and_ack messages on both sides + /// and is included in both commitment transactions. + /// + /// This HTLC is now safe to either forward or be claimed as a payment by us. The HTLC will + /// remain in this state until the forwarded upstream HTLC has been resolved and we resolve this + /// HTLC correspondingly, or until we claim it as a payment. 
If it is part of a multipart + /// payment, it will only be claimed together with other required parts. + Committed, + /// We have received the preimage for this HTLC and it is being removed by fulfilling it with + /// update_fulfill_htlc. This HTLC is still on both commitment transactions, but we are awaiting + /// the appropriate revoke_and_ack's from the remote before this HTLC is removed from the remote + /// commitment transaction after update_fulfill_htlc. + AwaitingRemoteRevokeToRemoveFulfill, + /// The HTLC is being removed by failing it with update_fail_htlc or update_fail_malformed_htlc. + /// This HTLC is still on both commitment transactions, but we are awaiting the appropriate + /// revoke_and_ack's from the remote before this HTLC is removed from the remote commitment + /// transaction. + AwaitingRemoteRevokeToRemoveFail, +} + +impl From<&InboundHTLCState> for Option { + fn from(state: &InboundHTLCState) -> Option { + match state { + InboundHTLCState::RemoteAnnounced(_) => None, + InboundHTLCState::AwaitingRemoteRevokeToAnnounce(_) => + Some(InboundHTLCStateDetails::AwaitingRemoteRevokeToAdd), + InboundHTLCState::AwaitingAnnouncedRemoteRevoke(_) => + Some(InboundHTLCStateDetails::AwaitingRemoteRevokeToAdd), + InboundHTLCState::Committed => + Some(InboundHTLCStateDetails::Committed), + InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailRelay(_)) => + Some(InboundHTLCStateDetails::AwaitingRemoteRevokeToRemoveFail), + InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailMalformed(_)) => + Some(InboundHTLCStateDetails::AwaitingRemoteRevokeToRemoveFail), + InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::Fulfill(_)) => + Some(InboundHTLCStateDetails::AwaitingRemoteRevokeToRemoveFulfill), + } + } +} + +impl_writeable_tlv_based_enum_upgradable!(InboundHTLCStateDetails, + (0, AwaitingRemoteRevokeToAdd) => {}, + (2, Committed) => {}, + (4, AwaitingRemoteRevokeToRemoveFulfill) => {}, + (6, AwaitingRemoteRevokeToRemoveFail) => {}; +); + struct InboundHTLCOutput { htlc_id: u64, amount_msat: u64, @@ -162,6 +259,54 @@ struct InboundHTLCOutput { state: InboundHTLCState, } +/// Exposes details around pending inbound HTLCs. +#[derive(Clone, Debug, PartialEq)] +pub struct InboundHTLCDetails { + /// The HTLC ID. + /// The IDs are incremented by 1 starting from 0 for each offered HTLC. + /// They are unique per channel and inbound/outbound direction, unless an HTLC was only announced + /// and not part of any commitment transaction. + pub htlc_id: u64, + /// The amount in msat. + pub amount_msat: u64, + /// The block height at which this HTLC expires. + pub cltv_expiry: u32, + /// The payment hash. + pub payment_hash: PaymentHash, + /// The state of the HTLC in the state machine. + /// + /// Determines on which commitment transactions the HTLC is included and what message the HTLC is + /// waiting for to advance to the next state. + /// + /// See [`InboundHTLCStateDetails`] for information on the specific states. + /// + /// LDK will always fill this field in, but when downgrading to prior versions of LDK, new + /// states may result in `None` here. + pub state: Option, + /// Whether the HTLC has an output below the local dust limit. If so, the output will be trimmed + /// from the local commitment transaction and added to the commitment transaction fee. + /// For non-anchor channels, this takes into account the cost of the second-stage HTLC + /// transactions as well. 
+ /// + /// When the local commitment transaction is broadcasted as part of a unilateral closure, + /// the value of this HTLC will therefore not be claimable but instead burned as a transaction + /// fee. + /// + /// Note that dust limits are specific to each party. An HTLC can be dust for the local + /// commitment transaction but not for the counterparty's commitment transaction and vice versa. + pub is_dust: bool, +} + +impl_writeable_tlv_based!(InboundHTLCDetails, { + (0, htlc_id, required), + (2, amount_msat, required), + (4, cltv_expiry, required), + (6, payment_hash, required), + (7, state, upgradable_option), + (8, is_dust, required), +}); + +#[cfg_attr(test, derive(Clone, Debug, PartialEq))] enum OutboundHTLCState { /// Added by us and included in a commitment_signed (if we were AwaitingRemoteRevoke when we /// created it we would have put it in the holding cell instead). When they next revoke_and_ack @@ -194,7 +339,74 @@ enum OutboundHTLCState { AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome), } +/// Exposes the state of pending outbound HTLCs. +/// +/// At a high level, an HTLC being forwarded from one Lightning node to another Lightning node goes +/// through the following states in the state machine: +/// - Announced for addition by the originating node through the update_add_htlc message. +/// - Added to the commitment transaction of the receiving node and originating node in turn +/// through the exchange of commitment_signed and revoke_and_ack messages. +/// - Announced for resolution (fulfillment or failure) by the receiving node through either one of +/// the update_fulfill_htlc, update_fail_htlc, and update_fail_malformed_htlc messages. +/// - Removed from the commitment transaction of the originating node and receiving node in turn +/// through the exchange of commitment_signed and revoke_and_ack messages. +/// +/// This can be used to inspect what next message an HTLC is waiting for to advance its state. +#[derive(Clone, Debug, PartialEq)] +pub enum OutboundHTLCStateDetails { + /// We are awaiting the appropriate revoke_and_ack's from the remote before the HTLC is added + /// on the remote's commitment transaction after update_add_htlc. + AwaitingRemoteRevokeToAdd, + /// The HTLC has been added to the remote's commitment transaction by sending commitment_signed + /// and receiving revoke_and_ack in return. + /// + /// The HTLC will remain in this state until the remote node resolves the HTLC, or until we + /// unilaterally close the channel due to a timeout with an uncooperative remote node. + Committed, + /// The HTLC has been fulfilled successfully by the remote with a preimage in update_fulfill_htlc, + /// and we removed the HTLC from our commitment transaction by receiving commitment_signed and + /// returning revoke_and_ack. We are awaiting the appropriate revoke_and_ack's from the remote + /// for the removal from its commitment transaction. + AwaitingRemoteRevokeToRemoveSuccess, + /// The HTLC has been failed by the remote with update_fail_htlc or update_fail_malformed_htlc, + /// and we removed the HTLC from our commitment transaction by receiving commitment_signed and + /// returning revoke_and_ack. We are awaiting the appropriate revoke_and_ack's from the remote + /// for the removal from its commitment transaction. 
+ AwaitingRemoteRevokeToRemoveFailure, +} + +impl From<&OutboundHTLCState> for OutboundHTLCStateDetails { + fn from(state: &OutboundHTLCState) -> OutboundHTLCStateDetails { + match state { + OutboundHTLCState::LocalAnnounced(_) => + OutboundHTLCStateDetails::AwaitingRemoteRevokeToAdd, + OutboundHTLCState::Committed => + OutboundHTLCStateDetails::Committed, + // RemoteRemoved states are ignored as the state is transient and the remote has not committed to + // the state yet. + OutboundHTLCState::RemoteRemoved(_) => + OutboundHTLCStateDetails::Committed, + OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(_)) => + OutboundHTLCStateDetails::AwaitingRemoteRevokeToRemoveSuccess, + OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Failure(_)) => + OutboundHTLCStateDetails::AwaitingRemoteRevokeToRemoveFailure, + OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(_)) => + OutboundHTLCStateDetails::AwaitingRemoteRevokeToRemoveSuccess, + OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Failure(_)) => + OutboundHTLCStateDetails::AwaitingRemoteRevokeToRemoveFailure, + } + } +} + +impl_writeable_tlv_based_enum_upgradable!(OutboundHTLCStateDetails, + (0, AwaitingRemoteRevokeToAdd) => {}, + (2, Committed) => {}, + (4, AwaitingRemoteRevokeToRemoveSuccess) => {}, + (6, AwaitingRemoteRevokeToRemoveFailure) => {}; +); + #[derive(Clone)] +#[cfg_attr(test, derive(Debug, PartialEq))] enum OutboundHTLCOutcome { /// LDK version 0.0.105+ will always fill in the preimage here. Success(Option), @@ -219,6 +431,7 @@ impl<'a> Into> for &'a OutboundHTLCOutcome { } } +#[cfg_attr(test, derive(Clone, Debug, PartialEq))] struct OutboundHTLCOutput { htlc_id: u64, amount_msat: u64, @@ -226,10 +439,64 @@ struct OutboundHTLCOutput { payment_hash: PaymentHash, state: OutboundHTLCState, source: HTLCSource, + blinding_point: Option, skimmed_fee_msat: Option, } +/// Exposes details around pending outbound HTLCs. +#[derive(Clone, Debug, PartialEq)] +pub struct OutboundHTLCDetails { + /// The HTLC ID. + /// The IDs are incremented by 1 starting from 0 for each offered HTLC. + /// They are unique per channel and inbound/outbound direction, unless an HTLC was only announced + /// and not part of any commitment transaction. + /// + /// Not present when we are awaiting a remote revocation and the HTLC is not added yet. + pub htlc_id: Option, + /// The amount in msat. + pub amount_msat: u64, + /// The block height at which this HTLC expires. + pub cltv_expiry: u32, + /// The payment hash. + pub payment_hash: PaymentHash, + /// The state of the HTLC in the state machine. + /// + /// Determines on which commitment transactions the HTLC is included and what message the HTLC is + /// waiting for to advance to the next state. + /// + /// See [`OutboundHTLCStateDetails`] for information on the specific states. + /// + /// LDK will always fill this field in, but when downgrading to prior versions of LDK, new + /// states may result in `None` here. + pub state: Option, + /// The extra fee being skimmed off the top of this HTLC. + pub skimmed_fee_msat: Option, + /// Whether the HTLC has an output below the local dust limit. If so, the output will be trimmed + /// from the local commitment transaction and added to the commitment transaction fee. + /// For non-anchor channels, this takes into account the cost of the second-stage HTLC + /// transactions as well. 
+ /// + /// When the local commitment transaction is broadcasted as part of a unilateral closure, + /// the value of this HTLC will therefore not be claimable but instead burned as a transaction + /// fee. + /// + /// Note that dust limits are specific to each party. An HTLC can be dust for the local + /// commitment transaction but not for the counterparty's commitment transaction and vice versa. + pub is_dust: bool, +} + +impl_writeable_tlv_based!(OutboundHTLCDetails, { + (0, htlc_id, required), + (2, amount_msat, required), + (4, cltv_expiry, required), + (6, payment_hash, required), + (7, state, upgradable_option), + (8, skimmed_fee_msat, required), + (10, is_dust, required), +}); + /// See AwaitingRemoteRevoke ChannelState for more info +#[cfg_attr(test, derive(Clone, Debug, PartialEq))] enum HTLCUpdateAwaitingACK { AddHTLC { // TODO: Time out if we're getting close to cltv_expiry // always outbound @@ -240,6 +507,7 @@ enum HTLCUpdateAwaitingACK { onion_routing_packet: msgs::OnionPacket, // The extra fee we're skimming off the top of this HTLC. skimmed_fee_msat: Option, + blinding_point: Option, }, ClaimHTLC { payment_preimage: PaymentPreimage, @@ -249,78 +517,325 @@ enum HTLCUpdateAwaitingACK { htlc_id: u64, err_packet: msgs::OnionErrorPacket, }, + FailMalformedHTLC { + htlc_id: u64, + failure_code: u16, + sha256_of_onion: [u8; 32], + }, +} + +macro_rules! define_state_flags { + ($flag_type_doc: expr, $flag_type: ident, [$(($flag_doc: expr, $flag: ident, $value: expr, $get: ident, $set: ident, $clear: ident)),+], $extra_flags: expr) => { + #[doc = $flag_type_doc] + #[derive(Copy, Clone, Debug, PartialEq, PartialOrd, Eq)] + struct $flag_type(u32); + + impl $flag_type { + $( + #[doc = $flag_doc] + const $flag: $flag_type = $flag_type($value); + )* + + /// All flags that apply to the specified [`ChannelState`] variant. 
+ #[allow(unused)] + const ALL: $flag_type = Self($(Self::$flag.0 | )* $extra_flags); + + #[allow(unused)] + fn new() -> Self { Self(0) } + + #[allow(unused)] + fn from_u32(flags: u32) -> Result { + if flags & !Self::ALL.0 != 0 { + Err(()) + } else { + Ok($flag_type(flags)) + } + } + + #[allow(unused)] + fn is_empty(&self) -> bool { self.0 == 0 } + #[allow(unused)] + fn is_set(&self, flag: Self) -> bool { *self & flag == flag } + #[allow(unused)] + fn set(&mut self, flag: Self) { *self |= flag } + #[allow(unused)] + fn clear(&mut self, flag: Self) -> Self { self.0 &= !flag.0; *self } + } + + $( + define_state_flags!($flag_type, Self::$flag, $get, $set, $clear); + )* + + impl core::ops::BitOr for $flag_type { + type Output = Self; + fn bitor(self, rhs: Self) -> Self::Output { Self(self.0 | rhs.0) } + } + impl core::ops::BitOrAssign for $flag_type { + fn bitor_assign(&mut self, rhs: Self) { self.0 |= rhs.0; } + } + impl core::ops::BitAnd for $flag_type { + type Output = Self; + fn bitand(self, rhs: Self) -> Self::Output { Self(self.0 & rhs.0) } + } + impl core::ops::BitAndAssign for $flag_type { + fn bitand_assign(&mut self, rhs: Self) { self.0 &= rhs.0; } + } + }; + ($flag_type_doc: expr, $flag_type: ident, $flags: tt) => { + define_state_flags!($flag_type_doc, $flag_type, $flags, 0); + }; + ($flag_type: ident, $flag: expr, $get: ident, $set: ident, $clear: ident) => { + impl $flag_type { + #[allow(unused)] + fn $get(&self) -> bool { self.is_set($flag_type::new() | $flag) } + #[allow(unused)] + fn $set(&mut self) { self.set($flag_type::new() | $flag) } + #[allow(unused)] + fn $clear(&mut self) -> Self { self.clear($flag_type::new() | $flag) } + } + }; + ($flag_type_doc: expr, FUNDED_STATE, $flag_type: ident, $flags: tt) => { + define_state_flags!($flag_type_doc, $flag_type, $flags, FundedStateFlags::ALL.0); + + define_state_flags!($flag_type, FundedStateFlags::PEER_DISCONNECTED, + is_peer_disconnected, set_peer_disconnected, clear_peer_disconnected); + define_state_flags!($flag_type, FundedStateFlags::MONITOR_UPDATE_IN_PROGRESS, + is_monitor_update_in_progress, set_monitor_update_in_progress, clear_monitor_update_in_progress); + define_state_flags!($flag_type, FundedStateFlags::REMOTE_SHUTDOWN_SENT, + is_remote_shutdown_sent, set_remote_shutdown_sent, clear_remote_shutdown_sent); + define_state_flags!($flag_type, FundedStateFlags::LOCAL_SHUTDOWN_SENT, + is_local_shutdown_sent, set_local_shutdown_sent, clear_local_shutdown_sent); + + impl core::ops::BitOr for $flag_type { + type Output = Self; + fn bitor(self, rhs: FundedStateFlags) -> Self::Output { Self(self.0 | rhs.0) } + } + impl core::ops::BitOrAssign for $flag_type { + fn bitor_assign(&mut self, rhs: FundedStateFlags) { self.0 |= rhs.0; } + } + impl core::ops::BitAnd for $flag_type { + type Output = Self; + fn bitand(self, rhs: FundedStateFlags) -> Self::Output { Self(self.0 & rhs.0) } + } + impl core::ops::BitAndAssign for $flag_type { + fn bitand_assign(&mut self, rhs: FundedStateFlags) { self.0 &= rhs.0; } + } + impl PartialEq for $flag_type { + fn eq(&self, other: &FundedStateFlags) -> bool { self.0 == other.0 } + } + impl From for $flag_type { + fn from(flags: FundedStateFlags) -> Self { Self(flags.0) } + } + }; +} + +/// We declare all the states/flags here together to help determine which bits are still available +/// to choose. 
+mod state_flags { + pub const OUR_INIT_SENT: u32 = 1 << 0; + pub const THEIR_INIT_SENT: u32 = 1 << 1; + pub const FUNDING_NEGOTIATED: u32 = 1 << 2; + pub const AWAITING_CHANNEL_READY: u32 = 1 << 3; + pub const THEIR_CHANNEL_READY: u32 = 1 << 4; + pub const OUR_CHANNEL_READY: u32 = 1 << 5; + pub const CHANNEL_READY: u32 = 1 << 6; + pub const PEER_DISCONNECTED: u32 = 1 << 7; + pub const MONITOR_UPDATE_IN_PROGRESS: u32 = 1 << 8; + pub const AWAITING_REMOTE_REVOKE: u32 = 1 << 9; + pub const REMOTE_SHUTDOWN_SENT: u32 = 1 << 10; + pub const LOCAL_SHUTDOWN_SENT: u32 = 1 << 11; + pub const SHUTDOWN_COMPLETE: u32 = 1 << 12; + pub const WAITING_FOR_BATCH: u32 = 1 << 13; } -/// There are a few "states" and then a number of flags which can be applied: -/// We first move through init with `OurInitSent` -> `TheirInitSent` -> `FundingCreated` -> `FundingSent`. -/// `TheirChannelReady` and `OurChannelReady` then get set on `FundingSent`, and when both are set we -/// move on to `ChannelReady`. -/// Note that `PeerDisconnected` can be set on both `ChannelReady` and `FundingSent`. -/// `ChannelReady` can then get all remaining flags set on it, until we finish shutdown, then we -/// move on to `ShutdownComplete`, at which point most calls into this channel are disallowed. +define_state_flags!( + "Flags that apply to all [`ChannelState`] variants in which the channel is funded.", + FundedStateFlags, [ + ("Indicates the remote side is considered \"disconnected\" and no updates are allowed \ + until after we've done a `channel_reestablish` dance.", PEER_DISCONNECTED, state_flags::PEER_DISCONNECTED, + is_peer_disconnected, set_peer_disconnected, clear_peer_disconnected), + ("Indicates the user has told us a `ChannelMonitor` update is pending async persistence \ + somewhere and we should pause sending any outbound messages until they've managed to \ + complete it.", MONITOR_UPDATE_IN_PROGRESS, state_flags::MONITOR_UPDATE_IN_PROGRESS, + is_monitor_update_in_progress, set_monitor_update_in_progress, clear_monitor_update_in_progress), + ("Indicates we received a `shutdown` message from the remote end. If set, they may not add \ + any new HTLCs to the channel, and we are expected to respond with our own `shutdown` \ + message when possible.", REMOTE_SHUTDOWN_SENT, state_flags::REMOTE_SHUTDOWN_SENT, + is_remote_shutdown_sent, set_remote_shutdown_sent, clear_remote_shutdown_sent), + ("Indicates we sent a `shutdown` message. At this point, we may not add any new HTLCs to \ + the channel.", LOCAL_SHUTDOWN_SENT, state_flags::LOCAL_SHUTDOWN_SENT, + is_local_shutdown_sent, set_local_shutdown_sent, clear_local_shutdown_sent) + ] +); + +define_state_flags!( + "Flags that only apply to [`ChannelState::NegotiatingFunding`].", + NegotiatingFundingFlags, [ + ("Indicates we have (or are prepared to) send our `open_channel`/`accept_channel` message.", + OUR_INIT_SENT, state_flags::OUR_INIT_SENT, is_our_init_sent, set_our_init_sent, clear_our_init_sent), + ("Indicates we have received their `open_channel`/`accept_channel` message.", + THEIR_INIT_SENT, state_flags::THEIR_INIT_SENT, is_their_init_sent, set_their_init_sent, clear_their_init_sent) + ] +); + +define_state_flags!( + "Flags that only apply to [`ChannelState::AwaitingChannelReady`].", + FUNDED_STATE, AwaitingChannelReadyFlags, [ + ("Indicates they sent us a `channel_ready` message. 
Once both `THEIR_CHANNEL_READY` and \ + `OUR_CHANNEL_READY` are set, our state moves on to `ChannelReady`.", + THEIR_CHANNEL_READY, state_flags::THEIR_CHANNEL_READY, + is_their_channel_ready, set_their_channel_ready, clear_their_channel_ready), + ("Indicates we sent them a `channel_ready` message. Once both `THEIR_CHANNEL_READY` and \ + `OUR_CHANNEL_READY` are set, our state moves on to `ChannelReady`.", + OUR_CHANNEL_READY, state_flags::OUR_CHANNEL_READY, + is_our_channel_ready, set_our_channel_ready, clear_our_channel_ready), + ("Indicates the channel was funded in a batch and the broadcast of the funding transaction \ + is being held until all channels in the batch have received `funding_signed` and have \ + their monitors persisted.", WAITING_FOR_BATCH, state_flags::WAITING_FOR_BATCH, + is_waiting_for_batch, set_waiting_for_batch, clear_waiting_for_batch) + ] +); + +define_state_flags!( + "Flags that only apply to [`ChannelState::ChannelReady`].", + FUNDED_STATE, ChannelReadyFlags, [ + ("Indicates that we have sent a `commitment_signed` but are awaiting the responding \ + `revoke_and_ack` message. During this period, we can't generate new `commitment_signed` \ + messages as we'd be unable to determine which HTLCs they included in their `revoke_and_ack` \ + implicit ACK, so instead we have to hold them away temporarily to be sent later.", + AWAITING_REMOTE_REVOKE, state_flags::AWAITING_REMOTE_REVOKE, + is_awaiting_remote_revoke, set_awaiting_remote_revoke, clear_awaiting_remote_revoke) + ] +); + +// Note that the order of this enum is implicitly defined by where each variant is placed. Take this +// into account when introducing new states and update `test_channel_state_order` accordingly. +#[derive(Copy, Clone, Debug, PartialEq, PartialOrd, Eq)] enum ChannelState { - /// Implies we have (or are prepared to) send our open_channel/accept_channel message - OurInitSent = 1 << 0, - /// Implies we have received their `open_channel`/`accept_channel` message - TheirInitSent = 1 << 1, - /// We have sent `funding_created` and are awaiting a `funding_signed` to advance to `FundingSent`. - /// Note that this is nonsense for an inbound channel as we immediately generate `funding_signed` - /// upon receipt of `funding_created`, so simply skip this state. - FundingCreated = 4, - /// Set when we have received/sent `funding_created` and `funding_signed` and are thus now waiting - /// on the funding transaction to confirm. The `ChannelReady` flags are set to indicate when we - /// and our counterparty consider the funding transaction confirmed. - FundingSent = 8, - /// Flag which can be set on `FundingSent` to indicate they sent us a `channel_ready` message. - /// Once both `TheirChannelReady` and `OurChannelReady` are set, state moves on to `ChannelReady`. - TheirChannelReady = 1 << 4, - /// Flag which can be set on `FundingSent` to indicate we sent them a `channel_ready` message. - /// Once both `TheirChannelReady` and `OurChannelReady` are set, state moves on to `ChannelReady`. - OurChannelReady = 1 << 5, - ChannelReady = 64, - /// Flag which is set on `ChannelReady` and `FundingSent` indicating remote side is considered - /// "disconnected" and no updates are allowed until after we've done a `channel_reestablish` - /// dance. 
- PeerDisconnected = 1 << 7, - /// Flag which is set on `ChannelReady`, FundingCreated, and `FundingSent` indicating the user has - /// told us a `ChannelMonitor` update is pending async persistence somewhere and we should pause - /// sending any outbound messages until they've managed to finish. - MonitorUpdateInProgress = 1 << 8, - /// Flag which implies that we have sent a commitment_signed but are awaiting the responding - /// revoke_and_ack message. During this time period, we can't generate new commitment_signed - /// messages as then we will be unable to determine which HTLCs they included in their - /// revoke_and_ack implicit ACK, so instead we have to hold them away temporarily to be sent - /// later. - /// Flag is set on `ChannelReady`. - AwaitingRemoteRevoke = 1 << 9, - /// Flag which is set on `ChannelReady` or `FundingSent` after receiving a shutdown message from - /// the remote end. If set, they may not add any new HTLCs to the channel, and we are expected - /// to respond with our own shutdown message when possible. - RemoteShutdownSent = 1 << 10, - /// Flag which is set on `ChannelReady` or `FundingSent` after sending a shutdown message. At this - /// point, we may not add any new HTLCs to the channel. - LocalShutdownSent = 1 << 11, - /// We've successfully negotiated a closing_signed dance. At this point ChannelManager is about - /// to drop us, but we store this anyway. - ShutdownComplete = 4096, - /// Flag which is set on `FundingSent` to indicate this channel is funded in a batch and the - /// broadcasting of the funding transaction is being held until all channels in the batch - /// have received funding_signed and have their monitors persisted. - WaitingForBatch = 1 << 13, + /// We are negotiating the parameters required for the channel prior to funding it. + NegotiatingFunding(NegotiatingFundingFlags), + /// We have sent `funding_created` and are awaiting a `funding_signed` to advance to + /// `AwaitingChannelReady`. Note that this is nonsense for an inbound channel as we immediately generate + /// `funding_signed` upon receipt of `funding_created`, so simply skip this state. + FundingNegotiated, + /// We've received/sent `funding_created` and `funding_signed` and are thus now waiting on the + /// funding transaction to confirm. + AwaitingChannelReady(AwaitingChannelReadyFlags), + /// Both we and our counterparty consider the funding transaction confirmed and the channel is + /// now operational. + ChannelReady(ChannelReadyFlags), + /// We've successfully negotiated a `closing_signed` dance. At this point, the `ChannelManager` + /// is about to drop us, but we store this anyway. + ShutdownComplete, +} + +macro_rules! 
impl_state_flag { + ($get: ident, $set: ident, $clear: ident, [$($state: ident),+]) => { + #[allow(unused)] + fn $get(&self) -> bool { + match self { + $( + ChannelState::$state(flags) => flags.$get(), + )* + _ => false, + } + } + #[allow(unused)] + fn $set(&mut self) { + match self { + $( + ChannelState::$state(flags) => flags.$set(), + )* + _ => debug_assert!(false, "Attempted to set flag on unexpected ChannelState"), + } + } + #[allow(unused)] + fn $clear(&mut self) { + match self { + $( + ChannelState::$state(flags) => { let _ = flags.$clear(); }, + )* + _ => debug_assert!(false, "Attempted to clear flag on unexpected ChannelState"), + } + } + }; + ($get: ident, $set: ident, $clear: ident, FUNDED_STATES) => { + impl_state_flag!($get, $set, $clear, [AwaitingChannelReady, ChannelReady]); + }; + ($get: ident, $set: ident, $clear: ident, $state: ident) => { + impl_state_flag!($get, $set, $clear, [$state]); + }; +} + +impl ChannelState { + fn from_u32(state: u32) -> Result { + match state { + state_flags::FUNDING_NEGOTIATED => Ok(ChannelState::FundingNegotiated), + state_flags::SHUTDOWN_COMPLETE => Ok(ChannelState::ShutdownComplete), + val => { + if val & state_flags::AWAITING_CHANNEL_READY == state_flags::AWAITING_CHANNEL_READY { + AwaitingChannelReadyFlags::from_u32(val & !state_flags::AWAITING_CHANNEL_READY) + .map(|flags| ChannelState::AwaitingChannelReady(flags)) + } else if val & state_flags::CHANNEL_READY == state_flags::CHANNEL_READY { + ChannelReadyFlags::from_u32(val & !state_flags::CHANNEL_READY) + .map(|flags| ChannelState::ChannelReady(flags)) + } else if let Ok(flags) = NegotiatingFundingFlags::from_u32(val) { + Ok(ChannelState::NegotiatingFunding(flags)) + } else { + Err(()) + } + }, + } + } + + fn to_u32(&self) -> u32 { + match self { + ChannelState::NegotiatingFunding(flags) => flags.0, + ChannelState::FundingNegotiated => state_flags::FUNDING_NEGOTIATED, + ChannelState::AwaitingChannelReady(flags) => state_flags::AWAITING_CHANNEL_READY | flags.0, + ChannelState::ChannelReady(flags) => state_flags::CHANNEL_READY | flags.0, + ChannelState::ShutdownComplete => state_flags::SHUTDOWN_COMPLETE, + } + } + + fn is_pre_funded_state(&self) -> bool { + matches!(self, ChannelState::NegotiatingFunding(_)|ChannelState::FundingNegotiated) + } + + fn is_both_sides_shutdown(&self) -> bool { + self.is_local_shutdown_sent() && self.is_remote_shutdown_sent() + } + + fn with_funded_state_flags_mask(&self) -> FundedStateFlags { + match self { + ChannelState::AwaitingChannelReady(flags) => FundedStateFlags((*flags & FundedStateFlags::ALL).0), + ChannelState::ChannelReady(flags) => FundedStateFlags((*flags & FundedStateFlags::ALL).0), + _ => FundedStateFlags::new(), + } + } + + fn can_generate_new_commitment(&self) -> bool { + match self { + ChannelState::ChannelReady(flags) => + !flags.is_set(ChannelReadyFlags::AWAITING_REMOTE_REVOKE) && + !flags.is_set(FundedStateFlags::MONITOR_UPDATE_IN_PROGRESS.into()) && + !flags.is_set(FundedStateFlags::PEER_DISCONNECTED.into()), + _ => { + debug_assert!(false, "Can only generate new commitment within ChannelReady"); + false + }, + } + } + + impl_state_flag!(is_peer_disconnected, set_peer_disconnected, clear_peer_disconnected, FUNDED_STATES); + impl_state_flag!(is_monitor_update_in_progress, set_monitor_update_in_progress, clear_monitor_update_in_progress, FUNDED_STATES); + impl_state_flag!(is_local_shutdown_sent, set_local_shutdown_sent, clear_local_shutdown_sent, FUNDED_STATES); + impl_state_flag!(is_remote_shutdown_sent, set_remote_shutdown_sent, 
clear_remote_shutdown_sent, FUNDED_STATES); + impl_state_flag!(is_our_channel_ready, set_our_channel_ready, clear_our_channel_ready, AwaitingChannelReady); + impl_state_flag!(is_their_channel_ready, set_their_channel_ready, clear_their_channel_ready, AwaitingChannelReady); + impl_state_flag!(is_waiting_for_batch, set_waiting_for_batch, clear_waiting_for_batch, AwaitingChannelReady); + impl_state_flag!(is_awaiting_remote_revoke, set_awaiting_remote_revoke, clear_awaiting_remote_revoke, ChannelReady); } -const BOTH_SIDES_SHUTDOWN_MASK: u32 = - ChannelState::LocalShutdownSent as u32 | - ChannelState::RemoteShutdownSent as u32; -const MULTI_STATE_FLAGS: u32 = - BOTH_SIDES_SHUTDOWN_MASK | - ChannelState::PeerDisconnected as u32 | - ChannelState::MonitorUpdateInProgress as u32; -const STATE_FLAGS: u32 = - MULTI_STATE_FLAGS | - ChannelState::TheirChannelReady as u32 | - ChannelState::OurChannelReady as u32 | - ChannelState::AwaitingRemoteRevoke as u32 | - ChannelState::WaitingForBatch as u32; pub const INITIAL_COMMITMENT_NUMBER: u64 = (1 << 48) - 1; @@ -404,6 +919,33 @@ impl fmt::Display for ChannelError { } } +pub(super) struct WithChannelContext<'a, L: Deref> where L::Target: Logger { + pub logger: &'a L, + pub peer_id: Option, + pub channel_id: Option, +} + +impl<'a, L: Deref> Logger for WithChannelContext<'a, L> where L::Target: Logger { + fn log(&self, mut record: Record) { + record.peer_id = self.peer_id; + record.channel_id = self.channel_id; + self.logger.log(record) + } +} + +impl<'a, 'b, L: Deref> WithChannelContext<'a, L> +where L::Target: Logger { + pub(super) fn from(logger: &'a L, context: &'b ChannelContext) -> Self + where S::Target: SignerProvider + { + WithChannelContext { + logger, + peer_id: Some(context.counterparty_node_id), + channel_id: Some(context.channel_id), + } + } +} + macro_rules! secp_check { ($res: expr, $err: expr) => { match $res { @@ -473,9 +1015,10 @@ struct CommitmentStats<'a> { total_fee_sat: u64, // the total fee included in the transaction num_nondust_htlcs: usize, // the number of HTLC outputs (dust HTLCs *non*-included) htlcs_included: Vec<(HTLCOutputInCommitment, Option<&'a HTLCSource>)>, // the list of HTLCs (dust HTLCs *included*) which were not ignored when building the transaction - local_balance_msat: u64, // local balance before fees but considering dust limits - remote_balance_msat: u64, // remote balance before fees but considering dust limits - preimages: Vec, // preimages for successful offered HTLCs since last commitment + local_balance_msat: u64, // local balance before fees *not* considering dust limits + remote_balance_msat: u64, // remote balance before fees *not* considering dust limits + outbound_htlc_preimages: Vec, // preimages for successful offered HTLCs since last commitment + inbound_htlc_preimages: Vec, // preimages for successful received HTLCs since last commitment } /// Used when calculating whether we or the remote can afford an additional HTLC. 
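
The `to_u32`/`from_u32` helpers above pack a funded `ChannelState` variant into a single `u32`: the variant's marker bit from the `state_flags` module is OR'd with the flag bits the variant carries, and decoding tests the marker bit before handing the masked remainder to the flag type. The following is a minimal, self-contained sketch of that encoding scheme only, using simplified stand-in types rather than the crate's private items:

    // Marker bits for one variant plus two of its flags, matching the values
    // in the `state_flags` module above.
    const AWAITING_CHANNEL_READY: u32 = 1 << 3;
    const OUR_CHANNEL_READY: u32 = 1 << 5;
    const PEER_DISCONNECTED: u32 = 1 << 7;

    #[derive(Debug, PartialEq)]
    struct AwaitingChannelReadyFlags(u32);

    #[derive(Debug, PartialEq)]
    enum State {
        AwaitingChannelReady(AwaitingChannelReadyFlags),
    }

    fn to_u32(state: &State) -> u32 {
        match state {
            // The variant's marker bit OR'd with the flags it carries.
            State::AwaitingChannelReady(flags) => AWAITING_CHANNEL_READY | flags.0,
        }
    }

    fn from_u32(val: u32) -> Result<State, ()> {
        if val & AWAITING_CHANNEL_READY == AWAITING_CHANNEL_READY {
            // Mask off the marker bit; the remainder are the per-variant flags.
            Ok(State::AwaitingChannelReady(AwaitingChannelReadyFlags(val & !AWAITING_CHANNEL_READY)))
        } else {
            Err(())
        }
    }

    fn main() {
        let state = State::AwaitingChannelReady(
            AwaitingChannelReadyFlags(OUR_CHANNEL_READY | PEER_DISCONNECTED));
        let encoded = to_u32(&state);
        assert_eq!(encoded, (1 << 3) | (1 << 5) | (1 << 7));
        assert_eq!(from_u32(encoded), Ok(state));
    }

The real `from_u32` additionally rejects unknown flag bits via each flag type's own `from_u32`; that validation is omitted here for brevity.
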
@@ -528,6 +1071,7 @@ pub(super) struct MonitorRestoreUpdates { pub accepted_htlcs: Vec<(PendingHTLCInfo, u64)>, pub failed_htlcs: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>, pub finalized_claimed_htlcs: Vec, + pub pending_update_adds: Vec, pub funding_broadcastable: Option, pub channel_ready: Option, pub announcement_sigs: Option, @@ -538,7 +1082,6 @@ pub(super) struct MonitorRestoreUpdates { pub(super) struct SignerResumeUpdates { pub commitment_update: Option, pub funding_signed: Option, - pub funding_created: Option, pub channel_ready: Option, } @@ -555,13 +1098,20 @@ pub(super) struct ReestablishResponses { /// The result of a shutdown that should be handled. #[must_use] pub(crate) struct ShutdownResult { + pub(crate) closure_reason: ClosureReason, /// A channel monitor update to apply. - pub(crate) monitor_update: Option<(PublicKey, OutPoint, ChannelMonitorUpdate)>, + pub(crate) monitor_update: Option<(PublicKey, OutPoint, ChannelId, ChannelMonitorUpdate)>, /// A list of dropped outbound HTLCs that can safely be failed backwards immediately. pub(crate) dropped_outbound_htlcs: Vec<(HTLCSource, PaymentHash, PublicKey, ChannelId)>, /// An unbroadcasted batch funding transaction id. The closure of this channel should be /// propagated to the remainder of the batch. pub(crate) unbroadcasted_batch_funding_txid: Option, + pub(crate) channel_id: ChannelId, + pub(crate) user_channel_id: u128, + pub(crate) channel_capacity_satoshis: u64, + pub(crate) counterparty_node_id: PublicKey, + pub(crate) unbroadcasted_funding_tx: Option, + pub(crate) channel_funding_txo: Option, } /// If the majority of the channels funds are to the fundee and the initiator holds only just @@ -639,18 +1189,26 @@ impl_writeable_tlv_based!(PendingChannelMonitorUpdate, { pub(super) enum ChannelPhase where SP::Target: SignerProvider { UnfundedOutboundV1(OutboundV1Channel), UnfundedInboundV1(InboundV1Channel), + #[cfg(dual_funding)] + UnfundedOutboundV2(OutboundV2Channel), + #[cfg(dual_funding)] + UnfundedInboundV2(InboundV2Channel), Funded(Channel), } impl<'a, SP: Deref> ChannelPhase where SP::Target: SignerProvider, - ::Signer: ChannelSigner, + ::EcdsaSigner: ChannelSigner, { pub fn context(&'a self) -> &'a ChannelContext { match self { ChannelPhase::Funded(chan) => &chan.context, ChannelPhase::UnfundedOutboundV1(chan) => &chan.context, ChannelPhase::UnfundedInboundV1(chan) => &chan.context, + #[cfg(dual_funding)] + ChannelPhase::UnfundedOutboundV2(chan) => &chan.context, + #[cfg(dual_funding)] + ChannelPhase::UnfundedInboundV2(chan) => &chan.context, } } @@ -659,6 +1217,10 @@ impl<'a, SP: Deref> ChannelPhase where ChannelPhase::Funded(ref mut chan) => &mut chan.context, ChannelPhase::UnfundedOutboundV1(ref mut chan) => &mut chan.context, ChannelPhase::UnfundedInboundV1(ref mut chan) => &mut chan.context, + #[cfg(dual_funding)] + ChannelPhase::UnfundedOutboundV2(ref mut chan) => &mut chan.context, + #[cfg(dual_funding)] + ChannelPhase::UnfundedInboundV2(ref mut chan) => &mut chan.context, } } } @@ -703,7 +1265,7 @@ pub(super) struct ChannelContext where SP::Target: SignerProvider { /// The temporary channel ID used during channel setup. Value kept even after transitioning to a final channel ID. /// Will be `None` for channels created prior to 0.0.115. temporary_channel_id: Option, - channel_state: u32, + channel_state: ChannelState, // When we reach max(6 blocks, minimum_depth), we need to send an AnnouncementSigs message to // our peer. 
However, we want to make sure they received it, or else rebroadcast it when we @@ -722,9 +1284,9 @@ pub(super) struct ChannelContext where SP::Target: SignerProvider { latest_monitor_update_id: u64, - holder_signer: ChannelSignerType<::Signer>, + holder_signer: ChannelSignerType, shutdown_scriptpubkey: Option, - destination_script: Script, + destination_script: ScriptBuf, // Our commitment numbers start at 2^48-1 and count down, whereas the ones used in transaction // generation start at 0 and count up...this simplifies some parts of implementation at the @@ -757,6 +1319,7 @@ pub(super) struct ChannelContext where SP::Target: SignerProvider { monitor_pending_forwards: Vec<(PendingHTLCInfo, u64)>, monitor_pending_failures: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>, monitor_pending_finalized_fulfills: Vec, + monitor_pending_update_adds: Vec, /// If we went to send a commitment update (ie some messages then [`msgs::CommitmentSigned`]) /// but our signer (initially) refused to give us a signature, we should retry at some point in @@ -815,6 +1378,19 @@ pub(super) struct ChannelContext where SP::Target: SignerProvider { #[cfg(not(test))] closing_fee_limits: Option<(u64, u64)>, + /// If we remove an HTLC (or fee update), commit, and receive our counterparty's + /// `revoke_and_ack`, we remove all knowledge of said HTLC (or fee update). However, the latest + /// local commitment transaction that we can broadcast still contains the HTLC (or old fee) + /// until we receive a further `commitment_signed`. Thus we are not eligible for initiating the + /// `closing_signed` negotiation if we're expecting a counterparty `commitment_signed`. + /// + /// To ensure we don't send a `closing_signed` too early, we track this state here, waiting + /// until we see a `commitment_signed` before doing so. + /// + /// We don't bother to persist this - we anticipate this state won't last longer than a few + /// milliseconds, so any accidental force-closes here should be exceedingly rare. + expecting_peer_commitment_signed: bool, + /// The hash of the block in which the funding transaction was included. funding_tx_confirmed_in: Option, funding_tx_confirmation_height: u32, @@ -868,7 +1444,7 @@ pub(super) struct ChannelContext where SP::Target: SignerProvider { counterparty_prev_commitment_point: Option, counterparty_node_id: PublicKey, - counterparty_shutdown_scriptpubkey: Option
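
The `WithChannelContext` wrapper introduced above is a small logger decorator: it implements the same `Logger` trait as the logger it wraps and stamps the channel's `peer_id` and `channel_id` onto every `Record` before delegating. A rough, self-contained sketch of that pattern, with simplified stand-in types in place of the crate's `Record`, `PublicKey`, and `ChannelId`:

    // Stand-in for `lightning::util::logger::Record`; the real struct carries more fields.
    #[derive(Default)]
    struct Record {
        peer_id: Option<String>,
        channel_id: Option<String>,
        msg: String,
    }

    // Stand-in for the `Logger` trait.
    trait Logger {
        fn log(&self, record: Record);
    }

    struct ConsoleLogger;
    impl Logger for ConsoleLogger {
        fn log(&self, record: Record) {
            println!("[peer={:?} chan={:?}] {}", record.peer_id, record.channel_id, record.msg);
        }
    }

    // Decorator: implements the same trait, but fills in per-channel context
    // on each record before forwarding it to the inner logger.
    struct WithChannelContext<'a, L: Logger> {
        logger: &'a L,
        peer_id: Option<String>,
        channel_id: Option<String>,
    }

    impl<'a, L: Logger> Logger for WithChannelContext<'a, L> {
        fn log(&self, mut record: Record) {
            record.peer_id = self.peer_id.clone();
            record.channel_id = self.channel_id.clone();
            self.logger.log(record)
        }
    }

    fn main() {
        let inner = ConsoleLogger;
        let logger = WithChannelContext {
            logger: &inner,
            peer_id: Some("02abc...".to_owned()),
            channel_id: Some("4d3f...".to_owned()),
        };
        logger.log(Record { msg: "monitor update queued".to_owned(), ..Default::default() });
    }

This lets call sites inside `channel.rs` log through the wrapper without threading the peer and channel identifiers through every log statement by hand.
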