//! The top-level channel management and payment tracking stuff lives here.
//!
-//! The ChannelManager is the main chunk of logic implementing the lightning protocol and is
+//! The [`ChannelManager`] is the main chunk of logic implementing the lightning protocol and is
//! responsible for tracking which channels are open, which HTLCs are in flight, and reestablishing those
//! upon reconnect to the relevant peer(s).
//!
-//! It does not manage routing logic (see [`find_route`] for that) nor does it manage constructing
+//! It does not manage routing logic (see [`Router`] for that) nor does it manage constructing
//! on-chain transactions (it only monitors the chain to watch for any force-closes that might
//! imply it needs to fail HTLCs/payments/channels it manages).
-//!
-//! [`find_route`]: crate::routing::router::find_route
use bitcoin::blockdata::block::BlockHeader;
use bitcoin::blockdata::transaction::Transaction;
-use bitcoin::blockdata::constants::genesis_block;
+use bitcoin::blockdata::constants::{genesis_block, ChainHash};
use bitcoin::network::constants::Network;
use bitcoin::hashes::Hash;
use crate::chain::chaininterface::{BroadcasterInterface, ConfirmationTarget, FeeEstimator, LowerBoundedFeeEstimator};
use crate::chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, ChannelMonitorUpdateStep, HTLC_FAIL_BACK_BUFFER, CLTV_CLAIM_BUFFER, LATENCY_GRACE_PERIOD_BLOCKS, ANTI_REORG_DELAY, MonitorEvent, CLOSED_CHANNEL_UPDATE_ID};
use crate::chain::transaction::{OutPoint, TransactionData};
+use crate::events;
+use crate::events::{Event, EventHandler, EventsProvider, MessageSendEvent, MessageSendEventsProvider, ClosureReason, HTLCDestination, PaymentFailureReason};
// Since this struct is returned in `list_channels` methods, expose it here in case users want to
// construct one themselves.
use crate::ln::{inbound_payment, PaymentHash, PaymentPreimage, PaymentSecret};
-use crate::ln::channel::{Channel, ChannelError, ChannelUpdateStatus, UpdateFulfillCommitFetch};
+use crate::ln::channel::{Channel, ChannelContext, ChannelError, ChannelUpdateStatus, ShutdownResult, UpdateFulfillCommitFetch, OutboundV1Channel, InboundV1Channel};
use crate::ln::features::{ChannelFeatures, ChannelTypeFeatures, InitFeatures, NodeFeatures};
#[cfg(any(feature = "_test_utils", test))]
use crate::ln::features::InvoiceFeatures;
use crate::routing::gossip::NetworkGraph;
-use crate::routing::router::{DefaultRouter, InFlightHtlcs, PaymentParameters, Route, RouteHop, RouteParameters, RoutePath, Router};
-use crate::routing::scoring::ProbabilisticScorer;
+use crate::routing::router::{BlindedTail, DefaultRouter, InFlightHtlcs, Path, Payee, PaymentParameters, Route, RouteParameters, Router};
+use crate::routing::scoring::{ProbabilisticScorer, ProbabilisticScoringFeeParameters};
use crate::ln::msgs;
use crate::ln::onion_utils;
use crate::ln::onion_utils::HTLCFailReason;
-use crate::ln::msgs::{ChannelMessageHandler, DecodeError, LightningError, MAX_VALUE_MSAT};
+use crate::ln::msgs::{ChannelMessageHandler, DecodeError, LightningError};
#[cfg(test)]
use crate::ln::outbound_payment;
use crate::ln::outbound_payment::{OutboundPayments, PaymentAttempts, PendingOutboundPayment};
use crate::ln::wire::Encode;
-use crate::chain::keysinterface::{EntropySource, KeysManager, NodeSigner, Recipient, SignerProvider, ChannelSigner};
-use crate::util::config::{UserConfig, ChannelConfig};
-use crate::util::events::{Event, EventHandler, EventsProvider, MessageSendEvent, MessageSendEventsProvider, ClosureReason, HTLCDestination};
-use crate::util::events;
+use crate::sign::{EntropySource, KeysManager, NodeSigner, Recipient, SignerProvider, ChannelSigner, WriteableEcdsaChannelSigner};
+use crate::util::config::{UserConfig, ChannelConfig, ChannelConfigUpdate};
use crate::util::wakers::{Future, Notifier};
use crate::util::scid_utils::fake_scid;
+use crate::util::string::UntrustedString;
use crate::util::ser::{BigSize, FixedLengthReader, Readable, ReadableArgs, MaybeReadable, Writeable, Writer, VecWriter};
use crate::util::logger::{Level, Logger};
use crate::util::errors::APIError;
use core::cell::RefCell;
use crate::io::Read;
use crate::sync::{Arc, Mutex, RwLock, RwLockReadGuard, FairRwLock, LockTestExt, LockHeldState};
-use core::sync::atomic::{AtomicUsize, Ordering};
+use core::sync::atomic::{AtomicUsize, AtomicBool, Ordering};
use core::time::Duration;
use core::ops::Deref;
// Re-export this for use in the public API.
-pub use crate::ln::outbound_payment::{PaymentSendFailure, Retry, RetryableSendFailure};
+pub use crate::ln::outbound_payment::{PaymentSendFailure, Retry, RetryableSendFailure, RecipientOnionFields};
+use crate::ln::script::ShutdownScript;
// We hold various information about HTLC relay in the HTLC objects in Channel itself:
//
},
Receive {
payment_data: msgs::FinalOnionHopData,
+ payment_metadata: Option<Vec<u8>>,
incoming_cltv_expiry: u32, // Used to track when we should expire pending HTLCs that go unclaimed
phantom_shared_secret: Option<[u8; 32]>,
},
ReceiveKeysend {
+ /// This was added in 0.0.116 and will break deserialization on downgrades.
+ payment_data: Option<msgs::FinalOnionHopData>,
payment_preimage: PaymentPreimage,
+ payment_metadata: Option<Vec<u8>>,
incoming_cltv_expiry: u32, // Used to track when we should expire pending HTLCs that go unclaimed
},
}
pub(super) routing: PendingHTLCRouting,
pub(super) incoming_shared_secret: [u8; 32],
payment_hash: PaymentHash,
+ /// Amount received
pub(super) incoming_amt_msat: Option<u64>, // Added in 0.0.113
+ /// Sender intended amount to forward or receive (actual amount received
+ /// may overshoot this in either case)
pub(super) outgoing_amt_msat: u64,
pub(super) outgoing_cltv_value: u32,
+ /// The fee being skimmed off the top of this HTLC. If this is a forward, it'll be the fee we are
+ /// skimming. If we're receiving this HTLC, it's the fee that our counterparty skimmed.
+ pub(super) skimmed_fee_msat: Option<u64>,
}
#[derive(Clone)] // See Channel::revoke_and_ack for why, tl;dr: Rust bug
cltv_expiry: u32,
/// The amount (in msats) of this MPP part
value: u64,
+ /// The amount (in msats) that the sender intended to be sent in this MPP
+ /// part (used for validating total MPP amount)
+ sender_intended_value: u64,
onion_payload: OnionPayload,
timer_ticks: u8,
- /// The sum total of all MPP parts
+ /// The total value received for a payment (sum of all MPP parts if the payment is a MPP).
+ /// Gets set to the amount reported when pushing [`Event::PaymentClaimable`].
+ total_value_received: Option<u64>,
+ /// The sender intended sum total of all MPP parts specified in the onion
total_msat: u64,
+ /// The extra fee our counterparty skimmed off the top of this HTLC.
+ counterparty_skimmed_fee_msat: Option<u64>,
}
/// A payment identifier used to uniquely identify a payment to LDK.
-/// (C-not exported) as we just use [u8; 32] directly
+///
+/// This is not exported to bindings users as we just use [u8; 32] directly
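+///
+/// # Example (illustrative)
+///
+/// ```
+/// use lightning::ln::channelmanager::PaymentId;
+/// // Any unique 32 bytes work; callers often derive them from the payment hash.
+/// let payment_id = PaymentId([42; 32]);
+/// ```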
#[derive(Hash, Copy, Clone, PartialEq, Eq, Debug)]
pub struct PaymentId(pub [u8; 32]);
}
/// An identifier used to uniquely identify an intercepted HTLC to LDK.
-/// (C-not exported) as we just use [u8; 32] directly
+///
+/// This is not exported to bindings users as we just use [u8; 32] directly
#[derive(Hash, Copy, Clone, PartialEq, Eq, Debug)]
pub struct InterceptId(pub [u8; 32]);
Ok(InterceptId(buf))
}
}
+
+#[derive(Clone, Copy, PartialEq, Eq, Hash)]
+/// Uniquely describes an HTLC by its source. Just the guaranteed-unique subset of [`HTLCSource`].
+pub(crate) enum SentHTLCId {
+ PreviousHopData { short_channel_id: u64, htlc_id: u64 },
+ OutboundRoute { session_priv: SecretKey },
+}
+impl SentHTLCId {
+ pub(crate) fn from_source(source: &HTLCSource) -> Self {
+ match source {
+ HTLCSource::PreviousHopData(hop_data) => Self::PreviousHopData {
+ short_channel_id: hop_data.short_channel_id,
+ htlc_id: hop_data.htlc_id,
+ },
+ HTLCSource::OutboundRoute { session_priv, .. } =>
+ Self::OutboundRoute { session_priv: *session_priv },
+ }
+ }
+}
+impl_writeable_tlv_based_enum!(SentHTLCId,
+ (0, PreviousHopData) => {
+ (0, short_channel_id, required),
+ (2, htlc_id, required),
+ },
+ (2, OutboundRoute) => {
+ (0, session_priv, required),
+ };
+);
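+// Illustrative: `SentHTLCId::from_source(&source)` collapses an `HTLCSource` to the unique
+// fields above; on the wire, the TLV macro tags `PreviousHopData` as variant type 0 and
+// `OutboundRoute` as variant type 2.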
+
+
/// Tracks the inbound corresponding to an outbound HTLC
#[allow(clippy::derive_hash_xor_eq)] // Our Hash is faithful to the data, we just don't have SecretKey::hash
#[derive(Clone, PartialEq, Eq)]
pub(crate) enum HTLCSource {
PreviousHopData(HTLCPreviousHopData),
OutboundRoute {
- path: Vec<RouteHop>,
+ path: Path,
session_priv: SecretKey,
/// Technically we can recalculate this from the route, but we cache it here to avoid
/// doing a double-pass on route when we get a failure back
first_hop_htlc_msat: u64,
payment_id: PaymentId,
- payment_secret: Option<PaymentSecret>,
- /// Note that this is now "deprecated" - we write it for forwards (and read it for
- /// backwards) compatibility reasons, but prefer to use the data in the
- /// [`super::outbound_payment`] module, which stores per-payment data once instead of in
- /// each HTLC.
- payment_params: Option<PaymentParameters>,
},
}
#[allow(clippy::derive_hash_xor_eq)] // Our Hash is faithful to the data, we just don't have SecretKey::hash
0u8.hash(hasher);
prev_hop_data.hash(hasher);
},
- HTLCSource::OutboundRoute { path, session_priv, payment_id, payment_secret, first_hop_htlc_msat, payment_params } => {
+ HTLCSource::OutboundRoute { path, session_priv, payment_id, first_hop_htlc_msat } => {
1u8.hash(hasher);
path.hash(hasher);
session_priv[..].hash(hasher);
payment_id.hash(hasher);
- payment_secret.hash(hasher);
first_hop_htlc_msat.hash(hasher);
- payment_params.hash(hasher);
},
}
}
}
-#[cfg(not(feature = "grind_signatures"))]
-#[cfg(test)]
impl HTLCSource {
+ #[cfg(all(feature = "_test_vectors", not(feature = "grind_signatures")))]
pub fn dummy() -> Self {
HTLCSource::OutboundRoute {
- path: Vec::new(),
+ path: Path { hops: Vec::new(), blinded_tail: None },
session_priv: SecretKey::from_slice(&[1; 32]).unwrap(),
first_hop_htlc_msat: 0,
payment_id: PaymentId([2; 32]),
- payment_secret: None,
- payment_params: None,
+ }
+ }
+
+ #[cfg(debug_assertions)]
+ /// Checks whether this HTLCSource could possibly match the given HTLC output in a commitment
+ /// transaction. Useful to ensure different datastructures match up.
+ pub(crate) fn possibly_matches_output(&self, htlc: &super::chan_utils::HTLCOutputInCommitment) -> bool {
+ if let HTLCSource::OutboundRoute { first_hop_htlc_msat, .. } = self {
+ *first_hop_htlc_msat == htlc.amount_msat
+ } else {
+ // There's nothing we can check for forwarded HTLCs
+ true
}
}
}
IncorrectOrUnknownPaymentDetails = 0x4000 | 15,
}
-type ShutdownResult = (Option<(OutPoint, ChannelMonitorUpdate)>, Vec<(HTLCSource, PaymentHash, PublicKey, [u8; 32])>);
-
/// Error type returned across the peer_state mutex boundary. When an Err is generated for a
/// Channel, we generally end up with a ChannelError::Close for which we have to close the channel
/// immediately (ie with no further calls on it made). Thus, this step happens inside a
(4, receiver_node_id, required),
});
+struct ClaimablePayment {
+ purpose: events::PaymentPurpose,
+ onion_fields: Option<RecipientOnionFields>,
+ htlcs: Vec<ClaimableHTLC>,
+}
+
/// Information about claimable or being-claimed payments
struct ClaimablePayments {
/// Map from payment hash to the payment data and any HTLCs which are to us and can be
///
/// When adding to the map, [`Self::pending_claiming_payments`] must also be checked to ensure
/// we don't get a duplicate payment.
- claimable_htlcs: HashMap<PaymentHash, (events::PaymentPurpose, Vec<ClaimableHTLC>)>,
+ claimable_payments: HashMap<PaymentHash, ClaimablePayment>,
/// Map from payment hash to the payment data for HTLCs which we have begun claiming, but which
/// are waiting on a [`ChannelMonitorUpdate`] to complete in order to be surfaced to the user
pending_claiming_payments: HashMap<PaymentHash, ClaimingPayment>,
}
-/// Events which we process internally but cannot be procsesed immediately at the generation site
-/// for some reason. They are handled in timer_tick_occurred, so may be processed with
-/// quite some time lag.
+/// Events which we process internally but cannot be processed immediately at the generation site
+/// usually because we're running pre-full-init. They are handled immediately once we detect we are
+/// running normally, and specifically must be processed before any other non-background
+/// [`ChannelMonitorUpdate`]s are applied.
enum BackgroundEvent {
- /// Handle a ChannelMonitorUpdate that closes a channel, broadcasting its current latest holder
- /// commitment transaction.
- ClosingMonitorUpdate((OutPoint, ChannelMonitorUpdate)),
+ /// Handle a ChannelMonitorUpdate which closes the channel or for an already-closed channel.
+ /// This is only separated from [`Self::MonitorUpdateRegeneratedOnStartup`] as the
+ /// maybe-non-closing variant needs a public key to handle channel resumption, whereas if the
+ /// channel has been force-closed we do not need the counterparty node_id.
+ ///
+ /// Note that any such events are lost on shutdown, so in general they must be updates which
+ /// are regenerated on startup.
+ ClosedMonitorUpdateRegeneratedOnStartup((OutPoint, ChannelMonitorUpdate)),
+ /// Handle a ChannelMonitorUpdate which may or may not close the channel and may unblock the
+ /// channel to continue normal operation.
+ ///
+ /// In general this should be used rather than
+ /// [`Self::ClosedMonitorUpdateRegeneratedOnStartup`], however in cases where the
+ /// `counterparty_node_id` is not available as the channel has closed from a [`ChannelMonitor`]
+ /// error the other variant is acceptable.
+ ///
+ /// Note that any such events are lost on shutdown, so in general they must be updates which
+ /// are regenerated on startup.
+ MonitorUpdateRegeneratedOnStartup {
+ counterparty_node_id: PublicKey,
+ funding_txo: OutPoint,
+ update: ChannelMonitorUpdate
+ },
}
#[derive(Debug)]
/// this payment. Note that this is only best-effort. On restart it's possible such a duplicate
/// event can be generated.
PaymentClaimed { payment_hash: PaymentHash },
- /// Indicates an [`events::Event`] should be surfaced to the user.
- EmitEvent { event: events::Event },
+ /// Indicates an [`events::Event`] should be surfaced to the user and possibly resume the
+ /// operation of another channel.
+ ///
+ /// This is usually generated when we've forwarded an HTLC and want to block the outbound edge
+ /// from completing a monitor update which removes the payment preimage until the inbound edge
+ /// completes a monitor update containing the payment preimage. In that case, after the inbound
+ /// edge completes, we will surface an [`Event::PaymentForwarded`] as well as unblock the
+ /// outbound edge.
+ EmitEventAndFreeOtherChannel {
+ event: events::Event,
+ downstream_counterparty_and_funding_outpoint: Option<(PublicKey, OutPoint, RAAMonitorUpdateBlockingAction)>,
+ },
}
impl_writeable_tlv_based_enum_upgradable!(MonitorUpdateCompletionAction,
(0, PaymentClaimed) => { (0, payment_hash, required) },
- (2, EmitEvent) => { (0, event, upgradable_required) },
+ (2, EmitEventAndFreeOtherChannel) => {
+ (0, event, upgradable_required),
+ // LDK prior to 0.0.116 did not have this field as the monitor update application order was
+ // required by clients. If we downgrade to something prior to 0.0.116 this may result in
+ // monitor updates which aren't properly blocked or resumed, however that's fine - we don't
+ // support async monitor updates even in LDK 0.0.116 and once we do we'll require no
+ // downgrades to prior versions.
+ (1, downstream_counterparty_and_funding_outpoint, option),
+ },
+);
+
+#[derive(Clone, Debug, PartialEq, Eq)]
+pub(crate) enum EventCompletionAction {
+ ReleaseRAAChannelMonitorUpdate {
+ counterparty_node_id: PublicKey,
+ channel_funding_outpoint: OutPoint,
+ },
+}
+impl_writeable_tlv_based_enum!(EventCompletionAction,
+ (0, ReleaseRAAChannelMonitorUpdate) => {
+ (0, channel_funding_outpoint, required),
+ (2, counterparty_node_id, required),
+ };
);
+#[derive(Clone, PartialEq, Eq, Debug)]
+/// If something is blocked on the completion of an RAA-generated [`ChannelMonitorUpdate`] we track
+/// the blocked action here. See enum variants for more info.
+pub(crate) enum RAAMonitorUpdateBlockingAction {
+ /// A forwarded payment was claimed. We block the downstream channel completing its monitor
+ /// update which removes the HTLC preimage until the upstream channel has gotten the preimage
+ /// durably to disk.
+ ForwardedPaymentInboundClaim {
+ /// The upstream channel ID (i.e. the inbound edge).
+ channel_id: [u8; 32],
+ /// The HTLC ID on the inbound edge.
+ htlc_id: u64,
+ },
+}
+
+impl RAAMonitorUpdateBlockingAction {
+ #[allow(unused)]
+ fn from_prev_hop_data(prev_hop: &HTLCPreviousHopData) -> Self {
+ Self::ForwardedPaymentInboundClaim {
+ channel_id: prev_hop.outpoint.to_channel_id(),
+ htlc_id: prev_hop.htlc_id,
+ }
+ }
+}
+
+impl_writeable_tlv_based_enum!(RAAMonitorUpdateBlockingAction,
+ (0, ForwardedPaymentInboundClaim) => { (0, channel_id, required), (2, htlc_id, required) }
+;);
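+// Sketch of the claim flow: `from_prev_hop_data` identifies the inbound-edge HTLC being
+// claimed, and the resulting action is stored against the downstream channel (see
+// `actions_blocking_raa_monitor_updates` in `PeerState`), holding back that channel's RAA
+// `ChannelMonitorUpdate` until the upstream channel has the preimage durably on disk.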
+
+
/// State we hold per-peer.
pub(super) struct PeerState<Signer: ChannelSigner> {
- /// `temporary_channel_id` or `channel_id` -> `channel`.
+ /// `channel_id` -> `Channel`.
///
- /// Holds all channels where the peer is the counterparty. Once a channel has been assigned a
- /// `channel_id`, the `temporary_channel_id` key in the map is updated and is replaced by the
- /// `channel_id`.
+ /// Holds all funded channels where the peer is the counterparty.
pub(super) channel_by_id: HashMap<[u8; 32], Channel<Signer>>,
+ /// `temporary_channel_id` -> `OutboundV1Channel`.
+ ///
+ /// Holds all outbound V1 channels where the peer is the counterparty. Once an outbound channel has
+ /// been assigned a `channel_id`, the entry in this map is removed and one is created in
+ /// `channel_by_id`.
+ pub(super) outbound_v1_channel_by_id: HashMap<[u8; 32], OutboundV1Channel<Signer>>,
+ /// `temporary_channel_id` -> `InboundV1Channel`.
+ ///
+ /// Holds all inbound V1 channels where the peer is the counterparty. Once an inbound channel has
+ /// been assigned a `channel_id`, the entry in this map is removed and one is created in
+ /// `channel_by_id`.
+ pub(super) inbound_v1_channel_by_id: HashMap<[u8; 32], InboundV1Channel<Signer>>,
/// The latest `InitFeatures` we heard from the peer.
latest_features: InitFeatures,
/// Messages to send to the peer - pushed to in the same lock that they are generated in (except
/// for broadcast messages, where ordering isn't as strict).
pub(super) pending_msg_events: Vec<MessageSendEvent>,
+ /// Map from Channel IDs to pending [`ChannelMonitorUpdate`]s which have been passed to the
+ /// user but which have not yet completed.
+ ///
+ /// Note that the channel may no longer exist. For example if the channel was closed but we
+ /// later needed to claim an HTLC which is pending on-chain, we may generate a monitor update
+ /// for a missing channel.
+ in_flight_monitor_updates: BTreeMap<OutPoint, Vec<ChannelMonitorUpdate>>,
/// Map from a specific channel to some action(s) that should be taken when all pending
/// [`ChannelMonitorUpdate`]s for the channel complete updating.
///
/// to funding appearing on-chain), the downstream `ChannelMonitor` set is required to ensure
/// duplicates do not occur, so such channels should fail without a monitor update completing.
monitor_update_blocked_actions: BTreeMap<[u8; 32], Vec<MonitorUpdateCompletionAction>>,
+ /// If another channel's [`ChannelMonitorUpdate`] needs to complete before a channel we have
+ /// with this peer can complete an RAA [`ChannelMonitorUpdate`] (e.g. because the RAA update
+ /// will remove a preimage that needs to be durably in an upstream channel first), we put an
+ /// entry here to note that the channel with the key's ID is blocked on a set of actions.
+ actions_blocking_raa_monitor_updates: BTreeMap<[u8; 32], Vec<RAAMonitorUpdateBlockingAction>>,
/// The peer is currently connected (i.e. we've seen a
/// [`ChannelMessageHandler::peer_connected`] and no corresponding
/// [`ChannelMessageHandler::peer_disconnected`]).
return false
}
self.channel_by_id.is_empty() && self.monitor_update_blocked_actions.is_empty()
+ && self.in_flight_monitor_updates.is_empty()
+ }
+
+ // Returns a count of all channels we have with this peer, including pending channels.
+ fn total_channel_count(&self) -> usize {
+ self.channel_by_id.len() +
+ self.outbound_v1_channel_by_id.len() +
+ self.inbound_v1_channel_by_id.len()
+ }
+
+ // Returns a bool indicating if the given `channel_id` matches a channel we have with this peer.
+ fn has_channel(&self, channel_id: &[u8; 32]) -> bool {
+ self.channel_by_id.contains_key(channel_id) ||
+ self.outbound_v1_channel_by_id.contains_key(channel_id) ||
+ self.inbound_v1_channel_by_id.contains_key(channel_id)
}
}
min_value_msat: Option<u64>,
}
-/// SimpleArcChannelManager is useful when you need a ChannelManager with a static lifetime, e.g.
-/// when you're using lightning-net-tokio (since tokio::spawn requires parameters with static
+/// [`SimpleArcChannelManager`] is useful when you need a [`ChannelManager`] with a static lifetime, e.g.
+/// when you're using `lightning-net-tokio` (since `tokio::spawn` requires parameters with static
/// lifetimes). Other times you can afford a reference, which is more efficient, in which case
-/// SimpleRefChannelManager is the more appropriate type. Defining these type aliases prevents
-/// issues such as overly long function definitions. Note that the ChannelManager can take any type
-/// that implements KeysInterface or Router for its keys manager and router, respectively, but this
-/// type alias chooses the concrete types of KeysManager and DefaultRouter.
+/// [`SimpleRefChannelManager`] is the more appropriate type. Defining these type aliases prevents
+/// issues such as overly long function definitions. Note that the `ChannelManager` can take any type
+/// that implements [`NodeSigner`], [`EntropySource`], and [`SignerProvider`] for its keys manager,
+/// or, respectively, [`Router`] for its router, but this type alias chooses the concrete types
+/// of [`KeysManager`] and [`DefaultRouter`].
///
-/// (C-not exported) as Arcs don't make sense in bindings
+/// This is not exported to bindings users as Arcs don't make sense in bindings
pub type SimpleArcChannelManager<M, T, F, L> = ChannelManager<
Arc<M>,
Arc<T>,
Arc<DefaultRouter<
Arc<NetworkGraph<Arc<L>>>,
Arc<L>,
- Arc<Mutex<ProbabilisticScorer<Arc<NetworkGraph<Arc<L>>>, Arc<L>>>>
+ Arc<Mutex<ProbabilisticScorer<Arc<NetworkGraph<Arc<L>>>, Arc<L>>>>,
+ ProbabilisticScoringFeeParameters,
+ ProbabilisticScorer<Arc<NetworkGraph<Arc<L>>>, Arc<L>>,
>>,
Arc<L>
>;
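+// For example (illustrative; the `My*` names stand in for user implementations of the
+// corresponding traits):
+// type MyManager = SimpleArcChannelManager<MyChainMonitor, MyTxBroadcaster, MyFeeEstimator, MyLogger>;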
-/// SimpleRefChannelManager is a type alias for a ChannelManager reference, and is the reference
-/// counterpart to the SimpleArcChannelManager type alias. Use this type by default when you don't
+/// [`SimpleRefChannelManager`] is a type alias for a ChannelManager reference, and is the reference
+/// counterpart to the [`SimpleArcChannelManager`] type alias. Use this type by default when you don't
/// need a ChannelManager with a static lifetime. You'll need a static lifetime in cases such as
-/// usage of lightning-net-tokio (since tokio::spawn requires parameters with static lifetimes).
+/// usage of lightning-net-tokio (since `tokio::spawn` requires parameters with static lifetimes).
/// But if this is not necessary, using a reference is more efficient. Defining these type aliases
/// prevents issues such as overly long function definitions. Note that the ChannelManager can take any type
-/// that implements KeysInterface or Router for its keys manager and router, respectively, but this
-/// type alias chooses the concrete types of KeysManager and DefaultRouter.
+/// that implements [`NodeSigner`], [`EntropySource`], and [`SignerProvider`] for its keys manager,
+/// or, respectively, [`Router`] for its router, but this type alias chooses the concrete types
+/// of [`KeysManager`] and [`DefaultRouter`].
///
-/// (C-not exported) as Arcs don't make sense in bindings
-pub type SimpleRefChannelManager<'a, 'b, 'c, 'd, 'e, 'f, 'g, 'h, M, T, F, L> = ChannelManager<&'a M, &'b T, &'c KeysManager, &'c KeysManager, &'c KeysManager, &'d F, &'e DefaultRouter<&'f NetworkGraph<&'g L>, &'g L, &'h Mutex<ProbabilisticScorer<&'f NetworkGraph<&'g L>, &'g L>>>, &'g L>;
+/// This is not exported to bindings users as Arcs don't make sense in bindings
+pub type SimpleRefChannelManager<'a, 'b, 'c, 'd, 'e, 'f, 'g, 'h, M, T, F, L> =
+ ChannelManager<
+ &'a M,
+ &'b T,
+ &'c KeysManager,
+ &'c KeysManager,
+ &'c KeysManager,
+ &'d F,
+ &'e DefaultRouter<
+ &'f NetworkGraph<&'g L>,
+ &'g L,
+ &'h Mutex<ProbabilisticScorer<&'f NetworkGraph<&'g L>, &'g L>>,
+ ProbabilisticScoringFeeParameters,
+ ProbabilisticScorer<&'f NetworkGraph<&'g L>, &'g L>
+ >,
+ &'g L
+ >;
+
+macro_rules! define_test_pub_trait { ($vis: vis) => {
+/// A trivial trait which describes any [`ChannelManager`] used in testing.
+$vis trait AChannelManager {
+ type Watch: chain::Watch<Self::Signer> + ?Sized;
+ type M: Deref<Target = Self::Watch>;
+ type Broadcaster: BroadcasterInterface + ?Sized;
+ type T: Deref<Target = Self::Broadcaster>;
+ type EntropySource: EntropySource + ?Sized;
+ type ES: Deref<Target = Self::EntropySource>;
+ type NodeSigner: NodeSigner + ?Sized;
+ type NS: Deref<Target = Self::NodeSigner>;
+ type Signer: WriteableEcdsaChannelSigner + Sized;
+ type SignerProvider: SignerProvider<Signer = Self::Signer> + ?Sized;
+ type SP: Deref<Target = Self::SignerProvider>;
+ type FeeEstimator: FeeEstimator + ?Sized;
+ type F: Deref<Target = Self::FeeEstimator>;
+ type Router: Router + ?Sized;
+ type R: Deref<Target = Self::Router>;
+ type Logger: Logger + ?Sized;
+ type L: Deref<Target = Self::Logger>;
+ fn get_cm(&self) -> &ChannelManager<Self::M, Self::T, Self::ES, Self::NS, Self::SP, Self::F, Self::R, Self::L>;
+}
+} }
+#[cfg(any(test, feature = "_test_utils"))]
+define_test_pub_trait!(pub);
+#[cfg(not(any(test, feature = "_test_utils")))]
+define_test_pub_trait!(pub(crate));
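+// The macro indirection exists because a visibility modifier cannot be toggled with `#[cfg]`
+// in place: expanding the same trait body with `$vis = pub` under test features lets
+// functional tests name `AChannelManager`, while normal builds keep it `pub(crate)`.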
+impl<M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Deref, F: Deref, R: Deref, L: Deref> AChannelManager
+for ChannelManager<M, T, ES, NS, SP, F, R, L>
+where
+ M::Target: chain::Watch<<SP::Target as SignerProvider>::Signer>,
+ T::Target: BroadcasterInterface,
+ ES::Target: EntropySource,
+ NS::Target: NodeSigner,
+ SP::Target: SignerProvider,
+ F::Target: FeeEstimator,
+ R::Target: Router,
+ L::Target: Logger,
+{
+ type Watch = M::Target;
+ type M = M;
+ type Broadcaster = T::Target;
+ type T = T;
+ type EntropySource = ES::Target;
+ type ES = ES;
+ type NodeSigner = NS::Target;
+ type NS = NS;
+ type Signer = <SP::Target as SignerProvider>::Signer;
+ type SignerProvider = SP::Target;
+ type SP = SP;
+ type FeeEstimator = F::Target;
+ type F = F;
+ type Router = R::Target;
+ type R = R;
+ type Logger = L::Target;
+ type L = L;
+ fn get_cm(&self) -> &ChannelManager<M, T, ES, NS, SP, F, R, L> { self }
+}
/// Manager which keeps track of a number of channels and sends messages to the appropriate
/// channel, also tracking HTLC preimages and forwarding onion packets appropriately.
///
-/// Implements ChannelMessageHandler, handling the multi-channel parts and passing things through
+/// Implements [`ChannelMessageHandler`], handling the multi-channel parts and passing things through
/// to individual Channels.
///
-/// Implements Writeable to write out all channel state to disk. Implies peer_disconnected() for
+/// Implements [`Writeable`] to write out all channel state to disk. Implies [`peer_disconnected`] for
/// all peers during write/read (though does not modify this instance, only the instance being
-/// serialized). This will result in any channels which have not yet exchanged funding_created (ie
-/// called funding_transaction_generated for outbound channels).
+/// serialized). This will result in any channels which have not yet exchanged [`funding_created`] (i.e.,
+/// called [`funding_transaction_generated`] for outbound channels) being closed.
///
-/// Note that you can be a bit lazier about writing out ChannelManager than you can be with
-/// ChannelMonitors. With ChannelMonitors you MUST write each monitor update out to disk before
-/// returning from chain::Watch::watch_/update_channel, with ChannelManagers, writing updates
-/// happens out-of-band (and will prevent any other ChannelManager operations from occurring during
+/// Note that you can be a bit lazier about writing out `ChannelManager` than you can be with
+/// [`ChannelMonitor`]. With [`ChannelMonitor`] you MUST write each monitor update out to disk before
+/// returning from [`chain::Watch::watch_channel`]/[`update_channel`], with ChannelManagers, writing updates
+/// happens out-of-band (and will prevent any other `ChannelManager` operations from occurring during
/// the serialization process). If the deserialized version is out-of-date compared to the
-/// ChannelMonitors passed by reference to read(), those channels will be force-closed based on the
-/// ChannelMonitor state and no funds will be lost (mod on-chain transaction fees).
+/// [`ChannelMonitor`] passed by reference to [`read`], those channels will be force-closed based on the
+/// `ChannelMonitor` state and no funds will be lost (mod on-chain transaction fees).
///
-/// Note that the deserializer is only implemented for (BlockHash, ChannelManager), which
-/// tells you the last block hash which was block_connect()ed. You MUST rescan any blocks along
-/// the "reorg path" (ie call block_disconnected() until you get to a common block and then call
-/// block_connected() to step towards your best block) upon deserialization before using the
-/// object!
+/// Note that the deserializer is only implemented for `(`[`BlockHash`]`, `[`ChannelManager`]`)`, which
+/// tells you the last block hash which was connected. You should get the best block tip before using the manager.
+/// See [`chain::Listen`] and [`chain::Confirm`] for more details.
///
-/// Note that ChannelManager is responsible for tracking liveness of its channels and generating
-/// ChannelUpdate messages informing peers that the channel is temporarily disabled. To avoid
+/// Note that `ChannelManager` is responsible for tracking liveness of its channels and generating
+/// [`ChannelUpdate`] messages informing peers that the channel is temporarily disabled. To avoid
/// spam due to quick disconnection/reconnection, updates are not sent until the channel has been
/// offline for a full minute. In order to track this, you must call
-/// timer_tick_occurred roughly once per minute, though it doesn't have to be perfect.
+/// [`timer_tick_occurred`] roughly once per minute, though it doesn't have to be perfect.
///
-/// To avoid trivial DoS issues, ChannelManager limits the number of inbound connections and
+/// To avoid trivial DoS issues, `ChannelManager` limits the number of inbound connections and
/// inbound channels without confirmed funding transactions. This may result in nodes which we do
/// not have a channel with being unable to connect to us or open new channels with us if we have
/// many peers with unfunded channels.
/// exempted from the count of unfunded channels. Similarly, outbound channels and connections are
/// never limited. Please ensure you limit the count of such channels yourself.
///
-/// Rather than using a plain ChannelManager, it is preferable to use either a SimpleArcChannelManager
-/// a SimpleRefChannelManager, for conciseness. See their documentation for more details, but
-/// essentially you should default to using a SimpleRefChannelManager, and use a
-/// SimpleArcChannelManager when you require a ChannelManager with a static lifetime, such as when
+/// Rather than using a plain `ChannelManager`, it is preferable to use either a [`SimpleArcChannelManager`] or
+/// a [`SimpleRefChannelManager`], for conciseness. See their documentation for more details, but
+/// essentially you should default to using a [`SimpleRefChannelManager`], and use a
+/// [`SimpleArcChannelManager`] when you require a `ChannelManager` with a static lifetime, such as when
/// you're using lightning-net-tokio.
+///
+/// [`peer_disconnected`]: msgs::ChannelMessageHandler::peer_disconnected
+/// [`funding_created`]: msgs::FundingCreated
+/// [`funding_transaction_generated`]: Self::funding_transaction_generated
+/// [`BlockHash`]: bitcoin::hash_types::BlockHash
+/// [`update_channel`]: chain::Watch::update_channel
+/// [`ChannelUpdate`]: msgs::ChannelUpdate
+/// [`timer_tick_occurred`]: Self::timer_tick_occurred
+/// [`read`]: ReadableArgs::read
//
// Lock order:
// The tree structure below illustrates the lock order requirements for the different locks of the
#[cfg(any(test, feature = "_test_utils"))]
pub(super) per_peer_state: FairRwLock<HashMap<PublicKey, Mutex<PeerState<<SP::Target as SignerProvider>::Signer>>>>,
+ /// The set of events which we need to give to the user to handle. In some cases an event may
+ /// require some further action after the user handles it (currently only blocking a monitor
+ /// update from being handed to the user to ensure the included changes to the channel state
+ /// are handled by the user before they're persisted durably to disk). In that case, the second
+ /// element in the tuple is set to `Some` with further details of the action.
+ ///
+ /// Note that events MUST NOT be removed from pending_events after deserialization, as they
+ /// could be in the middle of being processed without the direct mutex held.
+ ///
/// See `ChannelManager` struct-level documentation for lock order requirements.
- pending_events: Mutex<Vec<events::Event>>,
+ pending_events: Mutex<VecDeque<(events::Event, Option<EventCompletionAction>)>>,
+ /// A simple atomic flag to ensure only one task at a time can be processing events asynchronously.
+ pending_events_processor: AtomicBool,
+
+ /// If we are running during init (either directly during the deserialization method or in
+ /// block connection methods which run after deserialization but before normal operation) we
+ /// cannot provide the user with [`ChannelMonitorUpdate`]s through the normal update flow -
+ /// prior to normal operation the user may not have loaded the [`ChannelMonitor`]s into their
+ /// [`ChainMonitor`] and thus attempting to update it will fail or panic.
+ ///
+ /// Thus, we place them here to be handled as soon as possible once we are running normally.
+ ///
/// See `ChannelManager` struct-level documentation for lock order requirements.
+ ///
+ /// [`ChainMonitor`]: crate::chain::chainmonitor::ChainMonitor
pending_background_events: Mutex<Vec<BackgroundEvent>>,
/// Used when we have to take a BIG lock to make sure everything is self-consistent.
/// Essentially just when we're serializing ourselves out.
/// Notifier the lock contains sends out a notification when the lock is released.
total_consistency_lock: RwLock<()>,
+ background_events_processed_since_startup: AtomicBool,
+
persistence_notifier: Notifier,
entropy_source: ES,
}
#[derive(Copy, Clone, PartialEq)]
+#[must_use]
enum NotifyOption {
DoPersist,
SkipPersist,
}
impl<'a> PersistenceNotifierGuard<'a, fn() -> NotifyOption> { // We don't care what the concrete F is here, it's unused
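+ // Illustrative usage from within a `ChannelManager` method (`ChannelManager` implements
+ // `AChannelManager`); the returned guard always requests persistence and notifies waiters
+ // when dropped:
+ //   let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);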
- fn notify_on_drop(lock: &'a RwLock<()>, notifier: &'a Notifier) -> PersistenceNotifierGuard<'a, impl Fn() -> NotifyOption> {
- PersistenceNotifierGuard::optionally_notify(lock, notifier, || -> NotifyOption { NotifyOption::DoPersist })
+ fn notify_on_drop<C: AChannelManager>(cm: &'a C) -> PersistenceNotifierGuard<'a, impl Fn() -> NotifyOption> {
+ let read_guard = cm.get_cm().total_consistency_lock.read().unwrap();
+ let _ = cm.get_cm().process_background_events(); // We always persist
+
+ PersistenceNotifierGuard {
+ persistence_notifier: &cm.get_cm().persistence_notifier,
+ should_persist: || -> NotifyOption { NotifyOption::DoPersist },
+ _read_guard: read_guard,
+ }
}
+ /// Note that if any [`ChannelMonitorUpdate`]s are possibly generated,
+ /// [`ChannelManager::process_background_events`] MUST be called first.
fn optionally_notify<F: Fn() -> NotifyOption>(lock: &'a RwLock<()>, notifier: &'a Notifier, persist_check: F) -> PersistenceNotifierGuard<'a, F> {
let read_guard = lock.read().unwrap();
/// [`OutboundPayments::remove_stale_resolved_payments`].
pub(crate) const IDEMPOTENCY_TIMEOUT_TICKS: u8 = 7;
+/// The number of ticks of [`ChannelManager::timer_tick_occurred`] where a peer is disconnected
+/// until we mark the channel disabled and gossip the update.
+pub(crate) const DISABLE_GOSSIP_TICKS: u8 = 10;
+
+/// The number of ticks of [`ChannelManager::timer_tick_occurred`] where a peer is connected until
+/// we mark the channel enabled and gossip the update.
+pub(crate) const ENABLE_GOSSIP_TICKS: u8 = 5;
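+// With `timer_tick_occurred` called roughly once per minute (see the `ChannelManager` docs),
+// these defaults translate to roughly ten minutes offline before we gossip a disable and
+// five minutes of connectivity before we re-enable.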
+
/// The maximum number of unfunded channels we can have per-peer before we start rejecting new
/// (inbound) ones. The number of peers with unfunded channels is limited separately in
/// [`MAX_UNFUNDED_CHANNEL_PEERS`].
pub outbound_htlc_maximum_msat: Option<u64>,
}
-/// Details of a channel, as returned by ChannelManager::list_channels and ChannelManager::list_usable_channels
+/// Details of a channel, as returned by [`ChannelManager::list_channels`] and [`ChannelManager::list_usable_channels`]
#[derive(Clone, Debug, PartialEq)]
pub struct ChannelDetails {
/// The channel's ID (prior to funding transaction generation, this is a random 32 bytes,
/// inbound. This may be zero for inbound channels serialized with LDK versions prior to
/// 0.0.113.
pub user_channel_id: u128,
+ /// The currently negotiated fee rate denominated in satoshi per 1000 weight units,
+ /// which is applied to commitment and HTLC transactions.
+ ///
+ /// This value will be `None` for objects serialized with LDK versions prior to 0.0.115.
+ pub feerate_sat_per_1000_weight: Option<u32>,
/// Our total balance. This is the amount we would get if we close the channel.
/// This value is not exact. Due to various in-flight changes and feerate changes, exactly this
/// amount is not likely to be recoverable on close.
/// the current state and per-HTLC limit(s). This is intended for use when routing, allowing us
/// to use a limit as close as possible to the HTLC limit we can currently send.
///
- /// See also [`ChannelDetails::balance_msat`] and [`ChannelDetails::outbound_capacity_msat`].
+ /// See also [`ChannelDetails::next_outbound_htlc_minimum_msat`],
+ /// [`ChannelDetails::balance_msat`], and [`ChannelDetails::outbound_capacity_msat`].
pub next_outbound_htlc_limit_msat: u64,
+ /// The minimum value for sending a single HTLC to the remote peer. This is the equivalent of
+ /// [`ChannelDetails::next_outbound_htlc_limit_msat`] but represents a lower-bound, rather than
+ /// an upper-bound. This is intended for use when routing, allowing us to ensure we pick a
+ /// route which is valid.
+ pub next_outbound_htlc_minimum_msat: u64,
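+ // Illustrative invariant: any single HTLC routed out over this channel must satisfy
+ // `next_outbound_htlc_minimum_msat <= amt_msat <= next_outbound_htlc_limit_msat`.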
/// The available inbound capacity for the remote peer to send HTLCs to us. This does not
/// include any pending HTLCs which are not yet fully resolved (and, thus, whose balance is not
/// available for inclusion in new inbound HTLCs).
///
/// [`confirmations_required`]: ChannelDetails::confirmations_required
pub is_channel_ready: bool,
+ /// The stage of the channel's shutdown.
+ /// `None` for `ChannelDetails` serialized on LDK versions prior to 0.0.116.
+ pub channel_shutdown_state: Option<ChannelShutdownState>,
/// True if the channel is (a) confirmed and channel_ready messages have been exchanged, (b)
/// the peer is connected, and (c) the channel is not currently negotiating a shutdown.
///
pub fn get_outbound_payment_scid(&self) -> Option<u64> {
self.short_channel_id.or(self.outbound_scid_alias)
}
+
+ fn from_channel_context<Signer: WriteableEcdsaChannelSigner, F: Deref>(
+ context: &ChannelContext<Signer>, best_block_height: u32, latest_features: InitFeatures,
+ fee_estimator: &LowerBoundedFeeEstimator<F>
+ ) -> Self
+ where F::Target: FeeEstimator
+ {
+ let balance = context.get_available_balances(fee_estimator);
+ let (to_remote_reserve_satoshis, to_self_reserve_satoshis) =
+ context.get_holder_counterparty_selected_channel_reserve_satoshis();
+ ChannelDetails {
+ channel_id: context.channel_id(),
+ counterparty: ChannelCounterparty {
+ node_id: context.get_counterparty_node_id(),
+ features: latest_features,
+ unspendable_punishment_reserve: to_remote_reserve_satoshis,
+ forwarding_info: context.counterparty_forwarding_info(),
+ // Ensures that we have actually received the `htlc_minimum_msat` value
+ // from the counterparty through the `OpenChannel` or `AcceptChannel`
+ // message (as they are always the first message from the counterparty).
+ // Else `Channel::get_counterparty_htlc_minimum_msat` could return the
+ // default `0` value set by `Channel::new_outbound`.
+ outbound_htlc_minimum_msat: if context.have_received_message() {
+ Some(context.get_counterparty_htlc_minimum_msat()) } else { None },
+ outbound_htlc_maximum_msat: context.get_counterparty_htlc_maximum_msat(),
+ },
+ funding_txo: context.get_funding_txo(),
+ // Note that accept_channel (or open_channel) is always the first message, so
+ // `have_received_message` indicates that type negotiation has completed.
+ channel_type: if context.have_received_message() { Some(context.get_channel_type().clone()) } else { None },
+ short_channel_id: context.get_short_channel_id(),
+ outbound_scid_alias: if context.is_usable() { Some(context.outbound_scid_alias()) } else { None },
+ inbound_scid_alias: context.latest_inbound_scid_alias(),
+ channel_value_satoshis: context.get_value_satoshis(),
+ feerate_sat_per_1000_weight: Some(context.get_feerate_sat_per_1000_weight()),
+ unspendable_punishment_reserve: to_self_reserve_satoshis,
+ balance_msat: balance.balance_msat,
+ inbound_capacity_msat: balance.inbound_capacity_msat,
+ outbound_capacity_msat: balance.outbound_capacity_msat,
+ next_outbound_htlc_limit_msat: balance.next_outbound_htlc_limit_msat,
+ next_outbound_htlc_minimum_msat: balance.next_outbound_htlc_minimum_msat,
+ user_channel_id: context.get_user_id(),
+ confirmations_required: context.minimum_depth(),
+ confirmations: Some(context.get_funding_tx_confirmations(best_block_height)),
+ force_close_spend_delay: context.get_counterparty_selected_contest_delay(),
+ is_outbound: context.is_outbound(),
+ is_channel_ready: context.is_usable(),
+ is_usable: context.is_live(),
+ is_public: context.should_announce(),
+ inbound_htlc_minimum_msat: Some(context.get_holder_htlc_minimum_msat()),
+ inbound_htlc_maximum_msat: context.get_holder_htlc_maximum_msat(),
+ config: Some(context.config()),
+ channel_shutdown_state: Some(context.shutdown_state()),
+ }
+ }
+}
+
+#[derive(Clone, Copy, Debug, PartialEq, Eq)]
+/// Further information on the details of the channel shutdown.
+/// Upon channels being forced closed (i.e. commitment transaction confirmation detected
+/// by `ChainMonitor`), ChannelShutdownState will be set to `ShutdownComplete` or
+/// the channel will be removed shortly.
+/// Also note that in normal operation, peers could disconnect at any of these states
+/// and require peer re-connection before making progress onto other states.
+pub enum ChannelShutdownState {
+ /// Channel has not sent or received a shutdown message.
+ NotShuttingDown,
+ /// Local node has sent a shutdown message for this channel.
+ ShutdownInitiated,
/// Shutdown message exchanges have concluded and the channel is in the midst of
/// resolving all existing open HTLCs before closing can continue.
ResolvingHTLCs,
/// All HTLCs have been resolved and the nodes are currently negotiating the on-chain fee
/// rate for the closing transaction.
+ NegotiatingClosingFee,
+ /// We've successfully negotiated a closing_signed dance. At this point `ChannelManager` is about
+ /// to drop the channel.
+ ShutdownComplete,
}
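+// Typical cooperative-close progression: `NotShuttingDown` -> `ShutdownInitiated` ->
+// `ResolvingHTLCs` -> `NegotiatingClosingFee` -> `ShutdownComplete`; per the docs above,
+// peers may disconnect (and stall progress) at any intermediate state.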
/// Used by [`ChannelManager::list_recent_payments`] to express the status of recent payments.
/// Route hints used in constructing invoices for [phantom node payments].
///
-/// [phantom node payments]: crate::chain::keysinterface::PhantomKeysManager
+/// [phantom node payments]: crate::sign::PhantomKeysManager
#[derive(Clone)]
pub struct PhantomRouteHints {
/// The list of channels to be included in the invoice route hints.
}
macro_rules! handle_error {
- ($self: ident, $internal: expr, $counterparty_node_id: expr) => {
+ ($self: ident, $internal: expr, $counterparty_node_id: expr) => { {
+ // In testing, ensure there are no deadlocks where the lock is already held upon
+ // entering the macro.
+ debug_assert_ne!($self.pending_events.held_by_thread(), LockHeldState::HeldByThread);
+ debug_assert_ne!($self.per_peer_state.held_by_thread(), LockHeldState::HeldByThread);
+
match $internal {
Ok(msg) => Ok(msg),
Err(MsgHandleErrInternal { err, chan_id, shutdown_finish }) => {
- // In testing, ensure there are no deadlocks where the lock is already held upon
- // entering the macro.
- debug_assert_ne!($self.pending_events.held_by_thread(), LockHeldState::HeldByThread);
- debug_assert_ne!($self.per_peer_state.held_by_thread(), LockHeldState::HeldByThread);
-
let mut msg_events = Vec::with_capacity(2);
if let Some((shutdown_res, update_option)) = shutdown_finish {
});
}
if let Some((channel_id, user_channel_id)) = chan_id {
- $self.pending_events.lock().unwrap().push(events::Event::ChannelClosed {
+ $self.pending_events.lock().unwrap().push_back((events::Event::ChannelClosed {
channel_id, user_channel_id,
reason: ClosureReason::ProcessingError { err: err.err.clone() }
- });
+ }, None));
}
}
Err(err)
},
}
- }
+ } };
+ ($self: ident, $internal: expr) => {
+ match $internal {
+ Ok(res) => Ok(res),
+ Err((chan, msg_handle_err)) => {
+ let counterparty_node_id = chan.context.get_counterparty_node_id();
+ handle_error!($self, Err(msg_handle_err), counterparty_node_id).map_err(|err| (chan, err))
+ },
+ }
+ };
}
macro_rules! update_maps_on_chan_removal {
- ($self: expr, $channel: expr) => {{
- $self.id_to_peer.lock().unwrap().remove(&$channel.channel_id());
+ ($self: expr, $channel_context: expr) => {{
+ $self.id_to_peer.lock().unwrap().remove(&$channel_context.channel_id());
let mut short_to_chan_info = $self.short_to_chan_info.write().unwrap();
- if let Some(short_id) = $channel.get_short_channel_id() {
+ if let Some(short_id) = $channel_context.get_short_channel_id() {
short_to_chan_info.remove(&short_id);
} else {
// If the channel was never confirmed on-chain prior to its closure, remove the
// also don't want a counterparty to be able to trivially cause a memory leak by simply
// opening a million channels with us which are closed before we ever reach the funding
// stage.
- let alias_removed = $self.outbound_scid_aliases.lock().unwrap().remove(&$channel.outbound_scid_alias());
+ let alias_removed = $self.outbound_scid_aliases.lock().unwrap().remove(&$channel_context.outbound_scid_alias());
debug_assert!(alias_removed);
}
- short_to_chan_info.remove(&$channel.outbound_scid_alias());
+ short_to_chan_info.remove(&$channel_context.outbound_scid_alias());
}}
}
},
ChannelError::Close(msg) => {
log_error!($self.logger, "Closing channel {} due to close-required error: {}", log_bytes!($channel_id[..]), msg);
- update_maps_on_chan_removal!($self, $channel);
- let shutdown_res = $channel.force_shutdown(true);
- (true, MsgHandleErrInternal::from_finish_shutdown(msg, *$channel_id, $channel.get_user_id(),
+ update_maps_on_chan_removal!($self, &$channel.context);
+ let shutdown_res = $channel.context.force_shutdown(true);
+ (true, MsgHandleErrInternal::from_finish_shutdown(msg, *$channel_id, $channel.context.get_user_id(),
shutdown_res, $self.get_channel_update_for_broadcast(&$channel).ok()))
},
}
+ };
+ ($self: ident, $err: expr, $channel_context: expr, $channel_id: expr, PREFUNDED) => {
+ match $err {
+ // We should only ever have `ChannelError::Close` when prefunded channels error.
+ // In any case, just close the channel.
+ ChannelError::Warn(msg) | ChannelError::Ignore(msg) | ChannelError::Close(msg) => {
+ log_error!($self.logger, "Closing prefunded channel {} due to an error: {}", log_bytes!($channel_id[..]), msg);
+ update_maps_on_chan_removal!($self, &$channel_context);
+ let shutdown_res = $channel_context.force_shutdown(false);
+ (true, MsgHandleErrInternal::from_finish_shutdown(msg, *$channel_id, $channel_context.get_user_id(),
+ shutdown_res, None))
+ },
+ }
}
}
}
}
+macro_rules! try_v1_outbound_chan_entry {
+ ($self: ident, $res: expr, $entry: expr) => {
+ match $res {
+ Ok(res) => res,
+ Err(e) => {
+ let (drop, res) = convert_chan_err!($self, e, $entry.get_mut().context, $entry.key(), PREFUNDED);
+ if drop {
+ $entry.remove_entry();
+ }
+ return Err(res);
+ }
+ }
+ }
+}
+
macro_rules! try_chan_entry {
($self: ident, $res: expr, $entry: expr) => {
match $res {
($self: expr, $entry: expr) => {
{
let channel = $entry.remove_entry().1;
- update_maps_on_chan_removal!($self, channel);
+ update_maps_on_chan_removal!($self, &channel.context);
channel
}
}
macro_rules! send_channel_ready {
($self: ident, $pending_msg_events: expr, $channel: expr, $channel_ready_msg: expr) => {{
$pending_msg_events.push(events::MessageSendEvent::SendChannelReady {
- node_id: $channel.get_counterparty_node_id(),
+ node_id: $channel.context.get_counterparty_node_id(),
msg: $channel_ready_msg,
});
// Note that we may send a `channel_ready` multiple times for a channel if we reconnect, so
// we allow collisions, but we shouldn't ever be updating the channel ID pointed to.
let mut short_to_chan_info = $self.short_to_chan_info.write().unwrap();
- let outbound_alias_insert = short_to_chan_info.insert($channel.outbound_scid_alias(), ($channel.get_counterparty_node_id(), $channel.channel_id()));
- assert!(outbound_alias_insert.is_none() || outbound_alias_insert.unwrap() == ($channel.get_counterparty_node_id(), $channel.channel_id()),
+ let outbound_alias_insert = short_to_chan_info.insert($channel.context.outbound_scid_alias(), ($channel.context.get_counterparty_node_id(), $channel.context.channel_id()));
+ assert!(outbound_alias_insert.is_none() || outbound_alias_insert.unwrap() == ($channel.context.get_counterparty_node_id(), $channel.context.channel_id()),
"SCIDs should never collide - ensure you weren't behind the chain tip by a full month when creating channels");
- if let Some(real_scid) = $channel.get_short_channel_id() {
- let scid_insert = short_to_chan_info.insert(real_scid, ($channel.get_counterparty_node_id(), $channel.channel_id()));
- assert!(scid_insert.is_none() || scid_insert.unwrap() == ($channel.get_counterparty_node_id(), $channel.channel_id()),
+ if let Some(real_scid) = $channel.context.get_short_channel_id() {
+ let scid_insert = short_to_chan_info.insert(real_scid, ($channel.context.get_counterparty_node_id(), $channel.context.channel_id()));
+ assert!(scid_insert.is_none() || scid_insert.unwrap() == ($channel.context.get_counterparty_node_id(), $channel.context.channel_id()),
"SCIDs should never collide - ensure you weren't behind the chain tip by a full month when creating channels");
}
}}
}
+macro_rules! emit_channel_pending_event {
+ ($locked_events: expr, $channel: expr) => {
+ if $channel.context.should_emit_channel_pending_event() {
+ $locked_events.push_back((events::Event::ChannelPending {
+ channel_id: $channel.context.channel_id(),
+ former_temporary_channel_id: $channel.context.temporary_channel_id(),
+ counterparty_node_id: $channel.context.get_counterparty_node_id(),
+ user_channel_id: $channel.context.get_user_id(),
+ funding_txo: $channel.context.get_funding_txo().unwrap().into_bitcoin_outpoint(),
+ }, None));
+ $channel.context.set_channel_pending_event_emitted();
+ }
+ }
+}
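+// Note: `emit_channel_ready_event!` below debug-asserts that the `ChannelPending` event has
+// already been emitted for the channel, so the two macros are intentionally ordered.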
+
macro_rules! emit_channel_ready_event {
- ($self: expr, $channel: expr) => {
- if $channel.should_emit_channel_ready_event() {
- {
- let mut pending_events = $self.pending_events.lock().unwrap();
- pending_events.push(events::Event::ChannelReady {
- channel_id: $channel.channel_id(),
- user_channel_id: $channel.get_user_id(),
- counterparty_node_id: $channel.get_counterparty_node_id(),
- channel_type: $channel.get_channel_type().clone(),
- });
- }
- $channel.set_channel_ready_event_emitted();
+ ($locked_events: expr, $channel: expr) => {
+ if $channel.context.should_emit_channel_ready_event() {
+ debug_assert!($channel.context.channel_pending_event_emitted());
+ $locked_events.push_back((events::Event::ChannelReady {
+ channel_id: $channel.context.channel_id(),
+ user_channel_id: $channel.context.get_user_id(),
+ counterparty_node_id: $channel.context.get_counterparty_node_id(),
+ channel_type: $channel.context.get_channel_type().clone(),
+ }, None));
+ $channel.context.set_channel_ready_event_emitted();
}
}
}
macro_rules! handle_monitor_update_completion {
- ($self: ident, $update_id: expr, $peer_state_lock: expr, $peer_state: expr, $per_peer_state_lock: expr, $chan: expr) => { {
+ ($self: ident, $peer_state_lock: expr, $peer_state: expr, $per_peer_state_lock: expr, $chan: expr) => { {
let mut updates = $chan.monitor_updating_restored(&$self.logger,
&$self.node_signer, $self.genesis_hash, &$self.default_configuration,
$self.best_block.read().unwrap().height());
- let counterparty_node_id = $chan.get_counterparty_node_id();
- let channel_update = if updates.channel_ready.is_some() && $chan.is_usable() {
+ let counterparty_node_id = $chan.context.get_counterparty_node_id();
+ let channel_update = if updates.channel_ready.is_some() && $chan.context.is_usable() {
// We only send a channel_update in the case where we are just now sending a
// channel_ready and the channel is in a usable state. We may re-send a
// channel_update later through the announcement_signatures process for public
} else { None };
let update_actions = $peer_state.monitor_update_blocked_actions
- .remove(&$chan.channel_id()).unwrap_or(Vec::new());
+ .remove(&$chan.context.channel_id()).unwrap_or(Vec::new());
let htlc_forwards = $self.handle_channel_resumption(
&mut $peer_state.pending_msg_events, $chan, updates.raa,
$peer_state.pending_msg_events.push(upd);
}
- let channel_id = $chan.channel_id();
+ let channel_id = $chan.context.channel_id();
core::mem::drop($peer_state_lock);
core::mem::drop($per_peer_state_lock);
}
macro_rules! handle_new_monitor_update {
- ($self: ident, $update_res: expr, $update_id: expr, $peer_state_lock: expr, $peer_state: expr, $per_peer_state_lock: expr, $chan: expr, MANUALLY_REMOVING, $remove: expr) => { {
+ ($self: ident, $update_res: expr, $peer_state_lock: expr, $peer_state: expr, $per_peer_state_lock: expr, $chan: expr, _internal, $remove: expr, $completed: expr) => { {
// update_maps_on_chan_removal needs to be able to take id_to_peer, so make sure we can in
// any case so that it won't deadlock.
- debug_assert!($self.id_to_peer.try_lock().is_ok());
+ debug_assert_ne!($self.id_to_peer.held_by_thread(), LockHeldState::HeldByThread);
+ debug_assert!($self.background_events_processed_since_startup.load(Ordering::Acquire));
match $update_res {
ChannelMonitorUpdateStatus::InProgress => {
log_debug!($self.logger, "ChannelMonitor update for {} in flight, holding messages until the update completes.",
- log_bytes!($chan.channel_id()[..]));
- Ok(())
+ log_bytes!($chan.context.channel_id()[..]));
+ Ok(false)
},
ChannelMonitorUpdateStatus::PermanentFailure => {
log_error!($self.logger, "Closing channel {} due to monitor update ChannelMonitorUpdateStatus::PermanentFailure",
- log_bytes!($chan.channel_id()[..]));
- update_maps_on_chan_removal!($self, $chan);
- let res: Result<(), _> = Err(MsgHandleErrInternal::from_finish_shutdown(
- "ChannelMonitor storage failure".to_owned(), $chan.channel_id(),
- $chan.get_user_id(), $chan.force_shutdown(false),
+ log_bytes!($chan.context.channel_id()[..]));
+ update_maps_on_chan_removal!($self, &$chan.context);
+ let res = Err(MsgHandleErrInternal::from_finish_shutdown(
+ "ChannelMonitor storage failure".to_owned(), $chan.context.channel_id(),
+ $chan.context.get_user_id(), $chan.context.force_shutdown(false),
$self.get_channel_update_for_broadcast(&$chan).ok()));
$remove;
res
},
ChannelMonitorUpdateStatus::Completed => {
- if ($update_id == 0 || $chan.get_next_monitor_update()
- .expect("We can't be processing a monitor update if it isn't queued")
- .update_id == $update_id) &&
- $chan.get_latest_monitor_update_id() == $update_id
- {
- handle_monitor_update_completion!($self, $update_id, $peer_state_lock, $peer_state, $per_peer_state_lock, $chan);
- }
- Ok(())
+ $completed;
+ Ok(true)
},
}
} };
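+ // The `_internal` arm above is the shared core of this macro; the arms below differ only
+ // in the `$completed` continuation they pass, which runs once the update has been applied
+ // synchronously (`ChannelMonitorUpdateStatus::Completed`).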
- ($self: ident, $update_res: expr, $update_id: expr, $peer_state_lock: expr, $peer_state: expr, $per_peer_state_lock: expr, $chan_entry: expr) => {
- handle_new_monitor_update!($self, $update_res, $update_id, $peer_state_lock, $peer_state, $per_peer_state_lock, $chan_entry.get_mut(), MANUALLY_REMOVING, $chan_entry.remove_entry())
+ ($self: ident, $update_res: expr, $peer_state_lock: expr, $peer_state: expr, $per_peer_state_lock: expr, $chan: expr, MANUALLY_REMOVING_INITIAL_MONITOR, $remove: expr) => {
+ handle_new_monitor_update!($self, $update_res, $peer_state_lock, $peer_state,
+ $per_peer_state_lock, $chan, _internal, $remove,
+ handle_monitor_update_completion!($self, $peer_state_lock, $peer_state, $per_peer_state_lock, $chan))
+ };
+ ($self: ident, $update_res: expr, $peer_state_lock: expr, $peer_state: expr, $per_peer_state_lock: expr, $chan_entry: expr, INITIAL_MONITOR) => {
+ handle_new_monitor_update!($self, $update_res, $peer_state_lock, $peer_state, $per_peer_state_lock, $chan_entry.get_mut(), MANUALLY_REMOVING_INITIAL_MONITOR, $chan_entry.remove_entry())
+ };
+ ($self: ident, $funding_txo: expr, $update: expr, $peer_state_lock: expr, $peer_state: expr, $per_peer_state_lock: expr, $chan: expr, MANUALLY_REMOVING, $remove: expr) => { {
+ let in_flight_updates = $peer_state.in_flight_monitor_updates.entry($funding_txo)
+ .or_insert_with(Vec::new);
+ // During startup, we push monitor updates as background events through to here in
+ // order to replay updates that were in-flight when we shut down. Thus, we have to
+ // filter for uniqueness here.
+ let idx = in_flight_updates.iter().position(|upd| upd == &$update)
+ .unwrap_or_else(|| {
+ in_flight_updates.push($update);
+ in_flight_updates.len() - 1
+ });
+ let update_res = $self.chain_monitor.update_channel($funding_txo, &in_flight_updates[idx]);
+ handle_new_monitor_update!($self, update_res, $peer_state_lock, $peer_state,
+ $per_peer_state_lock, $chan, _internal, $remove,
+ {
+ let _ = in_flight_updates.remove(idx);
+ if in_flight_updates.is_empty() && $chan.blocked_monitor_updates_pending() == 0 {
+ handle_monitor_update_completion!($self, $peer_state_lock, $peer_state, $per_peer_state_lock, $chan);
+ }
+ })
+ } };
+ ($self: ident, $funding_txo: expr, $update: expr, $peer_state_lock: expr, $peer_state: expr, $per_peer_state_lock: expr, $chan_entry: expr) => {
+ handle_new_monitor_update!($self, $funding_txo, $update, $peer_state_lock, $peer_state, $per_peer_state_lock, $chan_entry.get_mut(), MANUALLY_REMOVING, $chan_entry.remove_entry())
+ }
+}
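
// The update-queuing arm above relies on a "position-or-push" idiom: reuse the
// slot of an equal in-flight update if one was already queued (e.g. replayed
// from a startup background event), otherwise append it and use the tail
// index. A minimal standalone sketch of the idiom, with illustrative types
// rather than the real `ChannelMonitorUpdate`:
fn position_or_push<T: PartialEq>(in_flight: &mut Vec<T>, update: T) -> usize {
    in_flight.iter().position(|upd| upd == &update).unwrap_or_else(|| {
        in_flight.push(update);
        in_flight.len() - 1
    })
}

fn check_position_or_push() {
    let mut in_flight = vec![1u64, 2, 3];
    assert_eq!(position_or_push(&mut in_flight, 2), 1); // duplicate reuses its slot
    assert_eq!(position_or_push(&mut in_flight, 9), 3); // new update lands at the tail
}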
+
+macro_rules! process_events_body {
+ ($self: expr, $event_to_handle: expr, $handle_event: expr) => {
+ let mut processed_all_events = false;
+ while !processed_all_events {
+ if $self.pending_events_processor.compare_exchange(false, true, Ordering::Acquire, Ordering::Relaxed).is_err() {
+ return;
+ }
+
+ let mut result = NotifyOption::SkipPersist;
+
+ {
+ // We'll acquire our total consistency lock so that we can be sure no other
+ // persists happen while processing monitor events.
+ let _read_guard = $self.total_consistency_lock.read().unwrap();
+
+ // Because `handle_post_event_actions` may send `ChannelMonitorUpdate`s to the user we must
+ // ensure any startup-generated background events are handled first.
+ if $self.process_background_events() == NotifyOption::DoPersist { result = NotifyOption::DoPersist; }
+
+ // TODO: This behavior should be documented. It's unintuitive that we query
+ // ChannelMonitors when clearing other events.
+ if $self.process_pending_monitor_events() {
+ result = NotifyOption::DoPersist;
+ }
+ }
+
+ let pending_events = $self.pending_events.lock().unwrap().clone();
+ let num_events = pending_events.len();
+ if !pending_events.is_empty() {
+ result = NotifyOption::DoPersist;
+ }
+
+ let mut post_event_actions = Vec::new();
+
+ for (event, action_opt) in pending_events {
+ $event_to_handle = event;
+ $handle_event;
+ if let Some(action) = action_opt {
+ post_event_actions.push(action);
+ }
+ }
+
+ {
+ let mut pending_events = $self.pending_events.lock().unwrap();
+ pending_events.drain(..num_events);
+ processed_all_events = pending_events.is_empty();
+ // Note that `push_pending_forwards_ev` relies on `pending_events_processor` being
+ // updated here with the `pending_events` lock acquired.
+ $self.pending_events_processor.store(false, Ordering::Release);
+ }
+
+ if !post_event_actions.is_empty() {
+ $self.handle_post_event_actions(post_event_actions);
+ // If we had some actions, go around again as we may have more events now
+ processed_all_events = false;
+ }
+
+ if result == NotifyOption::DoPersist {
+ $self.persistence_notifier.notify();
+ }
+ }
}
}
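
// `process_events_body!` admits only one event-processing thread at a time by
// claiming an `AtomicBool` with `compare_exchange` rather than holding a lock
// across user callbacks. A minimal sketch of that guard, assuming a simplified
// handler (the real macro additionally re-loops when post-event actions may
// have queued more events):
use std::sync::atomic::{AtomicBool, Ordering};

fn process_events(processor_busy: &AtomicBool, mut handle_event: impl FnMut()) {
    // Claim the flag; if another thread is already processing, bail out.
    if processor_busy.compare_exchange(false, true, Ordering::Acquire, Ordering::Relaxed).is_err() {
        return;
    }
    handle_event(); // drain and handle pending events here
    processor_busy.store(false, Ordering::Release);
}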
R::Target: Router,
L::Target: Logger,
{
- /// Constructs a new ChannelManager to hold several channels and route between them.
+ /// Constructs a new `ChannelManager` to hold several channels and route between them.
+ ///
+ /// The current time or latest block header time can be provided as the `current_timestamp`.
///
/// This is the main "logic hub" for all channel-related actions, and implements
- /// ChannelMessageHandler.
+ /// [`ChannelMessageHandler`].
///
/// Non-proportional fees are fixed according to our risk using the provided fee estimator.
///
- /// Users need to notify the new ChannelManager when a new block is connected or
- /// disconnected using its `block_connected` and `block_disconnected` methods, starting
- /// from after `params.latest_hash`.
- pub fn new(fee_est: F, chain_monitor: M, tx_broadcaster: T, router: R, logger: L, entropy_source: ES, node_signer: NS, signer_provider: SP, config: UserConfig, params: ChainParameters) -> Self {
+ /// Users need to notify the new `ChannelManager` when a new block is connected or
+ /// disconnected using its [`block_connected`] and [`block_disconnected`] methods, starting
+ /// from after [`params.best_block.block_hash`]. See [`chain::Listen`] and [`chain::Confirm`] for
+ /// more details.
+ ///
+ /// [`block_connected`]: chain::Listen::block_connected
+ /// [`block_disconnected`]: chain::Listen::block_disconnected
+ /// [`params.best_block.block_hash`]: chain::BestBlock::block_hash
+ pub fn new(
+ fee_est: F, chain_monitor: M, tx_broadcaster: T, router: R, logger: L, entropy_source: ES,
+ node_signer: NS, signer_provider: SP, config: UserConfig, params: ChainParameters,
+ current_timestamp: u32,
+ ) -> Self {
let mut secp_ctx = Secp256k1::new();
secp_ctx.seeded_randomize(&entropy_source.get_secure_random_bytes());
let inbound_pmt_key_material = node_signer.get_inbound_payment_key_material();
pending_inbound_payments: Mutex::new(HashMap::new()),
pending_outbound_payments: OutboundPayments::new(),
forward_htlcs: Mutex::new(HashMap::new()),
- claimable_payments: Mutex::new(ClaimablePayments { claimable_htlcs: HashMap::new(), pending_claiming_payments: HashMap::new() }),
+ claimable_payments: Mutex::new(ClaimablePayments { claimable_payments: HashMap::new(), pending_claiming_payments: HashMap::new() }),
pending_intercepted_htlcs: Mutex::new(HashMap::new()),
id_to_peer: Mutex::new(HashMap::new()),
short_to_chan_info: FairRwLock::new(HashMap::new()),
probing_cookie_secret: entropy_source.get_secure_random_bytes(),
- highest_seen_timestamp: AtomicUsize::new(0),
+ highest_seen_timestamp: AtomicUsize::new(current_timestamp as usize),
per_peer_state: FairRwLock::new(HashMap::new()),
- pending_events: Mutex::new(Vec::new()),
+ pending_events: Mutex::new(VecDeque::new()),
+ pending_events_processor: AtomicBool::new(false),
pending_background_events: Mutex::new(Vec::new()),
total_consistency_lock: RwLock::new(()),
+ background_events_processed_since_startup: AtomicBool::new(false),
persistence_notifier: Notifier::new(),
entropy_source,
/// Raises [`APIError::APIMisuseError`] when `channel_value_satoshis` > 2**24 or `push_msat` is
/// greater than `channel_value_satoshis * 1k` or `channel_value_satoshis < 1000`.
///
+ /// Raises [`APIError::ChannelUnavailable`] if the channel cannot be opened due to failing to
+ /// generate a shutdown scriptpubkey or destination script set by
+ /// [`SignerProvider::get_shutdown_scriptpubkey`] or [`SignerProvider::get_destination_script`].
+ ///
/// Note that we do not check if you are currently connected to the given peer. If no
/// connection is available, the outbound `open_channel` message may fail to send, resulting in
/// the channel eventually being silently forgotten (dropped on reload).
return Err(APIError::APIMisuseError { err: format!("Channel value must be at least 1000 satoshis. It was {}", channel_value_satoshis) });
}
- let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
+ let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
// We want to make sure the lock is actually acquired by PersistenceNotifierGuard.
debug_assert!(&self.total_consistency_lock.try_write().is_err());
let outbound_scid_alias = self.create_and_insert_outbound_scid_alias();
let their_features = &peer_state.latest_features;
let config = if override_config.is_some() { override_config.as_ref().unwrap() } else { &self.default_configuration };
- match Channel::new_outbound(&self.fee_estimator, &self.entropy_source, &self.signer_provider, their_network_key,
+ match OutboundV1Channel::new(&self.fee_estimator, &self.entropy_source, &self.signer_provider, their_network_key,
their_features, channel_value_satoshis, push_msat, user_channel_id, config,
self.best_block.read().unwrap().height(), outbound_scid_alias)
{
};
let res = channel.get_open_channel(self.genesis_hash.clone());
- let temporary_channel_id = channel.channel_id();
- match peer_state.channel_by_id.entry(temporary_channel_id) {
+ let temporary_channel_id = channel.context.channel_id();
+ match peer_state.outbound_v1_channel_by_id.entry(temporary_channel_id) {
hash_map::Entry::Occupied(_) => {
if cfg!(fuzzing) {
return Err(APIError::APIMisuseError { err: "Fuzzy bad RNG".to_owned() });
Ok(temporary_channel_id)
}
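
// New channels are keyed by a random temporary id, so an `Occupied` entry can
// only mean the RNG produced a duplicate (surfaced as an error under fuzzing,
// as above). A standalone sketch of that entry-based check, with illustrative
// types rather than the real channel maps:
use std::collections::hash_map::{Entry, HashMap};

fn insert_new_channel<V>(
    map: &mut HashMap<[u8; 32], V>, temporary_channel_id: [u8; 32], chan: V,
) -> Result<(), &'static str> {
    match map.entry(temporary_channel_id) {
        Entry::Occupied(_) => Err("duplicate temporary channel id: bad RNG"),
        Entry::Vacant(entry) => { entry.insert(chan); Ok(()) }
    }
}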
- fn list_channels_with_filter<Fn: FnMut(&(&[u8; 32], &Channel<<SP::Target as SignerProvider>::Signer>)) -> bool + Copy>(&self, f: Fn) -> Vec<ChannelDetails> {
+ fn list_funded_channels_with_filter<Fn: FnMut(&(&[u8; 32], &Channel<<SP::Target as SignerProvider>::Signer>)) -> bool + Copy>(&self, f: Fn) -> Vec<ChannelDetails> {
// Allocate our best estimate of the number of channels we have in the `res`
// Vec. Sadly the `short_to_chan_info` map doesn't cover channels without
// a scid or a scid alias, and the `id_to_peer` shouldn't be used outside
for (_cp_id, peer_state_mutex) in per_peer_state.iter() {
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state = &mut *peer_state_lock;
- for (channel_id, channel) in peer_state.channel_by_id.iter().filter(f) {
- let balance = channel.get_available_balances();
- let (to_remote_reserve_satoshis, to_self_reserve_satoshis) =
- channel.get_holder_counterparty_selected_channel_reserve_satoshis();
- res.push(ChannelDetails {
- channel_id: (*channel_id).clone(),
- counterparty: ChannelCounterparty {
- node_id: channel.get_counterparty_node_id(),
- features: peer_state.latest_features.clone(),
- unspendable_punishment_reserve: to_remote_reserve_satoshis,
- forwarding_info: channel.counterparty_forwarding_info(),
- // Ensures that we have actually received the `htlc_minimum_msat` value
- // from the counterparty through the `OpenChannel` or `AcceptChannel`
- // message (as they are always the first message from the counterparty).
- // Else `Channel::get_counterparty_htlc_minimum_msat` could return the
- // default `0` value set by `Channel::new_outbound`.
- outbound_htlc_minimum_msat: if channel.have_received_message() {
- Some(channel.get_counterparty_htlc_minimum_msat()) } else { None },
- outbound_htlc_maximum_msat: channel.get_counterparty_htlc_maximum_msat(),
- },
- funding_txo: channel.get_funding_txo(),
- // Note that accept_channel (or open_channel) is always the first message, so
- // `have_received_message` indicates that type negotiation has completed.
- channel_type: if channel.have_received_message() { Some(channel.get_channel_type().clone()) } else { None },
- short_channel_id: channel.get_short_channel_id(),
- outbound_scid_alias: if channel.is_usable() { Some(channel.outbound_scid_alias()) } else { None },
- inbound_scid_alias: channel.latest_inbound_scid_alias(),
- channel_value_satoshis: channel.get_value_satoshis(),
- unspendable_punishment_reserve: to_self_reserve_satoshis,
- balance_msat: balance.balance_msat,
- inbound_capacity_msat: balance.inbound_capacity_msat,
- outbound_capacity_msat: balance.outbound_capacity_msat,
- next_outbound_htlc_limit_msat: balance.next_outbound_htlc_limit_msat,
- user_channel_id: channel.get_user_id(),
- confirmations_required: channel.minimum_depth(),
- confirmations: Some(channel.get_funding_tx_confirmations(best_block_height)),
- force_close_spend_delay: channel.get_counterparty_selected_contest_delay(),
- is_outbound: channel.is_outbound(),
- is_channel_ready: channel.is_usable(),
- is_usable: channel.is_live(),
- is_public: channel.should_announce(),
- inbound_htlc_minimum_msat: Some(channel.get_holder_htlc_minimum_msat()),
- inbound_htlc_maximum_msat: channel.get_holder_htlc_maximum_msat(),
- config: Some(channel.config()),
- });
+ for (_channel_id, channel) in peer_state.channel_by_id.iter().filter(f) {
+ let details = ChannelDetails::from_channel_context(&channel.context, best_block_height,
+ peer_state.latest_features.clone(), &self.fee_estimator);
+ res.push(details);
}
}
}
res
}
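
// `list_funded_channels_with_filter` takes an `FnMut(..) + Copy` closure so
// the same filter can be handed to `.filter(f)` once per peer map without
// being consumed. A standalone sketch of the pattern over plain maps:
use std::collections::HashMap;

fn keys_matching<F: FnMut(&(&u32, &u64)) -> bool + Copy>(
    maps: &[HashMap<u32, u64>], f: F,
) -> Vec<u32> {
    let mut res = Vec::new();
    for map in maps {
        // `f` is `Copy`: this call leaves it available for the next iteration.
        res.extend(map.iter().filter(f).map(|(k, _)| *k));
    }
    res
}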
- /// Gets the list of open channels, in random order. See ChannelDetail field documentation for
+ /// Gets the list of open channels, in random order. See [`ChannelDetails`] field documentation for
/// more information.
pub fn list_channels(&self) -> Vec<ChannelDetails> {
- self.list_channels_with_filter(|_| true)
+ // Allocate our best estimate of the number of channels we have in the `res`
+ // Vec. Sadly the `short_to_chan_info` map doesn't cover channels without
+ // a scid or a scid alias, and the `id_to_peer` shouldn't be used outside
+ // of the ChannelMonitor handling. Therefore reallocations may still occur, but are
+ // unlikely as the `short_to_chan_info` map often contains 2 entries for
+ // the same channel.
+ let mut res = Vec::with_capacity(self.short_to_chan_info.read().unwrap().len());
+ {
+ let best_block_height = self.best_block.read().unwrap().height();
+ let per_peer_state = self.per_peer_state.read().unwrap();
+ for (_cp_id, peer_state_mutex) in per_peer_state.iter() {
+ let mut peer_state_lock = peer_state_mutex.lock().unwrap();
+ let peer_state = &mut *peer_state_lock;
+ for (_channel_id, channel) in peer_state.channel_by_id.iter() {
+ let details = ChannelDetails::from_channel_context(&channel.context, best_block_height,
+ peer_state.latest_features.clone(), &self.fee_estimator);
+ res.push(details);
+ }
+ for (_channel_id, channel) in peer_state.inbound_v1_channel_by_id.iter() {
+ let details = ChannelDetails::from_channel_context(&channel.context, best_block_height,
+ peer_state.latest_features.clone(), &self.fee_estimator);
+ res.push(details);
+ }
+ for (_channel_id, channel) in peer_state.outbound_v1_channel_by_id.iter() {
+ let details = ChannelDetails::from_channel_context(&channel.context, best_block_height,
+ peer_state.latest_features.clone(), &self.fee_estimator);
+ res.push(details);
+ }
+ }
+ }
+ res
}
- /// Gets the list of usable channels, in random order. Useful as an argument to [`find_route`]
- /// to ensure non-announced channels are used.
+ /// Gets the list of usable channels, in random order. Useful as an argument to
+ /// [`Router::find_route`] to ensure non-announced channels are used.
///
/// These are guaranteed to have their [`ChannelDetails::is_usable`] value set to true, see the
/// documentation for [`ChannelDetails::is_usable`] for more info on exactly what the criteria
/// are.
- ///
- /// [`find_route`]: crate::routing::router::find_route
pub fn list_usable_channels(&self) -> Vec<ChannelDetails> {
// Note we use is_live here instead of usable which leads to somewhat confused
// internal/external nomenclature, but that's OK because that's probably what the user
// really wanted anyway.
- self.list_channels_with_filter(|&(_, ref channel)| channel.is_live())
+ self.list_funded_channels_with_filter(|&(_, ref channel)| channel.context.is_live())
+ }
+
+ /// Gets the list of channels we have with a given counterparty, in random order.
+ pub fn list_channels_with_counterparty(&self, counterparty_node_id: &PublicKey) -> Vec<ChannelDetails> {
+ let best_block_height = self.best_block.read().unwrap().height();
+ let per_peer_state = self.per_peer_state.read().unwrap();
+
+ if let Some(peer_state_mutex) = per_peer_state.get(counterparty_node_id) {
+ let mut peer_state_lock = peer_state_mutex.lock().unwrap();
+ let peer_state = &mut *peer_state_lock;
+ let features = &peer_state.latest_features;
+ return peer_state.channel_by_id
+ .iter()
+ .map(|(_, channel)|
+ ChannelDetails::from_channel_context(&channel.context, best_block_height,
+ features.clone(), &self.fee_estimator))
+ .collect();
+ }
+ vec![]
}
/// Returns in an undefined order recent payments that -- if not fulfilled -- have yet to find a
}
/// Helper function that issues the channel close events
- fn issue_channel_close_events(&self, channel: &Channel<<SP::Target as SignerProvider>::Signer>, closure_reason: ClosureReason) {
+ fn issue_channel_close_events(&self, context: &ChannelContext<<SP::Target as SignerProvider>::Signer>, closure_reason: ClosureReason) {
let mut pending_events_lock = self.pending_events.lock().unwrap();
- match channel.unbroadcasted_funding() {
+ match context.unbroadcasted_funding() {
Some(transaction) => {
- pending_events_lock.push(events::Event::DiscardFunding { channel_id: channel.channel_id(), transaction })
+ pending_events_lock.push_back((events::Event::DiscardFunding {
+ channel_id: context.channel_id(), transaction
+ }, None));
},
None => {},
}
- pending_events_lock.push(events::Event::ChannelClosed {
- channel_id: channel.channel_id(),
- user_channel_id: channel.get_user_id(),
+ pending_events_lock.push_back((events::Event::ChannelClosed {
+ channel_id: context.channel_id(),
+ user_channel_id: context.get_user_id(),
reason: closure_reason
- });
+ }, None));
}
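
// Close events are queued as `(event, optional post-handling action)` pairs on
// a `VecDeque`, letting `process_events_body!` drain the handled prefix from
// the front while new events arrive at the back. A sketch with stand-in event
// and action types (not the real `Event` or completion-action enums):
use std::collections::VecDeque;

enum Ev { DiscardFunding { channel_id: [u8; 32] }, ChannelClosed { channel_id: [u8; 32] } }
enum Action { ReleaseChannelMonitorUpdate }

fn queue_close_events(
    pending: &mut VecDeque<(Ev, Option<Action>)>, channel_id: [u8; 32], discarded_funding: bool,
) {
    if discarded_funding {
        pending.push_back((Ev::DiscardFunding { channel_id }, None));
    }
    // No post-event action is needed for a plain close.
    pending.push_back((Ev::ChannelClosed { channel_id }, None));
}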
- fn close_channel_internal(&self, channel_id: &[u8; 32], counterparty_node_id: &PublicKey, target_feerate_sats_per_1000_weight: Option<u32>) -> Result<(), APIError> {
- let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
+ fn close_channel_internal(&self, channel_id: &[u8; 32], counterparty_node_id: &PublicKey, target_feerate_sats_per_1000_weight: Option<u32>, override_shutdown_script: Option<ShutdownScript>) -> Result<(), APIError> {
+ let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
let mut failed_htlcs: Vec<(HTLCSource, PaymentHash)>;
let result: Result<(), _> = loop {
let peer_state = &mut *peer_state_lock;
match peer_state.channel_by_id.entry(channel_id.clone()) {
hash_map::Entry::Occupied(mut chan_entry) => {
- let funding_txo_opt = chan_entry.get().get_funding_txo();
+ let funding_txo_opt = chan_entry.get().context.get_funding_txo();
let their_features = &peer_state.latest_features;
let (shutdown_msg, mut monitor_update_opt, htlcs) = chan_entry.get_mut()
- .get_shutdown(&self.signer_provider, their_features, target_feerate_sats_per_1000_weight)?;
+ .get_shutdown(&self.signer_provider, their_features, target_feerate_sats_per_1000_weight, override_shutdown_script)?;
failed_htlcs = htlcs;
// We can send the `shutdown` message before updating the `ChannelMonitor`
// Update the monitor with the shutdown script if necessary.
if let Some(monitor_update) = monitor_update_opt.take() {
- let update_id = monitor_update.update_id;
- let update_res = self.chain_monitor.update_channel(funding_txo_opt.unwrap(), monitor_update);
- break handle_new_monitor_update!(self, update_res, update_id, peer_state_lock, peer_state, per_peer_state, chan_entry);
+ break handle_new_monitor_update!(self, funding_txo_opt.unwrap(), monitor_update,
+ peer_state_lock, peer_state, per_peer_state, chan_entry).map(|_| ());
}
if chan_entry.get().is_shutdown() {
msg: channel_update
});
}
- self.issue_channel_close_events(&channel, ClosureReason::HolderForceClosed);
+ self.issue_channel_close_events(&channel.context, ClosureReason::HolderForceClosed);
}
break Ok(());
},
/// would appear on a force-closure transaction, whichever is lower. We will allow our
/// counterparty to pay as much fee as they'd like, however.
///
- /// May generate a SendShutdown message event on success, which should be relayed.
+ /// May generate a [`SendShutdown`] message event on success, which should be relayed.
+ ///
+ /// Raises [`APIError::ChannelUnavailable`] if the channel cannot be closed due to failing to
+ /// generate a shutdown scriptpubkey or destination script set by
+ /// [`SignerProvider::get_shutdown_scriptpubkey`]. A force-closure may be needed to close the
+ /// channel.
///
/// [`ChannelConfig::force_close_avoidance_max_fee_satoshis`]: crate::util::config::ChannelConfig::force_close_avoidance_max_fee_satoshis
/// [`Background`]: crate::chain::chaininterface::ConfirmationTarget::Background
/// [`Normal`]: crate::chain::chaininterface::ConfirmationTarget::Normal
+ /// [`SendShutdown`]: crate::events::MessageSendEvent::SendShutdown
pub fn close_channel(&self, channel_id: &[u8; 32], counterparty_node_id: &PublicKey) -> Result<(), APIError> {
- self.close_channel_internal(channel_id, counterparty_node_id, None)
+ self.close_channel_internal(channel_id, counterparty_node_id, None, None)
}
/// Begins the process of closing a channel. After this call (plus some timeout), no new HTLCs
/// transaction feerate below `target_feerate_sat_per_1000_weight` (or the feerate which
/// will appear on a force-closure transaction, whichever is lower).
///
- /// May generate a SendShutdown message event on success, which should be relayed.
+ /// The `shutdown_script` provided will be used as the `scriptPubKey` for the closing transaction.
+ /// Will fail if a shutdown script has already been set for this channel by
+ /// [`ChannelHandshakeConfig::commit_upfront_shutdown_pubkey`]. The given shutdown script must
+ /// also be compatible with our and the counterparty's features.
+ ///
+ /// May generate a [`SendShutdown`] message event on success, which should be relayed.
+ ///
+ /// Raises [`APIError::ChannelUnavailable`] if the channel cannot be closed due to failing to
+ /// generate a shutdown scriptpubkey or destination script set by
+ /// [`SignerProvider::get_shutdown_scriptpubkey`]. A force-closure may be needed to close the
+ /// channel.
///
/// [`ChannelConfig::force_close_avoidance_max_fee_satoshis`]: crate::util::config::ChannelConfig::force_close_avoidance_max_fee_satoshis
/// [`Background`]: crate::chain::chaininterface::ConfirmationTarget::Background
/// [`Normal`]: crate::chain::chaininterface::ConfirmationTarget::Normal
- pub fn close_channel_with_target_feerate(&self, channel_id: &[u8; 32], counterparty_node_id: &PublicKey, target_feerate_sats_per_1000_weight: u32) -> Result<(), APIError> {
- self.close_channel_internal(channel_id, counterparty_node_id, Some(target_feerate_sats_per_1000_weight))
+ /// [`SendShutdown`]: crate::events::MessageSendEvent::SendShutdown
+ pub fn close_channel_with_feerate_and_script(&self, channel_id: &[u8; 32], counterparty_node_id: &PublicKey, target_feerate_sats_per_1000_weight: Option<u32>, shutdown_script: Option<ShutdownScript>) -> Result<(), APIError> {
+ self.close_channel_internal(channel_id, counterparty_node_id, target_feerate_sats_per_1000_weight, shutdown_script)
}
#[inline]
let receiver = HTLCDestination::NextHopChannel { node_id: Some(counterparty_node_id), channel_id };
self.fail_htlc_backwards_internal(&source, &payment_hash, &reason, receiver);
}
- if let Some((funding_txo, monitor_update)) = monitor_update_option {
+ if let Some((_, funding_txo, monitor_update)) = monitor_update_option {
// There isn't anything we can do if we get an update failure - we're already
// force-closing. The monitor update on the required in-memory copy should broadcast
// the latest local state, which is the best we can do anyway. Thus, it is safe to
let per_peer_state = self.per_peer_state.read().unwrap();
let peer_state_mutex = per_peer_state.get(peer_node_id)
.ok_or_else(|| APIError::ChannelUnavailable { err: format!("Can't find a peer matching the passed counterparty node_id {}", peer_node_id) })?;
- let mut chan = {
+ let (update_opt, counterparty_node_id) = {
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state = &mut *peer_state_lock;
+ let closure_reason = if let Some(peer_msg) = peer_msg {
+ ClosureReason::CounterpartyForceClosed { peer_msg: UntrustedString(peer_msg.to_string()) }
+ } else {
+ ClosureReason::HolderForceClosed
+ };
if let hash_map::Entry::Occupied(chan) = peer_state.channel_by_id.entry(channel_id.clone()) {
- if let Some(peer_msg) = peer_msg {
- self.issue_channel_close_events(chan.get(),ClosureReason::CounterpartyForceClosed { peer_msg: peer_msg.to_string() });
- } else {
- self.issue_channel_close_events(chan.get(),ClosureReason::HolderForceClosed);
- }
- remove_channel!(self, chan)
+ log_error!(self.logger, "Force-closing channel {}", log_bytes!(channel_id[..]));
+ self.issue_channel_close_events(&chan.get().context, closure_reason);
+ let mut chan = remove_channel!(self, chan);
+ self.finish_force_close_channel(chan.context.force_shutdown(broadcast));
+ (self.get_channel_update_for_broadcast(&chan).ok(), chan.context.get_counterparty_node_id())
+ } else if let hash_map::Entry::Occupied(chan) = peer_state.outbound_v1_channel_by_id.entry(channel_id.clone()) {
+ log_error!(self.logger, "Force-closing channel {}", log_bytes!(channel_id[..]));
+ self.issue_channel_close_events(&chan.get().context, closure_reason);
+ let mut chan = remove_channel!(self, chan);
+ self.finish_force_close_channel(chan.context.force_shutdown(false));
+ // Prefunded channel has no update
+ (None, chan.context.get_counterparty_node_id())
+ } else if let hash_map::Entry::Occupied(chan) = peer_state.inbound_v1_channel_by_id.entry(channel_id.clone()) {
+ log_error!(self.logger, "Force-closing channel {}", log_bytes!(channel_id[..]));
+ self.issue_channel_close_events(&chan.get().context, closure_reason);
+ let mut chan = remove_channel!(self, chan);
+ self.finish_force_close_channel(chan.context.force_shutdown(false));
+ // Prefunded channel has no update
+ (None, chan.context.get_counterparty_node_id())
} else {
return Err(APIError::ChannelUnavailable{ err: format!("Channel with id {} not found for the passed counterparty node_id {}", log_bytes!(*channel_id), peer_node_id) });
}
};
- log_error!(self.logger, "Force-closing channel {}", log_bytes!(channel_id[..]));
- self.finish_force_close_channel(chan.force_shutdown(broadcast));
- if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
+ if let Some(update) = update_opt {
let mut peer_state = peer_state_mutex.lock().unwrap();
peer_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
msg: update
});
}
- Ok(chan.get_counterparty_node_id())
+ Ok(counterparty_node_id)
}
fn force_close_sending_error(&self, channel_id: &[u8; 32], counterparty_node_id: &PublicKey, broadcast: bool) -> Result<(), APIError> {
- let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
+ let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
match self.force_close_channel_with_peer(channel_id, counterparty_node_id, None, broadcast) {
Ok(counterparty_node_id) => {
let per_peer_state = self.per_peer_state.read().unwrap();
}
}
- fn construct_recv_pending_htlc_info(&self, hop_data: msgs::OnionHopData, shared_secret: [u8; 32],
- payment_hash: PaymentHash, amt_msat: u64, cltv_expiry: u32, phantom_shared_secret: Option<[u8; 32]>) -> Result<PendingHTLCInfo, ReceiveError>
- {
+ fn construct_recv_pending_htlc_info(
+ &self, hop_data: msgs::OnionHopData, shared_secret: [u8; 32], payment_hash: PaymentHash,
+ amt_msat: u64, cltv_expiry: u32, phantom_shared_secret: Option<[u8; 32]>, allow_underpay: bool,
+ counterparty_skimmed_fee_msat: Option<u64>,
+ ) -> Result<PendingHTLCInfo, ReceiveError> {
// final_incorrect_cltv_expiry
- if hop_data.outgoing_cltv_value != cltv_expiry {
+ if hop_data.outgoing_cltv_value > cltv_expiry {
return Err(ReceiveError {
- msg: "Upstream node set CLTV to the wrong value",
+ msg: "Upstream node set CLTV to less than the CLTV set by the sender",
err_code: 18,
err_data: cltv_expiry.to_be_bytes().to_vec()
})
msg: "The final CLTV expiry is too soon to handle",
});
}
- if hop_data.amt_to_forward > amt_msat {
+ if (!allow_underpay && hop_data.amt_to_forward > amt_msat) ||
+ (allow_underpay && hop_data.amt_to_forward >
+ amt_msat.saturating_add(counterparty_skimmed_fee_msat.unwrap_or(0)))
+ {
return Err(ReceiveError {
err_code: 19,
err_data: amt_msat.to_be_bytes().to_vec(),
msg: "Got non final data with an HMAC of 0",
});
},
- msgs::OnionHopDataFormat::FinalNode { payment_data, keysend_preimage } => {
- if payment_data.is_some() && keysend_preimage.is_some() {
- return Err(ReceiveError {
- err_code: 0x4000|22,
- err_data: Vec::new(),
- msg: "We don't support MPP keysend payments",
- });
- } else if let Some(data) = payment_data {
- PendingHTLCRouting::Receive {
- payment_data: data,
- incoming_cltv_expiry: hop_data.outgoing_cltv_value,
- phantom_shared_secret,
- }
- } else if let Some(payment_preimage) = keysend_preimage {
+ msgs::OnionHopDataFormat::FinalNode { payment_data, keysend_preimage, payment_metadata } => {
+ if let Some(payment_preimage) = keysend_preimage {
// We need to check that the sender knows the keysend preimage before processing this
// payment further. Otherwise, an intermediary routing hop forwarding non-keysend-HTLC X
// could discover the final destination of X, by probing the adjacent nodes on the route
msg: "Payment preimage didn't match payment hash",
});
}
-
+ if !self.default_configuration.accept_mpp_keysend && payment_data.is_some() {
+ return Err(ReceiveError {
+ err_code: 0x4000|22,
+ err_data: Vec::new(),
+ msg: "We don't support MPP keysend payments",
+ });
+ }
PendingHTLCRouting::ReceiveKeysend {
+ payment_data,
payment_preimage,
+ payment_metadata,
+ incoming_cltv_expiry: hop_data.outgoing_cltv_value,
+ }
+ } else if let Some(data) = payment_data {
+ PendingHTLCRouting::Receive {
+ payment_data: data,
+ payment_metadata,
incoming_cltv_expiry: hop_data.outgoing_cltv_value,
+ phantom_shared_secret,
}
} else {
return Err(ReceiveError {
payment_hash,
incoming_shared_secret: shared_secret,
incoming_amt_msat: Some(amt_msat),
- outgoing_amt_msat: amt_msat,
+ outgoing_amt_msat: hop_data.amt_to_forward,
outgoing_cltv_value: hop_data.outgoing_cltv_value,
+ skimmed_fee_msat: counterparty_skimmed_fee_msat,
})
}
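
// The amount check above only tolerates an "underpaid" HTLC up to the fee the
// counterparty skimmed. A standalone sketch of that predicate, returning true
// when the inbound amount is acceptable:
fn amt_acceptable(
    amt_to_forward: u64, amt_msat: u64, allow_underpay: bool,
    counterparty_skimmed_fee_msat: Option<u64>,
) -> bool {
    if allow_underpay {
        amt_to_forward <= amt_msat.saturating_add(counterparty_skimmed_fee_msat.unwrap_or(0))
    } else {
        amt_to_forward <= amt_msat
    }
}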
- fn decode_update_add_htlc_onion(&self, msg: &msgs::UpdateAddHTLC) -> PendingHTLCStatus {
+ fn decode_update_add_htlc_onion(
+ &self, msg: &msgs::UpdateAddHTLC
+ ) -> Result<(onion_utils::Hop, [u8; 32], Option<Result<PublicKey, secp256k1::Error>>), HTLCFailureMsg> {
macro_rules! return_malformed_err {
($msg: expr, $err_code: expr) => {
{
log_info!(self.logger, "Failed to accept/forward incoming HTLC: {}", $msg);
- return PendingHTLCStatus::Fail(HTLCFailureMsg::Malformed(msgs::UpdateFailMalformedHTLC {
+ return Err(HTLCFailureMsg::Malformed(msgs::UpdateFailMalformedHTLC {
channel_id: msg.channel_id,
htlc_id: msg.htlc_id,
sha256_of_onion: Sha256::hash(&msg.onion_routing_packet.hop_data).into_inner(),
($msg: expr, $err_code: expr, $data: expr) => {
{
log_info!(self.logger, "Failed to accept/forward incoming HTLC: {}", $msg);
- return PendingHTLCStatus::Fail(HTLCFailureMsg::Relay(msgs::UpdateFailHTLC {
+ return Err(HTLCFailureMsg::Relay(msgs::UpdateFailHTLC {
channel_id: msg.channel_id,
htlc_id: msg.htlc_id,
reason: HTLCFailReason::reason($err_code, $data.to_vec())
return_err!(err_msg, err_code, &[0; 0]);
},
};
+ let (outgoing_scid, outgoing_amt_msat, outgoing_cltv_value, next_packet_pk_opt) = match next_hop {
+ onion_utils::Hop::Forward {
+ next_hop_data: msgs::OnionHopData {
+ format: msgs::OnionHopDataFormat::NonFinalNode { short_channel_id }, amt_to_forward,
+ outgoing_cltv_value,
+ }, ..
+ } => {
+ let next_pk = onion_utils::next_hop_packet_pubkey(&self.secp_ctx,
+ msg.onion_routing_packet.public_key.unwrap(), &shared_secret);
+ (short_channel_id, amt_to_forward, outgoing_cltv_value, Some(next_pk))
+ },
+ // We'll do receive checks in [`Self::construct_pending_htlc_info`] so we have access to the
+ // inbound channel's state.
+ onion_utils::Hop::Receive { .. } => return Ok((next_hop, shared_secret, None)),
+ onion_utils::Hop::Forward {
+ next_hop_data: msgs::OnionHopData { format: msgs::OnionHopDataFormat::FinalNode { .. }, .. }, ..
+ } => {
+ return_err!("Final Node OnionHopData provided for us as an intermediary node", 0x4000 | 22, &[0; 0]);
+ }
+ };
- let pending_forward_info = match next_hop {
- onion_utils::Hop::Receive(next_hop_data) => {
- // OUR PAYMENT!
- match self.construct_recv_pending_htlc_info(next_hop_data, shared_secret, msg.payment_hash, msg.amount_msat, msg.cltv_expiry, None) {
- Ok(info) => {
- // Note that we could obviously respond immediately with an update_fulfill_htlc
- // message, however that would leak that we are the recipient of this payment, so
- // instead we stay symmetric with the forwarding case, only responding (after a
- // delay) once they've send us a commitment_signed!
- PendingHTLCStatus::Forward(info)
- },
- Err(ReceiveError { err_code, err_data, msg }) => return_err!(msg, err_code, &err_data)
+ // Perform outbound checks here instead of in [`Self::construct_pending_htlc_info`] because we
+ // can't hold the outbound peer state lock at the same time as the inbound peer state lock.
+ if let Some((err, mut code, chan_update)) = loop {
+ let id_option = self.short_to_chan_info.read().unwrap().get(&outgoing_scid).cloned();
+ let forwarding_chan_info_opt = match id_option {
+ None => { // unknown_next_peer
+ // Note that this is likely a timing oracle for detecting whether an scid is a
+ // phantom or an intercept.
+ if (self.default_configuration.accept_intercept_htlcs &&
+ fake_scid::is_valid_intercept(&self.fake_scid_rand_bytes, outgoing_scid, &self.genesis_hash)) ||
+ fake_scid::is_valid_phantom(&self.fake_scid_rand_bytes, outgoing_scid, &self.genesis_hash)
+ {
+ None
+ } else {
+ break Some(("Don't have available channel for forwarding as requested.", 0x4000 | 10, None));
+ }
+ },
+ Some((cp_id, id)) => Some((cp_id.clone(), id.clone())),
+ };
+ let chan_update_opt = if let Some((counterparty_node_id, forwarding_id)) = forwarding_chan_info_opt {
+ let per_peer_state = self.per_peer_state.read().unwrap();
+ let peer_state_mutex_opt = per_peer_state.get(&counterparty_node_id);
+ if peer_state_mutex_opt.is_none() {
+ break Some(("Don't have available channel for forwarding as requested.", 0x4000 | 10, None));
}
- },
- onion_utils::Hop::Forward { next_hop_data, next_hop_hmac, new_packet_bytes } => {
- let new_pubkey = msg.onion_routing_packet.public_key.unwrap();
- let outgoing_packet = msgs::OnionPacket {
- version: 0,
- public_key: onion_utils::next_hop_packet_pubkey(&self.secp_ctx, new_pubkey, &shared_secret),
- hop_data: new_packet_bytes,
- hmac: next_hop_hmac.clone(),
+ let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
+ let peer_state = &mut *peer_state_lock;
+ let chan = match peer_state.channel_by_id.get_mut(&forwarding_id) {
+ None => {
+ // Channel was removed. The short_to_chan_info and channel_by_id maps
+ // have no consistency guarantees.
+ break Some(("Don't have available channel for forwarding as requested.", 0x4000 | 10, None));
+ },
+ Some(chan) => chan
};
-
- let short_channel_id = match next_hop_data.format {
- msgs::OnionHopDataFormat::NonFinalNode { short_channel_id } => short_channel_id,
+ if !chan.context.should_announce() && !self.default_configuration.accept_forwards_to_priv_channels {
+ // Note that the behavior here should be identical to the above block - we
+ // should NOT reveal the existence or non-existence of a private channel if
+ // we don't allow forwards outbound over them.
+ break Some(("Refusing to forward to a private channel based on our config.", 0x4000 | 10, None));
+ }
+ if chan.context.get_channel_type().supports_scid_privacy() && outgoing_scid != chan.context.outbound_scid_alias() {
+ // `option_scid_alias` (referred to in LDK as `scid_privacy`) means
+ // "refuse to forward unless the SCID alias was used", so we pretend
+ // we don't have the channel here.
+ break Some(("Refusing to forward over real channel SCID as our counterparty requested.", 0x4000 | 10, None));
+ }
+ let chan_update_opt = self.get_channel_update_for_onion(outgoing_scid, chan).ok();
+
+ // Note that we could technically not return an error yet here and just hope
+ // that the connection is reestablished or monitor updated by the time we get
+ // around to doing the actual forward, but better to fail early if we can and
+ // hopefully an attacker trying to path-trace payments cannot make this occur
+ // on a small/per-node/per-channel scale.
+ if !chan.context.is_live() { // channel_disabled
+ // If the channel_update we're going to return is disabled (i.e. the
+ // peer has been disabled for some time), return `channel_disabled`,
+ // otherwise return `temporary_channel_failure`.
+ if chan_update_opt.as_ref().map(|u| u.contents.flags & 2 == 2).unwrap_or(false) {
+ break Some(("Forwarding channel has been disconnected for some time.", 0x1000 | 20, chan_update_opt));
+ } else {
+ break Some(("Forwarding channel is not in a ready state.", 0x1000 | 7, chan_update_opt));
+ }
+ }
+ if outgoing_amt_msat < chan.context.get_counterparty_htlc_minimum_msat() { // amount_below_minimum
+ break Some(("HTLC amount was below the htlc_minimum_msat", 0x1000 | 11, chan_update_opt));
+ }
+ if let Err((err, code)) = chan.htlc_satisfies_config(&msg, outgoing_amt_msat, outgoing_cltv_value) {
+ break Some((err, code, chan_update_opt));
+ }
+ chan_update_opt
+ } else {
+ if (msg.cltv_expiry as u64) < (outgoing_cltv_value) as u64 + MIN_CLTV_EXPIRY_DELTA as u64 {
+ // We really should set `incorrect_cltv_expiry` here but as we're not
+ // forwarding over a real channel we can't generate a channel_update
+ // for it. Instead we just return a generic temporary_node_failure.
+ break Some((
+ "Forwarding node has tampered with the intended HTLC values or origin node has an obsolete cltv_expiry_delta",
+ 0x2000 | 2, None,
+ ));
+ }
+ None
+ };
+
+ let cur_height = self.best_block.read().unwrap().height() + 1;
+ // Theoretically, the channel counterparty shouldn't send us an HTLC expiring now,
+ // but we want to be robust wrt counterparty packet sanitization (see
+ // HTLC_FAIL_BACK_BUFFER rationale).
+ if msg.cltv_expiry <= cur_height + HTLC_FAIL_BACK_BUFFER as u32 { // expiry_too_soon
+ break Some(("CLTV expiry is too close", 0x1000 | 14, chan_update_opt));
+ }
+ if msg.cltv_expiry > cur_height + CLTV_FAR_FAR_AWAY as u32 { // expiry_too_far
+ break Some(("CLTV expiry is too far in the future", 21, None));
+ }
+ // If the HTLC expires ~now, don't bother trying to forward it to our
+ // counterparty. They should fail it anyway, but we don't want to bother with
+ // the round-trips or risk them deciding they definitely want the HTLC and
+ // force-closing to ensure they get it if we're offline.
+ // We previously had a much more aggressive check here which tried to ensure
+ // our counterparty receives an HTLC which has *our* risk threshold met on it,
+ // but there is no need to do that, and since we're a bit conservative with our
+ // risk threshold it just results in failing to forward payments.
+ if (outgoing_cltv_value) as u64 <= (cur_height + LATENCY_GRACE_PERIOD_BLOCKS) as u64 {
+ break Some(("Outgoing CLTV value is too soon", 0x1000 | 14, chan_update_opt));
+ }
+
+ break None;
+ }
+ {
+ let mut res = VecWriter(Vec::with_capacity(chan_update.serialized_length() + 2 + 8 + 2));
+ if let Some(chan_update) = chan_update {
+ if code == 0x1000 | 11 || code == 0x1000 | 12 {
+ msg.amount_msat.write(&mut res).expect("Writes cannot fail");
+ }
+ else if code == 0x1000 | 13 {
+ msg.cltv_expiry.write(&mut res).expect("Writes cannot fail");
+ }
+ else if code == 0x1000 | 20 {
+ // TODO: underspecified, follow https://github.com/lightning/bolts/issues/791
+ 0u16.write(&mut res).expect("Writes cannot fail");
+ }
+ (chan_update.serialized_length() as u16 + 2).write(&mut res).expect("Writes cannot fail");
+ msgs::ChannelUpdate::TYPE.write(&mut res).expect("Writes cannot fail");
+ chan_update.write(&mut res).expect("Writes cannot fail");
+ } else if code & 0x1000 == 0x1000 {
+ // If we're trying to return an error that requires a `channel_update` but
+ // we're forwarding to a phantom or intercept "channel" (i.e. cannot
+ // generate an update), just use the generic "temporary_node_failure"
+ // instead.
+ code = 0x2000 | 2;
+ }
+ return_err!(err, code, &res.0[..]);
+ }
+ Ok((next_hop, shared_secret, next_packet_pk_opt))
+ }
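
// The failure codes assembled above follow BOLT 4's flag scheme: 0x8000
// (BADONION), 0x4000 (PERM), 0x2000 (NODE) and 0x1000 (UPDATE, meaning a
// `channel_update` should accompany the failure), OR'd with a per-error
// ordinal. A sketch of the bit classes this function uses:
const PERM: u16 = 0x4000;
const NODE: u16 = 0x2000;
const UPDATE: u16 = 0x1000;

fn needs_channel_update(code: u16) -> bool {
    // Mirrors the `code & 0x1000 == 0x1000` fallback check above.
    code & UPDATE == UPDATE
}

fn check_codes() {
    assert!(needs_channel_update(UPDATE | 14));  // expiry_too_soon
    assert!(!needs_channel_update(PERM | 10));   // unknown_next_peer
    assert!(!needs_channel_update(NODE | 2));    // temporary_node_failure fallback
}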
+
+ fn construct_pending_htlc_status<'a>(
+ &self, msg: &msgs::UpdateAddHTLC, shared_secret: [u8; 32], decoded_hop: onion_utils::Hop,
+ allow_underpay: bool, next_packet_pubkey_opt: Option<Result<PublicKey, secp256k1::Error>>
+ ) -> PendingHTLCStatus {
+ macro_rules! return_err {
+ ($msg: expr, $err_code: expr, $data: expr) => {
+ {
+ log_info!(self.logger, "Failed to accept/forward incoming HTLC: {}", $msg);
+ return PendingHTLCStatus::Fail(HTLCFailureMsg::Relay(msgs::UpdateFailHTLC {
+ channel_id: msg.channel_id,
+ htlc_id: msg.htlc_id,
+ reason: HTLCFailReason::reason($err_code, $data.to_vec())
+ .get_encrypted_failure_packet(&shared_secret, &None),
+ }));
+ }
+ }
+ }
+ match decoded_hop {
+ onion_utils::Hop::Receive(next_hop_data) => {
+ // OUR PAYMENT!
+ match self.construct_recv_pending_htlc_info(next_hop_data, shared_secret, msg.payment_hash,
+ msg.amount_msat, msg.cltv_expiry, None, allow_underpay, msg.skimmed_fee_msat)
+ {
+ Ok(info) => {
+ // Note that we could obviously respond immediately with an update_fulfill_htlc
+ // message, however that would leak that we are the recipient of this payment, so
+ // instead we stay symmetric with the forwarding case, only responding (after a
+ // delay) once they've sent us a commitment_signed!
+ PendingHTLCStatus::Forward(info)
+ },
+ Err(ReceiveError { err_code, err_data, msg }) => return_err!(msg, err_code, &err_data)
+ }
+ },
+ onion_utils::Hop::Forward { next_hop_data, next_hop_hmac, new_packet_bytes } => {
+ debug_assert!(next_packet_pubkey_opt.is_some());
+ let outgoing_packet = msgs::OnionPacket {
+ version: 0,
+ public_key: next_packet_pubkey_opt.unwrap_or(Err(secp256k1::Error::InvalidPublicKey)),
+ hop_data: new_packet_bytes,
+ hmac: next_hop_hmac.clone(),
+ };
+
+ let short_channel_id = match next_hop_data.format {
+ msgs::OnionHopDataFormat::NonFinalNode { short_channel_id } => short_channel_id,
msgs::OnionHopDataFormat::FinalNode { .. } => {
return_err!("Final Node OnionHopData provided for us as an intermediary node", 0x4000 | 22, &[0;0]);
},
incoming_amt_msat: Some(msg.amount_msat),
outgoing_amt_msat: next_hop_data.amt_to_forward,
outgoing_cltv_value: next_hop_data.outgoing_cltv_value,
+ skimmed_fee_msat: None,
})
}
- };
-
- if let &PendingHTLCStatus::Forward(PendingHTLCInfo { ref routing, ref outgoing_amt_msat, ref outgoing_cltv_value, .. }) = &pending_forward_info {
- // If short_channel_id is 0 here, we'll reject the HTLC as there cannot be a channel
- // with a short_channel_id of 0. This is important as various things later assume
- // short_channel_id is non-0 in any ::Forward.
- if let &PendingHTLCRouting::Forward { ref short_channel_id, .. } = routing {
- if let Some((err, mut code, chan_update)) = loop {
- let id_option = self.short_to_chan_info.read().unwrap().get(short_channel_id).cloned();
- let forwarding_chan_info_opt = match id_option {
- None => { // unknown_next_peer
- // Note that this is likely a timing oracle for detecting whether an scid is a
- // phantom or an intercept.
- if (self.default_configuration.accept_intercept_htlcs &&
- fake_scid::is_valid_intercept(&self.fake_scid_rand_bytes, *short_channel_id, &self.genesis_hash)) ||
- fake_scid::is_valid_phantom(&self.fake_scid_rand_bytes, *short_channel_id, &self.genesis_hash)
- {
- None
- } else {
- break Some(("Don't have available channel for forwarding as requested.", 0x4000 | 10, None));
- }
- },
- Some((cp_id, id)) => Some((cp_id.clone(), id.clone())),
- };
- let chan_update_opt = if let Some((counterparty_node_id, forwarding_id)) = forwarding_chan_info_opt {
- let per_peer_state = self.per_peer_state.read().unwrap();
- let peer_state_mutex_opt = per_peer_state.get(&counterparty_node_id);
- if peer_state_mutex_opt.is_none() {
- break Some(("Don't have available channel for forwarding as requested.", 0x4000 | 10, None));
- }
- let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
- let peer_state = &mut *peer_state_lock;
- let chan = match peer_state.channel_by_id.get_mut(&forwarding_id) {
- None => {
- // Channel was removed. The short_to_chan_info and channel_by_id maps
- // have no consistency guarantees.
- break Some(("Don't have available channel for forwarding as requested.", 0x4000 | 10, None));
- },
- Some(chan) => chan
- };
- if !chan.should_announce() && !self.default_configuration.accept_forwards_to_priv_channels {
- // Note that the behavior here should be identical to the above block - we
- // should NOT reveal the existence or non-existence of a private channel if
- // we don't allow forwards outbound over them.
- break Some(("Refusing to forward to a private channel based on our config.", 0x4000 | 10, None));
- }
- if chan.get_channel_type().supports_scid_privacy() && *short_channel_id != chan.outbound_scid_alias() {
- // `option_scid_alias` (referred to in LDK as `scid_privacy`) means
- // "refuse to forward unless the SCID alias was used", so we pretend
- // we don't have the channel here.
- break Some(("Refusing to forward over real channel SCID as our counterparty requested.", 0x4000 | 10, None));
- }
- let chan_update_opt = self.get_channel_update_for_onion(*short_channel_id, chan).ok();
-
- // Note that we could technically not return an error yet here and just hope
- // that the connection is reestablished or monitor updated by the time we get
- // around to doing the actual forward, but better to fail early if we can and
- // hopefully an attacker trying to path-trace payments cannot make this occur
- // on a small/per-node/per-channel scale.
- if !chan.is_live() { // channel_disabled
- break Some(("Forwarding channel is not in a ready state.", 0x1000 | 20, chan_update_opt));
- }
- if *outgoing_amt_msat < chan.get_counterparty_htlc_minimum_msat() { // amount_below_minimum
- break Some(("HTLC amount was below the htlc_minimum_msat", 0x1000 | 11, chan_update_opt));
- }
- if let Err((err, code)) = chan.htlc_satisfies_config(&msg, *outgoing_amt_msat, *outgoing_cltv_value) {
- break Some((err, code, chan_update_opt));
- }
- chan_update_opt
- } else {
- if (msg.cltv_expiry as u64) < (*outgoing_cltv_value) as u64 + MIN_CLTV_EXPIRY_DELTA as u64 {
- // We really should set `incorrect_cltv_expiry` here but as we're not
- // forwarding over a real channel we can't generate a channel_update
- // for it. Instead we just return a generic temporary_node_failure.
- break Some((
- "Forwarding node has tampered with the intended HTLC values or origin node has an obsolete cltv_expiry_delta",
- 0x2000 | 2, None,
- ));
- }
- None
- };
-
- let cur_height = self.best_block.read().unwrap().height() + 1;
- // Theoretically, channel counterparty shouldn't send us a HTLC expiring now,
- // but we want to be robust wrt to counterparty packet sanitization (see
- // HTLC_FAIL_BACK_BUFFER rationale).
- if msg.cltv_expiry <= cur_height + HTLC_FAIL_BACK_BUFFER as u32 { // expiry_too_soon
- break Some(("CLTV expiry is too close", 0x1000 | 14, chan_update_opt));
- }
- if msg.cltv_expiry > cur_height + CLTV_FAR_FAR_AWAY as u32 { // expiry_too_far
- break Some(("CLTV expiry is too far in the future", 21, None));
- }
- // If the HTLC expires ~now, don't bother trying to forward it to our
- // counterparty. They should fail it anyway, but we don't want to bother with
- // the round-trips or risk them deciding they definitely want the HTLC and
- // force-closing to ensure they get it if we're offline.
- // We previously had a much more aggressive check here which tried to ensure
- // our counterparty receives an HTLC which has *our* risk threshold met on it,
- // but there is no need to do that, and since we're a bit conservative with our
- // risk threshold it just results in failing to forward payments.
- if (*outgoing_cltv_value) as u64 <= (cur_height + LATENCY_GRACE_PERIOD_BLOCKS) as u64 {
- break Some(("Outgoing CLTV value is too soon", 0x1000 | 14, chan_update_opt));
- }
-
- break None;
- }
- {
- let mut res = VecWriter(Vec::with_capacity(chan_update.serialized_length() + 2 + 8 + 2));
- if let Some(chan_update) = chan_update {
- if code == 0x1000 | 11 || code == 0x1000 | 12 {
- msg.amount_msat.write(&mut res).expect("Writes cannot fail");
- }
- else if code == 0x1000 | 13 {
- msg.cltv_expiry.write(&mut res).expect("Writes cannot fail");
- }
- else if code == 0x1000 | 20 {
- // TODO: underspecified, follow https://github.com/lightning/bolts/issues/791
- 0u16.write(&mut res).expect("Writes cannot fail");
- }
- (chan_update.serialized_length() as u16 + 2).write(&mut res).expect("Writes cannot fail");
- msgs::ChannelUpdate::TYPE.write(&mut res).expect("Writes cannot fail");
- chan_update.write(&mut res).expect("Writes cannot fail");
- } else if code & 0x1000 == 0x1000 {
- // If we're trying to return an error that requires a `channel_update` but
- // we're forwarding to a phantom or intercept "channel" (i.e. cannot
- // generate an update), just use the generic "temporary_node_failure"
- // instead.
- code = 0x2000 | 2;
- }
- return_err!(err, code, &res.0[..]);
- }
- }
}
-
- pending_forward_info
}
- /// Gets the current channel_update for the given channel. This first checks if the channel is
+ /// Gets the current [`channel_update`] for the given channel. This first checks if the channel is
/// public, and thus should be called whenever the result is going to be passed out in a
/// [`MessageSendEvent::BroadcastChannelUpdate`] event.
///
- /// Note that in `internal_closing_signed`, this function is called without the `peer_state`
+ /// Note that in [`internal_closing_signed`], this function is called without the `peer_state`
/// corresponding to the channel's counterparty locked, as the channel has been removed from the
/// storage and the `peer_state` lock has been dropped.
+ ///
+ /// [`channel_update`]: msgs::ChannelUpdate
+ /// [`internal_closing_signed`]: Self::internal_closing_signed
fn get_channel_update_for_broadcast(&self, chan: &Channel<<SP::Target as SignerProvider>::Signer>) -> Result<msgs::ChannelUpdate, LightningError> {
- if !chan.should_announce() {
+ if !chan.context.should_announce() {
return Err(LightningError {
err: "Cannot broadcast a channel_update for a private channel".to_owned(),
action: msgs::ErrorAction::IgnoreError
});
}
- if chan.get_short_channel_id().is_none() {
+ if chan.context.get_short_channel_id().is_none() {
return Err(LightningError{err: "Channel not yet established".to_owned(), action: msgs::ErrorAction::IgnoreError});
}
- log_trace!(self.logger, "Attempting to generate broadcast channel update for channel {}", log_bytes!(chan.channel_id()));
+ log_trace!(self.logger, "Attempting to generate broadcast channel update for channel {}", log_bytes!(chan.context.channel_id()));
self.get_channel_update_for_unicast(chan)
}
- /// Gets the current channel_update for the given channel. This does not check if the channel
- /// is public (only returning an Err if the channel does not yet have an assigned short_id),
+ /// Gets the current [`channel_update`] for the given channel. This does not check if the channel
+ /// is public (only returning an `Err` if the channel does not yet have an assigned SCID),
/// and thus MUST NOT be called unless the recipient of the resulting message has already
/// provided evidence that they know about the existence of the channel.
///
- /// Note that through `internal_closing_signed`, this function is called without the
+ /// Note that through [`internal_closing_signed`], this function is called without the
/// `peer_state` corresponding to the channel's counterparty locked, as the channel has been
/// removed from the storage and the `peer_state` lock has been dropped.
+ ///
+ /// [`channel_update`]: msgs::ChannelUpdate
+ /// [`internal_closing_signed`]: Self::internal_closing_signed
fn get_channel_update_for_unicast(&self, chan: &Channel<<SP::Target as SignerProvider>::Signer>) -> Result<msgs::ChannelUpdate, LightningError> {
- log_trace!(self.logger, "Attempting to generate channel update for channel {}", log_bytes!(chan.channel_id()));
- let short_channel_id = match chan.get_short_channel_id().or(chan.latest_inbound_scid_alias()) {
+ log_trace!(self.logger, "Attempting to generate channel update for channel {}", log_bytes!(chan.context.channel_id()));
+ let short_channel_id = match chan.context.get_short_channel_id().or(chan.context.latest_inbound_scid_alias()) {
None => return Err(LightningError{err: "Channel not yet established".to_owned(), action: msgs::ErrorAction::IgnoreError}),
Some(id) => id,
};
self.get_channel_update_for_onion(short_channel_id, chan)
}
+
fn get_channel_update_for_onion(&self, short_channel_id: u64, chan: &Channel<<SP::Target as SignerProvider>::Signer>) -> Result<msgs::ChannelUpdate, LightningError> {
- log_trace!(self.logger, "Generating channel update for channel {}", log_bytes!(chan.channel_id()));
- let were_node_one = self.our_network_pubkey.serialize()[..] < chan.get_counterparty_node_id().serialize()[..];
+ log_trace!(self.logger, "Generating channel update for channel {}", log_bytes!(chan.context.channel_id()));
+ let were_node_one = self.our_network_pubkey.serialize()[..] < chan.context.get_counterparty_node_id().serialize()[..];
+
+ let enabled = chan.context.is_usable() && match chan.channel_update_status() {
+ ChannelUpdateStatus::Enabled => true,
+ ChannelUpdateStatus::DisabledStaged(_) => true,
+ ChannelUpdateStatus::Disabled => false,
+ ChannelUpdateStatus::EnabledStaged(_) => false,
+ };
let unsigned = msgs::UnsignedChannelUpdate {
chain_hash: self.genesis_hash,
short_channel_id,
- timestamp: chan.get_update_time_counter(),
- flags: (!were_node_one) as u8 | ((!chan.is_live() as u8) << 1),
- cltv_expiry_delta: chan.get_cltv_expiry_delta(),
- htlc_minimum_msat: chan.get_counterparty_htlc_minimum_msat(),
- htlc_maximum_msat: chan.get_announced_htlc_max_msat(),
- fee_base_msat: chan.get_outbound_forwarding_fee_base_msat(),
- fee_proportional_millionths: chan.get_fee_proportional_millionths(),
+ timestamp: chan.context.get_update_time_counter(),
+ flags: (!were_node_one) as u8 | ((!enabled as u8) << 1),
+ cltv_expiry_delta: chan.context.get_cltv_expiry_delta(),
+ htlc_minimum_msat: chan.context.get_counterparty_htlc_minimum_msat(),
+ htlc_maximum_msat: chan.context.get_announced_htlc_max_msat(),
+ fee_base_msat: chan.context.get_outbound_forwarding_fee_base_msat(),
+ fee_proportional_millionths: chan.context.get_fee_proportional_millionths(),
excess_data: Vec::new(),
};
// Panic on failure to signal LDK should be restarted to retry signing the `ChannelUpdate`.
}
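
// The `flags` byte built above packs two BOLT 7 bits: bit 0 is the direction
// (0 when we are `node_one` in lexicographic pubkey order) and bit 1 marks the
// channel disabled. A standalone sketch of that encoding:
fn channel_update_flags(were_node_one: bool, enabled: bool) -> u8 {
    (!were_node_one) as u8 | (((!enabled) as u8) << 1)
}

fn check_flags() {
    assert_eq!(channel_update_flags(true, true), 0b00);   // node_one, enabled
    assert_eq!(channel_update_flags(false, true), 0b01);  // node_two, enabled
    assert_eq!(channel_update_flags(true, false), 0b10);  // node_one, disabled
}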
#[cfg(test)]
- pub(crate) fn test_send_payment_along_path(&self, path: &Vec<RouteHop>, payment_params: &Option<PaymentParameters>, payment_hash: &PaymentHash, payment_secret: &Option<PaymentSecret>, total_value: u64, cur_height: u32, payment_id: PaymentId, keysend_preimage: &Option<PaymentPreimage>, session_priv_bytes: [u8; 32]) -> Result<(), APIError> {
+ pub(crate) fn test_send_payment_along_path(&self, path: &Path, payment_hash: &PaymentHash, recipient_onion: RecipientOnionFields, total_value: u64, cur_height: u32, payment_id: PaymentId, keysend_preimage: &Option<PaymentPreimage>, session_priv_bytes: [u8; 32]) -> Result<(), APIError> {
let _lck = self.total_consistency_lock.read().unwrap();
- self.send_payment_along_path(path, payment_params, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv_bytes)
+ self.send_payment_along_path(path, payment_hash, recipient_onion, total_value, cur_height, payment_id, keysend_preimage, session_priv_bytes)
}
- fn send_payment_along_path(&self, path: &Vec<RouteHop>, payment_params: &Option<PaymentParameters>, payment_hash: &PaymentHash, payment_secret: &Option<PaymentSecret>, total_value: u64, cur_height: u32, payment_id: PaymentId, keysend_preimage: &Option<PaymentPreimage>, session_priv_bytes: [u8; 32]) -> Result<(), APIError> {
+ fn send_payment_along_path(&self, path: &Path, payment_hash: &PaymentHash, recipient_onion: RecipientOnionFields, total_value: u64, cur_height: u32, payment_id: PaymentId, keysend_preimage: &Option<PaymentPreimage>, session_priv_bytes: [u8; 32]) -> Result<(), APIError> {
// The top-level caller should hold the total_consistency_lock read lock.
debug_assert!(self.total_consistency_lock.try_write().is_err());
- log_trace!(self.logger, "Attempting to send payment for path with next hop {}", path.first().unwrap().short_channel_id);
+ log_trace!(self.logger, "Attempting to send payment for path with next hop {}", path.hops.first().unwrap().short_channel_id);
let prng_seed = self.entropy_source.get_secure_random_bytes();
let session_priv = SecretKey::from_slice(&session_priv_bytes[..]).expect("RNG is busted");
let onion_keys = onion_utils::construct_onion_keys(&self.secp_ctx, &path, &session_priv)
.map_err(|_| APIError::InvalidRoute{err: "Pubkey along hop was maliciously selected".to_owned()})?;
- let (onion_payloads, htlc_msat, htlc_cltv) = onion_utils::build_onion_payloads(path, total_value, payment_secret, cur_height, keysend_preimage)?;
- if onion_utils::route_size_insane(&onion_payloads) {
- return Err(APIError::InvalidRoute{err: "Route size too large considering onion data".to_owned()});
- }
- let onion_packet = onion_utils::construct_onion_packet(onion_payloads, onion_keys, prng_seed, payment_hash);
+ let (onion_payloads, htlc_msat, htlc_cltv) = onion_utils::build_onion_payloads(path, total_value, recipient_onion, cur_height, keysend_preimage)?;
+
+ let onion_packet = onion_utils::construct_onion_packet(onion_payloads, onion_keys, prng_seed, payment_hash)
+ .map_err(|_| APIError::InvalidRoute { err: "Route size too large considering onion data".to_owned()})?;
let err: Result<(), _> = loop {
- let (counterparty_node_id, id) = match self.short_to_chan_info.read().unwrap().get(&path.first().unwrap().short_channel_id) {
+ let (counterparty_node_id, id) = match self.short_to_chan_info.read().unwrap().get(&path.hops.first().unwrap().short_channel_id) {
None => return Err(APIError::ChannelUnavailable{err: "No channel available with first hop!".to_owned()}),
Some((cp_id, chan_id)) => (cp_id.clone(), chan_id.clone()),
};
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state = &mut *peer_state_lock;
if let hash_map::Entry::Occupied(mut chan) = peer_state.channel_by_id.entry(id) {
- if !chan.get().is_live() {
+ if !chan.get().context.is_live() {
return Err(APIError::ChannelUnavailable{err: "Peer for first hop currently disconnected".to_owned()});
}
- let funding_txo = chan.get().get_funding_txo().unwrap();
+ let funding_txo = chan.get().context.get_funding_txo().unwrap();
let send_res = chan.get_mut().send_htlc_and_commit(htlc_msat, payment_hash.clone(),
htlc_cltv, HTLCSource::OutboundRoute {
path: path.clone(),
session_priv: session_priv.clone(),
first_hop_htlc_msat: htlc_msat,
payment_id,
- payment_secret: payment_secret.clone(),
- payment_params: payment_params.clone(),
- }, onion_packet, &self.logger);
+ }, onion_packet, None, &self.fee_estimator, &self.logger);
match break_chan_entry!(self, send_res, chan) {
Some(monitor_update) => {
- let update_id = monitor_update.update_id;
- let update_res = self.chain_monitor.update_channel(funding_txo, monitor_update);
- if let Err(e) = handle_new_monitor_update!(self, update_res, update_id, peer_state_lock, peer_state, per_peer_state, chan) {
- break Err(e);
- }
- if update_res == ChannelMonitorUpdateStatus::InProgress {
- // Note that MonitorUpdateInProgress here indicates (per function
- // docs) that we will resend the commitment update once monitor
- // updating completes. Therefore, we must return an error
- // indicating that it is unsafe to retry the payment wholesale,
- // which we do in the send_payment check for
- // MonitorUpdateInProgress, below.
- return Err(APIError::MonitorUpdateInProgress);
+ match handle_new_monitor_update!(self, funding_txo, monitor_update, peer_state_lock, peer_state, per_peer_state, chan) {
+ Err(e) => break Err(e),
+ Ok(false) => {
+ // Note that MonitorUpdateInProgress here indicates (per function
+ // docs) that we will resend the commitment update once monitor
+ // updating completes. Therefore, we must return an error
+ // indicating that it is unsafe to retry the payment wholesale,
+ // which we do in the send_payment check for
+ // MonitorUpdateInProgress, below.
+ return Err(APIError::MonitorUpdateInProgress);
+ },
+ Ok(true) => {},
}
},
None => { },
}
return Ok(());
};
- match handle_error!(self, err, path.first().unwrap().pubkey) {
+ match handle_error!(self, err, path.hops.first().unwrap().pubkey) {
Ok(_) => unreachable!(),
Err(e) => {
Err(APIError::ChannelUnavailable { err: e.err })
/// Value parameters are provided via the last hop in the route; see the documentation for
/// [`RouteHop`] fields for more info.
///
- /// May generate SendHTLCs message(s) event on success, which should be relayed (e.g. via
+ /// May generate [`UpdateHTLCs`] message(s) event on success, which should be relayed (e.g. via
/// [`PeerManager::process_events`]).
///
/// # Avoiding Duplicate Payments
///
/// # Possible Error States on [`PaymentSendFailure`]
///
- /// Each path may have a different return value, and PaymentSendValue may return a Vec with
+ /// Each path may have a different return value, and [`PaymentSendFailure`] may return a `Vec` with
/// each entry matching the corresponding-index entry in the route paths, see
/// [`PaymentSendFailure`] for more info.
///
/// * [`APIError::MonitorUpdateInProgress`] if a new monitor update failure prevented sending the
/// relevant updates.
///
- /// Note that depending on the type of the PaymentSendFailure the HTLC may have been
+ /// Note that depending on the type of the [`PaymentSendFailure`] the HTLC may have been
/// irrevocably committed to on our end. In such a case, do NOT retry the payment with a
/// different route unless you intend to pay twice!
///
- /// # A caution on `payment_secret`
- ///
- /// `payment_secret` is unrelated to `payment_hash` (or [`PaymentPreimage`]) and exists to
- /// authenticate the sender to the recipient and prevent payment-probing (deanonymization)
- /// attacks. For newer nodes, it will be provided to you in the invoice. If you do not have one,
- /// the [`Route`] must not contain multiple paths as multi-path payments require a
- /// recipient-provided `payment_secret`.
- ///
- /// If a `payment_secret` *is* provided, we assume that the invoice had the payment_secret
- /// feature bit set (either as required or as available). If multiple paths are present in the
- /// [`Route`], we assume the invoice had the basic_mpp feature set.
- ///
+ /// [`RouteHop`]: crate::routing::router::RouteHop
/// [`Event::PaymentSent`]: events::Event::PaymentSent
/// [`Event::PaymentFailed`]: events::Event::PaymentFailed
+ /// [`UpdateHTLCs`]: events::MessageSendEvent::UpdateHTLCs
/// [`PeerManager::process_events`]: crate::ln::peer_handler::PeerManager::process_events
/// [`ChannelMonitorUpdateStatus::InProgress`]: crate::chain::ChannelMonitorUpdateStatus::InProgress
- pub fn send_payment(&self, route: &Route, payment_hash: PaymentHash, payment_secret: &Option<PaymentSecret>, payment_id: PaymentId) -> Result<(), PaymentSendFailure> {
+ pub fn send_payment_with_route(&self, route: &Route, payment_hash: PaymentHash, recipient_onion: RecipientOnionFields, payment_id: PaymentId) -> Result<(), PaymentSendFailure> {
let best_block_height = self.best_block.read().unwrap().height();
- let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
+ let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
self.pending_outbound_payments
- .send_payment_with_route(route, payment_hash, payment_secret, payment_id, &self.entropy_source, &self.node_signer, best_block_height,
- |path, payment_params, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv|
- self.send_payment_along_path(path, payment_params, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv))
+ .send_payment_with_route(route, payment_hash, recipient_onion, payment_id, &self.entropy_source, &self.node_signer, best_block_height,
+ |path, payment_hash, recipient_onion, total_value, cur_height, payment_id, keysend_preimage, session_priv|
+ self.send_payment_along_path(path, payment_hash, recipient_onion, total_value, cur_height, payment_id, keysend_preimage, session_priv))
}
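// A minimal usage sketch, assuming `channel_manager` is a fully configured
// `ChannelManager`, `route` came from the configured `Router`, and
// `payment_hash`/`payment_secret` came from the recipient's invoice (names are
// illustrative, and `RecipientOnionFields::secret_only` is the expected constructor):
//
//     let recipient_onion = RecipientOnionFields::secret_only(payment_secret);
//     let payment_id = PaymentId(payment_hash.0);
//     channel_manager.send_payment_with_route(&route, payment_hash, recipient_onion, payment_id)?;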
- /// Similar to [`ChannelManager::send_payment`], but will automatically find a route based on
+ /// Similar to [`ChannelManager::send_payment_with_route`], but will automatically find a route based on
/// `route_params` and retry failed payment paths based on `retry_strategy`.
- pub fn send_payment_with_retry(&self, payment_hash: PaymentHash, payment_secret: &Option<PaymentSecret>, payment_id: PaymentId, route_params: RouteParameters, retry_strategy: Retry) -> Result<(), RetryableSendFailure> {
+ pub fn send_payment(&self, payment_hash: PaymentHash, recipient_onion: RecipientOnionFields, payment_id: PaymentId, route_params: RouteParameters, retry_strategy: Retry) -> Result<(), RetryableSendFailure> {
let best_block_height = self.best_block.read().unwrap().height();
- let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
+ let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
self.pending_outbound_payments
- .send_payment(payment_hash, payment_secret, payment_id, retry_strategy, route_params,
+ .send_payment(payment_hash, recipient_onion, payment_id, retry_strategy, route_params,
&self.router, self.list_usable_channels(), || self.compute_inflight_htlcs(),
&self.entropy_source, &self.node_signer, best_block_height, &self.logger,
&self.pending_events,
- |path, payment_params, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv|
- self.send_payment_along_path(path, payment_params, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv))
+ |path, payment_hash, recipient_onion, total_value, cur_height, payment_id, keysend_preimage, session_priv|
+ self.send_payment_along_path(path, payment_hash, recipient_onion, total_value, cur_height, payment_id, keysend_preimage, session_priv))
}
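// A sketch of the retrying variant, assuming `route_params` was built from an
// invoice's payment parameters and amount (names illustrative); LDK routes the
// payment itself, retries failed paths up to the given limit, and reports the
// outcome via payment events:
//
//     channel_manager.send_payment(payment_hash,
//         RecipientOnionFields::secret_only(payment_secret),
//         PaymentId(payment_hash.0), route_params, Retry::Attempts(3))?;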
#[cfg(test)]
- fn test_send_payment_internal(&self, route: &Route, payment_hash: PaymentHash, payment_secret: &Option<PaymentSecret>, keysend_preimage: Option<PaymentPreimage>, payment_id: PaymentId, recv_value_msat: Option<u64>, onion_session_privs: Vec<[u8; 32]>) -> Result<(), PaymentSendFailure> {
+ pub(super) fn test_send_payment_internal(&self, route: &Route, payment_hash: PaymentHash, recipient_onion: RecipientOnionFields, keysend_preimage: Option<PaymentPreimage>, payment_id: PaymentId, recv_value_msat: Option<u64>, onion_session_privs: Vec<[u8; 32]>) -> Result<(), PaymentSendFailure> {
let best_block_height = self.best_block.read().unwrap().height();
- let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
- self.pending_outbound_payments.test_send_payment_internal(route, payment_hash, payment_secret, keysend_preimage, payment_id, recv_value_msat, onion_session_privs, &self.node_signer, best_block_height,
- |path, payment_params, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv|
- self.send_payment_along_path(path, payment_params, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv))
+ let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
+ self.pending_outbound_payments.test_send_payment_internal(route, payment_hash, recipient_onion, keysend_preimage, payment_id, recv_value_msat, onion_session_privs, &self.node_signer, best_block_height,
+ |path, payment_hash, recipient_onion, total_value, cur_height, payment_id, keysend_preimage, session_priv|
+ self.send_payment_along_path(path, payment_hash, recipient_onion, total_value, cur_height, payment_id, keysend_preimage, session_priv))
}
#[cfg(test)]
- pub(crate) fn test_add_new_pending_payment(&self, payment_hash: PaymentHash, payment_secret: Option<PaymentSecret>, payment_id: PaymentId, route: &Route) -> Result<Vec<[u8; 32]>, PaymentSendFailure> {
+ pub(crate) fn test_add_new_pending_payment(&self, payment_hash: PaymentHash, recipient_onion: RecipientOnionFields, payment_id: PaymentId, route: &Route) -> Result<Vec<[u8; 32]>, PaymentSendFailure> {
let best_block_height = self.best_block.read().unwrap().height();
- self.pending_outbound_payments.test_add_new_pending_payment(payment_hash, payment_secret, payment_id, route, None, &self.entropy_source, best_block_height)
+ self.pending_outbound_payments.test_add_new_pending_payment(payment_hash, recipient_onion, payment_id, route, None, &self.entropy_source, best_block_height)
+ }
+
+ #[cfg(test)]
+ pub(crate) fn test_set_payment_metadata(&self, payment_id: PaymentId, new_payment_metadata: Option<Vec<u8>>) {
+ self.pending_outbound_payments.test_set_payment_metadata(payment_id, new_payment_metadata);
}
/// [`Event::PaymentFailed`]: events::Event::PaymentFailed
/// [`Event::PaymentSent`]: events::Event::PaymentSent
pub fn abandon_payment(&self, payment_id: PaymentId) {
- let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
- self.pending_outbound_payments.abandon_payment(payment_id, &self.pending_events);
+ let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
+ self.pending_outbound_payments.abandon_payment(payment_id, PaymentFailureReason::UserAbandoned, &self.pending_events);
}
/// Send a spontaneous payment, which is a payment that does not require the recipient to have
/// Similar to regular payments, you MUST NOT reuse a `payment_preimage` value. See
/// [`send_payment`] for more information about the risks of duplicate preimage usage.
///
- /// Note that `route` must have exactly one path.
- ///
/// [`send_payment`]: Self::send_payment
- pub fn send_spontaneous_payment(&self, route: &Route, payment_preimage: Option<PaymentPreimage>, payment_id: PaymentId) -> Result<PaymentHash, PaymentSendFailure> {
+ pub fn send_spontaneous_payment(&self, route: &Route, payment_preimage: Option<PaymentPreimage>, recipient_onion: RecipientOnionFields, payment_id: PaymentId) -> Result<PaymentHash, PaymentSendFailure> {
let best_block_height = self.best_block.read().unwrap().height();
- let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
+ let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
self.pending_outbound_payments.send_spontaneous_payment_with_route(
- route, payment_preimage, payment_id, &self.entropy_source, &self.node_signer,
- best_block_height,
- |path, payment_params, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv|
- self.send_payment_along_path(path, payment_params, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv))
+ route, payment_preimage, recipient_onion, payment_id, &self.entropy_source,
+ &self.node_signer, best_block_height,
+ |path, payment_hash, recipient_onion, total_value, cur_height, payment_id, keysend_preimage, session_priv|
+ self.send_payment_along_path(path, payment_hash, recipient_onion, total_value, cur_height, payment_id, keysend_preimage, session_priv))
}
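// A keysend sketch, assuming a `route` to the recipient and random `id_bytes`
// (both illustrative). Passing `None` asks LDK to generate a random preimage;
// the returned hash identifies the payment:
//
//     let payment_hash = channel_manager.send_spontaneous_payment(
//         &route, None, RecipientOnionFields::spontaneous_empty(),
//         PaymentId(id_bytes))?;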
/// Similar to [`ChannelManager::send_spontaneous_payment`], but will automatically find a route
/// payments.
///
/// [`PaymentParameters::for_keysend`]: crate::routing::router::PaymentParameters::for_keysend
- pub fn send_spontaneous_payment_with_retry(&self, payment_preimage: Option<PaymentPreimage>, payment_id: PaymentId, route_params: RouteParameters, retry_strategy: Retry) -> Result<PaymentHash, RetryableSendFailure> {
+ pub fn send_spontaneous_payment_with_retry(&self, payment_preimage: Option<PaymentPreimage>, recipient_onion: RecipientOnionFields, payment_id: PaymentId, route_params: RouteParameters, retry_strategy: Retry) -> Result<PaymentHash, RetryableSendFailure> {
let best_block_height = self.best_block.read().unwrap().height();
- let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
- self.pending_outbound_payments.send_spontaneous_payment(payment_preimage, payment_id,
- retry_strategy, route_params, &self.router, self.list_usable_channels(),
+ let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
+ self.pending_outbound_payments.send_spontaneous_payment(payment_preimage, recipient_onion,
+ payment_id, retry_strategy, route_params, &self.router, self.list_usable_channels(),
|| self.compute_inflight_htlcs(), &self.entropy_source, &self.node_signer, best_block_height,
&self.logger, &self.pending_events,
- |path, payment_params, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv|
- self.send_payment_along_path(path, payment_params, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv))
+ |path, payment_hash, recipient_onion, total_value, cur_height, payment_id, keysend_preimage, session_priv|
+ self.send_payment_along_path(path, payment_hash, recipient_onion, total_value, cur_height, payment_id, keysend_preimage, session_priv))
}
/// Send a payment that is probing the given route for liquidity. We calculate the
/// [`PaymentHash`] of probes based on a static secret and a random [`PaymentId`], which allows
/// us to easily discern them from real payments.
- pub fn send_probe(&self, hops: Vec<RouteHop>) -> Result<(PaymentHash, PaymentId), PaymentSendFailure> {
+ pub fn send_probe(&self, path: Path) -> Result<(PaymentHash, PaymentId), PaymentSendFailure> {
let best_block_height = self.best_block.read().unwrap().height();
- let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
- self.pending_outbound_payments.send_probe(hops, self.probing_cookie_secret, &self.entropy_source, &self.node_signer, best_block_height,
- |path, payment_params, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv|
- self.send_payment_along_path(path, payment_params, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv))
+ let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
+ self.pending_outbound_payments.send_probe(path, self.probing_cookie_secret, &self.entropy_source, &self.node_signer, best_block_height,
+ |path, payment_hash, recipient_onion, total_value, cur_height, payment_id, keysend_preimage, session_priv|
+ self.send_payment_along_path(path, payment_hash, recipient_onion, total_value, cur_height, payment_id, keysend_preimage, session_priv))
}
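// A probing sketch, assuming `path` was built by the router toward the target
// node; the returned pair lets the caller match the eventual
// `Event::ProbeSuccessful` or `Event::ProbeFailed`:
//
//     let (probe_payment_hash, probe_payment_id) = channel_manager.send_probe(path)?;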
/// Returns whether a payment with the given [`PaymentHash`] and [`PaymentId`] is, in fact, a
/// Handles the generation of a funding transaction, optionally (for tests) with a function
/// which checks the correctness of the funding transaction given the associated channel.
- fn funding_transaction_generated_intern<FundingOutput: Fn(&Channel<<SP::Target as SignerProvider>::Signer>, &Transaction) -> Result<OutPoint, APIError>>(
+ fn funding_transaction_generated_intern<FundingOutput: Fn(&OutboundV1Channel<<SP::Target as SignerProvider>::Signer>, &Transaction) -> Result<OutPoint, APIError>>(
&self, temporary_channel_id: &[u8; 32], counterparty_node_id: &PublicKey, funding_transaction: Transaction, find_funding_output: FundingOutput
) -> Result<(), APIError> {
let per_peer_state = self.per_peer_state.read().unwrap();
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state = &mut *peer_state_lock;
- let (chan, msg) = {
- let (res, chan) = {
- match peer_state.channel_by_id.remove(temporary_channel_id) {
- Some(mut chan) => {
- let funding_txo = find_funding_output(&chan, &funding_transaction)?;
-
- (chan.get_outbound_funding_created(funding_transaction, funding_txo, &self.logger)
- .map_err(|e| if let ChannelError::Close(msg) = e {
- MsgHandleErrInternal::from_finish_shutdown(msg, chan.channel_id(), chan.get_user_id(), chan.force_shutdown(true), None)
- } else { unreachable!(); })
- , chan)
+ let (chan, msg) = match peer_state.outbound_v1_channel_by_id.remove(temporary_channel_id) {
+ Some(chan) => {
+ let funding_txo = find_funding_output(&chan, &funding_transaction)?;
+
+ let funding_res = chan.get_outbound_funding_created(funding_transaction, funding_txo, &self.logger)
+ .map_err(|(mut chan, e)| if let ChannelError::Close(msg) = e {
+ let channel_id = chan.context.channel_id();
+ let user_id = chan.context.get_user_id();
+ let shutdown_res = chan.context.force_shutdown(false);
+ (chan, MsgHandleErrInternal::from_finish_shutdown(msg, channel_id, user_id, shutdown_res, None))
+ } else { unreachable!(); });
+ match funding_res {
+ Ok((chan, funding_msg)) => (chan, funding_msg),
+ Err((chan, err)) => {
+ mem::drop(peer_state_lock);
+ mem::drop(per_peer_state);
+
+ let _: Result<(), _> = handle_error!(self, Err(err), chan.context.get_counterparty_node_id());
+ return Err(APIError::ChannelUnavailable {
+ err: "Signer refused to sign the initial commitment transaction".to_owned()
+ });
},
- None => { return Err(APIError::ChannelUnavailable { err: format!("Channel with id {} not found for the passed counterparty node_id {}", log_bytes!(*temporary_channel_id), counterparty_node_id) }) },
}
- };
- match handle_error!(self, res, chan.get_counterparty_node_id()) {
- Ok(funding_msg) => {
- (chan, funding_msg)
- },
- Err(_) => { return Err(APIError::ChannelUnavailable {
- err: "Signer refused to sign the initial commitment transaction".to_owned()
- }) },
- }
+ },
+ None => {
+ return Err(APIError::ChannelUnavailable {
+ err: format!(
+ "Channel with id {} not found for the passed counterparty node_id {}",
+ log_bytes!(*temporary_channel_id), counterparty_node_id),
+ })
+ },
};
peer_state.pending_msg_events.push(events::MessageSendEvent::SendFundingCreated {
- node_id: chan.get_counterparty_node_id(),
+ node_id: chan.context.get_counterparty_node_id(),
msg,
});
- match peer_state.channel_by_id.entry(chan.channel_id()) {
+ match peer_state.channel_by_id.entry(chan.context.channel_id()) {
hash_map::Entry::Occupied(_) => {
panic!("Generated duplicate funding txid?");
},
hash_map::Entry::Vacant(e) => {
let mut id_to_peer = self.id_to_peer.lock().unwrap();
- if id_to_peer.insert(chan.channel_id(), chan.get_counterparty_node_id()).is_some() {
+ if id_to_peer.insert(chan.context.channel_id(), chan.context.get_counterparty_node_id()).is_some() {
panic!("id_to_peer map already contained funding txid, which shouldn't be possible");
}
e.insert(chan);
/// implemented by Bitcoin Core wallet. See <https://bitcoinops.org/en/topics/fee-sniping/>
/// for more details.
///
- /// [`Event::FundingGenerationReady`]: crate::util::events::Event::FundingGenerationReady
- /// [`Event::ChannelClosed`]: crate::util::events::Event::ChannelClosed
+ /// [`Event::FundingGenerationReady`]: crate::events::Event::FundingGenerationReady
+ /// [`Event::ChannelClosed`]: crate::events::Event::ChannelClosed
pub fn funding_transaction_generated(&self, temporary_channel_id: &[u8; 32], counterparty_node_id: &PublicKey, funding_transaction: Transaction) -> Result<(), APIError> {
- let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
+ let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
for inp in funding_transaction.input.iter() {
if inp.witness.is_empty() {
}
{
let height = self.best_block.read().unwrap().height();
- // Transactions are evaluated as final by network mempools at the next block. However, the modules
- // constituting our Lightning node might not have perfect sync about their blockchain views. Thus, if
- // the wallet module is in advance on the LDK view, allow one more block of headroom.
- if !funding_transaction.input.iter().all(|input| input.sequence == Sequence::MAX) && LockTime::from(funding_transaction.lock_time).is_block_height() && funding_transaction.lock_time.0 > height + 2 {
+ // Transactions are evaluated as final by network mempools if their locktime is strictly
+ // lower than the next block height. However, the modules constituting our Lightning
+ // node might not have perfect sync about their blockchain views. Thus, if the wallet
+ // module is ahead of LDK, only allow one more block of headroom.
+ if !funding_transaction.input.iter().all(|input| input.sequence == Sequence::MAX) && LockTime::from(funding_transaction.lock_time).is_block_height() && funding_transaction.lock_time.0 > height + 1 {
return Err(APIError::APIMisuseError {
err: "Funding transaction absolute timelock is non-final".to_owned()
});
}
}
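// Worked example of the headroom rule above, with our best height at 799_999:
// a locktime of 799_999 is already final for the next block, 800_000 is accepted
// as the single block of wallet/LDK skew allowed, and 800_001 (> height + 1) is
// rejected as non-final.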
self.funding_transaction_generated_intern(temporary_channel_id, counterparty_node_id, funding_transaction, |chan, tx| {
+ if tx.output.len() > u16::max_value() as usize {
+ return Err(APIError::APIMisuseError {
+ err: "Transaction had more than 2^16 outputs, which is not supported".to_owned()
+ });
+ }
+
let mut output_index = None;
- let expected_spk = chan.get_funding_redeemscript().to_v0_p2wsh();
+ let expected_spk = chan.context.get_funding_redeemscript().to_v0_p2wsh();
for (idx, outp) in tx.output.iter().enumerate() {
- if outp.script_pubkey == expected_spk && outp.value == chan.get_value_satoshis() {
+ if outp.script_pubkey == expected_spk && outp.value == chan.context.get_value_satoshis() {
if output_index.is_some() {
return Err(APIError::APIMisuseError {
err: "Multiple outputs matched the expected script and value".to_owned()
});
}
- if idx > u16::max_value() as usize {
- return Err(APIError::APIMisuseError {
- err: "Transaction had more than 2^16 outputs, which is not supported".to_owned()
- });
- }
output_index = Some(idx as u16);
}
}
})
}
- /// Atomically updates the [`ChannelConfig`] for the given channels.
+ /// Atomically applies partial updates to the [`ChannelConfig`] of the given channels.
///
/// Once the updates are applied, each eligible channel (advertised with a known short channel
/// ID and a change in [`forwarding_fee_proportional_millionths`], [`forwarding_fee_base_msat`],
/// [`ChannelUpdate`]: msgs::ChannelUpdate
/// [`ChannelUnavailable`]: APIError::ChannelUnavailable
/// [`APIMisuseError`]: APIError::APIMisuseError
- pub fn update_channel_config(
- &self, counterparty_node_id: &PublicKey, channel_ids: &[[u8; 32]], config: &ChannelConfig,
+ pub fn update_partial_channel_config(
+ &self, counterparty_node_id: &PublicKey, channel_ids: &[[u8; 32]], config_update: &ChannelConfigUpdate,
) -> Result<(), APIError> {
- if config.cltv_expiry_delta < MIN_CLTV_EXPIRY_DELTA {
+ if config_update.cltv_expiry_delta.map(|delta| delta < MIN_CLTV_EXPIRY_DELTA).unwrap_or(false) {
return Err(APIError::APIMisuseError {
err: format!("The chosen CLTV expiry delta is below the minimum of {}", MIN_CLTV_EXPIRY_DELTA),
});
}
- let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(
- &self.total_consistency_lock, &self.persistence_notifier,
- );
+ let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
let per_peer_state = self.per_peer_state.read().unwrap();
let peer_state_mutex = per_peer_state.get(counterparty_node_id)
.ok_or_else(|| APIError::ChannelUnavailable { err: format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id) })?;
}
for channel_id in channel_ids {
let channel = peer_state.channel_by_id.get_mut(channel_id).unwrap();
- if !channel.update_config(config) {
+ let mut config = channel.context.config();
+ config.apply(config_update);
+ if !channel.context.update_config(&config) {
continue;
}
if let Ok(msg) = self.get_channel_update_for_broadcast(channel) {
peer_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate { msg });
} else if let Ok(msg) = self.get_channel_update_for_unicast(channel) {
peer_state.pending_msg_events.push(events::MessageSendEvent::SendChannelUpdate {
- node_id: channel.get_counterparty_node_id(),
+ node_id: channel.context.get_counterparty_node_id(),
msg,
});
}
Ok(())
}
+ /// Atomically updates the [`ChannelConfig`] for the given channels.
+ ///
+ /// Once the updates are applied, each eligible channel (advertised with a known short channel
+ /// ID and a change in [`forwarding_fee_proportional_millionths`], [`forwarding_fee_base_msat`],
+ /// or [`cltv_expiry_delta`]) has a [`BroadcastChannelUpdate`] event message generated
+ /// containing the new [`ChannelUpdate`] message which should be broadcast to the network.
+ ///
+ /// Returns [`ChannelUnavailable`] when a channel is not found or an incorrect
+ /// `counterparty_node_id` is provided.
+ ///
+ /// Returns [`APIMisuseError`] when a [`cltv_expiry_delta`] update is to be applied with a value
+ /// below [`MIN_CLTV_EXPIRY_DELTA`].
+ ///
+ /// If an error is returned, none of the updates should be considered applied.
+ ///
+ /// [`forwarding_fee_proportional_millionths`]: ChannelConfig::forwarding_fee_proportional_millionths
+ /// [`forwarding_fee_base_msat`]: ChannelConfig::forwarding_fee_base_msat
+ /// [`cltv_expiry_delta`]: ChannelConfig::cltv_expiry_delta
+ /// [`BroadcastChannelUpdate`]: events::MessageSendEvent::BroadcastChannelUpdate
+ /// [`ChannelUpdate`]: msgs::ChannelUpdate
+ /// [`ChannelUnavailable`]: APIError::ChannelUnavailable
+ /// [`APIMisuseError`]: APIError::APIMisuseError
+ pub fn update_channel_config(
+ &self, counterparty_node_id: &PublicKey, channel_ids: &[[u8; 32]], config: &ChannelConfig,
+ ) -> Result<(), APIError> {
+ return self.update_partial_channel_config(counterparty_node_id, channel_ids, &(*config).into());
+ }
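// A sketch of a partial update, assuming `ChannelConfigUpdate`'s unset fields
// default to `None` so that only the named field is touched (the
// `counterparty_node_id` and `channel_id` bindings are illustrative):
//
//     let mut update = ChannelConfigUpdate::default();
//     update.forwarding_fee_base_msat = Some(1_000);
//     channel_manager.update_partial_channel_config(
//         &counterparty_node_id, &[channel_id], &update)?;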
+
/// Attempts to forward an intercepted HTLC over the provided channel id and with the provided
/// amount to forward. Should only be called in response to an [`HTLCIntercepted`] event.
///
/// [`ChannelManager::fail_intercepted_htlc`] MUST be called in response to the event.
///
/// Note that LDK does not enforce fee requirements in `amt_to_forward_msat`, and will not stop
- /// you from forwarding more than you received.
+ /// you from forwarding more than you received. See
+ /// [`HTLCIntercepted::expected_outbound_amount_msat`] for more on forwarding a different amount
+ /// than expected.
///
/// Errors if the event was not handled in time, in which case the HTLC was automatically failed
/// backwards.
///
/// [`UserConfig::accept_intercept_htlcs`]: crate::util::config::UserConfig::accept_intercept_htlcs
/// [`HTLCIntercepted`]: events::Event::HTLCIntercepted
+ /// [`HTLCIntercepted::expected_outbound_amount_msat`]: events::Event::HTLCIntercepted::expected_outbound_amount_msat
// TODO: when we move to deciding the best outbound channel at forward time, only take
// `next_node_id` and not `next_hop_channel_id`
pub fn forward_intercepted_htlc(&self, intercept_id: InterceptId, next_hop_channel_id: &[u8; 32], next_node_id: PublicKey, amt_to_forward_msat: u64) -> Result<(), APIError> {
- let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
+ let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
let next_hop_scid = {
let peer_state_lock = self.per_peer_state.read().unwrap();
let peer_state = &mut *peer_state_lock;
match peer_state.channel_by_id.get(next_hop_channel_id) {
Some(chan) => {
- if !chan.is_usable() {
+ if !chan.context.is_usable() {
return Err(APIError::ChannelUnavailable {
err: format!("Channel with id {} not fully established", log_bytes!(*next_hop_channel_id))
})
}
- chan.get_short_channel_id().unwrap_or(chan.outbound_scid_alias())
+ chan.context.get_short_channel_id().unwrap_or(chan.context.outbound_scid_alias())
},
None => return Err(APIError::ChannelUnavailable {
- err: format!("Channel with id {} not found for the passed counterparty node_id {}", log_bytes!(*next_hop_channel_id), next_node_id)
+ err: format!("Funded channel with id {} not found for the passed counterparty node_id {}. Channel may still be opening.",
+ log_bytes!(*next_hop_channel_id), next_node_id)
})
}
};
},
_ => unreachable!() // Only `PendingHTLCRouting::Forward`s are intercepted
};
+ let skimmed_fee_msat =
+ payment.forward_info.outgoing_amt_msat.saturating_sub(amt_to_forward_msat);
let pending_htlc_info = PendingHTLCInfo {
+ skimmed_fee_msat: if skimmed_fee_msat == 0 { None } else { Some(skimmed_fee_msat) },
outgoing_amt_msat: amt_to_forward_msat, routing, ..payment.forward_info
};
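// A sketch of driving this API from the event loop, assuming
// `next_hop_channel_id`/`next_node_id` identify a channel of the caller's
// choosing (illustrative bindings); forwarding less than
// `expected_outbound_amount_msat` skims the difference as a fee, exactly as
// computed into `skimmed_fee_msat` above:
//
//     if let Event::HTLCIntercepted { intercept_id, expected_outbound_amount_msat, .. } = event {
//         channel_manager.forward_intercepted_htlc(intercept_id,
//             &next_hop_channel_id, next_node_id, expected_outbound_amount_msat)?;
//     }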
///
/// [`HTLCIntercepted`]: events::Event::HTLCIntercepted
pub fn fail_intercepted_htlc(&self, intercept_id: InterceptId) -> Result<(), APIError> {
- let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
+ let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
let payment = self.pending_intercepted_htlcs.lock().unwrap().remove(&intercept_id)
.ok_or_else(|| APIError::APIMisuseError {
/// Should typically only be called in response to a `PendingHTLCsForwardable` event.
/// Will likely generate further events.
pub fn process_pending_htlc_forwards(&self) {
- let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
+ let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
- let mut new_events = Vec::new();
+ let mut new_events = VecDeque::new();
let mut failed_forwards = Vec::new();
let mut phantom_receives: Vec<(u64, OutPoint, u128, Vec<(PendingHTLCInfo, u64)>)> = Vec::new();
{
prev_short_channel_id, prev_htlc_id, prev_funding_outpoint, prev_user_channel_id,
forward_info: PendingHTLCInfo {
routing, incoming_shared_secret, payment_hash, outgoing_amt_msat,
- outgoing_cltv_value, incoming_amt_msat: _
+ outgoing_cltv_value, ..
}
}) => {
macro_rules! failure_handler {
};
match next_hop {
onion_utils::Hop::Receive(hop_data) => {
- match self.construct_recv_pending_htlc_info(hop_data, incoming_shared_secret, payment_hash, outgoing_amt_msat, outgoing_cltv_value, Some(phantom_shared_secret)) {
+ match self.construct_recv_pending_htlc_info(hop_data,
+ incoming_shared_secret, payment_hash, outgoing_amt_msat,
+ outgoing_cltv_value, Some(phantom_shared_secret), false, None)
+ {
Ok(info) => phantom_receives.push((prev_short_channel_id, prev_funding_outpoint, prev_user_channel_id, vec![(info, prev_htlc_id)])),
Err(ReceiveError { err_code, err_data, msg }) => failed_payment!(msg, err_code, err_data, Some(phantom_shared_secret))
}
prev_short_channel_id, prev_htlc_id, prev_funding_outpoint, prev_user_channel_id: _,
forward_info: PendingHTLCInfo {
incoming_shared_secret, payment_hash, outgoing_amt_msat, outgoing_cltv_value,
- routing: PendingHTLCRouting::Forward { onion_packet, .. }, incoming_amt_msat: _,
+ routing: PendingHTLCRouting::Forward { onion_packet, .. }, skimmed_fee_msat, ..
},
}) => {
log_trace!(self.logger, "Adding HTLC from short id {} with payment_hash {} to channel with short id {} after delay", prev_short_channel_id, log_bytes!(payment_hash.0), short_chan_id);
});
if let Err(e) = chan.get_mut().queue_add_htlc(outgoing_amt_msat,
payment_hash, outgoing_cltv_value, htlc_source.clone(),
- onion_packet, &self.logger)
+ onion_packet, skimmed_fee_msat, &self.fee_estimator,
+ &self.logger)
{
if let ChannelError::Ignore(msg) = e {
log_trace!(self.logger, "Failed to forward HTLC with payment_hash {}: {}", log_bytes!(payment_hash.0), msg);
let (failure_code, data) = self.get_htlc_temp_fail_err_and_data(0x1000|7, short_chan_id, chan.get());
failed_forwards.push((htlc_source, payment_hash,
HTLCFailReason::reason(failure_code, data),
- HTLCDestination::NextHopChannel { node_id: Some(chan.get().get_counterparty_node_id()), channel_id: forward_chan_id }
+ HTLCDestination::NextHopChannel { node_id: Some(chan.get().context.get_counterparty_node_id()), channel_id: forward_chan_id }
));
continue;
}
}
}
} else {
- for forward_info in pending_forwards.drain(..) {
+ 'next_forwardable_htlc: for forward_info in pending_forwards.drain(..) {
match forward_info {
HTLCForwardInfo::AddHTLC(PendingAddHTLCInfo {
prev_short_channel_id, prev_htlc_id, prev_funding_outpoint, prev_user_channel_id,
forward_info: PendingHTLCInfo {
- routing, incoming_shared_secret, payment_hash, outgoing_amt_msat, ..
+ routing, incoming_shared_secret, payment_hash, incoming_amt_msat, outgoing_amt_msat,
+ skimmed_fee_msat, ..
}
}) => {
- let (cltv_expiry, onion_payload, payment_data, phantom_shared_secret) = match routing {
- PendingHTLCRouting::Receive { payment_data, incoming_cltv_expiry, phantom_shared_secret } => {
+ let (cltv_expiry, onion_payload, payment_data, phantom_shared_secret, mut onion_fields) = match routing {
+ PendingHTLCRouting::Receive { payment_data, payment_metadata, incoming_cltv_expiry, phantom_shared_secret } => {
let _legacy_hop_data = Some(payment_data.clone());
- (incoming_cltv_expiry, OnionPayload::Invoice { _legacy_hop_data }, Some(payment_data), phantom_shared_secret)
+ let onion_fields =
+ RecipientOnionFields { payment_secret: Some(payment_data.payment_secret), payment_metadata };
+ (incoming_cltv_expiry, OnionPayload::Invoice { _legacy_hop_data },
+ Some(payment_data), phantom_shared_secret, onion_fields)
+ },
+ PendingHTLCRouting::ReceiveKeysend { payment_data, payment_preimage, payment_metadata, incoming_cltv_expiry } => {
+ let onion_fields = RecipientOnionFields {
+ payment_secret: payment_data.as_ref().map(|data| data.payment_secret),
+ payment_metadata
+ };
+ (incoming_cltv_expiry, OnionPayload::Spontaneous(payment_preimage),
+ payment_data, None, onion_fields)
},
- PendingHTLCRouting::ReceiveKeysend { payment_preimage, incoming_cltv_expiry } =>
- (incoming_cltv_expiry, OnionPayload::Spontaneous(payment_preimage), None, None),
_ => {
panic!("short_channel_id == 0 should imply any pending_forward entries are of type Receive");
}
incoming_packet_shared_secret: incoming_shared_secret,
phantom_shared_secret,
},
- value: outgoing_amt_msat,
+ // We differentiate the received value from the sender intended value
+ // if possible so that we don't prematurely mark MPP payments complete
+ // if routing nodes overpay
+ value: incoming_amt_msat.unwrap_or(outgoing_amt_msat),
+ sender_intended_value: outgoing_amt_msat,
timer_ticks: 0,
+ total_value_received: None,
total_msat: if let Some(data) = &payment_data { data.total_msat } else { outgoing_amt_msat },
cltv_expiry,
onion_payload,
+ counterparty_skimmed_fee_msat: skimmed_fee_msat,
};
+ let mut committed_to_claimable = false;
+
macro_rules! fail_htlc {
($htlc: expr, $payment_hash: expr) => {
+ debug_assert!(!committed_to_claimable);
let mut htlc_msat_height_data = $htlc.value.to_be_bytes().to_vec();
htlc_msat_height_data.extend_from_slice(
&self.best_block.read().unwrap().height().to_be_bytes(),
HTLCFailReason::reason(0x4000 | 15, htlc_msat_height_data),
HTLCDestination::FailedPayment { payment_hash: $payment_hash },
));
+ continue 'next_forwardable_htlc;
}
}
let phantom_shared_secret = claimable_htlc.prev_hop.phantom_shared_secret;
}
macro_rules! check_total_value {
- ($payment_data: expr, $payment_preimage: expr) => {{
+ ($purpose: expr) => {{
let mut payment_claimable_generated = false;
- let purpose = || {
- events::PaymentPurpose::InvoicePayment {
- payment_preimage: $payment_preimage,
- payment_secret: $payment_data.payment_secret,
- }
+ let is_keysend = match $purpose {
+ events::PaymentPurpose::SpontaneousPayment(_) => true,
+ events::PaymentPurpose::InvoicePayment { .. } => false,
};
let mut claimable_payments = self.claimable_payments.lock().unwrap();
if claimable_payments.pending_claiming_payments.contains_key(&payment_hash) {
fail_htlc!(claimable_htlc, payment_hash);
- continue
}
- let (_, htlcs) = claimable_payments.claimable_htlcs.entry(payment_hash)
- .or_insert_with(|| (purpose(), Vec::new()));
- if htlcs.len() == 1 {
- if let OnionPayload::Spontaneous(_) = htlcs[0].onion_payload {
- log_trace!(self.logger, "Failing new HTLC with payment_hash {} as we already had an existing keysend HTLC with the same payment hash", log_bytes!(payment_hash.0));
+ let ref mut claimable_payment = claimable_payments.claimable_payments
+ .entry(payment_hash)
+ // Note that if we insert here we MUST NOT fail_htlc!()
+ .or_insert_with(|| {
+ committed_to_claimable = true;
+ ClaimablePayment {
+ purpose: $purpose.clone(), htlcs: Vec::new(), onion_fields: None,
+ }
+ });
+ if $purpose != claimable_payment.purpose {
+ let log_keysend = |keysend| if keysend { "keysend" } else { "non-keysend" };
+ log_trace!(self.logger, "Failing new {} HTLC with payment_hash {} as we already had an existing {} HTLC with the same payment hash", log_keysend(is_keysend), log_bytes!(payment_hash.0), log_keysend(!is_keysend));
+ fail_htlc!(claimable_htlc, payment_hash);
+ }
+ if !self.default_configuration.accept_mpp_keysend && is_keysend && !claimable_payment.htlcs.is_empty() {
+ log_trace!(self.logger, "Failing new keysend HTLC with payment_hash {} as we already had an existing keysend HTLC with the same payment hash and our config states we don't accept MPP keysend", log_bytes!(payment_hash.0));
+ fail_htlc!(claimable_htlc, payment_hash);
+ }
+ if let Some(earlier_fields) = &mut claimable_payment.onion_fields {
+ if earlier_fields.check_merge(&mut onion_fields).is_err() {
fail_htlc!(claimable_htlc, payment_hash);
- continue
}
+ } else {
+ claimable_payment.onion_fields = Some(onion_fields);
}
- let mut total_value = claimable_htlc.value;
+ let ref mut htlcs = &mut claimable_payment.htlcs;
+ let mut total_value = claimable_htlc.sender_intended_value;
+ let mut earliest_expiry = claimable_htlc.cltv_expiry;
for htlc in htlcs.iter() {
- total_value += htlc.value;
- match &htlc.onion_payload {
- OnionPayload::Invoice { .. } => {
- if htlc.total_msat != $payment_data.total_msat {
- log_trace!(self.logger, "Failing HTLCs with payment_hash {} as the HTLCs had inconsistent total values (eg {} and {})",
- log_bytes!(payment_hash.0), $payment_data.total_msat, htlc.total_msat);
- total_value = msgs::MAX_VALUE_MSAT;
- }
- if total_value >= msgs::MAX_VALUE_MSAT { break; }
- },
- _ => unreachable!(),
+ total_value += htlc.sender_intended_value;
+ earliest_expiry = cmp::min(earliest_expiry, htlc.cltv_expiry);
+ if htlc.total_msat != claimable_htlc.total_msat {
+ log_trace!(self.logger, "Failing HTLCs with payment_hash {} as the HTLCs had inconsistent total values (eg {} and {})",
+ log_bytes!(payment_hash.0), claimable_htlc.total_msat, htlc.total_msat);
+ total_value = msgs::MAX_VALUE_MSAT;
}
+ if total_value >= msgs::MAX_VALUE_MSAT { break; }
}
- if total_value >= msgs::MAX_VALUE_MSAT || total_value > $payment_data.total_msat {
- log_trace!(self.logger, "Failing HTLCs with payment_hash {} as the total value {} ran over expected value {} (or HTLCs were inconsistent)",
- log_bytes!(payment_hash.0), total_value, $payment_data.total_msat);
+ // The condition determining whether an MPP is complete must
+ // match exactly the condition used in `timer_tick_occurred`
+ if total_value >= msgs::MAX_VALUE_MSAT {
+ fail_htlc!(claimable_htlc, payment_hash);
+ } else if total_value - claimable_htlc.sender_intended_value >= claimable_htlc.total_msat {
+ log_trace!(self.logger, "Failing HTLC with payment_hash {} as payment is already claimable",
+ log_bytes!(payment_hash.0));
fail_htlc!(claimable_htlc, payment_hash);
- } else if total_value == $payment_data.total_msat {
+ } else if total_value >= claimable_htlc.total_msat {
+ #[allow(unused_assignments)] {
+ committed_to_claimable = true;
+ }
let prev_channel_id = prev_funding_outpoint.to_channel_id();
htlcs.push(claimable_htlc);
- new_events.push(events::Event::PaymentClaimable {
+ let amount_msat = htlcs.iter().map(|htlc| htlc.value).sum();
+ htlcs.iter_mut().for_each(|htlc| htlc.total_value_received = Some(amount_msat));
+ let counterparty_skimmed_fee_msat = htlcs.iter()
+ .map(|htlc| htlc.counterparty_skimmed_fee_msat.unwrap_or(0)).sum();
+ debug_assert!(total_value.saturating_sub(amount_msat) <=
+ counterparty_skimmed_fee_msat);
+ new_events.push_back((events::Event::PaymentClaimable {
receiver_node_id: Some(receiver_node_id),
payment_hash,
- purpose: purpose(),
- amount_msat: total_value,
+ purpose: $purpose,
+ amount_msat,
+ counterparty_skimmed_fee_msat,
via_channel_id: Some(prev_channel_id),
via_user_channel_id: Some(prev_user_channel_id),
- });
+ claim_deadline: Some(earliest_expiry - HTLC_FAIL_BACK_BUFFER),
+ onion_fields: claimable_payment.onion_fields.clone(),
+ }, None));
payment_claimable_generated = true;
} else {
// Nothing to do - we haven't reached the total
// payment value yet, wait until we receive more
// MPP parts.
htlcs.push(claimable_htlc);
+ #[allow(unused_assignments)] {
+ committed_to_claimable = true;
+ }
}
payment_claimable_generated
}}
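// Worked example of the MPP-completion arithmetic above: for total_msat =
// 3_000_000 with a 1_000_000 msat part already pending, a new 2_000_000 msat
// part brings total_value to 3_000_000 >= total_msat and the payment becomes
// claimable; a further 1_000_000 msat part would instead satisfy
// `total_value - sender_intended_value >= total_msat` and be failed back, as
// the payment was already claimable without it.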
Err(()) => {
log_trace!(self.logger, "Failing new HTLC with payment_hash {} as payment verification failed", log_bytes!(payment_hash.0));
fail_htlc!(claimable_htlc, payment_hash);
- continue
}
};
if let Some(min_final_cltv_expiry_delta) = min_final_cltv_expiry_delta {
log_trace!(self.logger, "Failing new HTLC with payment_hash {} as its CLTV expiry was too soon (had {}, earliest expected {})",
log_bytes!(payment_hash.0), cltv_expiry, expected_min_expiry_height);
fail_htlc!(claimable_htlc, payment_hash);
- continue;
}
}
- check_total_value!(payment_data, payment_preimage);
+ let purpose = events::PaymentPurpose::InvoicePayment {
+ payment_preimage: payment_preimage.clone(),
+ payment_secret: payment_data.payment_secret,
+ };
+ check_total_value!(purpose);
},
OnionPayload::Spontaneous(preimage) => {
- let mut claimable_payments = self.claimable_payments.lock().unwrap();
- if claimable_payments.pending_claiming_payments.contains_key(&payment_hash) {
- fail_htlc!(claimable_htlc, payment_hash);
- continue
- }
- match claimable_payments.claimable_htlcs.entry(payment_hash) {
- hash_map::Entry::Vacant(e) => {
- let purpose = events::PaymentPurpose::SpontaneousPayment(preimage);
- e.insert((purpose.clone(), vec![claimable_htlc]));
- let prev_channel_id = prev_funding_outpoint.to_channel_id();
- new_events.push(events::Event::PaymentClaimable {
- receiver_node_id: Some(receiver_node_id),
- payment_hash,
- amount_msat: outgoing_amt_msat,
- purpose,
- via_channel_id: Some(prev_channel_id),
- via_user_channel_id: Some(prev_user_channel_id),
- });
- },
- hash_map::Entry::Occupied(_) => {
- log_trace!(self.logger, "Failing new keysend HTLC with payment_hash {} for a duplicative payment hash", log_bytes!(payment_hash.0));
- fail_htlc!(claimable_htlc, payment_hash);
- }
- }
+ let purpose = events::PaymentPurpose::SpontaneousPayment(preimage);
+ check_total_value!(purpose);
}
}
},
hash_map::Entry::Occupied(inbound_payment) => {
- if payment_data.is_none() {
+ if let OnionPayload::Spontaneous(_) = claimable_htlc.onion_payload {
log_trace!(self.logger, "Failing new keysend HTLC with payment_hash {} because we already have an inbound payment with the same payment hash", log_bytes!(payment_hash.0));
fail_htlc!(claimable_htlc, payment_hash);
- continue
- };
+ }
let payment_data = payment_data.unwrap();
if inbound_payment.get().payment_secret != payment_data.payment_secret {
log_trace!(self.logger, "Failing new HTLC with payment_hash {} as it didn't match our expected payment secret.", log_bytes!(payment_hash.0));
log_bytes!(payment_hash.0), payment_data.total_msat, inbound_payment.get().min_value_msat.unwrap());
fail_htlc!(claimable_htlc, payment_hash);
} else {
- let payment_claimable_generated = check_total_value!(payment_data, inbound_payment.get().payment_preimage);
+ let purpose = events::PaymentPurpose::InvoicePayment {
+ payment_preimage: inbound_payment.get().payment_preimage,
+ payment_secret: payment_data.payment_secret,
+ };
+ let payment_claimable_generated = check_total_value!(purpose);
if payment_claimable_generated {
inbound_payment.remove_entry();
}
self.pending_outbound_payments.check_retry_payments(&self.router, || self.list_usable_channels(),
|| self.compute_inflight_htlcs(), &self.entropy_source, &self.node_signer, best_block_height,
&self.pending_events, &self.logger,
- |path, payment_params, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv|
- self.send_payment_along_path(path, payment_params, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv));
+ |path, payment_hash, recipient_onion, total_value, cur_height, payment_id, keysend_preimage, session_priv|
+ self.send_payment_along_path(path, payment_hash, recipient_onion, total_value, cur_height, payment_id, keysend_preimage, session_priv));
for (htlc_source, payment_hash, failure_reason, destination) in failed_forwards.drain(..) {
self.fail_htlc_backwards_internal(&htlc_source, &payment_hash, &failure_reason, destination);
events.append(&mut new_events);
}
- /// Free the background events, generally called from timer_tick_occurred.
- ///
- /// Exposed for testing to allow us to process events quickly without generating accidental
- /// BroadcastChannelUpdate events in timer_tick_occurred.
+ /// Free the background events, generally called from [`PersistenceNotifierGuard`] constructors.
///
/// Expects the caller to have a total_consistency_lock read lock.
- fn process_background_events(&self) -> bool {
+ fn process_background_events(&self) -> NotifyOption {
+ debug_assert_ne!(self.total_consistency_lock.held_by_thread(), LockHeldState::NotHeldByThread);
+
+ self.background_events_processed_since_startup.store(true, Ordering::Release);
+
let mut background_events = Vec::new();
mem::swap(&mut *self.pending_background_events.lock().unwrap(), &mut background_events);
if background_events.is_empty() {
- return false;
+ return NotifyOption::SkipPersist;
}
for event in background_events.drain(..) {
match event {
- BackgroundEvent::ClosingMonitorUpdate((funding_txo, update)) => {
+ BackgroundEvent::ClosedMonitorUpdateRegeneratedOnStartup((funding_txo, update)) => {
// The channel has already been closed, so no use bothering to care about the
// monitor updating completing.
let _ = self.chain_monitor.update_channel(funding_txo, &update);
},
+ BackgroundEvent::MonitorUpdateRegeneratedOnStartup { counterparty_node_id, funding_txo, update } => {
+ let mut updated_chan = false;
+ let res = {
+ let per_peer_state = self.per_peer_state.read().unwrap();
+ if let Some(peer_state_mutex) = per_peer_state.get(&counterparty_node_id) {
+ let mut peer_state_lock = peer_state_mutex.lock().unwrap();
+ let peer_state = &mut *peer_state_lock;
+ match peer_state.channel_by_id.entry(funding_txo.to_channel_id()) {
+ hash_map::Entry::Occupied(mut chan) => {
+ updated_chan = true;
+ handle_new_monitor_update!(self, funding_txo, update.clone(),
+ peer_state_lock, peer_state, per_peer_state, chan).map(|_| ())
+ },
+ hash_map::Entry::Vacant(_) => Ok(()),
+ }
+ } else { Ok(()) }
+ };
+ if !updated_chan {
+ // TODO: Track this as in-flight even though the channel is closed.
+ let _ = self.chain_monitor.update_channel(funding_txo, &update);
+ }
+ // TODO: If this channel has since closed, we're likely providing a payment
+ // preimage update, which we must ensure is durable! We currently don't,
+ // however, ensure that.
+ if res.is_err() {
+ log_error!(self.logger,
+ "Failed to provide ChannelMonitorUpdate to closed channel! This likely lost us a payment preimage!");
+ }
+ let _ = handle_error!(self, res, counterparty_node_id);
+ },
}
}
- true
+ NotifyOption::DoPersist
}
#[cfg(any(test, feature = "_test_utils"))]
/// Process background events, for functional testing
pub fn test_process_background_events(&self) {
- self.process_background_events();
+ let _lck = self.total_consistency_lock.read().unwrap();
+ let _ = self.process_background_events();
}
fn update_channel_fee(&self, chan_id: &[u8; 32], chan: &mut Channel<<SP::Target as SignerProvider>::Signer>, new_feerate: u32) -> NotifyOption {
- if !chan.is_outbound() { return NotifyOption::SkipPersist; }
+ if !chan.context.is_outbound() { return NotifyOption::SkipPersist; }
// If the feerate has decreased by less than half, don't bother
- if new_feerate <= chan.get_feerate() && new_feerate * 2 > chan.get_feerate() {
+ if new_feerate <= chan.context.get_feerate_sat_per_1000_weight() && new_feerate * 2 > chan.context.get_feerate_sat_per_1000_weight() {
log_trace!(self.logger, "Channel {} does not qualify for a feerate change from {} to {}.",
- log_bytes!(chan_id[..]), chan.get_feerate(), new_feerate);
+ log_bytes!(chan_id[..]), chan.context.get_feerate_sat_per_1000_weight(), new_feerate);
return NotifyOption::SkipPersist;
}
- if !chan.is_live() {
+ if !chan.context.is_live() {
log_trace!(self.logger, "Channel {} does not qualify for a feerate change from {} to {} as it cannot currently be updated (probably the peer is disconnected).",
- log_bytes!(chan_id[..]), chan.get_feerate(), new_feerate);
+ log_bytes!(chan_id[..]), chan.context.get_feerate_sat_per_1000_weight(), new_feerate);
return NotifyOption::SkipPersist;
}
log_trace!(self.logger, "Channel {} qualifies for a feerate change from {} to {}.",
- log_bytes!(chan_id[..]), chan.get_feerate(), new_feerate);
+ log_bytes!(chan_id[..]), chan.context.get_feerate_sat_per_1000_weight(), new_feerate);
- chan.queue_update_fee(new_feerate, &self.logger);
+ chan.queue_update_fee(new_feerate, &self.fee_estimator, &self.logger);
NotifyOption::DoPersist
}
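// Worked example of the half-feerate rule above: dropping from 2_000 sat/kW to
// 1_500 keeps 1_500 * 2 > 2_000, so no update is queued; dropping to 900 fails
// that check and an `update_fee` is queued (as it is for any feerate increase).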
/// it wants to detect). Thus, we have a variant exposed here for its benefit.
pub fn maybe_update_chan_fees(&self) {
PersistenceNotifierGuard::optionally_notify(&self.total_consistency_lock, &self.persistence_notifier, || {
- let mut should_persist = NotifyOption::SkipPersist;
+ let mut should_persist = self.process_background_events();
let new_feerate = self.fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::Normal);
///
/// This currently includes:
/// * Increasing or decreasing the on-chain feerate estimates for our outbound channels,
- /// * Broadcasting `ChannelUpdate` messages if we've been disconnected from our peer for more
+ /// * Broadcasting [`ChannelUpdate`] messages if we've been disconnected from our peer for more
/// than a minute, informing the network that they should no longer attempt to route over
/// the channel.
- /// * Expiring a channel's previous `ChannelConfig` if necessary to only allow forwarding HTLCs
- /// with the current `ChannelConfig`.
+ /// * Expiring a channel's previous [`ChannelConfig`] if necessary to only allow forwarding HTLCs
+ /// with the current [`ChannelConfig`].
/// * Removing peers which have disconnected and no longer have any channels.
///
- /// Note that this may cause reentrancy through `chain::Watch::update_channel` calls or feerate
+ /// Note that this may cause reentrancy through [`chain::Watch::update_channel`] calls or feerate
/// estimate fetches.
+ ///
+ /// [`ChannelUpdate`]: msgs::ChannelUpdate
+ /// [`ChannelConfig`]: crate::util::config::ChannelConfig
pub fn timer_tick_occurred(&self) {
PersistenceNotifierGuard::optionally_notify(&self.total_consistency_lock, &self.persistence_notifier, || {
- let mut should_persist = NotifyOption::SkipPersist;
- if self.process_background_events() { should_persist = NotifyOption::DoPersist; }
+ let mut should_persist = self.process_background_events();
let new_feerate = self.fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::Normal);
}
match chan.channel_update_status() {
- ChannelUpdateStatus::Enabled if !chan.is_live() => chan.set_channel_update_status(ChannelUpdateStatus::DisabledStaged),
- ChannelUpdateStatus::Disabled if chan.is_live() => chan.set_channel_update_status(ChannelUpdateStatus::EnabledStaged),
- ChannelUpdateStatus::DisabledStaged if chan.is_live() => chan.set_channel_update_status(ChannelUpdateStatus::Enabled),
- ChannelUpdateStatus::EnabledStaged if !chan.is_live() => chan.set_channel_update_status(ChannelUpdateStatus::Disabled),
- ChannelUpdateStatus::DisabledStaged if !chan.is_live() => {
- if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
- pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
- msg: update
- });
+ ChannelUpdateStatus::Enabled if !chan.context.is_live() => chan.set_channel_update_status(ChannelUpdateStatus::DisabledStaged(0)),
+ ChannelUpdateStatus::Disabled if chan.context.is_live() => chan.set_channel_update_status(ChannelUpdateStatus::EnabledStaged(0)),
+ ChannelUpdateStatus::DisabledStaged(_) if chan.context.is_live()
+ => chan.set_channel_update_status(ChannelUpdateStatus::Enabled),
+ ChannelUpdateStatus::EnabledStaged(_) if !chan.context.is_live()
+ => chan.set_channel_update_status(ChannelUpdateStatus::Disabled),
+ ChannelUpdateStatus::DisabledStaged(mut n) if !chan.context.is_live() => {
+ n += 1;
+ if n >= DISABLE_GOSSIP_TICKS {
+ chan.set_channel_update_status(ChannelUpdateStatus::Disabled);
+ if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
+ pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
+ msg: update
+ });
+ }
+ should_persist = NotifyOption::DoPersist;
+ } else {
+ chan.set_channel_update_status(ChannelUpdateStatus::DisabledStaged(n));
}
- should_persist = NotifyOption::DoPersist;
- chan.set_channel_update_status(ChannelUpdateStatus::Disabled);
},
- ChannelUpdateStatus::EnabledStaged if chan.is_live() => {
- if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
- pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
- msg: update
- });
+ ChannelUpdateStatus::EnabledStaged(mut n) if chan.context.is_live() => {
+ n += 1;
+ if n >= ENABLE_GOSSIP_TICKS {
+ chan.set_channel_update_status(ChannelUpdateStatus::Enabled);
+ if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
+ pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
+ msg: update
+ });
+ }
+ should_persist = NotifyOption::DoPersist;
+ } else {
+ chan.set_channel_update_status(ChannelUpdateStatus::EnabledStaged(n));
}
- should_persist = NotifyOption::DoPersist;
- chan.set_channel_update_status(ChannelUpdateStatus::Enabled);
},
_ => {},
}
- chan.maybe_expire_prev_config();
+ chan.context.maybe_expire_prev_config();
+
+ if chan.should_disconnect_peer_awaiting_response() {
+ log_debug!(self.logger, "Disconnecting peer {} due to not making any progress on channel {}",
+ counterparty_node_id, log_bytes!(*chan_id));
+ pending_msg_events.push(MessageSendEvent::HandleError {
+ node_id: counterparty_node_id,
+ action: msgs::ErrorAction::DisconnectPeerWithWarning {
+ msg: msgs::WarningMessage {
+ channel_id: *chan_id,
+ data: "Disconnecting due to timeout awaiting response".to_owned(),
+ },
+ },
+ });
+ }
true
});
}
}
- self.claimable_payments.lock().unwrap().claimable_htlcs.retain(|payment_hash, (_, htlcs)| {
- if htlcs.is_empty() {
+ self.claimable_payments.lock().unwrap().claimable_payments.retain(|payment_hash, payment| {
+ if payment.htlcs.is_empty() {
// This should be unreachable
debug_assert!(false);
return false;
}
- if let OnionPayload::Invoice { .. } = htlcs[0].onion_payload {
+ if let OnionPayload::Invoice { .. } = payment.htlcs[0].onion_payload {
// Check if we've received all the parts we need for an MPP (the value of the parts adds to total_msat).
// In this case we're not going to handle any timeouts of the parts here.
- if htlcs[0].total_msat == htlcs.iter().fold(0, |total, htlc| total + htlc.value) {
+ // The condition determining whether the MPP is complete here must exactly match
+ // the condition used in `process_pending_htlc_forwards`.
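+ // E.g. three parts each carrying a `sender_intended_value` of 10_000 msat complete an
+ // MPP whose `total_msat` is 30_000 msat (illustrative numbers only).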
+ if payment.htlcs[0].total_msat <= payment.htlcs.iter()
+ .fold(0, |total, htlc| total + htlc.sender_intended_value)
+ {
return true;
- } else if htlcs.into_iter().any(|htlc| {
+ } else if payment.htlcs.iter_mut().any(|htlc| {
htlc.timer_ticks += 1;
return htlc.timer_ticks >= MPP_TIMEOUT_TICKS
}) {
- timed_out_mpp_htlcs.extend(htlcs.drain(..).map(|htlc: ClaimableHTLC| (htlc.prev_hop, *payment_hash)));
+ timed_out_mpp_htlcs.extend(payment.htlcs.drain(..)
+ .map(|htlc: ClaimableHTLC| (htlc.prev_hop, *payment_hash)));
return false;
}
}
/// [`events::Event::PaymentClaimed`] events even for payments you intend to fail, especially on
/// startup during which time claims that were in-progress at shutdown may be replayed.
pub fn fail_htlc_backwards(&self, payment_hash: &PaymentHash) {
- self.fail_htlc_backwards_with_reason(payment_hash, &FailureCode::IncorrectOrUnknownPaymentDetails);
+ self.fail_htlc_backwards_with_reason(payment_hash, FailureCode::IncorrectOrUnknownPaymentDetails);
}
/// This is a variant of [`ChannelManager::fail_htlc_backwards`] that allows you to specify the
/// reason for the failure.
///
/// See [`FailureCode`] for valid failure codes.
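+ ///
+ /// A hedged usage sketch (the `channel_manager` binding and the `payment_hash` of a
+ /// payment you've decided to reject are assumptions):
+ ///
+ /// ```ignore
+ /// channel_manager.fail_htlc_backwards_with_reason(
+ ///     &payment_hash, FailureCode::IncorrectOrUnknownPaymentDetails);
+ /// ```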
- pub fn fail_htlc_backwards_with_reason(&self, payment_hash: &PaymentHash, failure_code: &FailureCode) {
- let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
+ pub fn fail_htlc_backwards_with_reason(&self, payment_hash: &PaymentHash, failure_code: FailureCode) {
+ let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
- let removed_source = self.claimable_payments.lock().unwrap().claimable_htlcs.remove(payment_hash);
- if let Some((_, mut sources)) = removed_source {
- for htlc in sources.drain(..) {
+ let removed_source = self.claimable_payments.lock().unwrap().claimable_payments.remove(payment_hash);
+ if let Some(payment) = removed_source {
+ for htlc in payment.htlcs {
let reason = self.get_htlc_fail_reason_from_failure_code(failure_code, &htlc);
let source = HTLCSource::PreviousHopData(htlc.prev_hop);
let receiver = HTLCDestination::FailedPayment { payment_hash: *payment_hash };
}
/// Gets error data to form an [`HTLCFailReason`] given a [`FailureCode`] and [`ClaimableHTLC`].
- fn get_htlc_fail_reason_from_failure_code(&self, failure_code: &FailureCode, htlc: &ClaimableHTLC) -> HTLCFailReason {
+ fn get_htlc_fail_reason_from_failure_code(&self, failure_code: FailureCode, htlc: &ClaimableHTLC) -> HTLCFailReason {
match failure_code {
- FailureCode::TemporaryNodeFailure => HTLCFailReason::from_failure_code(*failure_code as u16),
- FailureCode::RequiredNodeFeatureMissing => HTLCFailReason::from_failure_code(*failure_code as u16),
+ FailureCode::TemporaryNodeFailure => HTLCFailReason::from_failure_code(failure_code as u16),
+ FailureCode::RequiredNodeFeatureMissing => HTLCFailReason::from_failure_code(failure_code as u16),
FailureCode::IncorrectOrUnknownPaymentDetails => {
let mut htlc_msat_height_data = htlc.value.to_be_bytes().to_vec();
htlc_msat_height_data.extend_from_slice(&self.best_block.read().unwrap().height().to_be_bytes());
- HTLCFailReason::reason(*failure_code as u16, htlc_msat_height_data)
+ HTLCFailReason::reason(failure_code as u16, htlc_msat_height_data)
}
}
}
// guess somewhat. If it's a public channel, we figure best to just use the real SCID (as
// we're not leaking that we have a channel with the counterparty), otherwise we try to use
// an inbound SCID alias before the real SCID.
- let scid_pref = if chan.should_announce() {
- chan.get_short_channel_id().or(chan.latest_inbound_scid_alias())
+ let scid_pref = if chan.context.should_announce() {
+ chan.context.get_short_channel_id().or(chan.context.latest_inbound_scid_alias())
} else {
- chan.latest_inbound_scid_alias().or(chan.get_short_channel_id())
+ chan.context.latest_inbound_scid_alias().or(chan.context.get_short_channel_id())
};
if let Some(scid) = scid_pref {
self.get_htlc_temp_fail_err_and_data(desired_err_code, scid, chan)
// from block_connected which may run during initialization prior to the chain_monitor
// being fully configured. See the docs for `ChannelManagerReadArgs` for more.
match source {
- HTLCSource::OutboundRoute { ref path, ref session_priv, ref payment_id, ref payment_params, .. } => {
+ HTLCSource::OutboundRoute { ref path, ref session_priv, ref payment_id, .. } => {
if self.pending_outbound_payments.fail_htlc(source, payment_hash, onion_error, path,
- session_priv, payment_id, payment_params, self.probing_cookie_secret, &self.secp_ctx,
+ session_priv, payment_id, self.probing_cookie_secret, &self.secp_ctx,
&self.pending_events, &self.logger)
{ self.push_pending_forwards_ev(); }
},
mem::drop(forward_htlcs);
if push_forward_ev { self.push_pending_forwards_ev(); }
let mut pending_events = self.pending_events.lock().unwrap();
- pending_events.push(events::Event::HTLCHandlingFailed {
+ pending_events.push_back((events::Event::HTLCHandlingFailed {
prev_channel_id: outpoint.to_channel_id(),
failed_next_destination: destination,
- });
+ }, None));
},
}
}
/// Provides a payment preimage in response to [`Event::PaymentClaimable`], generating any
/// [`MessageSendEvent`]s needed to claim the payment.
///
- /// Note that calling this method does *not* guarantee that the payment has been claimed. You
- /// *must* wait for an [`Event::PaymentClaimed`] event which upon a successful claim will be
- /// provided to your [`EventHandler`] when [`process_pending_events`] is next called.
+ /// This method is guaranteed to claim the payment, but only if the current height is
+ /// strictly below [`Event::PaymentClaimable::claim_deadline`]. To avoid race conditions,
+ /// you should wait for an [`Event::PaymentClaimed`] event before considering the payment
+ /// successful; it will generally be available in the next [`process_pending_events`] call.
///
/// Note that if you did not set an `amount_msat` when calling [`create_inbound_payment`] or
/// [`create_inbound_payment_for_hash`] you must check that the amount in the `PaymentClaimable`
/// event matches your expectation. If you fail to do so and call this method, you may provide
/// the sender "proof-of-payment" when they did not fulfill the full expected payment.
///
- /// [`Event::PaymentClaimable`]: crate::util::events::Event::PaymentClaimable
- /// [`Event::PaymentClaimed`]: crate::util::events::Event::PaymentClaimed
+ /// [`Event::PaymentClaimable`]: crate::events::Event::PaymentClaimable
+ /// [`Event::PaymentClaimable::claim_deadline`]: crate::events::Event::PaymentClaimable::claim_deadline
+ /// [`Event::PaymentClaimed`]: crate::events::Event::PaymentClaimed
/// [`process_pending_events`]: EventsProvider::process_pending_events
/// [`create_inbound_payment`]: Self::create_inbound_payment
/// [`create_inbound_payment_for_hash`]: Self::create_inbound_payment_for_hash
pub fn claim_funds(&self, payment_preimage: PaymentPreimage) {
let payment_hash = PaymentHash(Sha256::hash(&payment_preimage.0).into_inner());
- let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
+ let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
let mut sources = {
let mut claimable_payments = self.claimable_payments.lock().unwrap();
- if let Some((payment_purpose, sources)) = claimable_payments.claimable_htlcs.remove(&payment_hash) {
+ if let Some(payment) = claimable_payments.claimable_payments.remove(&payment_hash) {
let mut receiver_node_id = self.our_network_pubkey;
- for htlc in sources.iter() {
+ for htlc in payment.htlcs.iter() {
if htlc.prev_hop.phantom_shared_secret.is_some() {
let phantom_pubkey = self.node_signer.get_node_id(Recipient::PhantomNode)
.expect("Failed to get node_id for phantom node recipient");
}
let dup_purpose = claimable_payments.pending_claiming_payments.insert(payment_hash,
- ClaimingPayment { amount_msat: sources.iter().map(|source| source.value).sum(),
- payment_purpose, receiver_node_id,
+ ClaimingPayment { amount_msat: payment.htlcs.iter().map(|source| source.value).sum(),
+ payment_purpose: payment.purpose, receiver_node_id,
});
if dup_purpose.is_some() {
debug_assert!(false, "Shouldn't get a duplicate pending claim event ever");
log_error!(self.logger, "Got a duplicate pending claimable event on payment hash {}! Please report this bug",
log_bytes!(payment_hash.0));
}
- sources
+ payment.htlcs
} else { return; }
};
debug_assert!(!sources.is_empty());
- // If we are claiming an MPP payment, we check that all channels which contain a claimable
- // HTLC still exist. While this isn't guaranteed to remain true if a channel closes while
- // we're claiming (or even after we claim, before the commitment update dance completes),
- // it should be a relatively rare race, and we'd rather not claim HTLCs that require us to
- // go on-chain (and lose the on-chain fee to do so) than just reject the payment.
- //
- // Note that we'll still always get our funds - as long as the generated
- // `ChannelMonitorUpdate` makes it out to the relevant monitor we can claim on-chain.
- //
- // If we find an HTLC which we would need to claim but for which we do not have a
- // channel, we will fail all parts of the MPP payment. While we could wait and see if
- // the sender retries the already-failed path(s), it should be a pretty rare case where
- // we got all the HTLCs and then a channel closed while we were waiting for the user to
- // provide the preimage, so worrying too much about the optimal handling isn't worth
- // it.
+ // Just in case one HTLC has been failed between when we generated the `PaymentClaimable`
+ // event and when we got here, we need to check that the amount we're about to claim
+ // matches the amount we told the user in the last `PaymentClaimable`. We also sanity-check
+ // that the MPP parts all have the same `total_msat`.
let mut claimable_amt_msat = 0;
+ let mut prev_total_msat = None;
let mut expected_amt_msat = None;
let mut valid_mpp = true;
let mut errs = Vec::new();
let per_peer_state = self.per_peer_state.read().unwrap();
for htlc in sources.iter() {
- let (counterparty_node_id, chan_id) = match self.short_to_chan_info.read().unwrap().get(&htlc.prev_hop.short_channel_id) {
- Some((cp_id, chan_id)) => (cp_id.clone(), chan_id.clone()),
- None => {
- valid_mpp = false;
- break;
- }
- };
-
- let peer_state_mutex_opt = per_peer_state.get(&counterparty_node_id);
- if peer_state_mutex_opt.is_none() {
- valid_mpp = false;
- break;
- }
-
- let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
- let peer_state = &mut *peer_state_lock;
-
- if peer_state.channel_by_id.get(&chan_id).is_none() {
+ if prev_total_msat.is_some() && prev_total_msat != Some(htlc.total_msat) {
+ log_error!(self.logger, "Somehow ended up with an MPP payment with different expected total amounts - this should not be reachable!");
+ debug_assert!(false);
valid_mpp = false;
break;
}
+ prev_total_msat = Some(htlc.total_msat);
- if expected_amt_msat.is_some() && expected_amt_msat != Some(htlc.total_msat) {
- log_error!(self.logger, "Somehow ended up with an MPP payment with different total amounts - this should not be reachable!");
+ if expected_amt_msat.is_some() && expected_amt_msat != htlc.total_value_received {
+ log_error!(self.logger, "Somehow ended up with an MPP payment with different received total amounts - this should not be reachable!");
debug_assert!(false);
valid_mpp = false;
break;
}
-
- expected_amt_msat = Some(htlc.total_msat);
- if let OnionPayload::Spontaneous(_) = &htlc.onion_payload {
- // We don't currently support MPP for spontaneous payments, so just check
- // that there's one payment here and move on.
- if sources.len() != 1 {
- log_error!(self.logger, "Somehow ended up with an MPP spontaneous payment - this should not be reachable!");
- debug_assert!(false);
- valid_mpp = false;
- break;
- }
- }
-
+ expected_amt_msat = htlc.total_value_received;
claimable_amt_msat += htlc.value;
}
mem::drop(per_peer_state);
-> Result<(), (PublicKey, MsgHandleErrInternal)> {
//TODO: Delay the claimed_funds relaying just like we do outbound relay!
- let per_peer_state = self.per_peer_state.read().unwrap();
- let chan_id = prev_hop.outpoint.to_channel_id();
- let counterparty_node_id_opt = match self.short_to_chan_info.read().unwrap().get(&prev_hop.short_channel_id) {
- Some((cp_id, _dup_chan_id)) => Some(cp_id.clone()),
- None => None
- };
+ // If we haven't yet run background events, assume we're still deserializing and shouldn't
+ // actually pass `ChannelMonitorUpdate`s to users yet. Instead, queue them up as
+ // `BackgroundEvent`s.
+ let during_init = !self.background_events_processed_since_startup.load(Ordering::Acquire);
- let mut peer_state_opt = counterparty_node_id_opt.as_ref().map(
- |counterparty_node_id| per_peer_state.get(counterparty_node_id).map(
- |peer_mutex| peer_mutex.lock().unwrap()
- )
- ).unwrap_or(None);
+ {
+ let per_peer_state = self.per_peer_state.read().unwrap();
+ let chan_id = prev_hop.outpoint.to_channel_id();
+ let counterparty_node_id_opt = match self.short_to_chan_info.read().unwrap().get(&prev_hop.short_channel_id) {
+ Some((cp_id, _dup_chan_id)) => Some(cp_id.clone()),
+ None => None
+ };
- if peer_state_opt.is_some() {
- let mut peer_state_lock = peer_state_opt.unwrap();
- let peer_state = &mut *peer_state_lock;
- if let hash_map::Entry::Occupied(mut chan) = peer_state.channel_by_id.entry(chan_id) {
- let counterparty_node_id = chan.get().get_counterparty_node_id();
- let fulfill_res = chan.get_mut().get_update_fulfill_htlc_and_commit(prev_hop.htlc_id, payment_preimage, &self.logger);
-
- if let UpdateFulfillCommitFetch::NewClaim { htlc_value_msat, monitor_update } = fulfill_res {
- if let Some(action) = completion_action(Some(htlc_value_msat)) {
- log_trace!(self.logger, "Tracking monitor update completion action for channel {}: {:?}",
- log_bytes!(chan_id), action);
- peer_state.monitor_update_blocked_actions.entry(chan_id).or_insert(Vec::new()).push(action);
- }
- let update_id = monitor_update.update_id;
- let update_res = self.chain_monitor.update_channel(prev_hop.outpoint, monitor_update);
- let res = handle_new_monitor_update!(self, update_res, update_id, peer_state_lock,
- peer_state, per_peer_state, chan);
- if let Err(e) = res {
- // TODO: This is a *critical* error - we probably updated the outbound edge
- // of the HTLC's monitor with a preimage. We should retry this monitor
- // update over and over again until morale improves.
- log_error!(self.logger, "Failed to update channel monitor with preimage {:?}", payment_preimage);
- return Err((counterparty_node_id, e));
+ let peer_state_opt = counterparty_node_id_opt.as_ref().map(
+ |counterparty_node_id| per_peer_state.get(counterparty_node_id)
+ .map(|peer_mutex| peer_mutex.lock().unwrap())
+ ).unwrap_or(None);
+
+ if peer_state_opt.is_some() {
+ let mut peer_state_lock = peer_state_opt.unwrap();
+ let peer_state = &mut *peer_state_lock;
+ if let hash_map::Entry::Occupied(mut chan) = peer_state.channel_by_id.entry(chan_id) {
+ let counterparty_node_id = chan.get().context.get_counterparty_node_id();
+ let fulfill_res = chan.get_mut().get_update_fulfill_htlc_and_commit(prev_hop.htlc_id, payment_preimage, &self.logger);
+
+ if let UpdateFulfillCommitFetch::NewClaim { htlc_value_msat, monitor_update } = fulfill_res {
+ if let Some(action) = completion_action(Some(htlc_value_msat)) {
+ log_trace!(self.logger, "Tracking monitor update completion action for channel {}: {:?}",
+ log_bytes!(chan_id), action);
+ peer_state.monitor_update_blocked_actions.entry(chan_id).or_insert(Vec::new()).push(action);
+ }
+ if !during_init {
+ let res = handle_new_monitor_update!(self, prev_hop.outpoint, monitor_update, peer_state_lock,
+ peer_state, per_peer_state, chan);
+ if let Err(e) = res {
+ // TODO: This is a *critical* error - we probably updated the outbound edge
+ // of the HTLC's monitor with a preimage. We should retry this monitor
+ // update over and over again until morale improves.
+ log_error!(self.logger, "Failed to update channel monitor with preimage {:?}", payment_preimage);
+ return Err((counterparty_node_id, e));
+ }
+ } else {
+ // If we're running during init we cannot update monitors directly -
+ // they probably haven't actually been loaded yet. Instead, push the
+ // monitor update as a background event.
+ self.pending_background_events.lock().unwrap().push(
+ BackgroundEvent::MonitorUpdateRegeneratedOnStartup {
+ counterparty_node_id,
+ funding_txo: prev_hop.outpoint,
+ update: monitor_update.clone(),
+ });
+ }
}
+ return Ok(());
}
- return Ok(());
}
}
let preimage_update = ChannelMonitorUpdate {
payment_preimage,
}],
};
- // We update the ChannelMonitor on the backward link, after
- // receiving an `update_fulfill_htlc` from the forward link.
- let update_res = self.chain_monitor.update_channel(prev_hop.outpoint, &preimage_update);
- if update_res != ChannelMonitorUpdateStatus::Completed {
- // TODO: This needs to be handled somehow - if we receive a monitor update
- // with a preimage we *must* somehow manage to propagate it to the upstream
- // channel, or we must have an ability to receive the same event and try
- // again on restart.
- log_error!(self.logger, "Critical error: failed to update channel monitor with preimage {:?}: {:?}",
- payment_preimage, update_res);
+
+ if !during_init {
+ // We update the ChannelMonitor on the backward link, after
+ // receiving an `update_fulfill_htlc` from the forward link.
+ let update_res = self.chain_monitor.update_channel(prev_hop.outpoint, &preimage_update);
+ if update_res != ChannelMonitorUpdateStatus::Completed {
+ // TODO: This needs to be handled somehow - if we receive a monitor update
+ // with a preimage we *must* somehow manage to propagate it to the upstream
+ // channel, or we must have an ability to receive the same event and try
+ // again on restart.
+ log_error!(self.logger, "Critical error: failed to update channel monitor with preimage {:?}: {:?}",
+ payment_preimage, update_res);
+ }
+ } else {
+ // If we're running during init we cannot update monitors directly - they probably
+ // haven't actually been loaded yet. Instead, push the monitor update as a background
+ // event.
+ // Note that while it's safe to use `ClosedMonitorUpdateRegeneratedOnStartup` here (the
+ // channel is already closed), we ultimately need to handle the monitor update
+ // completion action only after we've completed the monitor update. This is the only
+ // way to guarantee this update *will* be regenerated on startup (otherwise if this was
+ // from a forwarded HTLC the downstream preimage may be deleted before we claim
+ // upstream). Thus, we need to transition to some new `BackgroundEvent` type which will
+ // complete the monitor update completion action from `completion_action`.
+ self.pending_background_events.lock().unwrap().push(
+ BackgroundEvent::ClosedMonitorUpdateRegeneratedOnStartup((
+ prev_hop.outpoint, preimage_update,
+ )));
}
// Note that we do process the completion action here. This totally could be a
// duplicate claim, but we have no way of knowing without interrogating the
fn claim_funds_internal(&self, source: HTLCSource, payment_preimage: PaymentPreimage, forwarded_htlc_value_msat: Option<u64>, from_onchain: bool, next_channel_id: [u8; 32]) {
match source {
HTLCSource::OutboundRoute { session_priv, payment_id, path, .. } => {
+ debug_assert!(self.background_events_processed_since_startup.load(Ordering::Acquire),
+ "We don't support claim_htlc claims during startup - monitors may not be available yet");
self.pending_outbound_payments.claim_htlc(payment_id, payment_preimage, session_priv, path, from_onchain, &self.pending_events, &self.logger);
},
HTLCSource::PreviousHopData(hop_data) => {
Some(claimed_htlc_value - forwarded_htlc_value)
} else { None };
- let prev_channel_id = Some(prev_outpoint.to_channel_id());
- let next_channel_id = Some(next_channel_id);
-
- Some(MonitorUpdateCompletionAction::EmitEvent { event: events::Event::PaymentForwarded {
- fee_earned_msat,
- claim_from_onchain_tx: from_onchain,
- prev_channel_id,
- next_channel_id,
- }})
+ Some(MonitorUpdateCompletionAction::EmitEventAndFreeOtherChannel {
+ event: events::Event::PaymentForwarded {
+ fee_earned_msat,
+ claim_from_onchain_tx: from_onchain,
+ prev_channel_id: Some(prev_outpoint.to_channel_id()),
+ next_channel_id: Some(next_channel_id),
+ outbound_amount_forwarded_msat: forwarded_htlc_value_msat,
+ },
+ downstream_counterparty_and_funding_outpoint: None,
+ })
} else { None }
});
if let Err((pk, err)) = res {
MonitorUpdateCompletionAction::PaymentClaimed { payment_hash } => {
let payment = self.claimable_payments.lock().unwrap().pending_claiming_payments.remove(&payment_hash);
if let Some(ClaimingPayment { amount_msat, payment_purpose: purpose, receiver_node_id }) = payment {
- self.pending_events.lock().unwrap().push(events::Event::PaymentClaimed {
+ self.pending_events.lock().unwrap().push_back((events::Event::PaymentClaimed {
payment_hash, purpose, amount_msat, receiver_node_id: Some(receiver_node_id),
- });
+ }, None));
}
},
- MonitorUpdateCompletionAction::EmitEvent { event } => {
- self.pending_events.lock().unwrap().push(event);
+ MonitorUpdateCompletionAction::EmitEventAndFreeOtherChannel {
+ event, downstream_counterparty_and_funding_outpoint
+ } => {
+ self.pending_events.lock().unwrap().push_back((event, None));
+ if let Some((node_id, funding_outpoint, blocker)) = downstream_counterparty_and_funding_outpoint {
+ self.handle_monitor_update_release(node_id, funding_outpoint, Some(blocker));
+ }
},
}
}
channel_ready: Option<msgs::ChannelReady>, announcement_sigs: Option<msgs::AnnouncementSignatures>)
-> Option<(u64, OutPoint, u128, Vec<(PendingHTLCInfo, u64)>)> {
log_trace!(self.logger, "Handling channel resumption for channel {} with {} RAA, {} commitment update, {} pending forwards, {}broadcasting funding, {} channel ready, {} announcement",
- log_bytes!(channel.channel_id()),
+ log_bytes!(channel.context.channel_id()),
if raa.is_some() { "an" } else { "no" },
if commitment_update.is_some() { "a" } else { "no" }, pending_forwards.len(),
if funding_broadcastable.is_some() { "" } else { "not " },
let mut htlc_forwards = None;
- let counterparty_node_id = channel.get_counterparty_node_id();
+ let counterparty_node_id = channel.context.get_counterparty_node_id();
if !pending_forwards.is_empty() {
- htlc_forwards = Some((channel.get_short_channel_id().unwrap_or(channel.outbound_scid_alias()),
- channel.get_funding_txo().unwrap(), channel.get_user_id(), pending_forwards));
+ htlc_forwards = Some((channel.context.get_short_channel_id().unwrap_or(channel.context.outbound_scid_alias()),
+ channel.context.get_funding_txo().unwrap(), channel.context.get_user_id(), pending_forwards));
}
if let Some(msg) = channel_ready {
});
}
- emit_channel_ready_event!(self, channel);
-
macro_rules! handle_cs { () => {
if let Some(update) = commitment_update {
pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
if let Some(tx) = funding_broadcastable {
log_info!(self.logger, "Broadcasting funding transaction with txid {}", tx.txid());
- self.tx_broadcaster.broadcast_transaction(&tx);
+ self.tx_broadcaster.broadcast_transactions(&[&tx]);
+ }
+
+ {
+ let mut pending_events = self.pending_events.lock().unwrap();
+ emit_channel_pending_event!(pending_events, channel);
+ emit_channel_ready_event!(pending_events, channel);
}
htlc_forwards
hash_map::Entry::Vacant(_) => return,
}
};
- log_trace!(self.logger, "ChannelMonitor updated to {}. Current highest is {}",
- highest_applied_update_id, channel.get().get_latest_monitor_update_id());
- if !channel.get().is_awaiting_monitor_update() || channel.get().get_latest_monitor_update_id() != highest_applied_update_id {
+ let remaining_in_flight =
+ if let Some(pending) = peer_state.in_flight_monitor_updates.get_mut(funding_txo) {
+ pending.retain(|upd| upd.update_id > highest_applied_update_id);
+ pending.len()
+ } else { 0 };
+ log_trace!(self.logger, "ChannelMonitor updated to {}. Current highest is {}. {} pending in-flight updates.",
+ highest_applied_update_id, channel.get().context.get_latest_monitor_update_id(),
+ remaining_in_flight);
+ if !channel.get().is_awaiting_monitor_update() || channel.get().context.get_latest_monitor_update_id() != highest_applied_update_id {
return;
}
- handle_monitor_update_completion!(self, highest_applied_update_id, peer_state_lock, peer_state, per_peer_state, channel.get_mut());
+ handle_monitor_update_completion!(self, peer_state_lock, peer_state, per_peer_state, channel.get_mut());
}
/// Accepts a request to open a channel after a [`Event::OpenChannelRequest`].
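+ ///
+ /// A hedged sketch from an event handler (names and the arbitrary `user_channel_id` of 42
+ /// are assumptions):
+ ///
+ /// ```ignore
+ /// if let Event::OpenChannelRequest { temporary_channel_id, counterparty_node_id, .. } = event {
+ ///     channel_manager.accept_inbound_channel(&temporary_channel_id, &counterparty_node_id, 42)?;
+ /// }
+ /// ```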
}
fn do_accept_inbound_channel(&self, temporary_channel_id: &[u8; 32], counterparty_node_id: &PublicKey, accept_0conf: bool, user_channel_id: u128) -> Result<(), APIError> {
- let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
+ let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
- let peers_without_funded_channels = self.peers_without_funded_channels(|peer| !peer.channel_by_id.is_empty());
+ let peers_without_funded_channels =
+ self.peers_without_funded_channels(|peer| { peer.total_channel_count() > 0 });
let per_peer_state = self.per_peer_state.read().unwrap();
let peer_state_mutex = per_peer_state.get(counterparty_node_id)
.ok_or_else(|| APIError::ChannelUnavailable { err: format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id) })?;
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state = &mut *peer_state_lock;
- let is_only_peer_channel = peer_state.channel_by_id.len() == 1;
- match peer_state.channel_by_id.entry(temporary_channel_id.clone()) {
+ let is_only_peer_channel = peer_state.total_channel_count() == 1;
+ match peer_state.inbound_v1_channel_by_id.entry(temporary_channel_id.clone()) {
hash_map::Entry::Occupied(mut channel) => {
- if !channel.get().inbound_is_awaiting_accept() {
+ if !channel.get().is_awaiting_accept() {
return Err(APIError::APIMisuseError { err: "The channel isn't currently awaiting to be accepted.".to_owned() });
}
if accept_0conf {
channel.get_mut().set_0conf();
- } else if channel.get().get_channel_type().requires_zero_conf() {
+ } else if channel.get().context.get_channel_type().requires_zero_conf() {
let send_msg_err_event = events::MessageSendEvent::HandleError {
- node_id: channel.get().get_counterparty_node_id(),
+ node_id: channel.get().context.get_counterparty_node_id(),
action: msgs::ErrorAction::SendErrorMessage{
msg: msgs::ErrorMessage { channel_id: temporary_channel_id.clone(), data: "No zero confirmation channels accepted".to_owned(), }
}
// channels per-peer we can accept channels from a peer with existing ones.
if is_only_peer_channel && peers_without_funded_channels >= MAX_UNFUNDED_CHANNEL_PEERS {
let send_msg_err_event = events::MessageSendEvent::HandleError {
- node_id: channel.get().get_counterparty_node_id(),
+ node_id: channel.get().context.get_counterparty_node_id(),
action: msgs::ErrorAction::SendErrorMessage{
msg: msgs::ErrorMessage { channel_id: temporary_channel_id.clone(), data: "Have too many peers with unfunded channels, not accepting new ones".to_owned(), }
}
}
peer_state.pending_msg_events.push(events::MessageSendEvent::SendAcceptChannel {
- node_id: channel.get().get_counterparty_node_id(),
+ node_id: channel.get().context.get_counterparty_node_id(),
msg: channel.get_mut().accept_inbound_channel(user_channel_id),
});
}
let peer = peer_mtx.lock().unwrap();
if !maybe_count_peer(&*peer) { continue; }
let num_unfunded_channels = Self::unfunded_channel_count(&peer, best_block_height);
- if num_unfunded_channels == peer.channel_by_id.len() {
+ if num_unfunded_channels == peer.total_channel_count() {
peers_without_funded_channels += 1;
}
}
) -> usize {
let mut num_unfunded_channels = 0;
for (_, chan) in peer.channel_by_id.iter() {
- if !chan.is_outbound() && chan.minimum_depth().unwrap_or(1) != 0 &&
- chan.get_funding_tx_confirmations(best_block_height) == 0
+ // This covers non-zero-conf inbound `Channel`s that we are currently monitoring, but
+ // which have not yet had any confirmations on-chain.
+ if !chan.context.is_outbound() && chan.context.minimum_depth().unwrap_or(1) != 0 &&
+ chan.context.get_funding_tx_confirmations(best_block_height) == 0
{
num_unfunded_channels += 1;
}
}
+ for (_, chan) in peer.inbound_v1_channel_by_id.iter() {
+ if chan.context.minimum_depth().unwrap_or(1) != 0 {
+ num_unfunded_channels += 1;
+ }
+ }
num_unfunded_channels
}
// Get the number of peers with channels, but without funded ones. We don't care too much
// about peers that never open a channel, so we filter by peers that have at least one
// channel, and then limit the number of those with unfunded channels.
- let channeled_peers_without_funding = self.peers_without_funded_channels(|node| !node.channel_by_id.is_empty());
+ let channeled_peers_without_funding =
+ self.peers_without_funded_channels(|node| node.total_channel_count() > 0);
let per_peer_state = self.per_peer_state.read().unwrap();
let peer_state_mutex = per_peer_state.get(counterparty_node_id)
// If this peer already has some channels, a new channel won't increase our number of peers
// with unfunded channels, so as long as we aren't over the maximum number of unfunded
// channels per-peer we can accept channels from a peer with existing ones.
- if peer_state.channel_by_id.is_empty() &&
+ if peer_state.total_channel_count() == 0 &&
channeled_peers_without_funding >= MAX_UNFUNDED_CHANNEL_PEERS &&
!self.default_configuration.manually_accept_inbound_channels
{
msg.temporary_channel_id.clone()));
}
- let mut channel = match Channel::new_from_req(&self.fee_estimator, &self.entropy_source, &self.signer_provider,
+ let mut channel = match InboundV1Channel::new(&self.fee_estimator, &self.entropy_source, &self.signer_provider,
counterparty_node_id.clone(), &self.channel_type_features(), &peer_state.latest_features, msg, user_channel_id,
&self.default_configuration, best_block_height, &self.logger, outbound_scid_alias)
{
},
Ok(res) => res
};
- match peer_state.channel_by_id.entry(channel.channel_id()) {
- hash_map::Entry::Occupied(_) => {
- self.outbound_scid_aliases.lock().unwrap().remove(&outbound_scid_alias);
- return Err(MsgHandleErrInternal::send_err_msg_no_close("temporary_channel_id collision for the same peer!".to_owned(), msg.temporary_channel_id.clone()))
- },
- hash_map::Entry::Vacant(entry) => {
- if !self.default_configuration.manually_accept_inbound_channels {
- if channel.get_channel_type().requires_zero_conf() {
- return Err(MsgHandleErrInternal::send_err_msg_no_close("No zero confirmation channels accepted".to_owned(), msg.temporary_channel_id.clone()));
- }
- peer_state.pending_msg_events.push(events::MessageSendEvent::SendAcceptChannel {
- node_id: counterparty_node_id.clone(),
- msg: channel.accept_inbound_channel(user_channel_id),
- });
- } else {
- let mut pending_events = self.pending_events.lock().unwrap();
- pending_events.push(
- events::Event::OpenChannelRequest {
- temporary_channel_id: msg.temporary_channel_id.clone(),
- counterparty_node_id: counterparty_node_id.clone(),
- funding_satoshis: msg.funding_satoshis,
- push_msat: msg.push_msat,
- channel_type: channel.get_channel_type().clone(),
- }
- );
+ let channel_id = channel.context.channel_id();
+ let channel_exists = peer_state.has_channel(&channel_id);
+ if channel_exists {
+ self.outbound_scid_aliases.lock().unwrap().remove(&outbound_scid_alias);
+ return Err(MsgHandleErrInternal::send_err_msg_no_close("temporary_channel_id collision for the same peer!".to_owned(), msg.temporary_channel_id.clone()))
+ } else {
+ if !self.default_configuration.manually_accept_inbound_channels {
+ let channel_type = channel.context.get_channel_type();
+ if channel_type.requires_zero_conf() {
+ return Err(MsgHandleErrInternal::send_err_msg_no_close("No zero confirmation channels accepted".to_owned(), msg.temporary_channel_id.clone()));
}
-
- entry.insert(channel);
+ if channel_type.requires_anchors_zero_fee_htlc_tx() {
+ return Err(MsgHandleErrInternal::send_err_msg_no_close("No channels with anchor outputs accepted".to_owned(), msg.temporary_channel_id.clone()));
+ }
+ peer_state.pending_msg_events.push(events::MessageSendEvent::SendAcceptChannel {
+ node_id: counterparty_node_id.clone(),
+ msg: channel.accept_inbound_channel(user_channel_id),
+ });
+ } else {
+ let mut pending_events = self.pending_events.lock().unwrap();
+ pending_events.push_back((events::Event::OpenChannelRequest {
+ temporary_channel_id: msg.temporary_channel_id.clone(),
+ counterparty_node_id: counterparty_node_id.clone(),
+ funding_satoshis: msg.funding_satoshis,
+ push_msat: msg.push_msat,
+ channel_type: channel.context.get_channel_type().clone(),
+ }, None));
}
+ peer_state.inbound_v1_channel_by_id.insert(channel_id, channel);
}
Ok(())
}
})?;
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state = &mut *peer_state_lock;
- match peer_state.channel_by_id.entry(msg.temporary_channel_id) {
+ match peer_state.outbound_v1_channel_by_id.entry(msg.temporary_channel_id) {
hash_map::Entry::Occupied(mut chan) => {
- try_chan_entry!(self, chan.get_mut().accept_channel(&msg, &self.default_configuration.channel_handshake_limits, &peer_state.latest_features), chan);
- (chan.get().get_value_satoshis(), chan.get().get_funding_redeemscript().to_v0_p2wsh(), chan.get().get_user_id())
+ try_v1_outbound_chan_entry!(self, chan.get_mut().accept_channel(&msg, &self.default_configuration.channel_handshake_limits, &peer_state.latest_features), chan);
+ (chan.get().context.get_value_satoshis(), chan.get().context.get_funding_redeemscript().to_v0_p2wsh(), chan.get().context.get_user_id())
},
hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.temporary_channel_id))
}
};
let mut pending_events = self.pending_events.lock().unwrap();
- pending_events.push(events::Event::FundingGenerationReady {
+ pending_events.push_back((events::Event::FundingGenerationReady {
temporary_channel_id: msg.temporary_channel_id,
counterparty_node_id: *counterparty_node_id,
channel_value_satoshis: value,
output_script,
user_channel_id: user_id,
- });
+ }, None));
Ok(())
}
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state = &mut *peer_state_lock;
- let ((funding_msg, monitor), chan) =
- match peer_state.channel_by_id.entry(msg.temporary_channel_id) {
- hash_map::Entry::Occupied(mut chan) => {
- (try_chan_entry!(self, chan.get_mut().funding_created(msg, best_block, &self.signer_provider, &self.logger), chan), chan.remove())
+ let (chan, funding_msg, monitor) =
+ match peer_state.inbound_v1_channel_by_id.remove(&msg.temporary_channel_id) {
+ Some(inbound_chan) => {
+ match inbound_chan.funding_created(msg, best_block, &self.signer_provider, &self.logger) {
+ Ok(res) => res,
+ Err((mut inbound_chan, err)) => {
+ // We've already removed this inbound channel from the map in `PeerState`
+ // above, so at this point it is safe to simply clean up any lingering
+ // entries concerning this channel.
+ update_maps_on_chan_removal!(self, &inbound_chan.context);
+ let user_id = inbound_chan.context.get_user_id();
+ let shutdown_res = inbound_chan.context.force_shutdown(false);
+ return Err(MsgHandleErrInternal::from_finish_shutdown(format!("{}", err),
+ msg.temporary_channel_id, user_id, shutdown_res, None));
+ },
+ }
},
- hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.temporary_channel_id))
+ None => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.temporary_channel_id))
};
match peer_state.channel_by_id.entry(funding_msg.channel_id) {
Err(MsgHandleErrInternal::send_err_msg_no_close("Already had channel with the new channel_id".to_owned(), funding_msg.channel_id))
},
hash_map::Entry::Vacant(e) => {
- match self.id_to_peer.lock().unwrap().entry(chan.channel_id()) {
+ match self.id_to_peer.lock().unwrap().entry(chan.context.channel_id()) {
hash_map::Entry::Occupied(_) => {
return Err(MsgHandleErrInternal::send_err_msg_no_close(
"The funding_created message had the same funding_txid as an existing channel - funding is not possible".to_owned(),
funding_msg.channel_id))
},
hash_map::Entry::Vacant(i_e) => {
- i_e.insert(chan.get_counterparty_node_id());
+ i_e.insert(chan.context.get_counterparty_node_id());
}
}
let monitor_res = self.chain_monitor.watch_channel(monitor.get_funding_txo().0, monitor);
let chan = e.insert(chan);
- let mut res = handle_new_monitor_update!(self, monitor_res, 0, peer_state_lock, peer_state,
- per_peer_state, chan, MANUALLY_REMOVING, { peer_state.channel_by_id.remove(&new_channel_id) });
+ let mut res = handle_new_monitor_update!(self, monitor_res, peer_state_lock, peer_state,
+ per_peer_state, chan, MANUALLY_REMOVING_INITIAL_MONITOR,
+ { peer_state.channel_by_id.remove(&new_channel_id) });
// Note that we reply with the new channel_id in error messages if we gave up on the
// channel, not the temporary_channel_id. This is compatible with ourselves, but the
if let Err(MsgHandleErrInternal { shutdown_finish: Some((res, _)), .. }) = &mut res {
res.0 = None;
}
- res
+ res.map(|_| ())
}
}
}
hash_map::Entry::Occupied(mut chan) => {
let monitor = try_chan_entry!(self,
chan.get_mut().funding_signed(&msg, best_block, &self.signer_provider, &self.logger), chan);
- let update_res = self.chain_monitor.watch_channel(chan.get().get_funding_txo().unwrap(), monitor);
- let mut res = handle_new_monitor_update!(self, update_res, 0, peer_state_lock, peer_state, per_peer_state, chan);
+ let update_res = self.chain_monitor.watch_channel(chan.get().context.get_funding_txo().unwrap(), monitor);
+ let mut res = handle_new_monitor_update!(self, update_res, peer_state_lock, peer_state, per_peer_state, chan, INITIAL_MONITOR);
if let Err(MsgHandleErrInternal { ref mut shutdown_finish, .. }) = res {
// We weren't able to watch the channel to begin with, so no updates should be made on
// it. Previously, full_stack_target found an (unreachable) panic when the
shutdown_finish.0.take();
}
}
- res
+ res.map(|_| ())
},
hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id))
}
let announcement_sigs_opt = try_chan_entry!(self, chan.get_mut().channel_ready(&msg, &self.node_signer,
self.genesis_hash.clone(), &self.default_configuration, &self.best_block.read().unwrap(), &self.logger), chan);
if let Some(announcement_sigs) = announcement_sigs_opt {
- log_trace!(self.logger, "Sending announcement_signatures for channel {}", log_bytes!(chan.get().channel_id()));
+ log_trace!(self.logger, "Sending announcement_signatures for channel {}", log_bytes!(chan.get().context.channel_id()));
peer_state.pending_msg_events.push(events::MessageSendEvent::SendAnnouncementSignatures {
node_id: counterparty_node_id.clone(),
msg: announcement_sigs,
});
- } else if chan.get().is_usable() {
+ } else if chan.get().context.is_usable() {
// If we're sending an announcement_signatures, we'll send the (public)
// channel_update after sending a channel_announcement when we receive our
// counterparty's announcement_signatures. Thus, we only bother to send a
// channel_update here if the channel is not public, i.e. we're not sending an
// announcement_signatures.
- log_trace!(self.logger, "Sending private initial channel_update for our counterparty on channel {}", log_bytes!(chan.get().channel_id()));
+ log_trace!(self.logger, "Sending private initial channel_update for our counterparty on channel {}", log_bytes!(chan.get().context.channel_id()));
if let Ok(msg) = self.get_channel_update_for_unicast(chan.get()) {
peer_state.pending_msg_events.push(events::MessageSendEvent::SendChannelUpdate {
node_id: counterparty_node_id.clone(),
}
}
- emit_channel_ready_event!(self, chan.get_mut());
+ {
+ let mut pending_events = self.pending_events.lock().unwrap();
+ emit_channel_ready_event!(pending_events, chan.get_mut());
+ }
Ok(())
},
if chan_entry.get().sent_shutdown() { " after we initiated shutdown" } else { "" });
}
- let funding_txo_opt = chan_entry.get().get_funding_txo();
+ let funding_txo_opt = chan_entry.get().context.get_funding_txo();
let (shutdown, monitor_update_opt, htlcs) = try_chan_entry!(self,
chan_entry.get_mut().shutdown(&self.signer_provider, &peer_state.latest_features, &msg), chan_entry);
dropped_htlcs = htlcs;
// Update the monitor with the shutdown script if necessary.
if let Some(monitor_update) = monitor_update_opt {
- let update_id = monitor_update.update_id;
- let update_res = self.chain_monitor.update_channel(funding_txo_opt.unwrap(), monitor_update);
- break handle_new_monitor_update!(self, update_res, update_id, peer_state_lock, peer_state, per_peer_state, chan_entry);
+ break handle_new_monitor_update!(self, funding_txo_opt.unwrap(), monitor_update,
+ peer_state_lock, peer_state, per_peer_state, chan_entry).map(|_| ());
}
break Ok(());
},
};
if let Some(broadcast_tx) = tx {
log_info!(self.logger, "Broadcasting {}", log_tx!(broadcast_tx));
- self.tx_broadcaster.broadcast_transaction(&broadcast_tx);
+ self.tx_broadcaster.broadcast_transactions(&[&broadcast_tx]);
}
if let Some(chan) = chan_option {
if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
msg: update
});
}
- self.issue_channel_close_events(&chan, ClosureReason::CooperativeClosure);
+ self.issue_channel_close_events(&chan.context, ClosureReason::CooperativeClosure);
}
Ok(())
}
//encrypted with the same key. It's not immediately obvious how to usefully exploit that,
//but we should prevent it anyway.
- let pending_forward_info = self.decode_update_add_htlc_onion(msg);
+ let decoded_hop_res = self.decode_update_add_htlc_onion(msg);
let per_peer_state = self.per_peer_state.read().unwrap();
let peer_state_mutex = per_peer_state.get(counterparty_node_id)
.ok_or_else(|| {
match peer_state.channel_by_id.entry(msg.channel_id) {
hash_map::Entry::Occupied(mut chan) => {
+ let pending_forward_info = match decoded_hop_res {
+ Ok((next_hop, shared_secret, next_packet_pk_opt)) =>
+ self.construct_pending_htlc_status(msg, shared_secret, next_hop,
+ chan.get().context.config().accept_underpaying_htlcs, next_packet_pk_opt),
+ Err(e) => PendingHTLCStatus::Fail(e)
+ };
let create_pending_htlc_status = |chan: &Channel<<SP::Target as SignerProvider>::Signer>, pending_forward_info: PendingHTLCStatus, error_code: u16| {
// If the update_add is completely bogus, the call will Err and we will close,
// but if we've sent a shutdown and they haven't acknowledged it yet, we just
_ => pending_forward_info
}
};
- try_chan_entry!(self, chan.get_mut().update_add_htlc(&msg, pending_forward_info, create_pending_htlc_status, &self.logger), chan);
+ try_chan_entry!(self, chan.get_mut().update_add_htlc(&msg, pending_forward_info, create_pending_htlc_status, &self.fee_estimator, &self.logger), chan);
},
hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
}
let peer_state = &mut *peer_state_lock;
match peer_state.channel_by_id.entry(msg.channel_id) {
hash_map::Entry::Occupied(mut chan) => {
- let funding_txo = chan.get().get_funding_txo();
- let monitor_update = try_chan_entry!(self, chan.get_mut().commitment_signed(&msg, &self.logger), chan);
- let update_res = self.chain_monitor.update_channel(funding_txo.unwrap(), monitor_update);
- let update_id = monitor_update.update_id;
- handle_new_monitor_update!(self, update_res, update_id, peer_state_lock,
- peer_state, per_peer_state, chan)
+ let funding_txo = chan.get().context.get_funding_txo();
+ let monitor_update_opt = try_chan_entry!(self, chan.get_mut().commitment_signed(&msg, &self.logger), chan);
+ if let Some(monitor_update) = monitor_update_opt {
+ handle_new_monitor_update!(self, funding_txo.unwrap(), monitor_update, peer_state_lock,
+ peer_state, per_peer_state, chan).map(|_| ())
+ } else { Ok(()) }
},
hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
}
fn forward_htlcs(&self, per_source_pending_forwards: &mut [(u64, OutPoint, u128, Vec<(PendingHTLCInfo, u64)>)]) {
for &mut (prev_short_channel_id, prev_funding_outpoint, prev_user_channel_id, ref mut pending_forwards) in per_source_pending_forwards {
let mut push_forward_event = false;
- let mut new_intercept_events = Vec::new();
+ let mut new_intercept_events = VecDeque::new();
let mut failed_intercept_forwards = Vec::new();
if !pending_forwards.is_empty() {
for (forward_info, prev_htlc_id) in pending_forwards.drain(..) {
let mut pending_intercepts = self.pending_intercepted_htlcs.lock().unwrap();
match pending_intercepts.entry(intercept_id) {
hash_map::Entry::Vacant(entry) => {
- new_intercept_events.push(events::Event::HTLCIntercepted {
+ new_intercept_events.push_back((events::Event::HTLCIntercepted {
requested_next_hop_scid: scid,
payment_hash: forward_info.payment_hash,
inbound_amount_msat: forward_info.incoming_amt_msat.unwrap(),
expected_outbound_amount_msat: forward_info.outgoing_amt_msat,
intercept_id
- });
+ }, None));
entry.insert(PendingAddHTLCInfo {
prev_short_channel_id, prev_funding_outpoint, prev_htlc_id, prev_user_channel_id, forward_info });
},
}
}
- // We only want to push a PendingHTLCsForwardable event if no others are queued.
fn push_pending_forwards_ev(&self) {
let mut pending_events = self.pending_events.lock().unwrap();
- let forward_ev_exists = pending_events.iter()
- .find(|ev| if let events::Event::PendingHTLCsForwardable { .. } = ev { true } else { false })
- .is_some();
- if !forward_ev_exists {
- pending_events.push(events::Event::PendingHTLCsForwardable {
- time_forwardable:
- Duration::from_millis(MIN_HTLC_RELAY_HOLDING_CELL_MILLIS),
- });
- }
+ let is_processing_events = self.pending_events_processor.load(Ordering::Acquire);
+ let num_forward_events = pending_events.iter().filter(|(ev, _)|
+ if let events::Event::PendingHTLCsForwardable { .. } = ev { true } else { false }
+ ).count();
+ // We only want to push a PendingHTLCsForwardable event if no others are queued. Events are
+ // processed in batches and are not removed from the queue until the whole batch has been
+ // handled, so when handling a `PendingHTLCsForwardable` event calls back into the
+ // `ChannelManager` the original forwarding event is still visible in the queue; we
+ // therefore tolerate exactly one queued event while processing. Phantom payments need this
+ // additional forwarding event before being claimed so that, by taking more time, they look
+ // like real forwarded payments.
+ if (is_processing_events && num_forward_events <= 1) || num_forward_events < 1 {
+ pending_events.push_back((Event::PendingHTLCsForwardable {
+ time_forwardable: Duration::from_millis(MIN_HTLC_RELAY_HOLDING_CELL_MILLIS),
+ }, None));
+ }
+ }
+
+ /// Checks whether [`ChannelMonitorUpdate`]s generated by the receipt of a remote
+ /// [`msgs::RevokeAndACK`] should be held for the given channel until some other action
+ /// completes. Note that this needs to happen in the same [`PeerState`] mutex as any release of
+ /// the [`ChannelMonitorUpdate`] in question.
+ fn raa_monitor_updates_held(&self,
+ actions_blocking_raa_monitor_updates: &BTreeMap<[u8; 32], Vec<RAAMonitorUpdateBlockingAction>>,
+ channel_funding_outpoint: OutPoint, counterparty_node_id: PublicKey
+ ) -> bool {
+ actions_blocking_raa_monitor_updates
+ .get(&channel_funding_outpoint.to_channel_id()).map(|v| !v.is_empty()).unwrap_or(false)
+ || self.pending_events.lock().unwrap().iter().any(|(_, action)| {
+ action == &Some(EventCompletionAction::ReleaseRAAChannelMonitorUpdate {
+ channel_funding_outpoint,
+ counterparty_node_id,
+ })
+ })
}
fn internal_revoke_and_ack(&self, counterparty_node_id: &PublicKey, msg: &msgs::RevokeAndACK) -> Result<(), MsgHandleErrInternal> {
let peer_state = &mut *peer_state_lock;
match peer_state.channel_by_id.entry(msg.channel_id) {
hash_map::Entry::Occupied(mut chan) => {
- let funding_txo = chan.get().get_funding_txo();
- let (htlcs_to_fail, monitor_update) = try_chan_entry!(self, chan.get_mut().revoke_and_ack(&msg, &self.logger), chan);
- let update_res = self.chain_monitor.update_channel(funding_txo.unwrap(), monitor_update);
- let update_id = monitor_update.update_id;
- let res = handle_new_monitor_update!(self, update_res, update_id,
- peer_state_lock, peer_state, per_peer_state, chan);
+ let funding_txo = chan.get().context.get_funding_txo();
+ let (htlcs_to_fail, monitor_update_opt) = try_chan_entry!(self, chan.get_mut().revoke_and_ack(&msg, &self.fee_estimator, &self.logger), chan);
+ let res = if let Some(monitor_update) = monitor_update_opt {
+ handle_new_monitor_update!(self, funding_txo.unwrap(), monitor_update,
+ peer_state_lock, peer_state, per_peer_state, chan).map(|_| ())
+ } else { Ok(()) };
(htlcs_to_fail, res)
},
hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
let peer_state = &mut *peer_state_lock;
match peer_state.channel_by_id.entry(msg.channel_id) {
hash_map::Entry::Occupied(mut chan) => {
- if !chan.get().is_usable() {
+ if !chan.get().context.is_usable() {
return Err(MsgHandleErrInternal::from_no_close(LightningError{err: "Got an announcement_signatures before we were ready for it".to_owned(), action: msgs::ErrorAction::IgnoreError}));
}
let peer_state = &mut *peer_state_lock;
match peer_state.channel_by_id.entry(chan_id) {
hash_map::Entry::Occupied(mut chan) => {
- if chan.get().get_counterparty_node_id() != *counterparty_node_id {
- if chan.get().should_announce() {
+ if chan.get().context.get_counterparty_node_id() != *counterparty_node_id {
+ if chan.get().context.should_announce() {
// If the announcement is about a channel of ours which is public, some
// other peer may simply be forwarding all its gossip to us. Don't provide
// a scary-looking error message and return Ok instead.
}
return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a channel_update for a channel from the wrong node - it shouldn't know about our private channels!".to_owned(), chan_id));
}
- let were_node_one = self.get_our_node_id().serialize()[..] < chan.get().get_counterparty_node_id().serialize()[..];
+ let were_node_one = self.get_our_node_id().serialize()[..] < chan.get().context.get_counterparty_node_id().serialize()[..];
let msg_from_node_one = msg.contents.flags & 1 == 0;
if were_node_one == msg_from_node_one {
return Ok(NotifyOption::SkipPersist);
node_id: counterparty_node_id.clone(),
msg,
});
- } else if chan.get().is_usable() {
+ } else if chan.get().context.is_usable() {
// If the channel is in a usable state (ie the channel is not being shut
// down), send a unicast channel_update to our counterparty to make sure
// they have the latest channel parameters.
if let Ok(msg) = self.get_channel_update_for_unicast(chan.get()) {
channel_update = Some(events::MessageSendEvent::SendChannelUpdate {
- node_id: chan.get().get_counterparty_node_id(),
+ node_id: chan.get().context.get_counterparty_node_id(),
msg,
});
}
}
- let need_lnd_workaround = chan.get_mut().workaround_lnd_bug_4006.take();
+ let need_lnd_workaround = chan.get_mut().context.workaround_lnd_bug_4006.take();
htlc_forwards = self.handle_channel_resumption(
&mut peer_state.pending_msg_events, chan.get_mut(), responses.raa, responses.commitment_update, responses.order,
Vec::new(), None, responses.channel_ready, responses.announcement_sigs);
Ok(())
}
- /// Process pending events from the `chain::Watch`, returning whether any events were processed.
+ /// Process pending events from the [`chain::Watch`], returning whether any events were processed.
fn process_pending_monitor_events(&self) -> bool {
debug_assert!(self.total_consistency_lock.try_write().is_err()); // Caller holds read lock
let pending_msg_events = &mut peer_state.pending_msg_events;
if let hash_map::Entry::Occupied(chan_entry) = peer_state.channel_by_id.entry(funding_outpoint.to_channel_id()) {
let mut chan = remove_channel!(self, chan_entry);
- failed_channels.push(chan.force_shutdown(false));
+ failed_channels.push(chan.context.force_shutdown(false));
if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
msg: update
} else {
ClosureReason::CommitmentTxConfirmed
};
- self.issue_channel_close_events(&chan, reason);
+ self.issue_channel_close_events(&chan.context, reason);
pending_msg_events.push(events::MessageSendEvent::HandleError {
- node_id: chan.get_counterparty_node_id(),
+ node_id: chan.context.get_counterparty_node_id(),
action: msgs::ErrorAction::SendErrorMessage {
- msg: msgs::ErrorMessage { channel_id: chan.channel_id(), data: "Channel force-closed".to_owned() }
+ msg: msgs::ErrorMessage { channel_id: chan.context.channel_id(), data: "Channel force-closed".to_owned() }
},
});
}
/// update events as a separate process method here.
#[cfg(fuzzing)]
pub fn process_monitor_events(&self) {
- PersistenceNotifierGuard::optionally_notify(&self.total_consistency_lock, &self.persistence_notifier, || {
- if self.process_pending_monitor_events() {
- NotifyOption::DoPersist
- } else {
- NotifyOption::SkipPersist
- }
- });
+ let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
+ self.process_pending_monitor_events();
}
/// Check the holding cell in each channel and free any pending HTLCs in them if possible.
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state: &mut PeerState<_> = &mut *peer_state_lock;
for (channel_id, chan) in peer_state.channel_by_id.iter_mut() {
- let counterparty_node_id = chan.get_counterparty_node_id();
- let funding_txo = chan.get_funding_txo();
+ let counterparty_node_id = chan.context.get_counterparty_node_id();
+ let funding_txo = chan.context.get_funding_txo();
let (monitor_opt, holding_cell_failed_htlcs) =
- chan.maybe_free_holding_cell_htlcs(&self.logger);
+ chan.maybe_free_holding_cell_htlcs(&self.fee_estimator, &self.logger);
if !holding_cell_failed_htlcs.is_empty() {
failed_htlcs.push((holding_cell_failed_htlcs, *channel_id, counterparty_node_id));
}
if let Some(monitor_update) = monitor_opt {
has_monitor_update = true;
- let update_res = self.chain_monitor.update_channel(
- funding_txo.expect("channel is live"), monitor_update);
- let update_id = monitor_update.update_id;
let channel_id: [u8; 32] = *channel_id;
- let res = handle_new_monitor_update!(self, update_res, update_id,
+ let res = handle_new_monitor_update!(self, funding_txo.unwrap(), monitor_update,
peer_state_lock, peer_state, per_peer_state, chan, MANUALLY_REMOVING,
peer_state.channel_by_id.remove(&channel_id));
if res.is_err() {
if let Some(msg) = msg_opt {
has_update = true;
pending_msg_events.push(events::MessageSendEvent::SendClosingSigned {
- node_id: chan.get_counterparty_node_id(), msg,
+ node_id: chan.context.get_counterparty_node_id(), msg,
});
}
if let Some(tx) = tx_opt {
});
}
- self.issue_channel_close_events(chan, ClosureReason::CooperativeClosure);
+ self.issue_channel_close_events(&chan.context, ClosureReason::CooperativeClosure);
log_info!(self.logger, "Broadcasting {}", log_tx!(tx));
- self.tx_broadcaster.broadcast_transaction(&tx);
- update_maps_on_chan_removal!(self, chan);
+ self.tx_broadcaster.broadcast_transactions(&[&tx]);
+ update_maps_on_chan_removal!(self, &chan.context);
false
} else { true }
},
Err(e) => {
has_update = true;
let (close_channel, res) = convert_chan_err!(self, e, chan, channel_id);
- handle_errors.push((chan.get_counterparty_node_id(), Err(res)));
+ handle_errors.push((chan.context.get_counterparty_node_id(), Err(res)));
!close_channel
}
}
// Channel::force_shutdown tries to make us do) as we may still be in initialization,
// so we track the update internally and handle it when the user next calls
// timer_tick_occurred, guaranteeing we're running normally.
- if let Some((funding_txo, update)) = failure.0.take() {
+ if let Some((counterparty_node_id, funding_txo, update)) = failure.0.take() {
assert_eq!(update.updates.len(), 1);
if let ChannelMonitorUpdateStep::ChannelForceClosed { should_broadcast } = update.updates[0] {
assert!(should_broadcast);
} else { unreachable!(); }
- self.pending_background_events.lock().unwrap().push(BackgroundEvent::ClosingMonitorUpdate((funding_txo, update)));
+ self.pending_background_events.lock().unwrap().push(
+ BackgroundEvent::MonitorUpdateRegeneratedOnStartup {
+ counterparty_node_id, funding_txo, update
+ });
}
self.finish_force_close_channel(failure);
}
}
- fn set_payment_hash_secret_map(&self, payment_hash: PaymentHash, payment_preimage: Option<PaymentPreimage>, min_value_msat: Option<u64>, invoice_expiry_delta_secs: u32) -> Result<PaymentSecret, APIError> {
- assert!(invoice_expiry_delta_secs <= 60*60*24*365); // Sadly bitcoin timestamps are u32s, so panic before 2106
-
- if min_value_msat.is_some() && min_value_msat.unwrap() > MAX_VALUE_MSAT {
- return Err(APIError::APIMisuseError { err: format!("min_value_msat of {} greater than total 21 million bitcoin supply", min_value_msat.unwrap()) });
- }
-
- let payment_secret = PaymentSecret(self.entropy_source.get_secure_random_bytes());
-
- let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
- let mut payment_secrets = self.pending_inbound_payments.lock().unwrap();
- match payment_secrets.entry(payment_hash) {
- hash_map::Entry::Vacant(e) => {
- e.insert(PendingInboundPayment {
- payment_secret, min_value_msat, payment_preimage,
- user_payment_id: 0, // For compatibility with version 0.0.103 and earlier
- // We assume that highest_seen_timestamp is pretty close to the current time -
- // it's updated when we receive a new block with the maximum time we've seen in
- // a header. It should never be more than two hours in the future.
- // Thus, we add two hours here as a buffer to ensure we absolutely
- // never fail a payment too early.
- // Note that we assume that received blocks have reasonably up-to-date
- // timestamps.
- expiry_time: self.highest_seen_timestamp.load(Ordering::Acquire) as u64 + invoice_expiry_delta_secs as u64 + 7200,
- });
- },
- hash_map::Entry::Occupied(_) => return Err(APIError::APIMisuseError { err: "Duplicate payment hash".to_owned() }),
- }
- Ok(payment_secret)
- }
-
/// Gets a payment secret and payment hash for use in an invoice given to a third party wishing
/// to pay us.
///
min_final_cltv_expiry_delta)
}
- /// Legacy version of [`create_inbound_payment`]. Use this method if you wish to share
- /// serialized state with LDK node(s) running 0.0.103 and earlier.
- ///
- /// May panic if `invoice_expiry_delta_secs` is greater than one year.
- ///
- /// # Note
- /// This method is deprecated and will be removed soon.
- ///
- /// [`create_inbound_payment`]: Self::create_inbound_payment
- #[deprecated]
- pub fn create_inbound_payment_legacy(&self, min_value_msat: Option<u64>, invoice_expiry_delta_secs: u32) -> Result<(PaymentHash, PaymentSecret), APIError> {
- let payment_preimage = PaymentPreimage(self.entropy_source.get_secure_random_bytes());
- let payment_hash = PaymentHash(Sha256::hash(&payment_preimage.0).into_inner());
- let payment_secret = self.set_payment_hash_secret_map(payment_hash, Some(payment_preimage), min_value_msat, invoice_expiry_delta_secs)?;
- Ok((payment_hash, payment_secret))
- }
-
/// Gets a [`PaymentSecret`] for a given [`PaymentHash`], for which the payment preimage is
/// stored external to LDK.
///
min_final_cltv_expiry)
}
- /// Legacy version of [`create_inbound_payment_for_hash`]. Use this method if you wish to share
- /// serialized state with LDK node(s) running 0.0.103 and earlier.
- ///
- /// May panic if `invoice_expiry_delta_secs` is greater than one year.
- ///
- /// # Note
- /// This method is deprecated and will be removed soon.
- ///
- /// [`create_inbound_payment_for_hash`]: Self::create_inbound_payment_for_hash
- #[deprecated]
- pub fn create_inbound_payment_for_hash_legacy(&self, payment_hash: PaymentHash, min_value_msat: Option<u64>, invoice_expiry_delta_secs: u32) -> Result<PaymentSecret, APIError> {
- self.set_payment_hash_secret_map(payment_hash, None, min_value_msat, invoice_expiry_delta_secs)
- }
-
/// Gets an LDK-generated payment preimage from a payment hash and payment secret that were
/// previously returned from [`create_inbound_payment`].
///
/// Gets a fake short channel id for use in receiving [phantom node payments]. These fake scids
/// are used when constructing the phantom invoice's route hints.
///
- /// [phantom node payments]: crate::chain::keysinterface::PhantomKeysManager
+ /// [phantom node payments]: crate::sign::PhantomKeysManager
pub fn get_phantom_scid(&self) -> u64 {
let best_block_height = self.best_block.read().unwrap().height();
let short_to_chan_info = self.short_to_chan_info.read().unwrap();
/// Gets route hints for use in receiving [phantom node payments].
///
- /// [phantom node payments]: crate::chain::keysinterface::PhantomKeysManager
+ /// [phantom node payments]: crate::sign::PhantomKeysManager
pub fn get_phantom_route_hints(&self) -> PhantomRouteHints {
PhantomRouteHints {
channels: self.list_usable_channels(),
inflight_htlcs
}
- #[cfg(any(test, fuzzing, feature = "_test_utils"))]
+ #[cfg(any(test, feature = "_test_utils"))]
pub fn get_and_clear_pending_events(&self) -> Vec<events::Event> {
let events = core::cell::RefCell::new(Vec::new());
let event_handler = |event: events::Event| events.borrow_mut().push(event);
#[cfg(feature = "_test_utils")]
pub fn push_pending_event(&self, event: events::Event) {
let mut events = self.pending_events.lock().unwrap();
- events.push(event);
+ events.push_back((event, None));
}
#[cfg(test)]
pub fn pop_pending_event(&self) -> Option<events::Event> {
let mut events = self.pending_events.lock().unwrap();
- if events.is_empty() { None } else { Some(events.remove(0)) }
+ events.pop_front().map(|(e, _)| e)
}
#[cfg(test)]
self.pending_outbound_payments.clear_pending_payments()
}
- /// Processes any events asynchronously in the order they were generated since the last call
- /// using the given event handler.
- ///
- /// See the trait-level documentation of [`EventsProvider`] for requirements.
- pub async fn process_pending_events_async<Future: core::future::Future, H: Fn(Event) -> Future>(
- &self, handler: H
- ) {
- // We'll acquire our total consistency lock until the returned future completes so that
- // we can be sure no other persists happen while processing events.
- let _read_guard = self.total_consistency_lock.read().unwrap();
+ /// When something which was blocking a channel from updating its [`ChannelMonitor`] (e.g. an
+ /// [`Event`] being handled) completes, this should be called to restore the channel to normal
+ /// operation. It will double-check that nothing *else* is also blocking the same channel from
+ /// making progress and then let any blocked [`ChannelMonitorUpdate`]s fly.
+ fn handle_monitor_update_release(&self, counterparty_node_id: PublicKey, channel_funding_outpoint: OutPoint, mut completed_blocker: Option<RAAMonitorUpdateBlockingAction>) {
+ let mut errors = Vec::new();
+ loop {
+ let per_peer_state = self.per_peer_state.read().unwrap();
+ if let Some(peer_state_mtx) = per_peer_state.get(&counterparty_node_id) {
+ let mut peer_state_lck = peer_state_mtx.lock().unwrap();
+ let peer_state = &mut *peer_state_lck;
+
+ if let Some(blocker) = completed_blocker.take() {
+ // Only do this on the first iteration of the loop.
+ if let Some(blockers) = peer_state.actions_blocking_raa_monitor_updates
+ .get_mut(&channel_funding_outpoint.to_channel_id())
+ {
+ blockers.retain(|iter| iter != &blocker);
+ }
+ }
- let mut result = NotifyOption::SkipPersist;
+ if self.raa_monitor_updates_held(&peer_state.actions_blocking_raa_monitor_updates,
+ channel_funding_outpoint, counterparty_node_id) {
+ // Check that, while holding the peer lock, we don't have anything else
+ // blocking monitor updates for this channel. If we do, release the monitor
+ // update(s) when those blockers complete.
+ log_trace!(self.logger, "Delaying monitor unlock for channel {} as another channel's mon update needs to complete first",
+ log_bytes!(&channel_funding_outpoint.to_channel_id()[..]));
+ break;
+ }
- // TODO: This behavior should be documented. It's unintuitive that we query
- // ChannelMonitors when clearing other events.
- if self.process_pending_monitor_events() {
- result = NotifyOption::DoPersist;
+ if let hash_map::Entry::Occupied(mut chan) = peer_state.channel_by_id.entry(channel_funding_outpoint.to_channel_id()) {
+ debug_assert_eq!(chan.get().context.get_funding_txo().unwrap(), channel_funding_outpoint);
+ if let Some((monitor_update, further_update_exists)) = chan.get_mut().unblock_next_blocked_monitor_update() {
+ log_debug!(self.logger, "Unlocking monitor updating for channel {} and updating monitor",
+ log_bytes!(&channel_funding_outpoint.to_channel_id()[..]));
+ if let Err(e) = handle_new_monitor_update!(self, channel_funding_outpoint, monitor_update,
+ peer_state_lck, peer_state, per_peer_state, chan)
+ {
+ errors.push((e, counterparty_node_id));
+ }
+ if further_update_exists {
+ // If there are more `ChannelMonitorUpdate`s to process, restart at the
+ // top of the loop.
+ continue;
+ }
+ } else {
+ log_trace!(self.logger, "Unlocked monitor updating for channel {} without monitors to update",
+ log_bytes!(&channel_funding_outpoint.to_channel_id()[..]));
+ }
+ }
+ } else {
+ log_debug!(self.logger,
+ "Got a release post-RAA monitor update for peer {} but the channel is gone",
+ log_pubkey!(counterparty_node_id));
+ }
+ break;
}
-
- let pending_events = mem::replace(&mut *self.pending_events.lock().unwrap(), vec![]);
- if !pending_events.is_empty() {
- result = NotifyOption::DoPersist;
+ for (err, counterparty_node_id) in errors {
+ let res = Err::<(), _>(err);
+ let _ = handle_error!(self, res, counterparty_node_id);
}
+ }
- for event in pending_events {
- handler(event).await;
+ fn handle_post_event_actions(&self, actions: Vec<EventCompletionAction>) {
+ for action in actions {
+ match action {
+ EventCompletionAction::ReleaseRAAChannelMonitorUpdate {
+ channel_funding_outpoint, counterparty_node_id
+ } => {
+ self.handle_monitor_update_release(counterparty_node_id, channel_funding_outpoint, None);
+ }
+ }
}
+ }
- if result == NotifyOption::DoPersist {
- self.persistence_notifier.notify();
- }
+ /// Processes any events asynchronously in the order they were generated since the last call
+ /// using the given event handler.
+ ///
+ /// See the trait-level documentation of [`EventsProvider`] for requirements.
+ pub async fn process_pending_events_async<Future: core::future::Future, H: Fn(Event) -> Future>(
+ &self, handler: H
+ ) {
+ let mut ev;
+ process_events_body!(self, ev, { handler(ev).await });
}
}
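+// A minimal usage sketch (not part of this patch's API surface): driving the
+// async event processing above from user code. `MyChannelManager` and
+// `log_payment` are assumed, hypothetical stand-ins for a concrete
+// `ChannelManager` type alias and the user's own async routine.
+async fn drive_events(chan_man: &MyChannelManager) {
+	chan_man.process_pending_events_async(|event| async move {
+		// The handler may call back into the manager (e.g. to claim a
+		// payment), but must not `Writeable::write` it from here.
+		if let Event::PaymentClaimable { payment_hash, .. } = event {
+			log_payment(payment_hash).await;
+		}
+	}).await;
+}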
fn get_and_clear_pending_msg_events(&self) -> Vec<MessageSendEvent> {
let events = RefCell::new(Vec::new());
PersistenceNotifierGuard::optionally_notify(&self.total_consistency_lock, &self.persistence_notifier, || {
- let mut result = NotifyOption::SkipPersist;
+ let mut result = self.process_background_events();
// TODO: This behavior should be documented. It's unintuitive that we query
// ChannelMonitors when clearing other events.
/// An [`EventHandler`] may safely call back to the provider in order to handle an event.
/// However, it must not call [`Writeable::write`] as doing so would result in a deadlock.
fn process_pending_events<H: Deref>(&self, handler: H) where H::Target: EventHandler {
- PersistenceNotifierGuard::optionally_notify(&self.total_consistency_lock, &self.persistence_notifier, || {
- let mut result = NotifyOption::SkipPersist;
-
- // TODO: This behavior should be documented. It's unintuitive that we query
- // ChannelMonitors when clearing other events.
- if self.process_pending_monitor_events() {
- result = NotifyOption::DoPersist;
- }
-
- let pending_events = mem::replace(&mut *self.pending_events.lock().unwrap(), vec![]);
- if !pending_events.is_empty() {
- result = NotifyOption::DoPersist;
- }
-
- for event in pending_events {
- handler.handle_event(event);
- }
-
- result
- });
+ let mut ev;
+ process_events_body!(self, ev, handler.handle_event(ev));
}
}
}
fn block_disconnected(&self, header: &BlockHeader, height: u32) {
- let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
+ let _persistence_guard = PersistenceNotifierGuard::optionally_notify(&self.total_consistency_lock,
+ &self.persistence_notifier, || -> NotifyOption { NotifyOption::DoPersist });
let new_height = height - 1;
{
let mut best_block = self.best_block.write().unwrap();
let block_hash = header.block_hash();
log_trace!(self.logger, "{} transactions included in block {} at height {} provided", txdata.len(), block_hash, height);
- let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
+ let _persistence_guard = PersistenceNotifierGuard::optionally_notify(&self.total_consistency_lock,
+ &self.persistence_notifier, || -> NotifyOption { NotifyOption::DoPersist });
self.do_chain_event(Some(height), |channel| channel.transactions_confirmed(&block_hash, height, txdata, self.genesis_hash.clone(), &self.node_signer, &self.default_configuration, &self.logger)
.map(|(a, b)| (a, Vec::new(), b)));
let block_hash = header.block_hash();
log_trace!(self.logger, "New best block: {} at height {}", block_hash, height);
- let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
-
+ let _persistence_guard = PersistenceNotifierGuard::optionally_notify(&self.total_consistency_lock,
+ &self.persistence_notifier, || -> NotifyOption { NotifyOption::DoPersist });
*self.best_block.write().unwrap() = BestBlock::new(block_hash, height);
self.do_chain_event(Some(height), |channel| channel.best_block_updated(height, header.time, self.genesis_hash.clone(), &self.node_signer, &self.default_configuration, &self.logger));
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state = &mut *peer_state_lock;
for chan in peer_state.channel_by_id.values() {
- if let (Some(funding_txo), Some(block_hash)) = (chan.get_funding_txo(), chan.get_funding_tx_confirmed_in()) {
+ if let (Some(funding_txo), Some(block_hash)) = (chan.context.get_funding_txo(), chan.context.get_funding_tx_confirmed_in()) {
res.push((funding_txo.txid, Some(block_hash)));
}
}
}
fn transaction_unconfirmed(&self, txid: &Txid) {
- let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
+ let _persistence_guard = PersistenceNotifierGuard::optionally_notify(&self.total_consistency_lock,
+ &self.persistence_notifier, || -> NotifyOption { NotifyOption::DoPersist });
self.do_chain_event(None, |channel| {
- if let Some(funding_txo) = channel.get_funding_txo() {
+ if let Some(funding_txo) = channel.context.get_funding_txo() {
if funding_txo.txid == *txid {
channel.funding_transaction_unconfirmed(&self.logger).map(|()| (None, Vec::new(), None))
} else { Ok((None, Vec::new(), None)) }
for (source, payment_hash) in timed_out_pending_htlcs.drain(..) {
let (failure_code, data) = self.get_htlc_inbound_temp_fail_err_and_data(0x1000|14 /* expiry_too_soon */, &channel);
timed_out_htlcs.push((source, payment_hash, HTLCFailReason::reason(failure_code, data),
- HTLCDestination::NextHopChannel { node_id: Some(channel.get_counterparty_node_id()), channel_id: channel.channel_id() }));
+ HTLCDestination::NextHopChannel { node_id: Some(channel.context.get_counterparty_node_id()), channel_id: channel.context.channel_id() }));
}
if let Some(channel_ready) = channel_ready_opt {
send_channel_ready!(self, pending_msg_events, channel, channel_ready);
- if channel.is_usable() {
- log_trace!(self.logger, "Sending channel_ready with private initial channel_update for our counterparty on channel {}", log_bytes!(channel.channel_id()));
+ if channel.context.is_usable() {
+ log_trace!(self.logger, "Sending channel_ready with private initial channel_update for our counterparty on channel {}", log_bytes!(channel.context.channel_id()));
if let Ok(msg) = self.get_channel_update_for_unicast(channel) {
pending_msg_events.push(events::MessageSendEvent::SendChannelUpdate {
- node_id: channel.get_counterparty_node_id(),
+ node_id: channel.context.get_counterparty_node_id(),
msg,
});
}
} else {
- log_trace!(self.logger, "Sending channel_ready WITHOUT channel_update for {}", log_bytes!(channel.channel_id()));
+ log_trace!(self.logger, "Sending channel_ready WITHOUT channel_update for {}", log_bytes!(channel.context.channel_id()));
}
}
- emit_channel_ready_event!(self, channel);
+ {
+ let mut pending_events = self.pending_events.lock().unwrap();
+ emit_channel_ready_event!(pending_events, channel);
+ }
if let Some(announcement_sigs) = announcement_sigs {
- log_trace!(self.logger, "Sending announcement_signatures for channel {}", log_bytes!(channel.channel_id()));
+ log_trace!(self.logger, "Sending announcement_signatures for channel {}", log_bytes!(channel.context.channel_id()));
pending_msg_events.push(events::MessageSendEvent::SendAnnouncementSignatures {
- node_id: channel.get_counterparty_node_id(),
+ node_id: channel.context.get_counterparty_node_id(),
msg: announcement_sigs,
});
if let Some(height) = height_opt {
}
}
if channel.is_our_channel_ready() {
- if let Some(real_scid) = channel.get_short_channel_id() {
+ if let Some(real_scid) = channel.context.get_short_channel_id() {
// If we sent a 0conf channel_ready, and now have an SCID, we add it
// to the short_to_chan_info map here. Note that we check whether we
// can relay using the real SCID at relay-time (i.e.
// un-confirmed we force-close the channel, ensuring short_to_chan_info
// is always consistent.
let mut short_to_chan_info = self.short_to_chan_info.write().unwrap();
- let scid_insert = short_to_chan_info.insert(real_scid, (channel.get_counterparty_node_id(), channel.channel_id()));
- assert!(scid_insert.is_none() || scid_insert.unwrap() == (channel.get_counterparty_node_id(), channel.channel_id()),
+ let scid_insert = short_to_chan_info.insert(real_scid, (channel.context.get_counterparty_node_id(), channel.context.channel_id()));
+ assert!(scid_insert.is_none() || scid_insert.unwrap() == (channel.context.get_counterparty_node_id(), channel.context.channel_id()),
"SCIDs should never collide - ensure you weren't behind by a full {} blocks when creating channels",
fake_scid::MAX_SCID_BLOCKS_FROM_NOW);
}
}
} else if let Err(reason) = res {
- update_maps_on_chan_removal!(self, channel);
+ update_maps_on_chan_removal!(self, &channel.context);
// It looks like our counterparty went on-chain or the funding transaction was
// reorged out of the main chain. Close the channel.
- failed_channels.push(channel.force_shutdown(true));
+ failed_channels.push(channel.context.force_shutdown(true));
if let Ok(update) = self.get_channel_update_for_broadcast(&channel) {
pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
msg: update
});
}
let reason_message = format!("{}", reason);
- self.issue_channel_close_events(channel, reason);
+ self.issue_channel_close_events(&channel.context, reason);
pending_msg_events.push(events::MessageSendEvent::HandleError {
- node_id: channel.get_counterparty_node_id(),
+ node_id: channel.context.get_counterparty_node_id(),
action: msgs::ErrorAction::SendErrorMessage { msg: msgs::ErrorMessage {
- channel_id: channel.channel_id(),
+ channel_id: channel.context.channel_id(),
data: reason_message,
} },
});
}
if let Some(height) = height_opt {
- self.claimable_payments.lock().unwrap().claimable_htlcs.retain(|payment_hash, (_, htlcs)| {
- htlcs.retain(|htlc| {
+ self.claimable_payments.lock().unwrap().claimable_payments.retain(|payment_hash, payment| {
+ payment.htlcs.retain(|htlc| {
// If height is approaching the number of blocks we think it takes us to get
// our commitment transaction confirmed before the HTLC expires, plus the
// number of blocks we generally consider it to take to do a commitment update,
false
} else { true }
});
- !htlcs.is_empty() // Only retain this entry if htlcs has at least one entry.
+ !payment.htlcs.is_empty() // Only retain this entry if htlcs has at least one entry.
});
let mut intercepted_htlcs = self.pending_intercepted_htlcs.lock().unwrap();
}
}
- /// Blocks until ChannelManager needs to be persisted or a timeout is reached. It returns a bool
- /// indicating whether persistence is necessary. Only one listener on
- /// [`await_persistable_update`], [`await_persistable_update_timeout`], or a future returned by
- /// [`get_persistable_update_future`] is guaranteed to be woken up.
+ /// Gets a [`Future`] that completes when this [`ChannelManager`] needs to be persisted.
///
- /// Note that this method is not available with the `no-std` feature.
+ /// Note that callbacks registered on the [`Future`] MUST NOT call back into this
+ /// [`ChannelManager`] and should instead register actions to be taken later.
///
- /// [`await_persistable_update`]: Self::await_persistable_update
- /// [`await_persistable_update_timeout`]: Self::await_persistable_update_timeout
- /// [`get_persistable_update_future`]: Self::get_persistable_update_future
- #[cfg(any(test, feature = "std"))]
- pub fn await_persistable_update_timeout(&self, max_wait: Duration) -> bool {
- self.persistence_notifier.wait_timeout(max_wait)
- }
-
- /// Blocks until ChannelManager needs to be persisted. Only one listener on
- /// [`await_persistable_update`], `await_persistable_update_timeout`, or a future returned by
- /// [`get_persistable_update_future`] is guaranteed to be woken up.
- ///
- /// [`await_persistable_update`]: Self::await_persistable_update
- /// [`get_persistable_update_future`]: Self::get_persistable_update_future
- pub fn await_persistable_update(&self) {
- self.persistence_notifier.wait()
- }
-
- /// Gets a [`Future`] that completes when a persistable update is available. Note that
- /// callbacks registered on the [`Future`] MUST NOT call back into this [`ChannelManager`] and
- /// should instead register actions to be taken later.
pub fn get_persistable_update_future(&self) -> Future {
self.persistence_notifier.get_future()
}
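+// A hedged sketch of a persister loop built on the future above, assuming a
+// hypothetical `persist_manager` routine and `MyChannelManager` alias. Note
+// that the persist happens after the await returns, never from a callback
+// registered on the `Future` itself, per the caveat above.
+async fn persist_loop(chan_man: &MyChannelManager) {
+	loop {
+		chan_man.get_persistable_update_future().await;
+		persist_manager(chan_man);
+	}
+}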
L::Target: Logger,
{
fn handle_open_channel(&self, counterparty_node_id: &PublicKey, msg: &msgs::OpenChannel) {
- let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
+ let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
let _ = handle_error!(self, self.internal_open_channel(counterparty_node_id, msg), *counterparty_node_id);
}
+ fn handle_open_channel_v2(&self, counterparty_node_id: &PublicKey, msg: &msgs::OpenChannelV2) {
+ let _: Result<(), _> = handle_error!(self, Err(MsgHandleErrInternal::send_err_msg_no_close(
+ "Dual-funded channels not supported".to_owned(),
+ msg.temporary_channel_id.clone())), *counterparty_node_id);
+ }
+
fn handle_accept_channel(&self, counterparty_node_id: &PublicKey, msg: &msgs::AcceptChannel) {
- let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
+ let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
let _ = handle_error!(self, self.internal_accept_channel(counterparty_node_id, msg), *counterparty_node_id);
}
+ fn handle_accept_channel_v2(&self, counterparty_node_id: &PublicKey, msg: &msgs::AcceptChannelV2) {
+ let _: Result<(), _> = handle_error!(self, Err(MsgHandleErrInternal::send_err_msg_no_close(
+ "Dual-funded channels not supported".to_owned(),
+ msg.temporary_channel_id.clone())), *counterparty_node_id);
+ }
+
fn handle_funding_created(&self, counterparty_node_id: &PublicKey, msg: &msgs::FundingCreated) {
- let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
+ let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
let _ = handle_error!(self, self.internal_funding_created(counterparty_node_id, msg), *counterparty_node_id);
}
fn handle_funding_signed(&self, counterparty_node_id: &PublicKey, msg: &msgs::FundingSigned) {
- let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
+ let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
let _ = handle_error!(self, self.internal_funding_signed(counterparty_node_id, msg), *counterparty_node_id);
}
fn handle_channel_ready(&self, counterparty_node_id: &PublicKey, msg: &msgs::ChannelReady) {
- let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
+ let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
let _ = handle_error!(self, self.internal_channel_ready(counterparty_node_id, msg), *counterparty_node_id);
}
fn handle_shutdown(&self, counterparty_node_id: &PublicKey, msg: &msgs::Shutdown) {
- let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
+ let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
let _ = handle_error!(self, self.internal_shutdown(counterparty_node_id, msg), *counterparty_node_id);
}
fn handle_closing_signed(&self, counterparty_node_id: &PublicKey, msg: &msgs::ClosingSigned) {
- let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
+ let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
let _ = handle_error!(self, self.internal_closing_signed(counterparty_node_id, msg), *counterparty_node_id);
}
fn handle_update_add_htlc(&self, counterparty_node_id: &PublicKey, msg: &msgs::UpdateAddHTLC) {
- let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
+ let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
let _ = handle_error!(self, self.internal_update_add_htlc(counterparty_node_id, msg), *counterparty_node_id);
}
fn handle_update_fulfill_htlc(&self, counterparty_node_id: &PublicKey, msg: &msgs::UpdateFulfillHTLC) {
- let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
+ let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
let _ = handle_error!(self, self.internal_update_fulfill_htlc(counterparty_node_id, msg), *counterparty_node_id);
}
fn handle_update_fail_htlc(&self, counterparty_node_id: &PublicKey, msg: &msgs::UpdateFailHTLC) {
- let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
+ let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
let _ = handle_error!(self, self.internal_update_fail_htlc(counterparty_node_id, msg), *counterparty_node_id);
}
fn handle_update_fail_malformed_htlc(&self, counterparty_node_id: &PublicKey, msg: &msgs::UpdateFailMalformedHTLC) {
- let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
+ let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
let _ = handle_error!(self, self.internal_update_fail_malformed_htlc(counterparty_node_id, msg), *counterparty_node_id);
}
fn handle_commitment_signed(&self, counterparty_node_id: &PublicKey, msg: &msgs::CommitmentSigned) {
- let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
+ let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
let _ = handle_error!(self, self.internal_commitment_signed(counterparty_node_id, msg), *counterparty_node_id);
}
fn handle_revoke_and_ack(&self, counterparty_node_id: &PublicKey, msg: &msgs::RevokeAndACK) {
- let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
+ let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
let _ = handle_error!(self, self.internal_revoke_and_ack(counterparty_node_id, msg), *counterparty_node_id);
}
fn handle_update_fee(&self, counterparty_node_id: &PublicKey, msg: &msgs::UpdateFee) {
- let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
+ let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
let _ = handle_error!(self, self.internal_update_fee(counterparty_node_id, msg), *counterparty_node_id);
}
fn handle_announcement_signatures(&self, counterparty_node_id: &PublicKey, msg: &msgs::AnnouncementSignatures) {
- let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
+ let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
let _ = handle_error!(self, self.internal_announcement_signatures(counterparty_node_id, msg), *counterparty_node_id);
}
fn handle_channel_update(&self, counterparty_node_id: &PublicKey, msg: &msgs::ChannelUpdate) {
PersistenceNotifierGuard::optionally_notify(&self.total_consistency_lock, &self.persistence_notifier, || {
+ let force_persist = self.process_background_events();
if let Ok(persist) = handle_error!(self, self.internal_channel_update(counterparty_node_id, msg), *counterparty_node_id) {
- persist
+ if force_persist == NotifyOption::DoPersist { NotifyOption::DoPersist } else { persist }
} else {
NotifyOption::SkipPersist
}
}
fn handle_channel_reestablish(&self, counterparty_node_id: &PublicKey, msg: &msgs::ChannelReestablish) {
- let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
+ let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
let _ = handle_error!(self, self.internal_channel_reestablish(counterparty_node_id, msg), *counterparty_node_id);
}
fn peer_disconnected(&self, counterparty_node_id: &PublicKey) {
- let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
+ let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
let mut failed_channels = Vec::new();
let mut per_peer_state = self.per_peer_state.write().unwrap();
let remove_peer = {
peer_state.channel_by_id.retain(|_, chan| {
chan.remove_uncommitted_htlcs_and_mark_paused(&self.logger);
if chan.is_shutdown() {
- update_maps_on_chan_removal!(self, chan);
- self.issue_channel_close_events(chan, ClosureReason::DisconnectedPeer);
+ update_maps_on_chan_removal!(self, &chan.context);
+ self.issue_channel_close_events(&chan.context, ClosureReason::DisconnectedPeer);
return false;
}
true
});
+ peer_state.inbound_v1_channel_by_id.retain(|_, chan| {
+ update_maps_on_chan_removal!(self, &chan.context);
+ self.issue_channel_close_events(&chan.context, ClosureReason::DisconnectedPeer);
+ false
+ });
+ peer_state.outbound_v1_channel_by_id.retain(|_, chan| {
+ update_maps_on_chan_removal!(self, &chan.context);
+ self.issue_channel_close_events(&chan.context, ClosureReason::DisconnectedPeer);
+ false
+ });
pending_msg_events.retain(|msg| {
match msg {
+ // V1 Channel Establishment
&events::MessageSendEvent::SendAcceptChannel { .. } => false,
&events::MessageSendEvent::SendOpenChannel { .. } => false,
&events::MessageSendEvent::SendFundingCreated { .. } => false,
&events::MessageSendEvent::SendFundingSigned { .. } => false,
+ // V2 Channel Establishment
+ &events::MessageSendEvent::SendAcceptChannelV2 { .. } => false,
+ &events::MessageSendEvent::SendOpenChannelV2 { .. } => false,
+ // Common Channel Establishment
&events::MessageSendEvent::SendChannelReady { .. } => false,
&events::MessageSendEvent::SendAnnouncementSignatures { .. } => false,
+ // Interactive Transaction Construction
+ &events::MessageSendEvent::SendTxAddInput { .. } => false,
+ &events::MessageSendEvent::SendTxAddOutput { .. } => false,
+ &events::MessageSendEvent::SendTxRemoveInput { .. } => false,
+ &events::MessageSendEvent::SendTxRemoveOutput { .. } => false,
+ &events::MessageSendEvent::SendTxComplete { .. } => false,
+ &events::MessageSendEvent::SendTxSignatures { .. } => false,
+ &events::MessageSendEvent::SendTxInitRbf { .. } => false,
+ &events::MessageSendEvent::SendTxAckRbf { .. } => false,
+ &events::MessageSendEvent::SendTxAbort { .. } => false,
+ // Channel Operations
&events::MessageSendEvent::UpdateHTLCs { .. } => false,
&events::MessageSendEvent::SendRevokeAndACK { .. } => false,
&events::MessageSendEvent::SendClosingSigned { .. } => false,
&events::MessageSendEvent::SendShutdown { .. } => false,
&events::MessageSendEvent::SendChannelReestablish { .. } => false,
+ &events::MessageSendEvent::HandleError { .. } => false,
+ // Gossip
&events::MessageSendEvent::SendChannelAnnouncement { .. } => false,
&events::MessageSendEvent::BroadcastChannelAnnouncement { .. } => true,
&events::MessageSendEvent::BroadcastChannelUpdate { .. } => true,
&events::MessageSendEvent::BroadcastNodeAnnouncement { .. } => true,
&events::MessageSendEvent::SendChannelUpdate { .. } => false,
- &events::MessageSendEvent::HandleError { .. } => false,
&events::MessageSendEvent::SendChannelRangeQuery { .. } => false,
&events::MessageSendEvent::SendShortIdsQuery { .. } => false,
&events::MessageSendEvent::SendReplyChannelRange { .. } => false,
return Err(());
}
- let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
+ let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
// If we have too many peers connected which don't have funded channels, disconnect the
// peer immediately (as long as it doesn't have funded channels). If we have a bunch of
}
e.insert(Mutex::new(PeerState {
channel_by_id: HashMap::new(),
+ outbound_v1_channel_by_id: HashMap::new(),
+ inbound_v1_channel_by_id: HashMap::new(),
latest_features: init_msg.features.clone(),
pending_msg_events: Vec::new(),
+ in_flight_monitor_updates: BTreeMap::new(),
monitor_update_blocked_actions: BTreeMap::new(),
+ actions_blocking_raa_monitor_updates: BTreeMap::new(),
is_connected: true,
}));
},
let peer_state = &mut *peer_state_lock;
let pending_msg_events = &mut peer_state.pending_msg_events;
peer_state.channel_by_id.retain(|_, chan| {
- let retain = if chan.get_counterparty_node_id() == *counterparty_node_id {
- if !chan.have_received_message() {
+ let retain = if chan.context.get_counterparty_node_id() == *counterparty_node_id {
+ if !chan.context.have_received_message() {
// If we created this (outbound) channel while we were disconnected from the
// peer we probably failed to send the open_channel message, which is now
// lost. We can't have had anything pending related to this channel, so we just
false
} else {
pending_msg_events.push(events::MessageSendEvent::SendChannelReestablish {
- node_id: chan.get_counterparty_node_id(),
+ node_id: chan.context.get_counterparty_node_id(),
msg: chan.get_channel_reestablish(&self.logger),
});
true
}
} else { true };
- if retain && chan.get_counterparty_node_id() != *counterparty_node_id {
+ if retain && chan.context.get_counterparty_node_id() != *counterparty_node_id {
if let Some(msg) = chan.get_signed_channel_announcement(&self.node_signer, self.genesis_hash.clone(), self.best_block.read().unwrap().height(), &self.default_configuration) {
if let Ok(update_msg) = self.get_channel_update_for_broadcast(chan) {
pending_msg_events.push(events::MessageSendEvent::SendChannelAnnouncement {
}
fn handle_error(&self, counterparty_node_id: &PublicKey, msg: &msgs::ErrorMessage) {
- let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
+ let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
if msg.channel_id == [0; 32] {
let channel_ids: Vec<[u8; 32]> = {
if peer_state_mutex_opt.is_none() { return; }
let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
let peer_state = &mut *peer_state_lock;
- peer_state.channel_by_id.keys().cloned().collect()
+ peer_state.channel_by_id.keys().cloned()
+ .chain(peer_state.outbound_v1_channel_by_id.keys().cloned())
+ .chain(peer_state.inbound_v1_channel_by_id.keys().cloned()).collect()
};
for channel_id in channel_ids {
// Untrusted messages from peer, we throw away the error if id points to a non-existent channel
if peer_state_mutex_opt.is_none() { return; }
let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
let peer_state = &mut *peer_state_lock;
- if let Some(chan) = peer_state.channel_by_id.get_mut(&msg.channel_id) {
+ if let Some(chan) = peer_state.outbound_v1_channel_by_id.get_mut(&msg.channel_id) {
if let Ok(msg) = chan.maybe_handle_error_without_close(self.genesis_hash) {
peer_state.pending_msg_events.push(events::MessageSendEvent::SendOpenChannel {
node_id: *counterparty_node_id,
fn provided_init_features(&self, _their_node_id: &PublicKey) -> InitFeatures {
provided_init_features(&self.default_configuration)
}
+
+ fn get_genesis_hashes(&self) -> Option<Vec<ChainHash>> {
+ Some(vec![ChainHash::from(&self.genesis_hash[..])])
+ }
+
+ fn handle_tx_add_input(&self, counterparty_node_id: &PublicKey, msg: &msgs::TxAddInput) {
+ let _: Result<(), _> = handle_error!(self, Err(MsgHandleErrInternal::send_err_msg_no_close(
+ "Dual-funded channels not supported".to_owned(),
+ msg.channel_id.clone())), *counterparty_node_id);
+ }
+
+ fn handle_tx_add_output(&self, counterparty_node_id: &PublicKey, msg: &msgs::TxAddOutput) {
+ let _: Result<(), _> = handle_error!(self, Err(MsgHandleErrInternal::send_err_msg_no_close(
+ "Dual-funded channels not supported".to_owned(),
+ msg.channel_id.clone())), *counterparty_node_id);
+ }
+
+ fn handle_tx_remove_input(&self, counterparty_node_id: &PublicKey, msg: &msgs::TxRemoveInput) {
+ let _: Result<(), _> = handle_error!(self, Err(MsgHandleErrInternal::send_err_msg_no_close(
+ "Dual-funded channels not supported".to_owned(),
+ msg.channel_id.clone())), *counterparty_node_id);
+ }
+
+ fn handle_tx_remove_output(&self, counterparty_node_id: &PublicKey, msg: &msgs::TxRemoveOutput) {
+ let _: Result<(), _> = handle_error!(self, Err(MsgHandleErrInternal::send_err_msg_no_close(
+ "Dual-funded channels not supported".to_owned(),
+ msg.channel_id.clone())), *counterparty_node_id);
+ }
+
+ fn handle_tx_complete(&self, counterparty_node_id: &PublicKey, msg: &msgs::TxComplete) {
+ let _: Result<(), _> = handle_error!(self, Err(MsgHandleErrInternal::send_err_msg_no_close(
+ "Dual-funded channels not supported".to_owned(),
+ msg.channel_id.clone())), *counterparty_node_id);
+ }
+
+ fn handle_tx_signatures(&self, counterparty_node_id: &PublicKey, msg: &msgs::TxSignatures) {
+ let _: Result<(), _> = handle_error!(self, Err(MsgHandleErrInternal::send_err_msg_no_close(
+ "Dual-funded channels not supported".to_owned(),
+ msg.channel_id.clone())), *counterparty_node_id);
+ }
+
+ fn handle_tx_init_rbf(&self, counterparty_node_id: &PublicKey, msg: &msgs::TxInitRbf) {
+ let _: Result<(), _> = handle_error!(self, Err(MsgHandleErrInternal::send_err_msg_no_close(
+ "Dual-funded channels not supported".to_owned(),
+ msg.channel_id.clone())), *counterparty_node_id);
+ }
+
+ fn handle_tx_ack_rbf(&self, counterparty_node_id: &PublicKey, msg: &msgs::TxAckRbf) {
+ let _: Result<(), _> = handle_error!(self, Err(MsgHandleErrInternal::send_err_msg_no_close(
+ "Dual-funded channels not supported".to_owned(),
+ msg.channel_id.clone())), *counterparty_node_id);
+ }
+
+ fn handle_tx_abort(&self, counterparty_node_id: &PublicKey, msg: &msgs::TxAbort) {
+ let _: Result<(), _> = handle_error!(self, Err(MsgHandleErrInternal::send_err_msg_no_close(
+ "Dual-funded channels not supported".to_owned(),
+ msg.channel_id.clone())), *counterparty_node_id);
+ }
}
/// Fetches the set of [`NodeFeatures`] flags which are provided by or required by
/// Fetches the set of [`InitFeatures`] flags which are provided by or required by
/// [`ChannelManager`].
-pub fn provided_init_features(_config: &UserConfig) -> InitFeatures {
+pub fn provided_init_features(config: &UserConfig) -> InitFeatures {
// Note that if new features are added here which other peers may (eventually) require, we
- // should also add the corresponding (optional) bit to the ChannelMessageHandler impl for
- // ErroringMessageHandler.
+ // should also add the corresponding (optional) bit to the [`ChannelMessageHandler`] impl for
+ // [`ErroringMessageHandler`].
let mut features = InitFeatures::empty();
- features.set_data_loss_protect_optional();
+ features.set_data_loss_protect_required();
features.set_upfront_shutdown_script_optional();
features.set_variable_length_onion_required();
features.set_static_remote_key_required();
features.set_channel_type_optional();
features.set_scid_privacy_optional();
features.set_zero_conf_optional();
- #[cfg(anchors)]
- { // Attributes are not allowed on if expressions on our current MSRV of 1.41.
- if _config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx {
- features.set_anchors_zero_fee_htlc_tx_optional();
- }
+ if config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx {
+ features.set_anchors_zero_fee_htlc_tx_optional();
}
features
}
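+// A minimal sketch of the config gating above, assuming the crate's usual
+// `UserConfig::default()` and the feature-macro-generated `supports_*`
+// accessor name.
+fn anchors_config_example() {
+	let mut config = UserConfig::default();
+	config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx = true;
+	let features = provided_init_features(&config);
+	assert!(features.supports_anchors_zero_fee_htlc_tx());
+}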
(14, user_channel_id_low, required),
(16, self.balance_msat, required),
(18, self.outbound_capacity_msat, required),
- // Note that by the time we get past the required read above, outbound_capacity_msat will be
- // filled in, so we can safely unwrap it here.
- (19, self.next_outbound_htlc_limit_msat, (default_value, outbound_capacity_msat.0.unwrap() as u64)),
+ (19, self.next_outbound_htlc_limit_msat, required),
(20, self.inbound_capacity_msat, required),
+ (21, self.next_outbound_htlc_minimum_msat, required),
(22, self.confirmations_required, option),
(24, self.force_close_spend_delay, option),
(26, self.is_outbound, required),
(33, self.inbound_htlc_minimum_msat, option),
(35, self.inbound_htlc_maximum_msat, option),
(37, user_channel_id_high_opt, option),
+ (39, self.feerate_sat_per_1000_weight, option),
+ (41, self.channel_shutdown_state, option),
});
Ok(())
}
// filled in, so we can safely unwrap it here.
(19, next_outbound_htlc_limit_msat, (default_value, outbound_capacity_msat.0.unwrap() as u64)),
(20, inbound_capacity_msat, required),
+ (21, next_outbound_htlc_minimum_msat, (default_value, 0)),
(22, confirmations_required, option),
(24, force_close_spend_delay, option),
(26, is_outbound, required),
(33, inbound_htlc_minimum_msat, option),
(35, inbound_htlc_maximum_msat, option),
(37, user_channel_id_high_opt, option),
+ (39, feerate_sat_per_1000_weight, option),
+ (41, channel_shutdown_state, option),
});
// `user_channel_id` used to be a single u64 value. In order to remain backwards compatible with
balance_msat: balance_msat.0.unwrap(),
outbound_capacity_msat: outbound_capacity_msat.0.unwrap(),
next_outbound_htlc_limit_msat: next_outbound_htlc_limit_msat.0.unwrap(),
+ next_outbound_htlc_minimum_msat: next_outbound_htlc_minimum_msat.0.unwrap(),
inbound_capacity_msat: inbound_capacity_msat.0.unwrap(),
confirmations_required,
confirmations,
is_public: is_public.0.unwrap(),
inbound_htlc_minimum_msat,
inbound_htlc_maximum_msat,
+ feerate_sat_per_1000_weight,
+ channel_shutdown_state,
})
}
}
impl_writeable_tlv_based!(PhantomRouteHints, {
- (2, channels, vec_type),
+ (2, channels, required_vec),
(4, phantom_scid, required),
(6, real_node_pubkey, required),
});
(0, payment_data, required),
(1, phantom_shared_secret, option),
(2, incoming_cltv_expiry, required),
+ (3, payment_metadata, option),
},
(2, ReceiveKeysend) => {
(0, payment_preimage, required),
(2, incoming_cltv_expiry, required),
+ (3, payment_metadata, option),
+ (4, payment_data, option), // Added in 0.0.116
},
;);
(6, outgoing_amt_msat, required),
(8, outgoing_cltv_value, required),
(9, incoming_amt_msat, option),
+ (10, skimmed_fee_msat, option),
});
(0, self.prev_hop, required),
(1, self.total_msat, required),
(2, self.value, required),
+ (3, self.sender_intended_value, required),
(4, payment_data, option),
+ (5, self.total_value_received, option),
(6, self.cltv_expiry, required),
(8, keysend_preimage, option),
+ (10, self.counterparty_skimmed_fee_msat, option),
});
Ok(())
}
impl Readable for ClaimableHTLC {
fn read<R: Read>(reader: &mut R) -> Result<Self, DecodeError> {
- let mut prev_hop = crate::util::ser::RequiredWrapper(None);
- let mut value = 0;
- let mut payment_data: Option<msgs::FinalOnionHopData> = None;
- let mut cltv_expiry = 0;
- let mut total_msat = None;
- let mut keysend_preimage: Option<PaymentPreimage> = None;
- read_tlv_fields!(reader, {
+ _init_and_read_tlv_fields!(reader, {
(0, prev_hop, required),
(1, total_msat, option),
- (2, value, required),
- (4, payment_data, option),
+ (2, value_ser, required),
+ (3, sender_intended_value, option),
+ (4, payment_data_opt, option),
+ (5, total_value_received, option),
(6, cltv_expiry, required),
- (8, keysend_preimage, option)
+ (8, keysend_preimage, option),
+ (10, counterparty_skimmed_fee_msat, option),
});
+ let payment_data: Option<msgs::FinalOnionHopData> = payment_data_opt;
+ let value = value_ser.0.unwrap();
let onion_payload = match keysend_preimage {
Some(p) => {
if payment_data.is_some() {
prev_hop: prev_hop.0.unwrap(),
timer_ticks: 0,
value,
+ sender_intended_value: sender_intended_value.unwrap_or(value),
+ total_value_received,
total_msat: total_msat.unwrap(),
onion_payload,
- cltv_expiry,
+ cltv_expiry: cltv_expiry.0.unwrap(),
+ counterparty_skimmed_fee_msat,
})
}
}
0 => {
let mut session_priv: crate::util::ser::RequiredWrapper<SecretKey> = crate::util::ser::RequiredWrapper(None);
let mut first_hop_htlc_msat: u64 = 0;
- let mut path = Some(Vec::new());
+ let mut path_hops = Vec::new();
let mut payment_id = None;
- let mut payment_secret = None;
- let mut payment_params = None;
+ let mut payment_params: Option<PaymentParameters> = None;
+ let mut blinded_tail: Option<BlindedTail> = None;
read_tlv_fields!(reader, {
(0, session_priv, required),
(1, payment_id, option),
(2, first_hop_htlc_msat, required),
- (3, payment_secret, option),
- (4, path, vec_type),
- (5, payment_params, option),
+ (4, path_hops, required_vec),
+ (5, payment_params, (option: ReadableArgs, 0)),
+ (6, blinded_tail, option),
});
if payment_id.is_none() {
// For backwards compat, if there was no payment_id written, use the session_priv bytes
// instead.
payment_id = Some(PaymentId(*session_priv.0.unwrap().as_ref()));
}
+ let path = Path { hops: path_hops, blinded_tail };
+ if path.hops.len() == 0 {
+ return Err(DecodeError::InvalidValue);
+ }
+ if let Some(params) = payment_params.as_mut() {
+ if let Payee::Clear { ref mut final_cltv_expiry_delta, .. } = params.payee {
+ if final_cltv_expiry_delta == &0 {
+ *final_cltv_expiry_delta = path.final_cltv_expiry_delta().ok_or(DecodeError::InvalidValue)?;
+ }
+ }
+ }
Ok(HTLCSource::OutboundRoute {
session_priv: session_priv.0.unwrap(),
first_hop_htlc_msat,
- path: path.unwrap(),
+ path,
payment_id: payment_id.unwrap(),
- payment_secret,
- payment_params,
})
}
1 => Ok(HTLCSource::PreviousHopData(Readable::read(reader)?)),
impl Writeable for HTLCSource {
fn write<W: Writer>(&self, writer: &mut W) -> Result<(), crate::io::Error> {
match self {
- HTLCSource::OutboundRoute { ref session_priv, ref first_hop_htlc_msat, ref path, payment_id, payment_secret, payment_params } => {
+ HTLCSource::OutboundRoute { ref session_priv, ref first_hop_htlc_msat, ref path, payment_id } => {
0u8.write(writer)?;
let payment_id_opt = Some(payment_id);
write_tlv_fields!(writer, {
(0, session_priv, required),
(1, payment_id_opt, option),
(2, first_hop_htlc_msat, required),
- (3, payment_secret, option),
- (4, *path, vec_type),
- (5, payment_params, option),
+ // 3 was previously used to write a PaymentSecret for the payment.
+ (4, path.hops, required_vec),
+ (5, None::<PaymentParameters>, option), // payment_params in LDK versions prior to 0.0.115
+ (6, path.blinded_tail, option),
});
}
HTLCSource::PreviousHopData(ref field) => {
}
number_of_channels += peer_state.channel_by_id.len();
for (_, channel) in peer_state.channel_by_id.iter() {
- if !channel.is_funding_initiated() {
+ if !channel.context.is_funding_initiated() {
unfunded_channels += 1;
}
}
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state = &mut *peer_state_lock;
for (_, channel) in peer_state.channel_by_id.iter() {
- if channel.is_funding_initiated() {
+ if channel.context.is_funding_initiated() {
channel.write(writer)?;
}
}
let pending_outbound_payments = self.pending_outbound_payments.pending_outbound_payments.lock().unwrap();
let mut htlc_purposes: Vec<&events::PaymentPurpose> = Vec::new();
- (claimable_payments.claimable_htlcs.len() as u64).write(writer)?;
- for (payment_hash, (purpose, previous_hops)) in claimable_payments.claimable_htlcs.iter() {
+ let mut htlc_onion_fields: Vec<&_> = Vec::new();
+ (claimable_payments.claimable_payments.len() as u64).write(writer)?;
+ for (payment_hash, payment) in claimable_payments.claimable_payments.iter() {
payment_hash.write(writer)?;
- (previous_hops.len() as u64).write(writer)?;
- for htlc in previous_hops.iter() {
+ (payment.htlcs.len() as u64).write(writer)?;
+ for htlc in payment.htlcs.iter() {
htlc.write(writer)?;
}
- htlc_purposes.push(purpose);
+ htlc_purposes.push(&payment.purpose);
+ htlc_onion_fields.push(&payment.onion_fields);
}
let mut monitor_update_blocked_actions_per_peer = None;
}
let events = self.pending_events.lock().unwrap();
- (events.len() as u64).write(writer)?;
- for event in events.iter() {
- event.write(writer)?;
- }
-
- let background_events = self.pending_background_events.lock().unwrap();
- (background_events.len() as u64).write(writer)?;
- for event in background_events.iter() {
- match event {
- BackgroundEvent::ClosingMonitorUpdate((funding_txo, monitor_update)) => {
- 0u8.write(writer)?;
- funding_txo.write(writer)?;
- monitor_update.write(writer)?;
- },
+ // LDK versions prior to 0.0.115 don't support post-event actions, thus if there's no
+ // actions at all, skip writing the required TLV. Otherwise, pre-0.0.115 versions will
+ // refuse to read the new ChannelManager.
+ let events_not_backwards_compatible = events.iter().any(|(_, action)| action.is_some());
+ if events_not_backwards_compatible {
+ // If we're going to write an even TLV that will overwrite our events anyway we might as
+ // well save the space and not write any events here.
+ 0u64.write(writer)?;
+ } else {
+ (events.len() as u64).write(writer)?;
+ for (event, _) in events.iter() {
+ event.write(writer)?;
}
}
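+// A standalone sketch of the even/odd TLV rule relied on above: readers must
+// reject an unrecognized *even* TLV type but may skip an unrecognized *odd*
+// one, so writing an even type below forces pre-0.0.115 readers to refuse
+// this ChannelManager rather than silently dropping the post-event actions.
+fn unknown_tlv_type_is_fatal(tlv_type: u64) -> bool {
+	tlv_type % 2 == 0
+}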
+ // LDK versions prior to 0.0.116 wrote the `pending_background_events`
+ // `MonitorUpdateRegeneratedOnStartup`s here, however there was never a reason to do so -
+ // the closing monitor updates were always effectively replayed on startup (either directly
+ // by calling `broadcast_latest_holder_commitment_txn` on a `ChannelMonitor` during
+ // deserialization or, in 0.0.115, by regenerating the monitor update itself).
+ 0u64.write(writer)?;
+
// Prior to 0.0.111 we tracked node_announcement serials here, however that now happens in
// `PeerManager`, and thus we simply write the `highest_seen_timestamp` twice, which is
// likely to be identical.
pending_claiming_payments = None;
}
+ let mut in_flight_monitor_updates: Option<HashMap<(&PublicKey, &OutPoint), &Vec<ChannelMonitorUpdate>>> = None;
+ for ((counterparty_id, _), peer_state) in per_peer_state.iter().zip(peer_states.iter()) {
+ for (funding_outpoint, updates) in peer_state.in_flight_monitor_updates.iter() {
+ if !updates.is_empty() {
+ if in_flight_monitor_updates.is_none() { in_flight_monitor_updates = Some(HashMap::new()); }
+ in_flight_monitor_updates.as_mut().unwrap().insert((counterparty_id, funding_outpoint), updates);
+ }
+ }
+ }
+
write_tlv_fields!(writer, {
(1, pending_outbound_payments_no_retry, required),
(2, pending_intercepted_htlcs, option),
(5, self.our_network_pubkey, required),
(6, monitor_update_blocked_actions_per_peer, option),
(7, self.fake_scid_rand_bytes, required),
- (9, htlc_purposes, vec_type),
+ (8, if events_not_backwards_compatible { Some(&*events) } else { None }, option),
+ (9, htlc_purposes, required_vec),
+ (10, in_flight_monitor_updates, option),
(11, self.probing_cookie_secret, required),
+ (13, htlc_onion_fields, optional_vec),
});
Ok(())
}
}
+impl Writeable for VecDeque<(Event, Option<EventCompletionAction>)> {
+ fn write<W: Writer>(&self, w: &mut W) -> Result<(), io::Error> {
+ (self.len() as u64).write(w)?;
+ for (event, action) in self.iter() {
+ event.write(w)?;
+ action.write(w)?;
+ #[cfg(debug_assertions)] {
+ // Events are MaybeReadable, in some cases indicating that they shouldn't actually
+ // be persisted and are regenerated on restart. However, if such an event has a
+ // post-event-handling action we'll write nothing for the event and would have to
+ // either forget the action or fail on deserialization (which we do below). Thus,
+ // check that the event is sane here.
+ let event_encoded = event.encode();
+ let event_read: Option<Event> =
+ MaybeReadable::read(&mut &event_encoded[..]).unwrap();
+ if action.is_some() { assert!(event_read.is_some()); }
+ }
+ }
+ Ok(())
+ }
+}
+impl Readable for VecDeque<(Event, Option<EventCompletionAction>)> {
+ fn read<R: Read>(reader: &mut R) -> Result<Self, DecodeError> {
+ let len: u64 = Readable::read(reader)?;
+ const MAX_ALLOC_SIZE: u64 = 1024 * 16;
+ let mut events: Self = VecDeque::with_capacity(cmp::min(
+ MAX_ALLOC_SIZE/mem::size_of::<(events::Event, Option<EventCompletionAction>)>() as u64,
+ len) as usize);
+ for _ in 0..len {
+ let ev_opt = MaybeReadable::read(reader)?;
+ let action = Readable::read(reader)?;
+ if let Some(ev) = ev_opt {
+ events.push_back((ev, action));
+ } else if action.is_some() {
+ return Err(DecodeError::InvalidValue);
+ }
+ }
+ Ok(events)
+ }
+}
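+// A self-contained sketch of the allocation-capping pattern used in the
+// reader above: never preallocate based on an untrusted length prefix alone;
+// clamp the initial capacity to a fixed byte budget and let the buffer grow
+// only as elements actually decode.
+fn read_len_prefixed_u64s<R: std::io::Read>(r: &mut R, len: u64) -> std::io::Result<Vec<u64>> {
+	const MAX_ALLOC_BYTES: u64 = 1024 * 16;
+	let cap = core::cmp::min(MAX_ALLOC_BYTES / core::mem::size_of::<u64>() as u64, len);
+	let mut out = Vec::with_capacity(cap as usize);
+	for _ in 0..len {
+		let mut buf = [0u8; 8];
+		r.read_exact(&mut buf)?;
+		out.push(u64::from_be_bytes(buf));
+	}
+	Ok(out)
+}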
+
+impl_writeable_tlv_based_enum!(ChannelShutdownState,
+ (0, NotShuttingDown) => {},
+ (2, ShutdownInitiated) => {},
+ (4, ResolvingHTLCs) => {},
+ (6, NegotiatingClosingFee) => {},
+ (8, ShutdownComplete) => {}, ;
+);
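+// The variant ids above are deliberately all even, leaving odd ids free should
+// a future version need to add shutdown states which older readers could be
+// taught to skip under the usual odd-is-ignorable convention.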
+
/// Arguments for the creation of a ChannelManager that are not deserialized.
///
/// At a high-level, the process for deserializing a ChannelManager and resuming normal operation
pub default_config: UserConfig,
/// A map from channel funding outpoints to ChannelMonitors for those channels (ie
- /// value.get_funding_txo() should be the key).
+ /// value.context.get_funding_txo() should be the key).
///
/// If a monitor is inconsistent with the channel state during deserialization the channel will
/// be force-closed using the data in the ChannelMonitor and the channel will be dropped. This
/// In such cases the latest local transactions will be sent to the tx_broadcaster included in
/// this struct.
///
- /// (C-not exported) because we have no HashMap bindings
+ /// This is not exported to bindings users because we have no HashMap bindings
pub channel_monitors: HashMap<OutPoint, &'a mut ChannelMonitor<<SP::Target as SignerProvider>::Signer>>,
}
let mut peer_channels: HashMap<PublicKey, HashMap<[u8; 32], Channel<<SP::Target as SignerProvider>::Signer>>> = HashMap::with_capacity(cmp::min(channel_count as usize, 128));
let mut id_to_peer = HashMap::with_capacity(cmp::min(channel_count as usize, 128));
let mut short_to_chan_info = HashMap::with_capacity(cmp::min(channel_count as usize, 128));
- let mut channel_closures = Vec::new();
+ let mut channel_closures = VecDeque::new();
+ let mut close_background_events = Vec::new();
for _ in 0..channel_count {
let mut channel: Channel<<SP::Target as SignerProvider>::Signer> = Channel::read(reader, (
&args.entropy_source, &args.signer_provider, best_block_height, &provided_channel_type_features(&args.default_config)
))?;
- let funding_txo = channel.get_funding_txo().ok_or(DecodeError::InvalidValue)?;
+ let funding_txo = channel.context.get_funding_txo().ok_or(DecodeError::InvalidValue)?;
funding_txo_set.insert(funding_txo.clone());
if let Some(ref mut monitor) = args.channel_monitors.get_mut(&funding_txo) {
- if channel.get_cur_holder_commitment_transaction_number() < monitor.get_cur_holder_commitment_number() ||
- channel.get_revoked_counterparty_commitment_transaction_number() < monitor.get_min_seen_secret() ||
- channel.get_cur_counterparty_commitment_transaction_number() < monitor.get_cur_counterparty_commitment_number() ||
- channel.get_latest_monitor_update_id() > monitor.get_latest_update_id() {
- // If the channel is ahead of the monitor, return InvalidValue:
- log_error!(args.logger, "A ChannelMonitor is stale compared to the current ChannelManager! This indicates a potentially-critical violation of the chain::Watch API!");
- log_error!(args.logger, " The ChannelMonitor for channel {} is at update_id {} but the ChannelManager is at update_id {}.",
- log_bytes!(channel.channel_id()), monitor.get_latest_update_id(), channel.get_latest_monitor_update_id());
- log_error!(args.logger, " The chain::Watch API *requires* that monitors are persisted durably before returning,");
- log_error!(args.logger, " client applications must ensure that ChannelMonitor data is always available and the latest to avoid funds loss!");
- log_error!(args.logger, " Without the latest ChannelMonitor we cannot continue without risking funds.");
- log_error!(args.logger, " Please ensure the chain::Watch API requirements are met and file a bug report at https://github.com/lightningdevkit/rust-lightning");
- return Err(DecodeError::InvalidValue);
- } else if channel.get_cur_holder_commitment_transaction_number() > monitor.get_cur_holder_commitment_number() ||
+ if channel.get_cur_holder_commitment_transaction_number() > monitor.get_cur_holder_commitment_number() ||
channel.get_revoked_counterparty_commitment_transaction_number() > monitor.get_min_seen_secret() ||
channel.get_cur_counterparty_commitment_transaction_number() > monitor.get_cur_counterparty_commitment_number() ||
- channel.get_latest_monitor_update_id() < monitor.get_latest_update_id() {
+ channel.context.get_latest_monitor_update_id() < monitor.get_latest_update_id() {
// But if the channel is behind the monitor, close the channel:
log_error!(args.logger, "A ChannelManager is stale compared to the current ChannelMonitor!");
log_error!(args.logger, " The channel will be force-closed and the latest commitment transaction from the ChannelMonitor broadcast.");
log_error!(args.logger, " The ChannelMonitor for channel {} is at update_id {} but the ChannelManager is at update_id {}.",
- log_bytes!(channel.channel_id()), monitor.get_latest_update_id(), channel.get_latest_monitor_update_id());
- let (_, mut new_failed_htlcs) = channel.force_shutdown(true);
+ log_bytes!(channel.context.channel_id()), monitor.get_latest_update_id(), channel.context.get_latest_monitor_update_id());
+ let (monitor_update, mut new_failed_htlcs) = channel.context.force_shutdown(true);
+ if let Some((counterparty_node_id, funding_txo, update)) = monitor_update {
+ close_background_events.push(BackgroundEvent::MonitorUpdateRegeneratedOnStartup {
+ counterparty_node_id, funding_txo, update
+ });
+ }
failed_htlcs.append(&mut new_failed_htlcs);
- monitor.broadcast_latest_holder_commitment_txn(&args.tx_broadcaster, &args.logger);
- channel_closures.push(events::Event::ChannelClosed {
- channel_id: channel.channel_id(),
- user_channel_id: channel.get_user_id(),
+ channel_closures.push_back((events::Event::ChannelClosed {
+ channel_id: channel.context.channel_id(),
+ user_channel_id: channel.context.get_user_id(),
reason: ClosureReason::OutdatedChannelManager
- });
+ }, None));
for (channel_htlc_source, payment_hash) in channel.inflight_htlc_sources() {
let mut found_htlc = false;
for (monitor_htlc_source, _) in monitor.get_all_current_outbound_htlcs() {
// backwards leg of the HTLC will simply be rejected.
log_info!(args.logger,
"Failing HTLC with hash {} as it is missing in the ChannelMonitor for channel {} but was present in the (stale) ChannelManager",
- log_bytes!(channel.channel_id()), log_bytes!(payment_hash.0));
- failed_htlcs.push((channel_htlc_source.clone(), *payment_hash, channel.get_counterparty_node_id(), channel.channel_id()));
+ log_bytes!(payment_hash.0), log_bytes!(channel.context.channel_id()));
+ failed_htlcs.push((channel_htlc_source.clone(), *payment_hash, channel.context.get_counterparty_node_id(), channel.context.channel_id()));
}
}
} else {
- log_info!(args.logger, "Successfully loaded channel {}", log_bytes!(channel.channel_id()));
- if let Some(short_channel_id) = channel.get_short_channel_id() {
- short_to_chan_info.insert(short_channel_id, (channel.get_counterparty_node_id(), channel.channel_id()));
+ log_info!(args.logger, "Successfully loaded channel {} at update_id {} against monitor at update id {}",
+ log_bytes!(channel.context.channel_id()), channel.context.get_latest_monitor_update_id(),
+ monitor.get_latest_update_id());
+ if let Some(short_channel_id) = channel.context.get_short_channel_id() {
+ short_to_chan_info.insert(short_channel_id, (channel.context.get_counterparty_node_id(), channel.context.channel_id()));
}
- if channel.is_funding_initiated() {
- id_to_peer.insert(channel.channel_id(), channel.get_counterparty_node_id());
+ if channel.context.is_funding_initiated() {
+ id_to_peer.insert(channel.context.channel_id(), channel.context.get_counterparty_node_id());
}
- match peer_channels.entry(channel.get_counterparty_node_id()) {
+ match peer_channels.entry(channel.context.get_counterparty_node_id()) {
hash_map::Entry::Occupied(mut entry) => {
let by_id_map = entry.get_mut();
- by_id_map.insert(channel.channel_id(), channel);
+ by_id_map.insert(channel.context.channel_id(), channel);
},
hash_map::Entry::Vacant(entry) => {
let mut by_id_map = HashMap::new();
- by_id_map.insert(channel.channel_id(), channel);
+ by_id_map.insert(channel.context.channel_id(), channel);
entry.insert(by_id_map);
}
}
// If we were persisted and shut down while the initial ChannelMonitor persistence
// was in progress, we never broadcast the funding transaction and can still
// safely discard the channel.
- let _ = channel.force_shutdown(false);
- channel_closures.push(events::Event::ChannelClosed {
- channel_id: channel.channel_id(),
- user_channel_id: channel.get_user_id(),
+ let _ = channel.context.force_shutdown(false);
+ channel_closures.push_back((events::Event::ChannelClosed {
+ channel_id: channel.context.channel_id(),
+ user_channel_id: channel.context.get_user_id(),
reason: ClosureReason::DisconnectedPeer,
- });
+ }, None));
} else {
- log_error!(args.logger, "Missing ChannelMonitor for channel {} needed by ChannelManager.", log_bytes!(channel.channel_id()));
+ log_error!(args.logger, "Missing ChannelMonitor for channel {} needed by ChannelManager.", log_bytes!(channel.context.channel_id()));
log_error!(args.logger, " The chain::Watch API *requires* that monitors are persisted durably before returning,");
log_error!(args.logger, " client applications must ensure that ChannelMonitor data is always available and the latest to avoid funds loss!");
log_error!(args.logger, " Without the ChannelMonitor we cannot continue without risking funds.");
}
}
- for (funding_txo, monitor) in args.channel_monitors.iter_mut() {
+ for (funding_txo, _) in args.channel_monitors.iter() {
if !funding_txo_set.contains(funding_txo) {
- log_info!(args.logger, "Broadcasting latest holder commitment transaction for closed channel {}", log_bytes!(funding_txo.to_channel_id()));
- monitor.broadcast_latest_holder_commitment_txn(&args.tx_broadcaster, &args.logger);
+ log_info!(args.logger, "Queueing monitor update to ensure missing channel {} is force closed",
+ log_bytes!(funding_txo.to_channel_id()));
+ let monitor_update = ChannelMonitorUpdate {
+ update_id: CLOSED_CHANNEL_UPDATE_ID,
+ updates: vec![ChannelMonitorUpdateStep::ChannelForceClosed { should_broadcast: true }],
+ };
+ close_background_events.push(BackgroundEvent::ClosedMonitorUpdateRegeneratedOnStartup((*funding_txo, monitor_update)));
}
}
claimable_htlcs_list.push((payment_hash, previous_hops));
}
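+ // Helper to build an otherwise-empty `PeerState` from a set of channels. This
+ // is used both for the peers read directly below and, further down, for peers
+ // we must reconstruct solely because they still have in-flight monitor updates
+ // for channels which have already been closed.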
+ let peer_state_from_chans = |channel_by_id| {
+ PeerState {
+ channel_by_id,
+ outbound_v1_channel_by_id: HashMap::new(),
+ inbound_v1_channel_by_id: HashMap::new(),
+ latest_features: InitFeatures::empty(),
+ pending_msg_events: Vec::new(),
+ in_flight_monitor_updates: BTreeMap::new(),
+ monitor_update_blocked_actions: BTreeMap::new(),
+ actions_blocking_raa_monitor_updates: BTreeMap::new(),
+ is_connected: false,
+ }
+ };
+
let peer_count: u64 = Readable::read(reader)?;
let mut per_peer_state = HashMap::with_capacity(cmp::min(peer_count as usize, MAX_ALLOC_SIZE/mem::size_of::<(PublicKey, Mutex<PeerState<<SP::Target as SignerProvider>::Signer>>)>()));
for _ in 0..peer_count {
let peer_pubkey = Readable::read(reader)?;
- let peer_state = PeerState {
- channel_by_id: peer_channels.remove(&peer_pubkey).unwrap_or(HashMap::new()),
- latest_features: Readable::read(reader)?,
- pending_msg_events: Vec::new(),
- monitor_update_blocked_actions: BTreeMap::new(),
- is_connected: false,
- };
+ let peer_chans = peer_channels.remove(&peer_pubkey).unwrap_or(HashMap::new());
+ let mut peer_state = peer_state_from_chans(peer_chans);
+ peer_state.latest_features = Readable::read(reader)?;
per_peer_state.insert(peer_pubkey, Mutex::new(peer_state));
}
let event_count: u64 = Readable::read(reader)?;
- let mut pending_events_read: Vec<events::Event> = Vec::with_capacity(cmp::min(event_count as usize, MAX_ALLOC_SIZE/mem::size_of::<events::Event>()));
+ let mut pending_events_read: VecDeque<(events::Event, Option<EventCompletionAction>)> =
+ VecDeque::with_capacity(cmp::min(event_count as usize, MAX_ALLOC_SIZE/mem::size_of::<(events::Event, Option<EventCompletionAction>)>()));
for _ in 0..event_count {
match MaybeReadable::read(reader)? {
- Some(event) => pending_events_read.push(event),
+ Some(event) => pending_events_read.push_back((event, None)),
None => continue,
}
}
let background_event_count: u64 = Readable::read(reader)?;
- let mut pending_background_events_read: Vec<BackgroundEvent> = Vec::with_capacity(cmp::min(background_event_count as usize, MAX_ALLOC_SIZE/mem::size_of::<BackgroundEvent>()));
for _ in 0..background_event_count {
match <u8 as Readable>::read(reader)? {
- 0 => pending_background_events_read.push(BackgroundEvent::ClosingMonitorUpdate((Readable::read(reader)?, Readable::read(reader)?))),
+ 0 => {
+ // LDK versions prior to 0.0.116 wrote pending `MonitorUpdateRegeneratedOnStartup`s here,
+ // however we really don't (and never did) need them - we regenerate all
+ // on-startup monitor updates.
+ let _: OutPoint = Readable::read(reader)?;
+ let _: ChannelMonitorUpdate = Readable::read(reader)?;
+ }
_ => return Err(DecodeError::InvalidValue),
}
}
let mut fake_scid_rand_bytes: Option<[u8; 32]> = None;
let mut probing_cookie_secret: Option<[u8; 32]> = None;
let mut claimable_htlc_purposes = None;
+ let mut claimable_htlc_onion_fields = None;
let mut pending_claiming_payments = Some(HashMap::new());
- let mut monitor_update_blocked_actions_per_peer = Some(Vec::new());
+ let mut monitor_update_blocked_actions_per_peer: Option<Vec<(_, BTreeMap<_, Vec<_>>)>> = Some(Vec::new());
+ let mut events_override = None;
+ let mut in_flight_monitor_updates: Option<HashMap<(PublicKey, OutPoint), Vec<ChannelMonitorUpdate>>> = None;
read_tlv_fields!(reader, {
(1, pending_outbound_payments_no_retry, option),
(2, pending_intercepted_htlcs, option),
(5, received_network_pubkey, option),
(6, monitor_update_blocked_actions_per_peer, option),
(7, fake_scid_rand_bytes, option),
- (9, claimable_htlc_purposes, vec_type),
+ (8, events_override, option),
+ (9, claimable_htlc_purposes, optional_vec),
+ (10, in_flight_monitor_updates, option),
(11, probing_cookie_secret, option),
+ (13, claimable_htlc_onion_fields, optional_vec),
});
if fake_scid_rand_bytes.is_none() {
fake_scid_rand_bytes = Some(args.entropy_source.get_secure_random_bytes());
}

if probing_cookie_secret.is_none() {
probing_cookie_secret = Some(args.entropy_source.get_secure_random_bytes());
}
+ if let Some(events) = events_override {
+ pending_events_read = events;
+ }
+
+ if !channel_closures.is_empty() {
+ pending_events_read.append(&mut channel_closures);
+ }
+
if pending_outbound_payments.is_none() && pending_outbound_payments_no_retry.is_none() {
pending_outbound_payments = Some(pending_outbound_payments_compat);
} else if pending_outbound_payments.is_none() {
outbounds.insert(id, PendingOutboundPayment::Legacy { session_privs });
}
pending_outbound_payments = Some(outbounds);
- } else {
+ }
+ let pending_outbounds = OutboundPayments {
+ pending_outbound_payments: Mutex::new(pending_outbound_payments.unwrap()),
+ retry_lock: Mutex::new(())
+ };
+
+ // We have to replay (or skip, if they were completed after we wrote the `ChannelManager`)
+ // each `ChannelMonitorUpdate` in `in_flight_monitor_updates`. After doing so, we have to
+ // check that each channel we have isn't newer than the latest `ChannelMonitorUpdate`(s) we
+ // replayed, and for each monitor update we have to replay we have to ensure there's a
+ // `ChannelMonitor` for it.
+ //
+ // In order to do so we first walk all of our live channels (so that we can check their
+ // state immediately after doing the update replays, when we have the `update_id`s
+ // available) and then walk any remaining in-flight updates.
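+ //
+ // As a hypothetical example: with a persisted monitor at update_id 15 and
+ // in-flight updates 16 and 17 replayed here, a channel whose latest unblocked
+ // update_id is 17 is consistent; had update 17 been lost, the channel would be
+ // ahead of anything we can reconstruct and deserialization must fail.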
+ //
+ // Because the actual handling of the in-flight updates is the same, it's macro'ized here:
+ let mut pending_background_events = Vec::new();
+ macro_rules! handle_in_flight_updates {
+ ($counterparty_node_id: expr, $chan_in_flight_upds: expr, $funding_txo: expr,
+ $monitor: expr, $peer_state: expr, $channel_info_log: expr
+ ) => { {
+ let mut max_in_flight_update_id = 0;
+ $chan_in_flight_upds.retain(|upd| upd.update_id > $monitor.get_latest_update_id());
+ for update in $chan_in_flight_upds.iter() {
+ log_trace!(args.logger, "Replaying ChannelMonitorUpdate {} for {}channel {}",
+ update.update_id, $channel_info_log, log_bytes!($funding_txo.to_channel_id()));
+ max_in_flight_update_id = cmp::max(max_in_flight_update_id, update.update_id);
+ pending_background_events.push(
+ BackgroundEvent::MonitorUpdateRegeneratedOnStartup {
+ counterparty_node_id: $counterparty_node_id,
+ funding_txo: $funding_txo,
+ update: update.clone(),
+ });
+ }
+ if $peer_state.in_flight_monitor_updates.insert($funding_txo, $chan_in_flight_upds).is_some() {
+ log_error!(args.logger, "Duplicate in-flight monitor update set for the same channel!");
+ return Err(DecodeError::InvalidValue);
+ }
+ max_in_flight_update_id
+ } }
+ }
+
+ for (counterparty_id, peer_state_mtx) in per_peer_state.iter_mut() {
+ let mut peer_state_lock = peer_state_mtx.lock().unwrap();
+ let peer_state = &mut *peer_state_lock;
+ for (_, chan) in peer_state.channel_by_id.iter() {
+ // Channels that were persisted have to be funded, otherwise they should have been
+ // discarded.
+ let funding_txo = chan.context.get_funding_txo().ok_or(DecodeError::InvalidValue)?;
+ let monitor = args.channel_monitors.get(&funding_txo)
+ .expect("We already checked for monitor presence when loading channels");
+ let mut max_in_flight_update_id = monitor.get_latest_update_id();
+ if let Some(in_flight_upds) = &mut in_flight_monitor_updates {
+ if let Some(mut chan_in_flight_upds) = in_flight_upds.remove(&(*counterparty_id, funding_txo)) {
+ max_in_flight_update_id = cmp::max(max_in_flight_update_id,
+ handle_in_flight_updates!(*counterparty_id, chan_in_flight_upds,
+ funding_txo, monitor, peer_state, ""));
+ }
+ }
+ if chan.get_latest_unblocked_monitor_update_id() > max_in_flight_update_id {
+ // If the channel is ahead of the monitor, return InvalidValue:
+ log_error!(args.logger, "A ChannelMonitor is stale compared to the current ChannelManager! This indicates a potentially-critical violation of the chain::Watch API!");
+ log_error!(args.logger, " The ChannelMonitor for channel {} is at update_id {} with update_id through {} in-flight",
+ log_bytes!(chan.context.channel_id()), monitor.get_latest_update_id(), max_in_flight_update_id);
+ log_error!(args.logger, " but the ChannelManager is at update_id {}.", chan.get_latest_unblocked_monitor_update_id());
+ log_error!(args.logger, " The chain::Watch API *requires* that monitors are persisted durably before returning,");
+ log_error!(args.logger, " client applications must ensure that ChannelMonitor data is always available and the latest to avoid funds loss!");
+ log_error!(args.logger, " Without the latest ChannelMonitor we cannot continue without risking funds.");
+ log_error!(args.logger, " Please ensure the chain::Watch API requirements are met and file a bug report at https://github.com/lightningdevkit/rust-lightning");
+ return Err(DecodeError::InvalidValue);
+ }
+ }
+ }
+
+ if let Some(in_flight_upds) = in_flight_monitor_updates {
+ for ((counterparty_id, funding_txo), mut chan_in_flight_updates) in in_flight_upds {
+ if let Some(monitor) = args.channel_monitors.get(&funding_txo) {
+ // Now that we've removed all the in-flight monitor updates for channels that are
+ // still open, we need to replay any monitor updates that are for closed channels,
+ // creating the necessary peer_state entries as we go.
+ let peer_state_mutex = per_peer_state.entry(counterparty_id).or_insert_with(|| {
+ Mutex::new(peer_state_from_chans(HashMap::new()))
+ });
+ let mut peer_state = peer_state_mutex.lock().unwrap();
+ handle_in_flight_updates!(counterparty_id, chan_in_flight_updates,
+ funding_txo, monitor, peer_state, "closed ");
+ } else {
+ log_error!(args.logger, "A ChannelMonitor is missing even though we have in-flight updates for it! This indicates a potentially-critical violation of the chain::Watch API!");
+ log_error!(args.logger, " The ChannelMonitor for channel {} is missing.",
+ log_bytes!(funding_txo.to_channel_id()));
+ log_error!(args.logger, " The chain::Watch API *requires* that monitors are persisted durably before returning,");
+ log_error!(args.logger, " client applications must ensure that ChannelMonitor data is always available and the latest to avoid funds loss!");
+ log_error!(args.logger, " Without the latest ChannelMonitor we cannot continue without risking funds.");
+ log_error!(args.logger, " Please ensure the chain::Watch API requirements are met and file a bug report at https://github.com/lightningdevkit/rust-lightning");
+ return Err(DecodeError::InvalidValue);
+ }
+ }
+ }
+
+ // Note that we have to do the above replays before we push new monitor updates.
+ pending_background_events.append(&mut close_background_events);
+
+ // If there's any preimages for forwarded HTLCs hanging around in ChannelMonitors we
+ // should ensure we try them again on the inbound edge. We put them here and do so after we
+ // have a fully-constructed `ChannelManager` at the end.
+ let mut pending_claims_to_replay = Vec::new();
+
+ {
// If we're tracking pending payments, ensure we haven't lost any by looking at the
// ChannelMonitor data for any channels for which we do not have authoritative state
// (i.e. those for which we just force-closed above or for which we otherwise don't
// have a corresponding `Channel` at all).
// We only rebuild the pending payments map if we were most recently serialized by
// 0.0.102+
for (_, monitor) in args.channel_monitors.iter() {
- if id_to_peer.get(&monitor.get_funding_txo().0.to_channel_id()).is_none() {
- for (htlc_source, htlc) in monitor.get_pending_outbound_htlcs() {
- if let HTLCSource::OutboundRoute { payment_id, session_priv, path, payment_secret, .. } = htlc_source {
- if path.is_empty() {
+ let counterparty_opt = id_to_peer.get(&monitor.get_funding_txo().0.to_channel_id());
+ if counterparty_opt.is_none() {
+ for (htlc_source, (htlc, _)) in monitor.get_pending_or_resolved_outbound_htlcs() {
+ if let HTLCSource::OutboundRoute { payment_id, session_priv, path, .. } = htlc_source {
+ if path.hops.is_empty() {
log_error!(args.logger, "Got an empty path for a pending payment");
return Err(DecodeError::InvalidValue);
}
- let path_amt = path.last().unwrap().fee_msat;
+
+ let path_amt = path.final_value_msat();
let mut session_priv_bytes = [0; 32];
session_priv_bytes[..].copy_from_slice(&session_priv[..]);
- match pending_outbound_payments.as_mut().unwrap().entry(payment_id) {
+ match pending_outbounds.pending_outbound_payments.lock().unwrap().entry(payment_id) {
hash_map::Entry::Occupied(mut entry) => {
let newly_added = entry.get_mut().insert(session_priv_bytes, &path);
log_info!(args.logger, "{} a pending payment path for {} msat for session priv {} on an existing pending payment with payment hash {}",
if newly_added { "Added" } else { "Had" }, path_amt, log_bytes!(session_priv_bytes), log_bytes!(htlc.payment_hash.0));
},
hash_map::Entry::Vacant(entry) => {
- let path_fee = path.get_path_fees();
+ let path_fee = path.fee_msat();
entry.insert(PendingOutboundPayment::Retryable {
retry_strategy: None,
attempts: PaymentAttempts::new(),
payment_params: None,
session_privs: [session_priv_bytes].iter().map(|a| *a).collect(),
payment_hash: htlc.payment_hash,
- payment_secret,
+ payment_secret: None, // only used for retries, and we'll never retry on startup
+ payment_metadata: None, // only used for retries, and we'll never retry on startup
keysend_preimage: None, // only used for retries, and we'll never retry on startup
pending_amt_msat: path_amt,
pending_fee_msat: Some(path_fee),
}
}
}
- for (htlc_source, htlc) in monitor.get_all_current_outbound_htlcs() {
- if let HTLCSource::PreviousHopData(prev_hop_data) = htlc_source {
- let pending_forward_matches_htlc = |info: &PendingAddHTLCInfo| {
- info.prev_funding_outpoint == prev_hop_data.outpoint &&
- info.prev_htlc_id == prev_hop_data.htlc_id
- };
- // The ChannelMonitor is now responsible for this HTLC's
- // failure/success and will let us know what its outcome is. If we
- // still have an entry for this HTLC in `forward_htlcs` or
- // `pending_intercepted_htlcs`, we were apparently not persisted after
- // the monitor was when forwarding the payment.
- forward_htlcs.retain(|_, forwards| {
- forwards.retain(|forward| {
- if let HTLCForwardInfo::AddHTLC(htlc_info) = forward {
- if pending_forward_matches_htlc(&htlc_info) {
- log_info!(args.logger, "Removing pending to-forward HTLC with hash {} as it was forwarded to the closed channel {}",
- log_bytes!(htlc.payment_hash.0), log_bytes!(monitor.get_funding_txo().0.to_channel_id()));
- false
+ for (htlc_source, (htlc, preimage_opt)) in monitor.get_all_current_outbound_htlcs() {
+ match htlc_source {
+ HTLCSource::PreviousHopData(prev_hop_data) => {
+ let pending_forward_matches_htlc = |info: &PendingAddHTLCInfo| {
+ info.prev_funding_outpoint == prev_hop_data.outpoint &&
+ info.prev_htlc_id == prev_hop_data.htlc_id
+ };
+ // The ChannelMonitor is now responsible for this HTLC's
+ // failure/success and will let us know what its outcome is. If we
+ // still have an entry for this HTLC in `forward_htlcs` or
+ // `pending_intercepted_htlcs`, we were apparently not persisted after
+ // the monitor was persisted while forwarding the payment.
+ forward_htlcs.retain(|_, forwards| {
+ forwards.retain(|forward| {
+ if let HTLCForwardInfo::AddHTLC(htlc_info) = forward {
+ if pending_forward_matches_htlc(&htlc_info) {
+ log_info!(args.logger, "Removing pending to-forward HTLC with hash {} as it was forwarded to the closed channel {}",
+ log_bytes!(htlc.payment_hash.0), log_bytes!(monitor.get_funding_txo().0.to_channel_id()));
+ false
+ } else { true }
} else { true }
+ });
+ !forwards.is_empty()
+ });
+ pending_intercepted_htlcs.as_mut().unwrap().retain(|intercepted_id, htlc_info| {
+ if pending_forward_matches_htlc(&htlc_info) {
+ log_info!(args.logger, "Removing pending intercepted HTLC with hash {} as it was forwarded to the closed channel {}",
+ log_bytes!(htlc.payment_hash.0), log_bytes!(monitor.get_funding_txo().0.to_channel_id()));
+ pending_events_read.retain(|(event, _)| {
+ if let Event::HTLCIntercepted { intercept_id: ev_id, .. } = event {
+ intercepted_id != ev_id
+ } else { true }
+ });
+ false
} else { true }
});
- !forwards.is_empty()
- });
- pending_intercepted_htlcs.as_mut().unwrap().retain(|intercepted_id, htlc_info| {
- if pending_forward_matches_htlc(&htlc_info) {
- log_info!(args.logger, "Removing pending intercepted HTLC with hash {} as it was forwarded to the closed channel {}",
- log_bytes!(htlc.payment_hash.0), log_bytes!(monitor.get_funding_txo().0.to_channel_id()));
- pending_events_read.retain(|event| {
- if let Event::HTLCIntercepted { intercept_id: ev_id, .. } = event {
- intercepted_id != ev_id
- } else { true }
- });
- false
- } else { true }
- });
+ },
+ HTLCSource::OutboundRoute { payment_id, session_priv, path, .. } => {
+ if let Some(preimage) = preimage_opt {
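+ // `claim_htlc` expects the pending-events queue behind a
+ // `Mutex`, as at runtime, so we temporarily wrap ours and
+ // unwrap it again immediately afterwards.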
+ let pending_events = Mutex::new(pending_events_read);
+ // Note that we set `from_onchain` to "false" here,
+ // deliberately keeping the pending payment around forever.
+ // Given it should only occur when we have a channel we're
+ // force-closing for being stale, that's okay.
+ // The alternative would be to wipe the state when claiming,
+ // generating a `PaymentPathSuccessful` event but regenerating
+ // it and the `PaymentSent` on every restart until the
+ // `ChannelMonitor` is removed.
+ pending_outbounds.claim_htlc(payment_id, preimage, session_priv, path, false, &pending_events, &args.logger);
+ pending_events_read = pending_events.into_inner().unwrap();
+ }
+ },
}
}
}
+
+ // Whether the downstream channel was closed or not, try to re-apply any payment
+ // preimages from it which may be needed in upstream channels for forwarded
+ // payments.
+ let outbound_claimed_htlcs_iter = monitor.get_all_current_outbound_htlcs()
+ .into_iter()
+ .filter_map(|(htlc_source, (htlc, preimage_opt))| {
+ if let HTLCSource::PreviousHopData(_) = htlc_source {
+ if let Some(payment_preimage) = preimage_opt {
+ Some((htlc_source, payment_preimage, htlc.amount_msat,
+ // Check if `counterparty_opt.is_none()` to see if the
+ // downstream chan is closed (because we don't have a
+ // channel_id -> peer map entry).
+ counterparty_opt.is_none(),
+ monitor.get_funding_txo().0.to_channel_id()))
+ } else { None }
+ } else {
+ // If it was an outbound payment, we've handled it above: if a preimage
+ // came in and we persisted the `ChannelManager`, we either handled it and
+ // are good to go or the channel force-closed, so we don't have to handle
+ // the channel-still-live case here.
+ None
+ }
+ });
+ for tuple in outbound_claimed_htlcs_iter {
+ pending_claims_to_replay.push(tuple);
+ }
}
}
- let pending_outbounds = OutboundPayments { pending_outbound_payments: Mutex::new(pending_outbound_payments.unwrap()), retry_lock: Mutex::new(()) };
if !forward_htlcs.is_empty() || pending_outbounds.needs_abandon() {
// If we have pending HTLCs to forward, assume we either dropped a
// `PendingHTLCsForwardable` or the user received it but never processed it as they
// shut down before the timer hit. Either way, set the time_forwardable to a small
// constant as enough time has likely passed that we should simply handle the forwards
// now, or at least after the user gets a chance to reconnect to our peers.
- pending_events_read.push(events::Event::PendingHTLCsForwardable {
+ pending_events_read.push_back((events::Event::PendingHTLCsForwardable {
time_forwardable: Duration::from_secs(2),
- });
+ }, None));
}
let inbound_pmt_key_material = args.node_signer.get_inbound_payment_key_material();
let expanded_inbound_key = inbound_payment::ExpandedKey::new(&inbound_pmt_key_material);
- let mut claimable_htlcs = HashMap::with_capacity(claimable_htlcs_list.len());
- if let Some(mut purposes) = claimable_htlc_purposes {
+ let mut claimable_payments = HashMap::with_capacity(claimable_htlcs_list.len());
+ if let Some(purposes) = claimable_htlc_purposes {
if purposes.len() != claimable_htlcs_list.len() {
return Err(DecodeError::InvalidValue);
}
- for (purpose, (payment_hash, previous_hops)) in purposes.drain(..).zip(claimable_htlcs_list.drain(..)) {
- claimable_htlcs.insert(payment_hash, (purpose, previous_hops));
+ if let Some(onion_fields) = claimable_htlc_onion_fields {
+ if onion_fields.len() != claimable_htlcs_list.len() {
+ return Err(DecodeError::InvalidValue);
+ }
+ for (purpose, (onion, (payment_hash, htlcs))) in
+ purposes.into_iter().zip(onion_fields.into_iter().zip(claimable_htlcs_list.into_iter()))
+ {
+ let existing_payment = claimable_payments.insert(payment_hash, ClaimablePayment {
+ purpose, htlcs, onion_fields: onion,
+ });
+ if existing_payment.is_some() { return Err(DecodeError::InvalidValue); }
+ }
+ } else {
+ for (purpose, (payment_hash, htlcs)) in purposes.into_iter().zip(claimable_htlcs_list.into_iter()) {
+ let existing_payment = claimable_payments.insert(payment_hash, ClaimablePayment {
+ purpose, htlcs, onion_fields: None,
+ });
+ if existing_payment.is_some() { return Err(DecodeError::InvalidValue); }
+ }
}
} else {
// LDK versions prior to 0.0.107 did not write a `pending_htlc_purposes`, but did
// include a `_legacy_hop_data` in the `OnionPayload`.
- for (payment_hash, previous_hops) in claimable_htlcs_list.drain(..) {
- if previous_hops.is_empty() {
+ for (payment_hash, htlcs) in claimable_htlcs_list.drain(..) {
+ if htlcs.is_empty() {
return Err(DecodeError::InvalidValue);
}
- let purpose = match &previous_hops[0].onion_payload {
+ let purpose = match &htlcs[0].onion_payload {
OnionPayload::Invoice { _legacy_hop_data } => {
if let Some(hop_data) = _legacy_hop_data {
events::PaymentPurpose::InvoicePayment {
OnionPayload::Spontaneous(payment_preimage) =>
events::PaymentPurpose::SpontaneousPayment(*payment_preimage),
};
- claimable_htlcs.insert(payment_hash, (purpose, previous_hops));
+ claimable_payments.insert(payment_hash, ClaimablePayment {
+ purpose, htlcs, onion_fields: None,
+ });
}
}
let mut secp_ctx = Secp256k1::new();
secp_ctx.seeded_randomize(&args.entropy_source.get_secure_random_bytes());
- if !channel_closures.is_empty() {
- pending_events_read.append(&mut channel_closures);
- }
-
let our_network_pubkey = match args.node_signer.get_node_id(Recipient::Node) {
Ok(key) => key,
Err(()) => return Err(DecodeError::InvalidValue)
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state = &mut *peer_state_lock;
for (chan_id, chan) in peer_state.channel_by_id.iter_mut() {
- if chan.outbound_scid_alias() == 0 {
+ if chan.context.outbound_scid_alias() == 0 {
let mut outbound_scid_alias;
loop {
outbound_scid_alias = fake_scid::Namespace::OutboundAlias
.get_fake_scid(best_block_height, &genesis_hash, fake_scid_rand_bytes.as_ref().unwrap(), &args.entropy_source);
if outbound_scid_aliases.insert(outbound_scid_alias) { break; }
}
- chan.set_outbound_scid_alias(outbound_scid_alias);
- } else if !outbound_scid_aliases.insert(chan.outbound_scid_alias()) {
+ chan.context.set_outbound_scid_alias(outbound_scid_alias);
+ } else if !outbound_scid_aliases.insert(chan.context.outbound_scid_alias()) {
// Note that in rare cases it's possible to hit this while reading an older
// channel if we just happened to pick a colliding outbound alias above.
- log_error!(args.logger, "Got duplicate outbound SCID alias; {}", chan.outbound_scid_alias());
+ log_error!(args.logger, "Got duplicate outbound SCID alias; {}", chan.context.outbound_scid_alias());
return Err(DecodeError::InvalidValue);
}
- if chan.is_usable() {
- if short_to_chan_info.insert(chan.outbound_scid_alias(), (chan.get_counterparty_node_id(), *chan_id)).is_some() {
+ if chan.context.is_usable() {
+ if short_to_chan_info.insert(chan.context.outbound_scid_alias(), (chan.context.get_counterparty_node_id(), *chan_id)).is_some() {
// Note that in rare cases it's possible to hit this while reading an older
// channel if we just happened to pick a colliding outbound alias above.
- log_error!(args.logger, "Got duplicate outbound SCID alias; {}", chan.outbound_scid_alias());
+ log_error!(args.logger, "Got duplicate outbound SCID alias; {}", chan.context.outbound_scid_alias());
return Err(DecodeError::InvalidValue);
}
}
for (_, monitor) in args.channel_monitors.iter() {
for (payment_hash, payment_preimage) in monitor.get_stored_preimages() {
- if let Some((payment_purpose, claimable_htlcs)) = claimable_htlcs.remove(&payment_hash) {
+ if let Some(payment) = claimable_payments.remove(&payment_hash) {
log_info!(args.logger, "Re-claiming HTLCs with payment hash {} as we've released the preimage to a ChannelMonitor!", log_bytes!(payment_hash.0));
let mut claimable_amt_msat = 0;
let mut receiver_node_id = Some(our_network_pubkey);
- let phantom_shared_secret = claimable_htlcs[0].prev_hop.phantom_shared_secret;
+ let phantom_shared_secret = payment.htlcs[0].prev_hop.phantom_shared_secret;
if phantom_shared_secret.is_some() {
let phantom_pubkey = args.node_signer.get_node_id(Recipient::PhantomNode)
.expect("Failed to get node_id for phantom node recipient");
receiver_node_id = Some(phantom_pubkey)
}
- for claimable_htlc in claimable_htlcs {
+ for claimable_htlc in payment.htlcs {
claimable_amt_msat += claimable_htlc.value;
// Add a holding-cell claim of the payment to the Channel, which should be
previous_hop_monitor.provide_payment_preimage(&payment_hash, &payment_preimage, &args.tx_broadcaster, &bounded_fee_estimator, &args.logger);
}
}
- pending_events_read.push(events::Event::PaymentClaimed {
+ pending_events_read.push_back((events::Event::PaymentClaimed {
receiver_node_id,
payment_hash,
- purpose: payment_purpose,
+ purpose: payment.purpose,
amount_msat: claimable_amt_msat,
- });
+ }, None));
}
}
}
for (node_id, monitor_update_blocked_actions) in monitor_update_blocked_actions_per_peer.unwrap() {
- if let Some(peer_state) = per_peer_state.get_mut(&node_id) {
+ if let Some(peer_state) = per_peer_state.get(&node_id) {
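+ // An `EmitEventAndFreeOtherChannel` action implies some other channel is
+ // currently blocked on us; re-add the corresponding blocking action to that
+ // channel's `actions_blocking_raa_monitor_updates` so the dependency
+ // survives the restart.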
+ for (_, actions) in monitor_update_blocked_actions.iter() {
+ for action in actions.iter() {
+ if let MonitorUpdateCompletionAction::EmitEventAndFreeOtherChannel {
+ downstream_counterparty_and_funding_outpoint:
+ Some((blocked_node_id, blocked_channel_outpoint, blocking_action)), ..
+ } = action {
+ if let Some(blocked_peer_state) = per_peer_state.get(&blocked_node_id) {
+ blocked_peer_state.lock().unwrap().actions_blocking_raa_monitor_updates
+ .entry(blocked_channel_outpoint.to_channel_id())
+ .or_insert_with(Vec::new).push(blocking_action.clone());
+ }
+ }
+ }
+ }
peer_state.lock().unwrap().monitor_update_blocked_actions = monitor_update_blocked_actions;
} else {
log_error!(args.logger, "Got blocked actions without a per-peer-state for {}", node_id);
pending_intercepted_htlcs: Mutex::new(pending_intercepted_htlcs.unwrap()),
forward_htlcs: Mutex::new(forward_htlcs),
- claimable_payments: Mutex::new(ClaimablePayments { claimable_htlcs, pending_claiming_payments: pending_claiming_payments.unwrap() }),
+ claimable_payments: Mutex::new(ClaimablePayments { claimable_payments, pending_claiming_payments: pending_claiming_payments.unwrap() }),
outbound_scid_aliases: Mutex::new(outbound_scid_aliases),
id_to_peer: Mutex::new(id_to_peer),
short_to_chan_info: FairRwLock::new(short_to_chan_info),
per_peer_state: FairRwLock::new(per_peer_state),
pending_events: Mutex::new(pending_events_read),
- pending_background_events: Mutex::new(pending_background_events_read),
+ pending_events_processor: AtomicBool::new(false),
+ pending_background_events: Mutex::new(pending_background_events),
total_consistency_lock: RwLock::new(()),
+ background_events_processed_since_startup: AtomicBool::new(false),
persistence_notifier: Notifier::new(),
entropy_source: args.entropy_source,
channel_manager.fail_htlc_backwards_internal(&source, &payment_hash, &reason, receiver);
}
+ for (source, preimage, downstream_value, downstream_closed, downstream_chan_id) in pending_claims_to_replay {
+ // We use `downstream_closed` in place of `from_onchain` here just as a guess - we
+ // don't remember in the `ChannelMonitor` where we got a preimage from, but if the
+ // channel is closed we just assume that it probably came from an on-chain claim.
+ channel_manager.claim_funds_internal(source, preimage, Some(downstream_value),
+ downstream_closed, downstream_chan_id);
+ }
+
//TODO: Broadcast channel update for closed channels, but only after we've made a
//connection or two.
use bitcoin::hashes::Hash;
use bitcoin::hashes::sha256::Hash as Sha256;
use bitcoin::secp256k1::{PublicKey, Secp256k1, SecretKey};
- use core::time::Duration;
use core::sync::atomic::Ordering;
+ use crate::events::{Event, HTLCDestination, MessageSendEvent, MessageSendEventsProvider, ClosureReason};
use crate::ln::{PaymentPreimage, PaymentHash, PaymentSecret};
- use crate::ln::channelmanager::{inbound_payment, PaymentId, PaymentSendFailure, InterceptId};
+ use crate::ln::channelmanager::{inbound_payment, PaymentId, PaymentSendFailure, RecipientOnionFields, InterceptId};
use crate::ln::functional_test_utils::*;
- use crate::ln::msgs;
+ use crate::ln::msgs::{self, ErrorAction};
use crate::ln::msgs::ChannelMessageHandler;
use crate::routing::router::{PaymentParameters, RouteParameters, find_route};
use crate::util::errors::APIError;
- use crate::util::events::{Event, HTLCDestination, MessageSendEvent, MessageSendEventsProvider, ClosureReason};
use crate::util::test_utils;
- use crate::util::config::ChannelConfig;
- use crate::chain::keysinterface::EntropySource;
+ use crate::util::config::{ChannelConfig, ChannelConfigUpdate};
+ use crate::sign::EntropySource;
#[test]
fn test_notify_limits() {
// All nodes start with a persistable update pending as `create_network` connects each node
// with all other nodes to make most tests simpler.
- assert!(nodes[0].node.await_persistable_update_timeout(Duration::from_millis(1)));
- assert!(nodes[1].node.await_persistable_update_timeout(Duration::from_millis(1)));
- assert!(nodes[2].node.await_persistable_update_timeout(Duration::from_millis(1)));
+ assert!(nodes[0].node.get_persistable_update_future().poll_is_complete());
+ assert!(nodes[1].node.get_persistable_update_future().poll_is_complete());
+ assert!(nodes[2].node.get_persistable_update_future().poll_is_complete());
let mut chan = create_announced_chan_between_nodes(&nodes, 0, 1);
// to connect messages with new values
chan.0.contents.fee_base_msat *= 2;
chan.1.contents.fee_base_msat *= 2;
- let node_a_chan_info = nodes[0].node.list_channels()[0].clone();
- let node_b_chan_info = nodes[1].node.list_channels()[0].clone();
+ let node_a_chan_info = nodes[0].node.list_channels_with_counterparty(
+ &nodes[1].node.get_our_node_id()).pop().unwrap();
+ let node_b_chan_info = nodes[1].node.list_channels_with_counterparty(
+ &nodes[0].node.get_our_node_id()).pop().unwrap();
// The first two nodes (which opened a channel) should now require fresh persistence
- assert!(nodes[0].node.await_persistable_update_timeout(Duration::from_millis(1)));
- assert!(nodes[1].node.await_persistable_update_timeout(Duration::from_millis(1)));
+ assert!(nodes[0].node.get_persistable_update_future().poll_is_complete());
+ assert!(nodes[1].node.get_persistable_update_future().poll_is_complete());
// ... but the last node should not.
- assert!(!nodes[2].node.await_persistable_update_timeout(Duration::from_millis(1)));
+ assert!(!nodes[2].node.get_persistable_update_future().poll_is_complete());
// After persisting the first two nodes they should no longer need fresh persistence.
- assert!(!nodes[0].node.await_persistable_update_timeout(Duration::from_millis(1)));
- assert!(!nodes[1].node.await_persistable_update_timeout(Duration::from_millis(1)));
+ assert!(!nodes[0].node.get_persistable_update_future().poll_is_complete());
+ assert!(!nodes[1].node.get_persistable_update_future().poll_is_complete());
// Node 3, unrelated to the only channel, shouldn't care if it receives a channel_update
// about the channel.
nodes[2].node.handle_channel_update(&nodes[1].node.get_our_node_id(), &chan.0);
nodes[2].node.handle_channel_update(&nodes[1].node.get_our_node_id(), &chan.1);
- assert!(!nodes[2].node.await_persistable_update_timeout(Duration::from_millis(1)));
+ assert!(!nodes[2].node.get_persistable_update_future().poll_is_complete());
// The nodes which are a party to the channel should also ignore messages from unrelated
// parties.
nodes[0].node.handle_channel_update(&nodes[2].node.get_our_node_id(), &chan.1);
nodes[1].node.handle_channel_update(&nodes[2].node.get_our_node_id(), &chan.0);
nodes[1].node.handle_channel_update(&nodes[2].node.get_our_node_id(), &chan.1);
- assert!(!nodes[0].node.await_persistable_update_timeout(Duration::from_millis(1)));
- assert!(!nodes[1].node.await_persistable_update_timeout(Duration::from_millis(1)));
+ assert!(!nodes[0].node.get_persistable_update_future().poll_is_complete());
+ assert!(!nodes[1].node.get_persistable_update_future().poll_is_complete());
// At this point the channel info given by peers should still be the same.
assert_eq!(nodes[0].node.list_channels()[0], node_a_chan_info);
// persisted and that its channel info remains the same.
nodes[0].node.handle_channel_update(&nodes[1].node.get_our_node_id(), &as_update);
nodes[1].node.handle_channel_update(&nodes[0].node.get_our_node_id(), &bs_update);
- assert!(!nodes[0].node.await_persistable_update_timeout(Duration::from_millis(1)));
- assert!(!nodes[1].node.await_persistable_update_timeout(Duration::from_millis(1)));
+ assert!(!nodes[0].node.get_persistable_update_future().poll_is_complete());
+ assert!(!nodes[1].node.get_persistable_update_future().poll_is_complete());
assert_eq!(nodes[0].node.list_channels()[0], node_a_chan_info);
assert_eq!(nodes[1].node.list_channels()[0], node_b_chan_info);
// the channel info has updated.
nodes[0].node.handle_channel_update(&nodes[1].node.get_our_node_id(), &bs_update);
nodes[1].node.handle_channel_update(&nodes[0].node.get_our_node_id(), &as_update);
- assert!(nodes[0].node.await_persistable_update_timeout(Duration::from_millis(1)));
- assert!(nodes[1].node.await_persistable_update_timeout(Duration::from_millis(1)));
+ assert!(nodes[0].node.get_persistable_update_future().poll_is_complete());
+ assert!(nodes[1].node.get_persistable_update_future().poll_is_complete());
assert_ne!(nodes[0].node.list_channels()[0], node_a_chan_info);
assert_ne!(nodes[1].node.list_channels()[0], node_b_chan_info);
}
// Use the utility function send_payment_along_path to send the payment with MPP data which
// indicates there are more HTLCs coming.
let cur_height = CHAN_CONFIRM_DEPTH + 1; // route_payment calls send_payment, which adds 1 to the current height. So we do the same here to match.
- let session_privs = nodes[0].node.test_add_new_pending_payment(our_payment_hash, Some(payment_secret), payment_id, &mpp_route).unwrap();
- nodes[0].node.test_send_payment_along_path(&mpp_route.paths[0], &route.payment_params, &our_payment_hash, &Some(payment_secret), 200_000, cur_height, payment_id, &None, session_privs[0]).unwrap();
+ let session_privs = nodes[0].node.test_add_new_pending_payment(our_payment_hash,
+ RecipientOnionFields::secret_only(payment_secret), payment_id, &mpp_route).unwrap();
+ nodes[0].node.test_send_payment_along_path(&mpp_route.paths[0], &our_payment_hash,
+ RecipientOnionFields::secret_only(payment_secret), 200_000, cur_height, payment_id, &None, session_privs[0]).unwrap();
check_added_monitors!(nodes[0], 1);
let mut events = nodes[0].node.get_and_clear_pending_msg_events();
assert_eq!(events.len(), 1);
pass_along_path(&nodes[0], &[&nodes[1]], 200_000, our_payment_hash, Some(payment_secret), events.drain(..).next().unwrap(), false, None);
// Next, send a keysend payment with the same payment_hash and make sure it fails.
- nodes[0].node.send_spontaneous_payment(&route, Some(payment_preimage), PaymentId(payment_preimage.0)).unwrap();
+ nodes[0].node.send_spontaneous_payment(&route, Some(payment_preimage),
+ RecipientOnionFields::spontaneous_empty(), PaymentId(payment_preimage.0)).unwrap();
check_added_monitors!(nodes[0], 1);
let mut events = nodes[0].node.get_and_clear_pending_msg_events();
assert_eq!(events.len(), 1);
expect_payment_failed!(nodes[0], our_payment_hash, true);
// Send the second half of the original MPP payment.
- nodes[0].node.test_send_payment_along_path(&mpp_route.paths[1], &route.payment_params, &our_payment_hash, &Some(payment_secret), 200_000, cur_height, payment_id, &None, session_privs[1]).unwrap();
+ nodes[0].node.test_send_payment_along_path(&mpp_route.paths[1], &our_payment_hash,
+ RecipientOnionFields::secret_only(payment_secret), 200_000, cur_height, payment_id, &None, session_privs[1]).unwrap();
check_added_monitors!(nodes[0], 1);
let mut events = nodes[0].node.get_and_clear_pending_msg_events();
assert_eq!(events.len(), 1);
#[test]
fn test_keysend_dup_payment_hash() {
+ do_test_keysend_dup_payment_hash(false);
+ do_test_keysend_dup_payment_hash(true);
+ }
+
+ fn do_test_keysend_dup_payment_hash(accept_mpp_keysend: bool) {
// (1): Test that a keysend payment with a duplicate payment hash to an existing pending
// outbound regular payment fails as expected.
// (2): Test that a regular payment with a duplicate payment hash to an existing keysend payment
// fails as expected.
+ // (3): Test that a keysend payment with a duplicate payment hash to an existing keysend
+ // payment fails as expected. When `accept_mpp_keysend` is false, this tests that we
+ // reject MPP keysend payments, since in this case where the payment has no payment
+ // secret, a keysend payment with a duplicate hash is basically an MPP keysend. If
+ // `accept_mpp_keysend` is true, this tests that we only accept MPP keysends with
+ // payment secrets and reject them otherwise.
let chanmon_cfgs = create_chanmon_cfgs(2);
let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
- let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+ let mut mpp_keysend_cfg = test_default_channel_config();
+ mpp_keysend_cfg.accept_mpp_keysend = accept_mpp_keysend;
+ let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, Some(mpp_keysend_cfg)]);
let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
create_announced_chan_between_nodes(&nodes, 0, 1);
let scorer = test_utils::TestScorer::new();
// Next, attempt a keysend payment and make sure it fails.
let route_params = RouteParameters {
- payment_params: PaymentParameters::for_keysend(expected_route.last().unwrap().node.get_our_node_id(), TEST_FINAL_CLTV),
+ payment_params: PaymentParameters::for_keysend(expected_route.last().unwrap().node.get_our_node_id(), TEST_FINAL_CLTV, false),
final_value_msat: 100_000,
- final_cltv_expiry_delta: TEST_FINAL_CLTV,
};
let route = find_route(
&nodes[0].node.get_our_node_id(), &route_params, &nodes[0].network_graph,
- None, nodes[0].logger, &scorer, &random_seed_bytes
+ None, nodes[0].logger, &scorer, &(), &random_seed_bytes
).unwrap();
- nodes[0].node.send_spontaneous_payment(&route, Some(payment_preimage), PaymentId(payment_preimage.0)).unwrap();
+ nodes[0].node.send_spontaneous_payment(&route, Some(payment_preimage),
+ RecipientOnionFields::spontaneous_empty(), PaymentId(payment_preimage.0)).unwrap();
check_added_monitors!(nodes[0], 1);
let mut events = nodes[0].node.get_and_clear_pending_msg_events();
assert_eq!(events.len(), 1);
let payment_preimage = PaymentPreimage([42; 32]);
let route = find_route(
&nodes[0].node.get_our_node_id(), &route_params, &nodes[0].network_graph,
- None, nodes[0].logger, &scorer, &random_seed_bytes
+ None, nodes[0].logger, &scorer, &(), &random_seed_bytes
).unwrap();
- let payment_hash = nodes[0].node.send_spontaneous_payment(&route, Some(payment_preimage), PaymentId(payment_preimage.0)).unwrap();
+ let payment_hash = nodes[0].node.send_spontaneous_payment(&route, Some(payment_preimage),
+ RecipientOnionFields::spontaneous_empty(), PaymentId(payment_preimage.0)).unwrap();
check_added_monitors!(nodes[0], 1);
let mut events = nodes[0].node.get_and_clear_pending_msg_events();
assert_eq!(events.len(), 1);
// Next, attempt a regular payment and make sure it fails.
let payment_secret = PaymentSecret([43; 32]);
- nodes[0].node.send_payment(&route, payment_hash, &Some(payment_secret), PaymentId(payment_hash.0)).unwrap();
+ nodes[0].node.send_payment_with_route(&route, payment_hash,
+ RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap();
check_added_monitors!(nodes[0], 1);
let mut events = nodes[0].node.get_and_clear_pending_msg_events();
assert_eq!(events.len(), 1);
// Finally, succeed the keysend payment.
claim_payment(&nodes[0], &expected_route, payment_preimage);
+
+ // To start (3), send a keysend payment but don't claim it.
+ let payment_id_1 = PaymentId([44; 32]);
+ let payment_hash = nodes[0].node.send_spontaneous_payment(&route, Some(payment_preimage),
+ RecipientOnionFields::spontaneous_empty(), payment_id_1).unwrap();
+ check_added_monitors!(nodes[0], 1);
+ let mut events = nodes[0].node.get_and_clear_pending_msg_events();
+ assert_eq!(events.len(), 1);
+ let event = events.pop().unwrap();
+ let path = vec![&nodes[1]];
+ pass_along_path(&nodes[0], &path, 100_000, payment_hash, None, event, true, Some(payment_preimage));
+
+ // Next, attempt a second keysend payment with the same payment hash and make sure it fails.
+ let route_params = RouteParameters {
+ payment_params: PaymentParameters::for_keysend(expected_route.last().unwrap().node.get_our_node_id(), TEST_FINAL_CLTV, false),
+ final_value_msat: 100_000,
+ };
+ let route = find_route(
+ &nodes[0].node.get_our_node_id(), &route_params, &nodes[0].network_graph,
+ None, nodes[0].logger, &scorer, &(), &random_seed_bytes
+ ).unwrap();
+ let payment_id_2 = PaymentId([45; 32]);
+ nodes[0].node.send_spontaneous_payment(&route, Some(payment_preimage),
+ RecipientOnionFields::spontaneous_empty(), payment_id_2).unwrap();
+ check_added_monitors!(nodes[0], 1);
+ let mut events = nodes[0].node.get_and_clear_pending_msg_events();
+ assert_eq!(events.len(), 1);
+ let ev = events.drain(..).next().unwrap();
+ let payment_event = SendEvent::from_event(ev);
+ nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
+ check_added_monitors!(nodes[1], 0);
+ commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
+ expect_pending_htlcs_forwardable!(nodes[1]);
+ expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::FailedPayment { payment_hash }]);
+ check_added_monitors!(nodes[1], 1);
+ let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
+ assert!(updates.update_add_htlcs.is_empty());
+ assert!(updates.update_fulfill_htlcs.is_empty());
+ assert_eq!(updates.update_fail_htlcs.len(), 1);
+ assert!(updates.update_fail_malformed_htlcs.is_empty());
+ assert!(updates.update_fee.is_none());
+ nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fail_htlcs[0]);
+ commitment_signed_dance!(nodes[0], nodes[1], updates.commitment_signed, true, true);
+ expect_payment_failed!(nodes[0], payment_hash, true);
+
+ // Finally, claim the original payment.
+ claim_payment(&nodes[0], &expected_route, payment_preimage);
}
#[test]
let _chan = create_chan_between_nodes(&nodes[0], &nodes[1]);
let route_params = RouteParameters {
- payment_params: PaymentParameters::for_keysend(payee_pubkey, 40),
+ payment_params: PaymentParameters::for_keysend(payee_pubkey, 40, false),
final_value_msat: 10_000,
- final_cltv_expiry_delta: 40,
};
let network_graph = nodes[0].network_graph.clone();
let first_hops = nodes[0].node.list_usable_channels();
let random_seed_bytes = chanmon_cfgs[1].keys_manager.get_secure_random_bytes();
let route = find_route(
&payer_pubkey, &route_params, &network_graph, Some(&first_hops.iter().collect::<Vec<_>>()),
- nodes[0].logger, &scorer, &random_seed_bytes
+ nodes[0].logger, &scorer, &(), &random_seed_bytes
).unwrap();
let test_preimage = PaymentPreimage([42; 32]);
let mismatch_payment_hash = PaymentHash([43; 32]);
- let session_privs = nodes[0].node.test_add_new_pending_payment(mismatch_payment_hash, None, PaymentId(mismatch_payment_hash.0), &route).unwrap();
- nodes[0].node.test_send_payment_internal(&route, mismatch_payment_hash, &None, Some(test_preimage), PaymentId(mismatch_payment_hash.0), None, session_privs).unwrap();
+ let session_privs = nodes[0].node.test_add_new_pending_payment(mismatch_payment_hash,
+ RecipientOnionFields::spontaneous_empty(), PaymentId(mismatch_payment_hash.0), &route).unwrap();
+ nodes[0].node.test_send_payment_internal(&route, mismatch_payment_hash,
+ RecipientOnionFields::spontaneous_empty(), Some(test_preimage), PaymentId(mismatch_payment_hash.0), None, session_privs).unwrap();
check_added_monitors!(nodes[0], 1);
let updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
assert!(updates.update_fee.is_none());
nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);
- nodes[1].logger.assert_log_contains("lightning::ln::channelmanager".to_string(), "Payment preimage didn't match payment hash".to_string(), 1);
+ nodes[1].logger.assert_log_contains("lightning::ln::channelmanager", "Payment preimage didn't match payment hash", 1);
}
#[test]
fn test_keysend_msg_with_secret_err() {
- // Test that we error as expected if we receive a keysend payment that includes a payment secret.
+ // Test that we error as expected if we receive a keysend payment that includes a payment
+ // secret when we don't support MPP keysend.
+ let mut reject_mpp_keysend_cfg = test_default_channel_config();
+ reject_mpp_keysend_cfg.accept_mpp_keysend = false;
let chanmon_cfgs = create_chanmon_cfgs(2);
let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
- let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+ let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, Some(reject_mpp_keysend_cfg)]);
let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
let payer_pubkey = nodes[0].node.get_our_node_id();
let _chan = create_chan_between_nodes(&nodes[0], &nodes[1]);
let route_params = RouteParameters {
- payment_params: PaymentParameters::for_keysend(payee_pubkey, 40),
+ payment_params: PaymentParameters::for_keysend(payee_pubkey, 40, false),
final_value_msat: 10_000,
- final_cltv_expiry_delta: 40,
};
let network_graph = nodes[0].network_graph.clone();
let first_hops = nodes[0].node.list_usable_channels();
let random_seed_bytes = chanmon_cfgs[1].keys_manager.get_secure_random_bytes();
let route = find_route(
&payer_pubkey, &route_params, &network_graph, Some(&first_hops.iter().collect::<Vec<_>>()),
- nodes[0].logger, &scorer, &random_seed_bytes
+ nodes[0].logger, &scorer, &(), &random_seed_bytes
).unwrap();
let test_preimage = PaymentPreimage([42; 32]);
let test_secret = PaymentSecret([43; 32]);
let payment_hash = PaymentHash(Sha256::hash(&test_preimage.0).into_inner());
- let session_privs = nodes[0].node.test_add_new_pending_payment(payment_hash, Some(test_secret), PaymentId(payment_hash.0), &route).unwrap();
- nodes[0].node.test_send_payment_internal(&route, payment_hash, &Some(test_secret), Some(test_preimage), PaymentId(payment_hash.0), None, session_privs).unwrap();
+ let session_privs = nodes[0].node.test_add_new_pending_payment(payment_hash,
+ RecipientOnionFields::secret_only(test_secret), PaymentId(payment_hash.0), &route).unwrap();
+ nodes[0].node.test_send_payment_internal(&route, payment_hash,
+ RecipientOnionFields::secret_only(test_secret), Some(test_preimage),
+ PaymentId(payment_hash.0), None, session_privs).unwrap();
check_added_monitors!(nodes[0], 1);
let updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
assert!(updates.update_fee.is_none());
nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);
- nodes[1].logger.assert_log_contains("lightning::ln::channelmanager".to_string(), "We don't support MPP keysend payments".to_string(), 1);
+ nodes[1].logger.assert_log_contains("lightning::ln::channelmanager", "We don't support MPP keysend payments", 1);
}
#[test]
let (mut route, payment_hash, _, _) = get_route_and_payment_hash!(&nodes[0], nodes[3], 100000);
let path = route.paths[0].clone();
route.paths.push(path);
- route.paths[0][0].pubkey = nodes[1].node.get_our_node_id();
- route.paths[0][0].short_channel_id = chan_1_id;
- route.paths[0][1].short_channel_id = chan_3_id;
- route.paths[1][0].pubkey = nodes[2].node.get_our_node_id();
- route.paths[1][0].short_channel_id = chan_2_id;
- route.paths[1][1].short_channel_id = chan_4_id;
-
- match nodes[0].node.send_payment(&route, payment_hash, &None, PaymentId(payment_hash.0)).unwrap_err() {
+ route.paths[0].hops[0].pubkey = nodes[1].node.get_our_node_id();
+ route.paths[0].hops[0].short_channel_id = chan_1_id;
+ route.paths[0].hops[1].short_channel_id = chan_3_id;
+ route.paths[1].hops[0].pubkey = nodes[2].node.get_our_node_id();
+ route.paths[1].hops[0].short_channel_id = chan_2_id;
+ route.paths[1].hops[1].short_channel_id = chan_4_id;
+
+ match nodes[0].node.send_payment_with_route(&route, payment_hash,
+ RecipientOnionFields::spontaneous_empty(), PaymentId(payment_hash.0))
+ .unwrap_err() {
PaymentSendFailure::ParameterError(APIError::APIMisuseError { ref err }) => {
- assert!(regex::Regex::new(r"Payment secret is required for multi-path payments").unwrap().is_match(err)) },
+ assert!(regex::Regex::new(r"Payment secret is required for multi-path payments").unwrap().is_match(err))
+ },
_ => panic!("unexpected error")
}
}
match inbound_payment::verify(bad_payment_hash, &payment_data, nodes[0].node.highest_seen_timestamp.load(Ordering::Acquire) as u64, &nodes[0].node.inbound_payment_key, &nodes[0].logger) {
Ok(_) => panic!("Unexpected ok"),
Err(()) => {
- nodes[0].logger.assert_log_contains("lightning::ln::inbound_payment".to_string(), "Failing HTLC with user-generated payment_hash".to_string(), 1);
+ nodes[0].logger.assert_log_contains("lightning::ln::inbound_payment", "Failing HTLC with user-generated payment_hash", 1);
}
}
assert_eq!(nodes_0_lock.len(), 1);
assert!(nodes_0_lock.contains_key(channel_id));
}
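+ // nodes[1] should surface a ChannelPending event once it has persisted the new channel monitor.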
+ expect_channel_pending_event(&nodes[1], &nodes[0].node.get_our_node_id());
{
// Assert that `nodes[1]`'s `id_to_peer` map is populated with the channel as soon as
// it has the funding transaction.
let funding_signed = get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id());
nodes[0].node.handle_funding_signed(&nodes[1].node.get_our_node_id(), &funding_signed);
check_added_monitors!(nodes[0], 1);
+ expect_channel_pending_event(&nodes[0], &nodes[1].node.get_our_node_id());
let (channel_ready, _) = create_chan_between_nodes_with_value_confirm(&nodes[0], &nodes[1], &tx);
let (announcement, nodes_0_update, nodes_1_update) = create_chan_between_nodes_with_value_b(&nodes[0], &nodes[1], &channel_ready);
update_nodes_with_chan_announce(&nodes, 0, 1, &announcement, &nodes_0_update, &nodes_1_update);
nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created_msg);
check_added_monitors!(nodes[1], 1);
+ expect_channel_pending_event(&nodes[1], &nodes[0].node.get_our_node_id());
+
let funding_signed = get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id());
nodes[0].node.handle_funding_signed(&nodes[1].node.get_our_node_id(), &funding_signed);
check_added_monitors!(nodes[0], 1);
+ expect_channel_pending_event(&nodes[0], &nodes[1].node.get_our_node_id());
}
open_channel_msg.temporary_channel_id = nodes[0].keys_manager.get_secure_random_bytes();
}
// A MAX_UNFUNDED_CHANS_PER_PEER + 1 channel will be summarily rejected
open_channel_msg.temporary_channel_id = nodes[0].keys_manager.get_secure_random_bytes();
nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_channel_msg);
- assert_eq!(get_err_msg!(nodes[1], nodes[0].node.get_our_node_id()).channel_id,
+ assert_eq!(get_err_msg(&nodes[1], &nodes[0].node.get_our_node_id()).channel_id,
open_channel_msg.temporary_channel_id);
// Further, because all of our channels with nodes[0] are inbound, and none of them funded,
&SecretKey::from_slice(&nodes[1].keys_manager.get_secure_random_bytes()).unwrap());
peer_pks.push(random_pk);
nodes[1].node.peer_connected(&random_pk, &msgs::Init {
- features: nodes[0].node.init_features(), remote_network_address: None }, true).unwrap();
+ features: nodes[0].node.init_features(), networks: None, remote_network_address: None
+ }, true).unwrap();
}
let last_random_pk = PublicKey::from_secret_key(&nodes[0].node.secp_ctx,
&SecretKey::from_slice(&nodes[1].keys_manager.get_secure_random_bytes()).unwrap());
nodes[1].node.peer_connected(&last_random_pk, &msgs::Init {
- features: nodes[0].node.init_features(), remote_network_address: None }, true).unwrap_err();
+ features: nodes[0].node.init_features(), networks: None, remote_network_address: None
+ }, true).unwrap_err();
// Also importantly, because nodes[0] isn't "protected", we will refuse a reconnection from
// them if we have too many un-channel'd peers.
if let Event::ChannelClosed { .. } = ev { } else { panic!(); }
}
nodes[1].node.peer_connected(&last_random_pk, &msgs::Init {
- features: nodes[0].node.init_features(), remote_network_address: None }, true).unwrap();
+ features: nodes[0].node.init_features(), networks: None, remote_network_address: None
+ }, true).unwrap();
nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init {
- features: nodes[0].node.init_features(), remote_network_address: None }, true).unwrap_err();
+ features: nodes[0].node.init_features(), networks: None, remote_network_address: None
+ }, true).unwrap_err();
// but of course if the connection is outbound it's allowed...
nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init {
- features: nodes[0].node.init_features(), remote_network_address: None }, false).unwrap();
+ features: nodes[0].node.init_features(), networks: None, remote_network_address: None
+ }, false).unwrap();
nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
// Now nodes[0] is disconnected but still has a pending, un-funded channel lying around.
open_channel_msg.temporary_channel_id = nodes[0].keys_manager.get_secure_random_bytes();
}
nodes[1].node.handle_open_channel(&last_random_pk, &open_channel_msg);
- assert_eq!(get_err_msg!(nodes[1], last_random_pk).channel_id,
+ assert_eq!(get_err_msg(&nodes[1], &last_random_pk).channel_id,
open_channel_msg.temporary_channel_id);
// Of course, however, outbound channels are always allowed
// "protected" and can connect again.
mine_transaction(&nodes[1], funding_tx.as_ref().unwrap());
nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init {
- features: nodes[0].node.init_features(), remote_network_address: None }, true).unwrap();
+ features: nodes[0].node.init_features(), networks: None, remote_network_address: None
+ }, true).unwrap();
get_event_msg!(nodes[1], MessageSendEvent::SendChannelReestablish, nodes[0].node.get_our_node_id());
// Further, because the first channel was funded, we can open another channel with
// Once we have MAX_UNFUNDED_CHANS_PER_PEER unfunded channels, new inbound channels will be
// rejected.
nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_channel_msg);
- assert_eq!(get_err_msg!(nodes[1], nodes[0].node.get_our_node_id()).channel_id,
+ assert_eq!(get_err_msg(&nodes[1], &nodes[0].node.get_our_node_id()).channel_id,
open_channel_msg.temporary_channel_id);
// but we can still open an outbound channel.
// but even with such an outbound channel, additional inbound channels will still fail.
nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_channel_msg);
- assert_eq!(get_err_msg!(nodes[1], nodes[0].node.get_our_node_id()).channel_id,
+ assert_eq!(get_err_msg(&nodes[1], &nodes[0].node.get_our_node_id()).channel_id,
open_channel_msg.temporary_channel_id);
}
let random_pk = PublicKey::from_secret_key(&nodes[0].node.secp_ctx,
&SecretKey::from_slice(&nodes[1].keys_manager.get_secure_random_bytes()).unwrap());
nodes[1].node.peer_connected(&random_pk, &msgs::Init {
- features: nodes[0].node.init_features(), remote_network_address: None }, true).unwrap();
+ features: nodes[0].node.init_features(), networks: None, remote_network_address: None
+ }, true).unwrap();
nodes[1].node.handle_open_channel(&random_pk, &open_channel_msg);
let events = nodes[1].node.get_and_clear_pending_events();
let last_random_pk = PublicKey::from_secret_key(&nodes[0].node.secp_ctx,
&SecretKey::from_slice(&nodes[1].keys_manager.get_secure_random_bytes()).unwrap());
nodes[1].node.peer_connected(&last_random_pk, &msgs::Init {
- features: nodes[0].node.init_features(), remote_network_address: None }, true).unwrap();
+ features: nodes[0].node.init_features(), networks: None, remote_network_address: None
+ }, true).unwrap();
nodes[1].node.handle_open_channel(&last_random_pk, &open_channel_msg);
let events = nodes[1].node.get_and_clear_pending_events();
match events[0] {
}
_ => panic!("Unexpected event"),
}
- assert_eq!(get_err_msg!(nodes[1], last_random_pk).channel_id,
+ assert_eq!(get_err_msg(&nodes[1], &last_random_pk).channel_id,
open_channel_msg.temporary_channel_id);
// ...however if we accept the same channel 0conf it should work just fine.
get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, last_random_pk);
}
- #[cfg(anchors)]
+ #[test]
+ fn reject_excessively_underpaying_htlcs() {
+ let chanmon_cfg = create_chanmon_cfgs(1);
+ let node_cfg = create_node_cfgs(1, &chanmon_cfg);
+ let node_chanmgr = create_node_chanmgrs(1, &node_cfg, &[None]);
+ let node = create_network(1, &node_cfg, &node_chanmgr);
+ let sender_intended_amt_msat = 100;
+ let extra_fee_msat = 10;
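+ // extra_fee_msat models an amount the penultimate hop is allowed to skim from the payment.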
+ let hop_data = msgs::OnionHopData {
+ amt_to_forward: 100,
+ outgoing_cltv_value: 42,
+ format: msgs::OnionHopDataFormat::FinalNode {
+ keysend_preimage: None,
+ payment_metadata: None,
+ payment_data: Some(msgs::FinalOnionHopData {
+ payment_secret: PaymentSecret([0; 32]), total_msat: sender_intended_amt_msat,
+ }),
+ }
+ };
+ // Check that if the amount we received + the penultimate hop extra fee is less than the sender
+ // intended amount, we fail the payment.
+ if let Err(crate::ln::channelmanager::ReceiveError { err_code, .. }) =
+ node[0].node.construct_recv_pending_htlc_info(hop_data, [0; 32], PaymentHash([0; 32]),
+ sender_intended_amt_msat - extra_fee_msat - 1, 42, None, true, Some(extra_fee_msat))
+ {
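+ // BOLT 4 defines failure code 19 as final_incorrect_htlc_amount.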
+ assert_eq!(err_code, 19);
+ } else { panic!(); }
+
+ // If amt_received + extra_fee is equal to the sender intended amount, we're fine.
+ let hop_data = msgs::OnionHopData { // Same hop_data as above; OnionHopData doesn't implement Clone
+ amt_to_forward: 100,
+ outgoing_cltv_value: 42,
+ format: msgs::OnionHopDataFormat::FinalNode {
+ keysend_preimage: None,
+ payment_metadata: None,
+ payment_data: Some(msgs::FinalOnionHopData {
+ payment_secret: PaymentSecret([0; 32]), total_msat: sender_intended_amt_msat,
+ }),
+ }
+ };
+ assert!(node[0].node.construct_recv_pending_htlc_info(hop_data, [0; 32], PaymentHash([0; 32]),
+ sender_intended_amt_msat - extra_fee_msat, 42, None, true, Some(extra_fee_msat)).is_ok());
+ }
+
+ #[test]
+ fn test_inbound_anchors_manual_acceptance() {
+ // Tests that inbound anchors channels are rejected by default, but surfaced for manual
+ // acceptance (and accepted) when `manually_accept_inbound_channels` is set.
+ let mut anchors_cfg = test_default_channel_config();
+ anchors_cfg.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx = true;
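+ // Inbound anchors channels are rejected unless manually accepted, as the acceptor may
+ // need on-chain funds available to fee-bump the commitment transaction.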
+
+ let mut anchors_manual_accept_cfg = anchors_cfg.clone();
+ anchors_manual_accept_cfg.manually_accept_inbound_channels = true;
+
+ let chanmon_cfgs = create_chanmon_cfgs(3);
+ let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
+ let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs,
+ &[Some(anchors_cfg.clone()), Some(anchors_cfg.clone()), Some(anchors_manual_accept_cfg.clone())]);
+ let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
+
+ nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, 0, 42, None).unwrap();
+ let open_channel_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
+
+ nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_channel_msg);
+ assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
+ let msg_events = nodes[1].node.get_and_clear_pending_msg_events();
+ match &msg_events[0] {
+ MessageSendEvent::HandleError { node_id, action } => {
+ assert_eq!(*node_id, nodes[0].node.get_our_node_id());
+ match action {
+ ErrorAction::SendErrorMessage { msg } =>
+ assert_eq!(msg.data, "No channels with anchor outputs accepted".to_owned()),
+ _ => panic!("Unexpected error action"),
+ }
+ }
+ _ => panic!("Unexpected event"),
+ }
+
+ nodes[2].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_channel_msg);
+ let events = nodes[2].node.get_and_clear_pending_events();
+ match events[0] {
+ Event::OpenChannelRequest { temporary_channel_id, .. } =>
+ nodes[2].node.accept_inbound_channel(&temporary_channel_id, &nodes[0].node.get_our_node_id(), 23).unwrap(),
+ _ => panic!("Unexpected event"),
+ }
+ get_event_msg!(nodes[2], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
+ }
+
#[test]
fn test_anchors_zero_fee_htlc_tx_fallback() {
// Tests that if both nodes support anchors, but the remote node does not want to accept
// anchor channels at the moment, an error is sent to the local node such that it can retry
// the channel without the anchors feature.
_ => panic!("Unexpected event"),
}
- let error_msg = get_err_msg!(nodes[1], nodes[0].node.get_our_node_id());
+ let error_msg = get_err_msg(&nodes[1], &nodes[0].node.get_our_node_id());
nodes[0].node.handle_error(&nodes[1].node.get_our_node_id(), &error_msg);
let open_channel_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed);
}
+
+ #[test]
+ fn test_update_channel_config() {
+ let chanmon_cfg = create_chanmon_cfgs(2);
+ let node_cfg = create_node_cfgs(2, &chanmon_cfg);
+ let mut user_config = test_default_channel_config();
+ let node_chanmgr = create_node_chanmgrs(2, &node_cfg, &[Some(user_config), Some(user_config)]);
+ let nodes = create_network(2, &node_cfg, &node_chanmgr);
+ let _ = create_announced_chan_between_nodes(&nodes, 0, 1);
+ let channel = &nodes[0].node.list_channels()[0];
+
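+ // Re-applying the current config is a no-op and must not broadcast a channel_update.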
+ nodes[0].node.update_channel_config(&channel.counterparty.node_id, &[channel.channel_id], &user_config.channel_config).unwrap();
+ let events = nodes[0].node.get_and_clear_pending_msg_events();
+ assert_eq!(events.len(), 0);
+
+ user_config.channel_config.forwarding_fee_base_msat += 10;
+ nodes[0].node.update_channel_config(&channel.counterparty.node_id, &[channel.channel_id], &user_config.channel_config).unwrap();
+ assert_eq!(nodes[0].node.list_channels()[0].config.unwrap().forwarding_fee_base_msat, user_config.channel_config.forwarding_fee_base_msat);
+ let events = nodes[0].node.get_and_clear_pending_msg_events();
+ assert_eq!(events.len(), 1);
+ match &events[0] {
+ MessageSendEvent::BroadcastChannelUpdate { .. } => {},
+ _ => panic!("expected BroadcastChannelUpdate event"),
+ }
+
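+ // An all-default ChannelConfigUpdate leaves every field untouched, so no events either.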
+ nodes[0].node.update_partial_channel_config(&channel.counterparty.node_id, &[channel.channel_id], &ChannelConfigUpdate::default()).unwrap();
+ let events = nodes[0].node.get_and_clear_pending_msg_events();
+ assert_eq!(events.len(), 0);
+
+ let new_cltv_expiry_delta = user_config.channel_config.cltv_expiry_delta + 6;
+ nodes[0].node.update_partial_channel_config(&channel.counterparty.node_id, &[channel.channel_id], &ChannelConfigUpdate {
+ cltv_expiry_delta: Some(new_cltv_expiry_delta),
+ ..Default::default()
+ }).unwrap();
+ assert_eq!(nodes[0].node.list_channels()[0].config.unwrap().cltv_expiry_delta, new_cltv_expiry_delta);
+ let events = nodes[0].node.get_and_clear_pending_msg_events();
+ assert_eq!(events.len(), 1);
+ match &events[0] {
+ MessageSendEvent::BroadcastChannelUpdate { .. } => {},
+ _ => panic!("expected BroadcastChannelUpdate event"),
+ }
+
+ let new_fee = user_config.channel_config.forwarding_fee_proportional_millionths + 100;
+ nodes[0].node.update_partial_channel_config(&channel.counterparty.node_id, &[channel.channel_id], &ChannelConfigUpdate {
+ forwarding_fee_proportional_millionths: Some(new_fee),
+ ..Default::default()
+ }).unwrap();
+ assert_eq!(nodes[0].node.list_channels()[0].config.unwrap().cltv_expiry_delta, new_cltv_expiry_delta);
+ assert_eq!(nodes[0].node.list_channels()[0].config.unwrap().forwarding_fee_proportional_millionths, new_fee);
+ let events = nodes[0].node.get_and_clear_pending_msg_events();
+ assert_eq!(events.len(), 1);
+ match &events[0] {
+ MessageSendEvent::BroadcastChannelUpdate { .. } => {},
+ _ => panic!("expected BroadcastChannelUpdate event"),
+ }
+ }
}
-#[cfg(all(any(test, feature = "_test_utils"), feature = "_bench_unstable"))]
+#[cfg(ldk_bench)]
pub mod bench {
use crate::chain::Listen;
use crate::chain::chainmonitor::{ChainMonitor, Persist};
- use crate::chain::keysinterface::{EntropySource, KeysManager, InMemorySigner};
- use crate::ln::channelmanager::{self, BestBlock, ChainParameters, ChannelManager, PaymentHash, PaymentPreimage, PaymentId};
+ use crate::sign::{KeysManager, InMemorySigner};
+ use crate::events::{Event, MessageSendEvent, MessageSendEventsProvider};
+ use crate::ln::channelmanager::{BestBlock, ChainParameters, ChannelManager, PaymentHash, PaymentPreimage, PaymentId, RecipientOnionFields, Retry};
use crate::ln::functional_test_utils::*;
use crate::ln::msgs::{ChannelMessageHandler, Init};
use crate::routing::gossip::NetworkGraph;
- use crate::routing::router::{PaymentParameters, get_route};
+ use crate::routing::router::{PaymentParameters, RouteParameters};
use crate::util::test_utils;
- use crate::util::config::UserConfig;
- use crate::util::events::{Event, MessageSendEvent, MessageSendEventsProvider};
+ use crate::util::config::{UserConfig, MaxDustHTLCExposure};
use bitcoin::hashes::Hash;
use bitcoin::hashes::sha256::Hash as Sha256;
use crate::sync::{Arc, Mutex};
- use test::Bencher;
+ use criterion::Criterion;
+
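+ // Alias for the fully-parameterized ChannelManager used below, generic only over the
+ // monitor persister.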
+ type Manager<'a, P> = ChannelManager<
+ &'a ChainMonitor<InMemorySigner, &'a test_utils::TestChainSource,
+ &'a test_utils::TestBroadcaster, &'a test_utils::TestFeeEstimator,
+ &'a test_utils::TestLogger, &'a P>,
+ &'a test_utils::TestBroadcaster, &'a KeysManager, &'a KeysManager, &'a KeysManager,
+ &'a test_utils::TestFeeEstimator, &'a test_utils::TestRouter<'a>,
+ &'a test_utils::TestLogger>;
- struct NodeHolder<'a, P: Persist<InMemorySigner>> {
- node: &'a ChannelManager<
- &'a ChainMonitor<InMemorySigner, &'a test_utils::TestChainSource,
- &'a test_utils::TestBroadcaster, &'a test_utils::TestFeeEstimator,
- &'a test_utils::TestLogger, &'a P>,
- &'a test_utils::TestBroadcaster, &'a KeysManager, &'a KeysManager, &'a KeysManager,
- &'a test_utils::TestFeeEstimator, &'a test_utils::TestRouter<'a>,
- &'a test_utils::TestLogger>,
+ struct ANodeHolder<'a, P: Persist<InMemorySigner>> {
+ node: &'a Manager<'a, P>,
+ }
+ impl<'a, P: Persist<InMemorySigner>> NodeHolder for ANodeHolder<'a, P> {
+ type CM = Manager<'a, P>;
+ #[inline]
+ fn node(&self) -> &Manager<'a, P> { self.node }
+ #[inline]
+ fn chain_monitor(&self) -> Option<&test_utils::TestChainMonitor> { None }
}
- #[cfg(test)]
- #[bench]
- fn bench_sends(bench: &mut Bencher) {
- bench_two_sends(bench, test_utils::TestPersister::new(), test_utils::TestPersister::new());
+ pub fn bench_sends(bench: &mut Criterion) {
+ bench_two_sends(bench, "bench_sends", test_utils::TestPersister::new(), test_utils::TestPersister::new());
}
- pub fn bench_two_sends<P: Persist<InMemorySigner>>(bench: &mut Bencher, persister_a: P, persister_b: P) {
+ pub fn bench_two_sends<P: Persist<InMemorySigner>>(bench: &mut Criterion, bench_name: &str, persister_a: P, persister_b: P) {
// Do a simple benchmark of sending a payment back and forth between two nodes.
// Note that this is unrealistic as each payment send will require at least two fsync
// calls per node.
let network = bitcoin::Network::Testnet;
+ let genesis_block = bitcoin::blockdata::constants::genesis_block(network);
- let tx_broadcaster = test_utils::TestBroadcaster{txn_broadcasted: Mutex::new(Vec::new()), blocks: Arc::new(Mutex::new(Vec::new()))};
+ let tx_broadcaster = test_utils::TestBroadcaster::new(network);
let fee_estimator = test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) };
let logger_a = test_utils::TestLogger::with_id("node a".to_owned());
let scorer = Mutex::new(test_utils::TestScorer::new());
let router = test_utils::TestRouter::new(Arc::new(NetworkGraph::new(network, &logger_a)), &scorer);
let mut config: UserConfig = Default::default();
+ config.channel_config.max_dust_htlc_exposure = MaxDustHTLCExposure::FeeRateMultiplier(5_000_000 / 253);
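+ // At the fixed 253 sat/kW bench feerate this multiplier permits roughly the old fixed
+ // 5M msat dust exposure (253 * (5_000_000 / 253) ≈ 5_000_000 msat).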
config.channel_handshake_config.minimum_depth = 1;
let chain_monitor_a = ChainMonitor::new(None, &tx_broadcaster, &logger_a, &fee_estimator, &persister_a);
let node_a = ChannelManager::new(&fee_estimator, &chain_monitor_a, &tx_broadcaster, &router, &logger_a, &keys_manager_a, &keys_manager_a, &keys_manager_a, config.clone(), ChainParameters {
network,
best_block: BestBlock::from_network(network),
- });
- let node_a_holder = NodeHolder { node: &node_a };
+ }, genesis_block.header.time);
+ let node_a_holder = ANodeHolder { node: &node_a };
let logger_b = test_utils::TestLogger::with_id("node b".to_owned());
let chain_monitor_b = ChainMonitor::new(None, &tx_broadcaster, &logger_b, &fee_estimator, &persister_b);
let node_b = ChannelManager::new(&fee_estimator, &chain_monitor_b, &tx_broadcaster, &router, &logger_b, &keys_manager_b, &keys_manager_b, &keys_manager_b, config.clone(), ChainParameters {
network,
best_block: BestBlock::from_network(network),
- });
- let node_b_holder = NodeHolder { node: &node_b };
-
- node_a.peer_connected(&node_b.get_our_node_id(), &Init { features: node_b.init_features(), remote_network_address: None }, true).unwrap();
- node_b.peer_connected(&node_a.get_our_node_id(), &Init { features: node_a.init_features(), remote_network_address: None }, false).unwrap();
+ }, genesis_block.header.time);
+ let node_b_holder = ANodeHolder { node: &node_b };
+
+ node_a.peer_connected(&node_b.get_our_node_id(), &Init {
+ features: node_b.init_features(), networks: None, remote_network_address: None
+ }, true).unwrap();
+ node_b.peer_connected(&node_a.get_our_node_id(), &Init {
+ features: node_a.init_features(), networks: None, remote_network_address: None
+ }, false).unwrap();
node_a.create_channel(node_b.get_our_node_id(), 8_000_000, 100_000_000, 42, None).unwrap();
node_b.handle_open_channel(&node_a.get_our_node_id(), &get_event_msg!(node_a_holder, MessageSendEvent::SendOpenChannel, node_b.get_our_node_id()));
node_a.handle_accept_channel(&node_b.get_our_node_id(), &get_event_msg!(node_b_holder, MessageSendEvent::SendAcceptChannel, node_a.get_our_node_id()));
} else { panic!(); }
node_b.handle_funding_created(&node_a.get_our_node_id(), &get_event_msg!(node_a_holder, MessageSendEvent::SendFundingCreated, node_b.get_our_node_id()));
+ let events_b = node_b.get_and_clear_pending_events();
+ assert_eq!(events_b.len(), 1);
+ match events_b[0] {
+ Event::ChannelPending{ ref counterparty_node_id, .. } => {
+ assert_eq!(*counterparty_node_id, node_a.get_our_node_id());
+ },
+ _ => panic!("Unexpected event"),
+ }
+
node_a.handle_funding_signed(&node_b.get_our_node_id(), &get_event_msg!(node_b_holder, MessageSendEvent::SendFundingSigned, node_a.get_our_node_id()));
+ let events_a = node_a.get_and_clear_pending_events();
+ assert_eq!(events_a.len(), 1);
+ match events_a[0] {
+ Event::ChannelPending{ ref counterparty_node_id, .. } => {
+ assert_eq!(*counterparty_node_id, node_b.get_our_node_id());
+ },
+ _ => panic!("Unexpected event"),
+ }
assert_eq!(&tx_broadcaster.txn_broadcasted.lock().unwrap()[..], &[tx.clone()]);
- let block = Block {
- header: BlockHeader { version: 0x20000000, prev_blockhash: BestBlock::from_network(network).block_hash(), merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42 },
- txdata: vec![tx],
- };
+ let block = create_dummy_block(BestBlock::from_network(network).block_hash(), 42, vec![tx]);
Listen::block_connected(&node_a, &block, 1);
Listen::block_connected(&node_b, &block, 1);
_ => panic!("Unexpected event"),
}
- let dummy_graph = NetworkGraph::new(network, &logger_a);
-
let mut payment_count: u64 = 0;
macro_rules! send_payment {
($node_a: expr, $node_b: expr) => {
- let usable_channels = $node_a.list_usable_channels();
let payment_params = PaymentParameters::from_node_id($node_b.get_our_node_id(), TEST_FINAL_CLTV)
- .with_features($node_b.invoice_features());
- let scorer = test_utils::TestScorer::new();
- let seed = [3u8; 32];
- let keys_manager = KeysManager::new(&seed, 42, 42);
- let random_seed_bytes = keys_manager.get_secure_random_bytes();
- let route = get_route(&$node_a.get_our_node_id(), &payment_params, &dummy_graph.read_only(),
- Some(&usable_channels.iter().map(|r| r).collect::<Vec<_>>()), 10_000, TEST_FINAL_CLTV, &logger_a, &scorer, &random_seed_bytes).unwrap();
-
+ .with_bolt11_features($node_b.invoice_features()).unwrap();
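+ // The route itself is computed inside send_payment from these RouteParameters, using
+ // the router the ChannelManager was constructed with.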
let mut payment_preimage = PaymentPreimage([0; 32]);
payment_preimage.0[0..8].copy_from_slice(&payment_count.to_le_bytes());
payment_count += 1;
let payment_hash = PaymentHash(Sha256::hash(&payment_preimage.0[..]).into_inner());
let payment_secret = $node_b.create_inbound_payment_for_hash(payment_hash, None, 7200, None).unwrap();
- $node_a.send_payment(&route, payment_hash, &Some(payment_secret), PaymentId(payment_hash.0)).unwrap();
+ $node_a.send_payment(payment_hash, RecipientOnionFields::secret_only(payment_secret),
+ PaymentId(payment_hash.0), RouteParameters {
+ payment_params, final_value_msat: 10_000,
+ }, Retry::Attempts(0)).unwrap();
let payment_event = SendEvent::from_event($node_a.get_and_clear_pending_msg_events().pop().unwrap());
$node_b.handle_update_add_htlc(&$node_a.get_our_node_id(), &payment_event.msgs[0]);
$node_b.handle_commitment_signed(&$node_a.get_our_node_id(), &payment_event.commitment_msg);
- let (raa, cs) = get_revoke_commit_msgs!(NodeHolder { node: &$node_b }, $node_a.get_our_node_id());
+ let (raa, cs) = get_revoke_commit_msgs(&ANodeHolder { node: &$node_b }, &$node_a.get_our_node_id());
$node_a.handle_revoke_and_ack(&$node_b.get_our_node_id(), &raa);
$node_a.handle_commitment_signed(&$node_b.get_our_node_id(), &cs);
- $node_b.handle_revoke_and_ack(&$node_a.get_our_node_id(), &get_event_msg!(NodeHolder { node: &$node_a }, MessageSendEvent::SendRevokeAndACK, $node_b.get_our_node_id()));
+ $node_b.handle_revoke_and_ack(&$node_a.get_our_node_id(), &get_event_msg!(ANodeHolder { node: &$node_a }, MessageSendEvent::SendRevokeAndACK, $node_b.get_our_node_id()));
- expect_pending_htlcs_forwardable!(NodeHolder { node: &$node_b });
- expect_payment_claimable!(NodeHolder { node: &$node_b }, payment_hash, payment_secret, 10_000);
+ expect_pending_htlcs_forwardable!(ANodeHolder { node: &$node_b });
+ expect_payment_claimable!(ANodeHolder { node: &$node_b }, payment_hash, payment_secret, 10_000);
$node_b.claim_funds(payment_preimage);
- expect_payment_claimed!(NodeHolder { node: &$node_b }, payment_hash, 10_000);
+ expect_payment_claimed!(ANodeHolder { node: &$node_b }, payment_hash, 10_000);
match $node_b.get_and_clear_pending_msg_events().pop().unwrap() {
MessageSendEvent::UpdateHTLCs { node_id, updates } => {
_ => panic!("Failed to generate claim event"),
}
- let (raa, cs) = get_revoke_commit_msgs!(NodeHolder { node: &$node_a }, $node_b.get_our_node_id());
+ let (raa, cs) = get_revoke_commit_msgs(&ANodeHolder { node: &$node_a }, &$node_b.get_our_node_id());
$node_b.handle_revoke_and_ack(&$node_a.get_our_node_id(), &raa);
$node_b.handle_commitment_signed(&$node_a.get_our_node_id(), &cs);
- $node_a.handle_revoke_and_ack(&$node_b.get_our_node_id(), &get_event_msg!(NodeHolder { node: &$node_b }, MessageSendEvent::SendRevokeAndACK, $node_a.get_our_node_id()));
+ $node_a.handle_revoke_and_ack(&$node_b.get_our_node_id(), &get_event_msg!(ANodeHolder { node: &$node_b }, MessageSendEvent::SendRevokeAndACK, $node_a.get_our_node_id()));
- expect_payment_sent!(NodeHolder { node: &$node_a }, payment_preimage);
+ expect_payment_sent!(ANodeHolder { node: &$node_a }, payment_preimage);
}
}
- bench.iter(|| {
+ bench.bench_function(bench_name, |b| b.iter(|| {
send_payment!(node_a, node_b);
send_payment!(node_b, node_a);
- });
+ }));
}
}