use bitcoin::blockdata::block::BlockHeader;
use bitcoin::blockdata::transaction::Transaction;
-use bitcoin::blockdata::constants::genesis_block;
+use bitcoin::blockdata::constants::{genesis_block, ChainHash};
use bitcoin::network::constants::Network;
use bitcoin::hashes::Hash;
// Since this struct is returned in `list_channels` methods, expose it here in case users want to
// construct one themselves.
use crate::ln::{inbound_payment, PaymentHash, PaymentPreimage, PaymentSecret};
-use crate::ln::channel::{Channel, ChannelError, ChannelUpdateStatus, UpdateFulfillCommitFetch};
+use crate::ln::channel::{Channel, ChannelError, ChannelUpdateStatus, ShutdownResult, UpdateFulfillCommitFetch};
use crate::ln::features::{ChannelFeatures, ChannelTypeFeatures, InitFeatures, NodeFeatures};
#[cfg(any(feature = "_test_utils", test))]
use crate::ln::features::InvoiceFeatures;
use crate::routing::gossip::NetworkGraph;
-use crate::routing::router::{BlindedTail, DefaultRouter, InFlightHtlcs, Path, PaymentParameters, Route, RouteHop, RouteParameters, Router};
-use crate::routing::scoring::ProbabilisticScorer;
+use crate::routing::router::{BlindedTail, DefaultRouter, InFlightHtlcs, Path, Payee, PaymentParameters, Route, RouteHop, RouteParameters, Router};
+use crate::routing::scoring::{ProbabilisticScorer, ProbabilisticScoringFeeParameters};
use crate::ln::msgs;
use crate::ln::onion_utils;
use crate::ln::onion_utils::HTLCFailReason;
use crate::ln::outbound_payment;
use crate::ln::outbound_payment::{OutboundPayments, PaymentAttempts, PendingOutboundPayment};
use crate::ln::wire::Encode;
-use crate::chain::keysinterface::{EntropySource, KeysManager, NodeSigner, Recipient, SignerProvider, ChannelSigner, WriteableEcdsaChannelSigner};
+use crate::sign::{EntropySource, KeysManager, NodeSigner, Recipient, SignerProvider, ChannelSigner, WriteableEcdsaChannelSigner};
use crate::util::config::{UserConfig, ChannelConfig};
use crate::util::wakers::{Future, Notifier};
use crate::util::scid_utils::fake_scid;
// Re-export this for use in the public API.
pub use crate::ln::outbound_payment::{PaymentSendFailure, Retry, RetryableSendFailure, RecipientOnionFields};
+use crate::ln::script::ShutdownScript;
// We hold various information about HTLC relay in the HTLC objects in Channel itself:
//
IncorrectOrUnknownPaymentDetails = 0x4000 | 15,
}
-type ShutdownResult = (Option<(OutPoint, ChannelMonitorUpdate)>, Vec<(HTLCSource, PaymentHash, PublicKey, [u8; 32])>);
-
/// Error type returned across the peer_state mutex boundary. When an Err is generated for a
/// Channel, we generally end up with a ChannelError::Close for which we have to close the channel
/// immediately (ie with no further calls on it made). Thus, this step happens inside a
pending_claiming_payments: HashMap<PaymentHash, ClaimingPayment>,
}
-/// Events which we process internally but cannot be procsesed immediately at the generation site
-/// for some reason. They are handled in timer_tick_occurred, so may be processed with
-/// quite some time lag.
+/// Events which we process internally but cannot be processed immediately at the generation site,
+/// usually because we're running pre-full-init. They are handled immediately once we detect we are
+/// running normally, and specifically must be processed before any other non-background
+/// [`ChannelMonitorUpdate`]s are applied.
enum BackgroundEvent {
- /// Handle a ChannelMonitorUpdate that closes a channel, broadcasting its current latest holder
- /// commitment transaction.
- ClosingMonitorUpdate((OutPoint, ChannelMonitorUpdate)),
+ /// Handle a ChannelMonitorUpdate which closes the channel. This is only separated from
+ /// [`Self::MonitorUpdateRegeneratedOnStartup`] as the maybe-non-closing variant needs a public
+ /// key to handle channel resumption, whereas if the channel has been force-closed we do not
+ /// need the counterparty node_id.
+ ///
+ /// Note that any such events are lost on shutdown, so in general they must be updates which
+ /// are regenerated on startup.
+ ClosingMonitorUpdateRegeneratedOnStartup((OutPoint, ChannelMonitorUpdate)),
+ /// Handle a ChannelMonitorUpdate which may or may not close the channel and may unblock the
+ /// channel to continue normal operation.
+ ///
+	/// In general this should be used rather than
+	/// [`Self::ClosingMonitorUpdateRegeneratedOnStartup`]; however, in cases where the
+	/// `counterparty_node_id` is not available because the channel has closed from a
+	/// [`ChannelMonitor`] error, the other variant is acceptable.
+ ///
+ /// Note that any such events are lost on shutdown, so in general they must be updates which
+ /// are regenerated on startup.
+ MonitorUpdateRegeneratedOnStartup {
+ counterparty_node_id: PublicKey,
+ funding_txo: OutPoint,
+ update: ChannelMonitorUpdate
+ },
}
#[derive(Debug)]
/// this payment. Note that this is only best-effort. On restart it's possible such a duplicate
/// event can be generated.
PaymentClaimed { payment_hash: PaymentHash },
- /// Indicates an [`events::Event`] should be surfaced to the user.
- EmitEvent { event: events::Event },
+ /// Indicates an [`events::Event`] should be surfaced to the user and possibly resume the
+ /// operation of another channel.
+ ///
+ /// This is usually generated when we've forwarded an HTLC and want to block the outbound edge
+ /// from completing a monitor update which removes the payment preimage until the inbound edge
+ /// completes a monitor update containing the payment preimage. In that case, after the inbound
+ /// edge completes, we will surface an [`Event::PaymentForwarded`] as well as unblock the
+ /// outbound edge.
+ EmitEventAndFreeOtherChannel {
+ event: events::Event,
+ downstream_counterparty_and_funding_outpoint: Option<(PublicKey, OutPoint, RAAMonitorUpdateBlockingAction)>,
+ },
}
impl_writeable_tlv_based_enum_upgradable!(MonitorUpdateCompletionAction,
(0, PaymentClaimed) => { (0, payment_hash, required) },
- (2, EmitEvent) => { (0, event, upgradable_required) },
+ (2, EmitEventAndFreeOtherChannel) => {
+ (0, event, upgradable_required),
+ // LDK prior to 0.0.116 did not have this field as the monitor update application order was
+ // required by clients. If we downgrade to something prior to 0.0.116 this may result in
+ // monitor updates which aren't properly blocked or resumed, however that's fine - we don't
+ // support async monitor updates even in LDK 0.0.116 and once we do we'll require no
+ // downgrades to prior versions.
+ (1, downstream_counterparty_and_funding_outpoint, option),
+ },
+);
+
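+/// An action to take after an [`Event`] has been fully handled by the user. Currently this is
+/// only used to release a [`ChannelMonitorUpdate`] which was held back until the event (and the
+/// channel state changes it describes) had been handled, see
+/// [`Self::ReleaseRAAChannelMonitorUpdate`].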
+#[derive(Clone, Debug, PartialEq, Eq)]
+pub(crate) enum EventCompletionAction {
+ ReleaseRAAChannelMonitorUpdate {
+ counterparty_node_id: PublicKey,
+ channel_funding_outpoint: OutPoint,
+ },
+}
+impl_writeable_tlv_based_enum!(EventCompletionAction,
+ (0, ReleaseRAAChannelMonitorUpdate) => {
+ (0, channel_funding_outpoint, required),
+ (2, counterparty_node_id, required),
+ };
);
+#[derive(Clone, PartialEq, Eq, Debug)]
+/// If something is blocked on the completion of an RAA-generated [`ChannelMonitorUpdate`] we track
+/// the blocked action here. See enum variants for more info.
+pub(crate) enum RAAMonitorUpdateBlockingAction {
+ /// A forwarded payment was claimed. We block the downstream channel completing its monitor
+ /// update which removes the HTLC preimage until the upstream channel has gotten the preimage
+ /// durably to disk.
+ ForwardedPaymentInboundClaim {
+ /// The upstream channel ID (i.e. the inbound edge).
+ channel_id: [u8; 32],
+ /// The HTLC ID on the inbound edge.
+ htlc_id: u64,
+ },
+}
+
+impl RAAMonitorUpdateBlockingAction {
+ #[allow(unused)]
+ fn from_prev_hop_data(prev_hop: &HTLCPreviousHopData) -> Self {
+ Self::ForwardedPaymentInboundClaim {
+ channel_id: prev_hop.outpoint.to_channel_id(),
+ htlc_id: prev_hop.htlc_id,
+ }
+ }
+}
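+
+// A minimal usage sketch (illustrative only; the surrounding claim logic and the
+// `downstream_chan_id` binding are assumed): when claiming a forwarded HTLC, the inbound edge's
+// `HTLCPreviousHopData` becomes a blocker on the downstream channel's RAA monitor updates until
+// the preimage is durably persisted upstream:
+//
+//   let blocker = RAAMonitorUpdateBlockingAction::from_prev_hop_data(&prev_hop);
+//   peer_state.actions_blocking_raa_monitor_updates
+//       .entry(downstream_chan_id).or_insert_with(Vec::new).push(blocker);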
+
+impl_writeable_tlv_based_enum!(RAAMonitorUpdateBlockingAction,
+ (0, ForwardedPaymentInboundClaim) => { (0, channel_id, required), (2, htlc_id, required) }
+;);
+
+
/// State we hold per-peer.
pub(super) struct PeerState<Signer: ChannelSigner> {
/// `temporary_channel_id` or `channel_id` -> `channel`.
/// to funding appearing on-chain), the downstream `ChannelMonitor` set is required to ensure
/// duplicates do not occur, so such channels should fail without a monitor update completing.
monitor_update_blocked_actions: BTreeMap<[u8; 32], Vec<MonitorUpdateCompletionAction>>,
+ /// If another channel's [`ChannelMonitorUpdate`] needs to complete before a channel we have
+ /// with this peer can complete an RAA [`ChannelMonitorUpdate`] (e.g. because the RAA update
+ /// will remove a preimage that needs to be durably in an upstream channel first), we put an
+ /// entry here to note that the channel with the key's ID is blocked on a set of actions.
+ actions_blocking_raa_monitor_updates: BTreeMap<[u8; 32], Vec<RAAMonitorUpdateBlockingAction>>,
/// The peer is currently connected (i.e. we've seen a
/// [`ChannelMessageHandler::peer_connected`] and no corresponding
	/// [`ChannelMessageHandler::peer_disconnected`]).
Arc<DefaultRouter<
Arc<NetworkGraph<Arc<L>>>,
Arc<L>,
- Arc<Mutex<ProbabilisticScorer<Arc<NetworkGraph<Arc<L>>>, Arc<L>>>>
+ Arc<Mutex<ProbabilisticScorer<Arc<NetworkGraph<Arc<L>>>, Arc<L>>>>,
+ ProbabilisticScoringFeeParameters,
+ ProbabilisticScorer<Arc<NetworkGraph<Arc<L>>>, Arc<L>>,
>>,
Arc<L>
>;
/// of [`KeysManager`] and [`DefaultRouter`].
///
/// This is not exported to bindings users as Arcs don't make sense in bindings
-pub type SimpleRefChannelManager<'a, 'b, 'c, 'd, 'e, 'f, 'g, 'h, M, T, F, L> = ChannelManager<&'a M, &'b T, &'c KeysManager, &'c KeysManager, &'c KeysManager, &'d F, &'e DefaultRouter<&'f NetworkGraph<&'g L>, &'g L, &'h Mutex<ProbabilisticScorer<&'f NetworkGraph<&'g L>, &'g L>>>, &'g L>;
+pub type SimpleRefChannelManager<'a, 'b, 'c, 'd, 'e, 'f, 'g, 'h, M, T, F, L> = ChannelManager<&'a M, &'b T, &'c KeysManager, &'c KeysManager, &'c KeysManager, &'d F, &'e DefaultRouter<&'f NetworkGraph<&'g L>, &'g L, &'h Mutex<ProbabilisticScorer<&'f NetworkGraph<&'g L>, &'g L>>, ProbabilisticScoringFeeParameters, ProbabilisticScorer<&'f NetworkGraph<&'g L>, &'g L>>, &'g L>;
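+// Expands to the `AChannelManager` trait definition with the given visibility: `pub` when
+// building tests (so test utilities outside this crate can use it) and `pub(crate)` otherwise;
+// see the two invocations below the definition.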
+macro_rules! define_test_pub_trait { ($vis: vis) => {
/// A trivial trait which describes any [`ChannelManager`] used in testing.
-#[cfg(any(test, feature = "_test_utils"))]
-pub trait AChannelManager {
- type Watch: chain::Watch<Self::Signer>;
+$vis trait AChannelManager {
+ type Watch: chain::Watch<Self::Signer> + ?Sized;
type M: Deref<Target = Self::Watch>;
- type Broadcaster: BroadcasterInterface;
+ type Broadcaster: BroadcasterInterface + ?Sized;
type T: Deref<Target = Self::Broadcaster>;
- type EntropySource: EntropySource;
+ type EntropySource: EntropySource + ?Sized;
type ES: Deref<Target = Self::EntropySource>;
- type NodeSigner: NodeSigner;
+ type NodeSigner: NodeSigner + ?Sized;
type NS: Deref<Target = Self::NodeSigner>;
- type Signer: WriteableEcdsaChannelSigner;
- type SignerProvider: SignerProvider<Signer = Self::Signer>;
+ type Signer: WriteableEcdsaChannelSigner + Sized;
+ type SignerProvider: SignerProvider<Signer = Self::Signer> + ?Sized;
type SP: Deref<Target = Self::SignerProvider>;
- type FeeEstimator: FeeEstimator;
+ type FeeEstimator: FeeEstimator + ?Sized;
type F: Deref<Target = Self::FeeEstimator>;
- type Router: Router;
+ type Router: Router + ?Sized;
type R: Deref<Target = Self::Router>;
- type Logger: Logger;
+ type Logger: Logger + ?Sized;
type L: Deref<Target = Self::Logger>;
fn get_cm(&self) -> &ChannelManager<Self::M, Self::T, Self::ES, Self::NS, Self::SP, Self::F, Self::R, Self::L>;
}
+} }
#[cfg(any(test, feature = "_test_utils"))]
+define_test_pub_trait!(pub);
+#[cfg(not(any(test, feature = "_test_utils")))]
+define_test_pub_trait!(pub(crate));
impl<M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Deref, F: Deref, R: Deref, L: Deref> AChannelManager
for ChannelManager<M, T, ES, NS, SP, F, R, L>
where
- M::Target: chain::Watch<<SP::Target as SignerProvider>::Signer> + Sized,
- T::Target: BroadcasterInterface + Sized,
- ES::Target: EntropySource + Sized,
- NS::Target: NodeSigner + Sized,
- SP::Target: SignerProvider + Sized,
- F::Target: FeeEstimator + Sized,
- R::Target: Router + Sized,
- L::Target: Logger + Sized,
+ M::Target: chain::Watch<<SP::Target as SignerProvider>::Signer>,
+ T::Target: BroadcasterInterface,
+ ES::Target: EntropySource,
+ NS::Target: NodeSigner,
+ SP::Target: SignerProvider,
+ F::Target: FeeEstimator,
+ R::Target: Router,
+ L::Target: Logger,
{
type Watch = M::Target;
type M = M;
#[cfg(any(test, feature = "_test_utils"))]
pub(super) per_peer_state: FairRwLock<HashMap<PublicKey, Mutex<PeerState<<SP::Target as SignerProvider>::Signer>>>>,
+ /// The set of events which we need to give to the user to handle. In some cases an event may
+ /// require some further action after the user handles it (currently only blocking a monitor
+ /// update from being handed to the user to ensure the included changes to the channel state
+ /// are handled by the user before they're persisted durably to disk). In that case, the second
+ /// element in the tuple is set to `Some` with further details of the action.
+ ///
+ /// Note that events MUST NOT be removed from pending_events after deserialization, as they
+ /// could be in the middle of being processed without the direct mutex held.
+ ///
/// See `ChannelManager` struct-level documentation for lock order requirements.
- pending_events: Mutex<Vec<events::Event>>,
+ pending_events: Mutex<VecDeque<(events::Event, Option<EventCompletionAction>)>>,
/// A simple atomic flag to ensure only one task at a time can be processing events asynchronously.
pending_events_processor: AtomicBool,
+
+ /// If we are running during init (either directly during the deserialization method or in
+ /// block connection methods which run after deserialization but before normal operation) we
+ /// cannot provide the user with [`ChannelMonitorUpdate`]s through the normal update flow -
+ /// prior to normal operation the user may not have loaded the [`ChannelMonitor`]s into their
+ /// [`ChainMonitor`] and thus attempting to update it will fail or panic.
+ ///
+ /// Thus, we place them here to be handled as soon as possible once we are running normally.
+ ///
/// See `ChannelManager` struct-level documentation for lock order requirements.
+ ///
+ /// [`ChainMonitor`]: crate::chain::chainmonitor::ChainMonitor
pending_background_events: Mutex<Vec<BackgroundEvent>>,
/// Used when we have to take a BIG lock to make sure everything is self-consistent.
/// Essentially just when we're serializing ourselves out.
/// Notifier the lock contains sends out a notification when the lock is released.
total_consistency_lock: RwLock<()>,
+ #[cfg(debug_assertions)]
+ background_events_processed_since_startup: AtomicBool,
+
persistence_notifier: Notifier,
entropy_source: ES,
}
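+// `NotifyOption` is returned by `process_background_events` (and the `optionally_notify`
+// closures), so it is `#[must_use]` to catch call sites which silently drop the persistence
+// decision.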
#[derive(Copy, Clone, PartialEq)]
+#[must_use]
enum NotifyOption {
DoPersist,
SkipPersist,
}
impl<'a> PersistenceNotifierGuard<'a, fn() -> NotifyOption> { // We don't care what the concrete F is here, it's unused
- fn notify_on_drop(lock: &'a RwLock<()>, notifier: &'a Notifier) -> PersistenceNotifierGuard<'a, impl Fn() -> NotifyOption> {
- PersistenceNotifierGuard::optionally_notify(lock, notifier, || -> NotifyOption { NotifyOption::DoPersist })
+ fn notify_on_drop<C: AChannelManager>(cm: &'a C) -> PersistenceNotifierGuard<'a, impl Fn() -> NotifyOption> {
+ let read_guard = cm.get_cm().total_consistency_lock.read().unwrap();
+ let _ = cm.get_cm().process_background_events(); // We always persist
+
+ PersistenceNotifierGuard {
+ persistence_notifier: &cm.get_cm().persistence_notifier,
+ should_persist: || -> NotifyOption { NotifyOption::DoPersist },
+ _read_guard: read_guard,
+ }
}
+ /// Note that if any [`ChannelMonitorUpdate`]s are possibly generated,
+ /// [`ChannelManager::process_background_events`] MUST be called first.
fn optionally_notify<F: Fn() -> NotifyOption>(lock: &'a RwLock<()>, notifier: &'a Notifier, persist_check: F) -> PersistenceNotifierGuard<'a, F> {
let read_guard = lock.read().unwrap();
/// Route hints used in constructing invoices for [phantom node payments].
///
-/// [phantom node payments]: crate::chain::keysinterface::PhantomKeysManager
+/// [phantom node payments]: crate::sign::PhantomKeysManager
#[derive(Clone)]
pub struct PhantomRouteHints {
/// The list of channels to be included in the invoice route hints.
});
}
if let Some((channel_id, user_channel_id)) = chan_id {
- $self.pending_events.lock().unwrap().push(events::Event::ChannelClosed {
+ $self.pending_events.lock().unwrap().push_back((events::Event::ChannelClosed {
channel_id, user_channel_id,
reason: ClosureReason::ProcessingError { err: err.err.clone() }
- });
+ }, None));
}
}
macro_rules! emit_channel_pending_event {
($locked_events: expr, $channel: expr) => {
if $channel.should_emit_channel_pending_event() {
- $locked_events.push(events::Event::ChannelPending {
+ $locked_events.push_back((events::Event::ChannelPending {
channel_id: $channel.channel_id(),
former_temporary_channel_id: $channel.temporary_channel_id(),
counterparty_node_id: $channel.get_counterparty_node_id(),
user_channel_id: $channel.get_user_id(),
funding_txo: $channel.get_funding_txo().unwrap().into_bitcoin_outpoint(),
- });
+ }, None));
$channel.set_channel_pending_event_emitted();
}
}
($locked_events: expr, $channel: expr) => {
if $channel.should_emit_channel_ready_event() {
debug_assert!($channel.channel_pending_event_emitted());
- $locked_events.push(events::Event::ChannelReady {
+ $locked_events.push_back((events::Event::ChannelReady {
channel_id: $channel.channel_id(),
user_channel_id: $channel.get_user_id(),
counterparty_node_id: $channel.get_counterparty_node_id(),
channel_type: $channel.get_channel_type().clone(),
- });
+ }, None));
$channel.set_channel_ready_event_emitted();
}
}
// update_maps_on_chan_removal needs to be able to take id_to_peer, so make sure we can in
// any case so that it won't deadlock.
debug_assert_ne!($self.id_to_peer.held_by_thread(), LockHeldState::HeldByThread);
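+		// Startup-regenerated background events must be processed before any other
+		// `ChannelMonitorUpdate`s are applied (see `BackgroundEvent`); debug-assert that
+		// ordering here.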
+ #[cfg(debug_assertions)] {
+ debug_assert!($self.background_events_processed_since_startup.load(Ordering::Acquire));
+ }
match $update_res {
ChannelMonitorUpdateStatus::InProgress => {
log_debug!($self.logger, "ChannelMonitor update for {} in flight, holding messages until the update completes.",
res
},
ChannelMonitorUpdateStatus::Completed => {
- if ($update_id == 0 || $chan.get_next_monitor_update()
- .expect("We can't be processing a monitor update if it isn't queued")
- .update_id == $update_id) &&
- $chan.get_latest_monitor_update_id() == $update_id
- {
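+				// Mark this update as completed and, once the channel reports no monitor
+				// updates still pending, resume normal channel operation.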
+ $chan.complete_one_mon_update($update_id);
+ if $chan.no_monitor_updates_pending() {
handle_monitor_update_completion!($self, $update_id, $peer_state_lock, $peer_state, $per_peer_state_lock, $chan);
}
Ok(())
// persists happen while processing monitor events.
let _read_guard = $self.total_consistency_lock.read().unwrap();
+ // Because `handle_post_event_actions` may send `ChannelMonitorUpdate`s to the user we must
+ // ensure any startup-generated background events are handled first.
+ if $self.process_background_events() == NotifyOption::DoPersist { result = NotifyOption::DoPersist; }
+
// TODO: This behavior should be documented. It's unintuitive that we query
// ChannelMonitors when clearing other events.
if $self.process_pending_monitor_events() {
result = NotifyOption::DoPersist;
}
- for event in pending_events {
+ let mut post_event_actions = Vec::new();
+
+ for (event, action_opt) in pending_events {
$event_to_handle = event;
$handle_event;
+ if let Some(action) = action_opt {
+ post_event_actions.push(action);
+ }
}
{
$self.pending_events_processor.store(false, Ordering::Release);
}
+ if !post_event_actions.is_empty() {
+ $self.handle_post_event_actions(post_event_actions);
+ // If we had some actions, go around again as we may have more events now
+ processed_all_events = false;
+ }
+
if result == NotifyOption::DoPersist {
$self.persistence_notifier.notify();
}
per_peer_state: FairRwLock::new(HashMap::new()),
- pending_events: Mutex::new(Vec::new()),
+ pending_events: Mutex::new(VecDeque::new()),
pending_events_processor: AtomicBool::new(false),
pending_background_events: Mutex::new(Vec::new()),
total_consistency_lock: RwLock::new(()),
+ #[cfg(debug_assertions)]
+ background_events_processed_since_startup: AtomicBool::new(false),
persistence_notifier: Notifier::new(),
entropy_source,
return Err(APIError::APIMisuseError { err: format!("Channel value must be at least 1000 satoshis. It was {}", channel_value_satoshis) });
}
- let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
+ let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
// We want to make sure the lock is actually acquired by PersistenceNotifierGuard.
debug_assert!(&self.total_consistency_lock.try_write().is_err());
let mut pending_events_lock = self.pending_events.lock().unwrap();
match channel.unbroadcasted_funding() {
Some(transaction) => {
- pending_events_lock.push(events::Event::DiscardFunding { channel_id: channel.channel_id(), transaction })
+ pending_events_lock.push_back((events::Event::DiscardFunding {
+ channel_id: channel.channel_id(), transaction
+ }, None));
},
None => {},
}
- pending_events_lock.push(events::Event::ChannelClosed {
+ pending_events_lock.push_back((events::Event::ChannelClosed {
channel_id: channel.channel_id(),
user_channel_id: channel.get_user_id(),
reason: closure_reason
- });
+ }, None));
}
- fn close_channel_internal(&self, channel_id: &[u8; 32], counterparty_node_id: &PublicKey, target_feerate_sats_per_1000_weight: Option<u32>) -> Result<(), APIError> {
- let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
+ fn close_channel_internal(&self, channel_id: &[u8; 32], counterparty_node_id: &PublicKey, target_feerate_sats_per_1000_weight: Option<u32>, override_shutdown_script: Option<ShutdownScript>) -> Result<(), APIError> {
+ let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
let mut failed_htlcs: Vec<(HTLCSource, PaymentHash)>;
let result: Result<(), _> = loop {
let funding_txo_opt = chan_entry.get().get_funding_txo();
let their_features = &peer_state.latest_features;
let (shutdown_msg, mut monitor_update_opt, htlcs) = chan_entry.get_mut()
- .get_shutdown(&self.signer_provider, their_features, target_feerate_sats_per_1000_weight)?;
+ .get_shutdown(&self.signer_provider, their_features, target_feerate_sats_per_1000_weight, override_shutdown_script)?;
failed_htlcs = htlcs;
// We can send the `shutdown` message before updating the `ChannelMonitor`
/// [`Normal`]: crate::chain::chaininterface::ConfirmationTarget::Normal
/// [`SendShutdown`]: crate::events::MessageSendEvent::SendShutdown
pub fn close_channel(&self, channel_id: &[u8; 32], counterparty_node_id: &PublicKey) -> Result<(), APIError> {
- self.close_channel_internal(channel_id, counterparty_node_id, None)
+ self.close_channel_internal(channel_id, counterparty_node_id, None, None)
}
/// Begins the process of closing a channel. After this call (plus some timeout), no new HTLCs
/// transaction feerate below `target_feerate_sat_per_1000_weight` (or the feerate which
/// will appear on a force-closure transaction, whichever is lower).
///
+ /// The `shutdown_script` provided will be used as the `scriptPubKey` for the closing transaction.
+ /// Will fail if a shutdown script has already been set for this channel by
+	/// [`ChannelHandshakeConfig::commit_upfront_shutdown_pubkey`]. The given shutdown script must
+ /// also be compatible with our and the counterparty's features.
+ ///
/// May generate a [`SendShutdown`] message event on success, which should be relayed.
///
/// Raises [`APIError::ChannelUnavailable`] if the channel cannot be closed due to failing to
/// [`Background`]: crate::chain::chaininterface::ConfirmationTarget::Background
/// [`Normal`]: crate::chain::chaininterface::ConfirmationTarget::Normal
/// [`SendShutdown`]: crate::events::MessageSendEvent::SendShutdown
- pub fn close_channel_with_target_feerate(&self, channel_id: &[u8; 32], counterparty_node_id: &PublicKey, target_feerate_sats_per_1000_weight: u32) -> Result<(), APIError> {
- self.close_channel_internal(channel_id, counterparty_node_id, Some(target_feerate_sats_per_1000_weight))
+ pub fn close_channel_with_feerate_and_script(&self, channel_id: &[u8; 32], counterparty_node_id: &PublicKey, target_feerate_sats_per_1000_weight: Option<u32>, shutdown_script: Option<ShutdownScript>) -> Result<(), APIError> {
+ self.close_channel_internal(channel_id, counterparty_node_id, target_feerate_sats_per_1000_weight, shutdown_script)
}
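+
+	// A hedged usage sketch (identifiers such as `our_pubkey`, `channel_id` and
+	// `counterparty_node_id` are assumed): cooperatively close a channel to a specific P2WPKH
+	// script at a capped feerate:
+	//
+	//   let script = ShutdownScript::new_p2wpkh_from_pubkey(our_pubkey);
+	//   channel_manager.close_channel_with_feerate_and_script(
+	//       &channel_id, &counterparty_node_id, Some(253), Some(script))?;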
#[inline]
let receiver = HTLCDestination::NextHopChannel { node_id: Some(counterparty_node_id), channel_id };
self.fail_htlc_backwards_internal(&source, &payment_hash, &reason, receiver);
}
- if let Some((funding_txo, monitor_update)) = monitor_update_option {
+ if let Some((_, funding_txo, monitor_update)) = monitor_update_option {
// There isn't anything we can do if we get an update failure - we're already
// force-closing. The monitor update on the required in-memory copy should broadcast
// the latest local state, which is the best we can do anyway. Thus, it is safe to
}
fn force_close_sending_error(&self, channel_id: &[u8; 32], counterparty_node_id: &PublicKey, broadcast: bool) -> Result<(), APIError> {
- let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
+ let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
match self.force_close_channel_with_peer(channel_id, counterparty_node_id, None, broadcast) {
Ok(counterparty_node_id) => {
let per_peer_state = self.per_peer_state.read().unwrap();
let onion_keys = onion_utils::construct_onion_keys(&self.secp_ctx, &path, &session_priv)
.map_err(|_| APIError::InvalidRoute{err: "Pubkey along hop was maliciously selected".to_owned()})?;
let (onion_payloads, htlc_msat, htlc_cltv) = onion_utils::build_onion_payloads(path, total_value, recipient_onion, cur_height, keysend_preimage)?;
- if onion_utils::route_size_insane(&onion_payloads) {
- return Err(APIError::InvalidRoute{err: "Route size too large considering onion data".to_owned()});
- }
- let onion_packet = onion_utils::construct_onion_packet(onion_payloads, onion_keys, prng_seed, payment_hash);
+
+ let onion_packet = onion_utils::construct_onion_packet(onion_payloads, onion_keys, prng_seed, payment_hash)
+ .map_err(|_| APIError::InvalidRoute { err: "Route size too large considering onion data".to_owned()})?;
let err: Result<(), _> = loop {
let (counterparty_node_id, id) = match self.short_to_chan_info.read().unwrap().get(&path.hops.first().unwrap().short_channel_id) {
/// [`ChannelMonitorUpdateStatus::InProgress`]: crate::chain::ChannelMonitorUpdateStatus::InProgress
pub fn send_payment_with_route(&self, route: &Route, payment_hash: PaymentHash, recipient_onion: RecipientOnionFields, payment_id: PaymentId) -> Result<(), PaymentSendFailure> {
let best_block_height = self.best_block.read().unwrap().height();
- let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
+ let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
self.pending_outbound_payments
.send_payment_with_route(route, payment_hash, recipient_onion, payment_id, &self.entropy_source, &self.node_signer, best_block_height,
|path, payment_hash, recipient_onion, total_value, cur_height, payment_id, keysend_preimage, session_priv|
self.send_payment_along_path(path, payment_hash, recipient_onion, total_value, cur_height, payment_id, keysend_preimage, session_priv))
}
- /// Similar to [`ChannelManager::send_payment`], but will automatically find a route based on
+ /// Similar to [`ChannelManager::send_payment_with_route`], but will automatically find a route based on
/// `route_params` and retry failed payment paths based on `retry_strategy`.
pub fn send_payment(&self, payment_hash: PaymentHash, recipient_onion: RecipientOnionFields, payment_id: PaymentId, route_params: RouteParameters, retry_strategy: Retry) -> Result<(), RetryableSendFailure> {
let best_block_height = self.best_block.read().unwrap().height();
- let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
+ let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
self.pending_outbound_payments
.send_payment(payment_hash, recipient_onion, payment_id, retry_strategy, route_params,
&self.router, self.list_usable_channels(), || self.compute_inflight_htlcs(),
#[cfg(test)]
pub(super) fn test_send_payment_internal(&self, route: &Route, payment_hash: PaymentHash, recipient_onion: RecipientOnionFields, keysend_preimage: Option<PaymentPreimage>, payment_id: PaymentId, recv_value_msat: Option<u64>, onion_session_privs: Vec<[u8; 32]>) -> Result<(), PaymentSendFailure> {
let best_block_height = self.best_block.read().unwrap().height();
- let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
+ let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
self.pending_outbound_payments.test_send_payment_internal(route, payment_hash, recipient_onion, keysend_preimage, payment_id, recv_value_msat, onion_session_privs, &self.node_signer, best_block_height,
|path, payment_hash, recipient_onion, total_value, cur_height, payment_id, keysend_preimage, session_priv|
self.send_payment_along_path(path, payment_hash, recipient_onion, total_value, cur_height, payment_id, keysend_preimage, session_priv))
/// [`Event::PaymentFailed`]: events::Event::PaymentFailed
/// [`Event::PaymentSent`]: events::Event::PaymentSent
pub fn abandon_payment(&self, payment_id: PaymentId) {
- let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
+ let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
self.pending_outbound_payments.abandon_payment(payment_id, PaymentFailureReason::UserAbandoned, &self.pending_events);
}
/// [`send_payment`]: Self::send_payment
pub fn send_spontaneous_payment(&self, route: &Route, payment_preimage: Option<PaymentPreimage>, recipient_onion: RecipientOnionFields, payment_id: PaymentId) -> Result<PaymentHash, PaymentSendFailure> {
let best_block_height = self.best_block.read().unwrap().height();
- let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
+ let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
self.pending_outbound_payments.send_spontaneous_payment_with_route(
route, payment_preimage, recipient_onion, payment_id, &self.entropy_source,
&self.node_signer, best_block_height,
/// [`PaymentParameters::for_keysend`]: crate::routing::router::PaymentParameters::for_keysend
pub fn send_spontaneous_payment_with_retry(&self, payment_preimage: Option<PaymentPreimage>, recipient_onion: RecipientOnionFields, payment_id: PaymentId, route_params: RouteParameters, retry_strategy: Retry) -> Result<PaymentHash, RetryableSendFailure> {
let best_block_height = self.best_block.read().unwrap().height();
- let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
+ let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
self.pending_outbound_payments.send_spontaneous_payment(payment_preimage, recipient_onion,
payment_id, retry_strategy, route_params, &self.router, self.list_usable_channels(),
|| self.compute_inflight_htlcs(), &self.entropy_source, &self.node_signer, best_block_height,
/// us to easily discern them from real payments.
pub fn send_probe(&self, path: Path) -> Result<(PaymentHash, PaymentId), PaymentSendFailure> {
let best_block_height = self.best_block.read().unwrap().height();
- let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
+ let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
self.pending_outbound_payments.send_probe(path, self.probing_cookie_secret, &self.entropy_source, &self.node_signer, best_block_height,
|path, payment_hash, recipient_onion, total_value, cur_height, payment_id, keysend_preimage, session_priv|
self.send_payment_along_path(path, payment_hash, recipient_onion, total_value, cur_height, payment_id, keysend_preimage, session_priv))
/// [`Event::FundingGenerationReady`]: crate::events::Event::FundingGenerationReady
/// [`Event::ChannelClosed`]: crate::events::Event::ChannelClosed
pub fn funding_transaction_generated(&self, temporary_channel_id: &[u8; 32], counterparty_node_id: &PublicKey, funding_transaction: Transaction) -> Result<(), APIError> {
- let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
+ let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
for inp in funding_transaction.input.iter() {
if inp.witness.is_empty() {
}
}
self.funding_transaction_generated_intern(temporary_channel_id, counterparty_node_id, funding_transaction, |chan, tx| {
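+			// Check the output count up front so the `idx as u16` cast below can never
+			// silently truncate an output index.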
+ if tx.output.len() > u16::max_value() as usize {
+ return Err(APIError::APIMisuseError {
+ err: "Transaction had more than 2^16 outputs, which is not supported".to_owned()
+ });
+ }
+
let mut output_index = None;
let expected_spk = chan.get_funding_redeemscript().to_v0_p2wsh();
for (idx, outp) in tx.output.iter().enumerate() {
err: "Multiple outputs matched the expected script and value".to_owned()
});
}
- if idx > u16::max_value() as usize {
- return Err(APIError::APIMisuseError {
- err: "Transaction had more than 2^16 outputs, which is not supported".to_owned()
- });
- }
output_index = Some(idx as u16);
}
}
});
}
- let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(
- &self.total_consistency_lock, &self.persistence_notifier,
- );
+ let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
let per_peer_state = self.per_peer_state.read().unwrap();
let peer_state_mutex = per_peer_state.get(counterparty_node_id)
.ok_or_else(|| APIError::ChannelUnavailable { err: format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id) })?;
// TODO: when we move to deciding the best outbound channel at forward time, only take
// `next_node_id` and not `next_hop_channel_id`
pub fn forward_intercepted_htlc(&self, intercept_id: InterceptId, next_hop_channel_id: &[u8; 32], next_node_id: PublicKey, amt_to_forward_msat: u64) -> Result<(), APIError> {
- let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
+ let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
let next_hop_scid = {
let peer_state_lock = self.per_peer_state.read().unwrap();
///
/// [`HTLCIntercepted`]: events::Event::HTLCIntercepted
pub fn fail_intercepted_htlc(&self, intercept_id: InterceptId) -> Result<(), APIError> {
- let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
+ let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
let payment = self.pending_intercepted_htlcs.lock().unwrap().remove(&intercept_id)
.ok_or_else(|| APIError::APIMisuseError {
/// Should only really ever be called in response to a PendingHTLCsForwardable event.
/// Will likely generate further events.
pub fn process_pending_htlc_forwards(&self) {
- let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
+ let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
- let mut new_events = Vec::new();
+ let mut new_events = VecDeque::new();
let mut failed_forwards = Vec::new();
let mut phantom_receives: Vec<(u64, OutPoint, u128, Vec<(PendingHTLCInfo, u64)>)> = Vec::new();
{
htlcs.push(claimable_htlc);
let amount_msat = htlcs.iter().map(|htlc| htlc.value).sum();
htlcs.iter_mut().for_each(|htlc| htlc.total_value_received = Some(amount_msat));
- new_events.push(events::Event::PaymentClaimable {
+ new_events.push_back((events::Event::PaymentClaimable {
receiver_node_id: Some(receiver_node_id),
payment_hash,
purpose: purpose(),
via_user_channel_id: Some(prev_user_channel_id),
claim_deadline: Some(earliest_expiry - HTLC_FAIL_BACK_BUFFER),
onion_fields: claimable_payment.onion_fields.clone(),
- });
+ }, None));
payment_claimable_generated = true;
} else {
// Nothing to do - we haven't reached the total
htlcs: vec![claimable_htlc],
});
let prev_channel_id = prev_funding_outpoint.to_channel_id();
- new_events.push(events::Event::PaymentClaimable {
+ new_events.push_back((events::Event::PaymentClaimable {
receiver_node_id: Some(receiver_node_id),
payment_hash,
amount_msat,
via_user_channel_id: Some(prev_user_channel_id),
claim_deadline,
onion_fields: Some(onion_fields),
- });
+ }, None));
},
hash_map::Entry::Occupied(_) => {
log_trace!(self.logger, "Failing new keysend HTLC with payment_hash {} for a duplicative payment hash", log_bytes!(payment_hash.0));
events.append(&mut new_events);
}
- /// Free the background events, generally called from timer_tick_occurred.
- ///
- /// Exposed for testing to allow us to process events quickly without generating accidental
- /// BroadcastChannelUpdate events in timer_tick_occurred.
+ /// Free the background events, generally called from [`PersistenceNotifierGuard`] constructors.
///
/// Expects the caller to have a total_consistency_lock read lock.
- fn process_background_events(&self) -> bool {
+ fn process_background_events(&self) -> NotifyOption {
+ debug_assert_ne!(self.total_consistency_lock.held_by_thread(), LockHeldState::NotHeldByThread);
+
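+		// Record (in debug builds) that startup background events have been handled, allowing
+		// `handle_new_monitor_update!` to assert that no monitor update is applied before them.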
+ #[cfg(debug_assertions)]
+ self.background_events_processed_since_startup.store(true, Ordering::Release);
+
let mut background_events = Vec::new();
mem::swap(&mut *self.pending_background_events.lock().unwrap(), &mut background_events);
if background_events.is_empty() {
- return false;
+ return NotifyOption::SkipPersist;
}
for event in background_events.drain(..) {
match event {
- BackgroundEvent::ClosingMonitorUpdate((funding_txo, update)) => {
+ BackgroundEvent::ClosingMonitorUpdateRegeneratedOnStartup((funding_txo, update)) => {
// The channel has already been closed, so no use bothering to care about the
				// monitor update completing.
let _ = self.chain_monitor.update_channel(funding_txo, &update);
},
+ BackgroundEvent::MonitorUpdateRegeneratedOnStartup { counterparty_node_id, funding_txo, update } => {
+ let update_res = self.chain_monitor.update_channel(funding_txo, &update);
+
+ let res = {
+ let per_peer_state = self.per_peer_state.read().unwrap();
+ if let Some(peer_state_mutex) = per_peer_state.get(&counterparty_node_id) {
+ let mut peer_state_lock = peer_state_mutex.lock().unwrap();
+ let peer_state = &mut *peer_state_lock;
+ match peer_state.channel_by_id.entry(funding_txo.to_channel_id()) {
+ hash_map::Entry::Occupied(mut chan) => {
+ handle_new_monitor_update!(self, update_res, update.update_id, peer_state_lock, peer_state, per_peer_state, chan)
+ },
+ hash_map::Entry::Vacant(_) => Ok(()),
+ }
+ } else { Ok(()) }
+ };
+ // TODO: If this channel has since closed, we're likely providing a payment
+ // preimage update, which we must ensure is durable! We currently don't,
+ // however, ensure that.
+ if res.is_err() {
+ log_error!(self.logger,
+ "Failed to provide ChannelMonitorUpdate to closed channel! This likely lost us a payment preimage!");
+ }
+ let _ = handle_error!(self, res, counterparty_node_id);
+ },
}
}
- true
+ NotifyOption::DoPersist
}
#[cfg(any(test, feature = "_test_utils"))]
/// Process background events, for functional testing
pub fn test_process_background_events(&self) {
- self.process_background_events();
+ let _lck = self.total_consistency_lock.read().unwrap();
+ let _ = self.process_background_events();
}
fn update_channel_fee(&self, chan_id: &[u8; 32], chan: &mut Channel<<SP::Target as SignerProvider>::Signer>, new_feerate: u32) -> NotifyOption {
/// it wants to detect). Thus, we have a variant exposed here for its benefit.
pub fn maybe_update_chan_fees(&self) {
PersistenceNotifierGuard::optionally_notify(&self.total_consistency_lock, &self.persistence_notifier, || {
- let mut should_persist = NotifyOption::SkipPersist;
+ let mut should_persist = self.process_background_events();
let new_feerate = self.fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::Normal);
/// [`ChannelConfig`]: crate::util::config::ChannelConfig
pub fn timer_tick_occurred(&self) {
PersistenceNotifierGuard::optionally_notify(&self.total_consistency_lock, &self.persistence_notifier, || {
- let mut should_persist = NotifyOption::SkipPersist;
- if self.process_background_events() { should_persist = NotifyOption::DoPersist; }
+ let mut should_persist = self.process_background_events();
let new_feerate = self.fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::Normal);
chan.maybe_expire_prev_config();
+ if chan.should_disconnect_peer_awaiting_response() {
+ log_debug!(self.logger, "Disconnecting peer {} due to not making any progress on channel {}",
+ counterparty_node_id, log_bytes!(*chan_id));
+ pending_msg_events.push(MessageSendEvent::HandleError {
+ node_id: counterparty_node_id,
+ action: msgs::ErrorAction::DisconnectPeerWithWarning {
+ msg: msgs::WarningMessage {
+ channel_id: *chan_id,
+ data: "Disconnecting due to timeout awaiting response".to_owned(),
+ },
+ },
+ });
+ }
+
true
});
if peer_state.ok_to_remove(true) {
///
/// See [`FailureCode`] for valid failure codes.
pub fn fail_htlc_backwards_with_reason(&self, payment_hash: &PaymentHash, failure_code: FailureCode) {
- let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
+ let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
let removed_source = self.claimable_payments.lock().unwrap().claimable_payments.remove(payment_hash);
if let Some(payment) = removed_source {
mem::drop(forward_htlcs);
if push_forward_ev { self.push_pending_forwards_ev(); }
let mut pending_events = self.pending_events.lock().unwrap();
- pending_events.push(events::Event::HTLCHandlingFailed {
+ pending_events.push_back((events::Event::HTLCHandlingFailed {
prev_channel_id: outpoint.to_channel_id(),
failed_next_destination: destination,
- });
+ }, None));
},
}
}
pub fn claim_funds(&self, payment_preimage: PaymentPreimage) {
let payment_hash = PaymentHash(Sha256::hash(&payment_preimage.0).into_inner());
- let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
+ let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
let mut sources = {
let mut claimable_payments = self.claimable_payments.lock().unwrap();
Some(claimed_htlc_value - forwarded_htlc_value)
} else { None };
- let prev_channel_id = Some(prev_outpoint.to_channel_id());
- let next_channel_id = Some(next_channel_id);
-
- Some(MonitorUpdateCompletionAction::EmitEvent { event: events::Event::PaymentForwarded {
- fee_earned_msat,
- claim_from_onchain_tx: from_onchain,
- prev_channel_id,
- next_channel_id,
- outbound_amount_forwarded_msat: forwarded_htlc_value_msat,
- }})
+ Some(MonitorUpdateCompletionAction::EmitEventAndFreeOtherChannel {
+ event: events::Event::PaymentForwarded {
+ fee_earned_msat,
+ claim_from_onchain_tx: from_onchain,
+ prev_channel_id: Some(prev_outpoint.to_channel_id()),
+ next_channel_id: Some(next_channel_id),
+ outbound_amount_forwarded_msat: forwarded_htlc_value_msat,
+ },
+ downstream_counterparty_and_funding_outpoint: None,
+ })
} else { None }
});
if let Err((pk, err)) = res {
MonitorUpdateCompletionAction::PaymentClaimed { payment_hash } => {
let payment = self.claimable_payments.lock().unwrap().pending_claiming_payments.remove(&payment_hash);
if let Some(ClaimingPayment { amount_msat, payment_purpose: purpose, receiver_node_id }) = payment {
- self.pending_events.lock().unwrap().push(events::Event::PaymentClaimed {
+ self.pending_events.lock().unwrap().push_back((events::Event::PaymentClaimed {
payment_hash, purpose, amount_msat, receiver_node_id: Some(receiver_node_id),
- });
+ }, None));
}
},
- MonitorUpdateCompletionAction::EmitEvent { event } => {
- self.pending_events.lock().unwrap().push(event);
+ MonitorUpdateCompletionAction::EmitEventAndFreeOtherChannel {
+ event, downstream_counterparty_and_funding_outpoint
+ } => {
+ self.pending_events.lock().unwrap().push_back((event, None));
+ if let Some((node_id, funding_outpoint, blocker)) = downstream_counterparty_and_funding_outpoint {
+ self.handle_monitor_update_release(node_id, funding_outpoint, Some(blocker));
+ }
},
}
}
if let Some(tx) = funding_broadcastable {
log_info!(self.logger, "Broadcasting funding transaction with txid {}", tx.txid());
- self.tx_broadcaster.broadcast_transaction(&tx);
+ self.tx_broadcaster.broadcast_transactions(&[&tx]);
}
{
}
fn do_accept_inbound_channel(&self, temporary_channel_id: &[u8; 32], counterparty_node_id: &PublicKey, accept_0conf: bool, user_channel_id: u128) -> Result<(), APIError> {
- let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
+ let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
let peers_without_funded_channels = self.peers_without_funded_channels(|peer| !peer.channel_by_id.is_empty());
let per_peer_state = self.per_peer_state.read().unwrap();
});
} else {
let mut pending_events = self.pending_events.lock().unwrap();
- pending_events.push(
- events::Event::OpenChannelRequest {
- temporary_channel_id: msg.temporary_channel_id.clone(),
- counterparty_node_id: counterparty_node_id.clone(),
- funding_satoshis: msg.funding_satoshis,
- push_msat: msg.push_msat,
- channel_type: channel.get_channel_type().clone(),
- }
- );
+ pending_events.push_back((events::Event::OpenChannelRequest {
+ temporary_channel_id: msg.temporary_channel_id.clone(),
+ counterparty_node_id: counterparty_node_id.clone(),
+ funding_satoshis: msg.funding_satoshis,
+ push_msat: msg.push_msat,
+ channel_type: channel.get_channel_type().clone(),
+ }, None));
}
entry.insert(channel);
}
};
let mut pending_events = self.pending_events.lock().unwrap();
- pending_events.push(events::Event::FundingGenerationReady {
+ pending_events.push_back((events::Event::FundingGenerationReady {
temporary_channel_id: msg.temporary_channel_id,
counterparty_node_id: *counterparty_node_id,
channel_value_satoshis: value,
output_script,
user_channel_id: user_id,
- });
+ }, None));
Ok(())
}
};
if let Some(broadcast_tx) = tx {
log_info!(self.logger, "Broadcasting {}", log_tx!(broadcast_tx));
- self.tx_broadcaster.broadcast_transaction(&broadcast_tx);
+ self.tx_broadcaster.broadcast_transactions(&[&broadcast_tx]);
}
if let Some(chan) = chan_option {
if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
match peer_state.channel_by_id.entry(msg.channel_id) {
hash_map::Entry::Occupied(mut chan) => {
let funding_txo = chan.get().get_funding_txo();
- let monitor_update = try_chan_entry!(self, chan.get_mut().commitment_signed(&msg, &self.logger), chan);
- let update_res = self.chain_monitor.update_channel(funding_txo.unwrap(), monitor_update);
- let update_id = monitor_update.update_id;
- handle_new_monitor_update!(self, update_res, update_id, peer_state_lock,
- peer_state, per_peer_state, chan)
+ let monitor_update_opt = try_chan_entry!(self, chan.get_mut().commitment_signed(&msg, &self.logger), chan);
+ if let Some(monitor_update) = monitor_update_opt {
+ let update_res = self.chain_monitor.update_channel(funding_txo.unwrap(), monitor_update);
+ let update_id = monitor_update.update_id;
+ handle_new_monitor_update!(self, update_res, update_id, peer_state_lock,
+ peer_state, per_peer_state, chan)
+ } else { Ok(()) }
},
hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
}
fn forward_htlcs(&self, per_source_pending_forwards: &mut [(u64, OutPoint, u128, Vec<(PendingHTLCInfo, u64)>)]) {
for &mut (prev_short_channel_id, prev_funding_outpoint, prev_user_channel_id, ref mut pending_forwards) in per_source_pending_forwards {
let mut push_forward_event = false;
- let mut new_intercept_events = Vec::new();
+ let mut new_intercept_events = VecDeque::new();
let mut failed_intercept_forwards = Vec::new();
if !pending_forwards.is_empty() {
for (forward_info, prev_htlc_id) in pending_forwards.drain(..) {
let mut pending_intercepts = self.pending_intercepted_htlcs.lock().unwrap();
match pending_intercepts.entry(intercept_id) {
hash_map::Entry::Vacant(entry) => {
- new_intercept_events.push(events::Event::HTLCIntercepted {
+ new_intercept_events.push_back((events::Event::HTLCIntercepted {
requested_next_hop_scid: scid,
payment_hash: forward_info.payment_hash,
inbound_amount_msat: forward_info.incoming_amt_msat.unwrap(),
expected_outbound_amount_msat: forward_info.outgoing_amt_msat,
intercept_id
- });
+ }, None));
entry.insert(PendingAddHTLCInfo {
prev_short_channel_id, prev_funding_outpoint, prev_htlc_id, prev_user_channel_id, forward_info });
},
fn push_pending_forwards_ev(&self) {
let mut pending_events = self.pending_events.lock().unwrap();
let forward_ev_exists = pending_events.iter()
- .find(|ev| if let events::Event::PendingHTLCsForwardable { .. } = ev { true } else { false })
+ .find(|(ev, _)| if let events::Event::PendingHTLCsForwardable { .. } = ev { true } else { false })
.is_some();
if !forward_ev_exists {
- pending_events.push(events::Event::PendingHTLCsForwardable {
+ pending_events.push_back((events::Event::PendingHTLCsForwardable {
time_forwardable:
Duration::from_millis(MIN_HTLC_RELAY_HOLDING_CELL_MILLIS),
- });
- }
+ }, None));
+ }
+ }
+
+ /// Checks whether [`ChannelMonitorUpdate`]s generated by the receipt of a remote
+ /// [`msgs::RevokeAndACK`] should be held for the given channel until some other event
+ /// completes. Note that this needs to happen in the same [`PeerState`] mutex as any release of
+ /// the [`ChannelMonitorUpdate`] in question.
+ fn raa_monitor_updates_held(&self,
+ actions_blocking_raa_monitor_updates: &BTreeMap<[u8; 32], Vec<RAAMonitorUpdateBlockingAction>>,
+ channel_funding_outpoint: OutPoint, counterparty_node_id: PublicKey
+ ) -> bool {
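+		// An RAA monitor update is held back if either (a) another channel's action explicitly
+		// blocks this channel's RAA monitor updates, or (b) an event whose completion will
+		// release this channel's monitor update has not yet been handled by the user.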
+ actions_blocking_raa_monitor_updates
+ .get(&channel_funding_outpoint.to_channel_id()).map(|v| !v.is_empty()).unwrap_or(false)
+ || self.pending_events.lock().unwrap().iter().any(|(_, action)| {
+ action == &Some(EventCompletionAction::ReleaseRAAChannelMonitorUpdate {
+ channel_funding_outpoint,
+ counterparty_node_id,
+ })
+ })
}
fn internal_revoke_and_ack(&self, counterparty_node_id: &PublicKey, msg: &msgs::RevokeAndACK) -> Result<(), MsgHandleErrInternal> {
match peer_state.channel_by_id.entry(msg.channel_id) {
hash_map::Entry::Occupied(mut chan) => {
let funding_txo = chan.get().get_funding_txo();
- let (htlcs_to_fail, monitor_update) = try_chan_entry!(self, chan.get_mut().revoke_and_ack(&msg, &self.logger), chan);
- let update_res = self.chain_monitor.update_channel(funding_txo.unwrap(), monitor_update);
- let update_id = monitor_update.update_id;
- let res = handle_new_monitor_update!(self, update_res, update_id,
- peer_state_lock, peer_state, per_peer_state, chan);
+ let (htlcs_to_fail, monitor_update_opt) = try_chan_entry!(self, chan.get_mut().revoke_and_ack(&msg, &self.logger), chan);
+ let res = if let Some(monitor_update) = monitor_update_opt {
+ let update_res = self.chain_monitor.update_channel(funding_txo.unwrap(), monitor_update);
+ let update_id = monitor_update.update_id;
+ handle_new_monitor_update!(self, update_res, update_id,
+ peer_state_lock, peer_state, per_peer_state, chan)
+ } else { Ok(()) };
(htlcs_to_fail, res)
},
hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
/// update events as a separate process method here.
#[cfg(fuzzing)]
pub fn process_monitor_events(&self) {
- PersistenceNotifierGuard::optionally_notify(&self.total_consistency_lock, &self.persistence_notifier, || {
- if self.process_pending_monitor_events() {
- NotifyOption::DoPersist
- } else {
- NotifyOption::SkipPersist
- }
- });
+ let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
+ self.process_pending_monitor_events();
}
/// Check the holding cell in each channel and free any pending HTLCs in them if possible.
self.issue_channel_close_events(chan, ClosureReason::CooperativeClosure);
log_info!(self.logger, "Broadcasting {}", log_tx!(tx));
- self.tx_broadcaster.broadcast_transaction(&tx);
+ self.tx_broadcaster.broadcast_transactions(&[&tx]);
update_maps_on_chan_removal!(self, chan);
false
} else { true }
// Channel::force_shutdown tries to make us do) as we may still be in initialization,
// so we track the update internally and handle it when the user next calls
// timer_tick_occurred, guaranteeing we're running normally.
- if let Some((funding_txo, update)) = failure.0.take() {
+ if let Some((counterparty_node_id, funding_txo, update)) = failure.0.take() {
assert_eq!(update.updates.len(), 1);
if let ChannelMonitorUpdateStep::ChannelForceClosed { should_broadcast } = update.updates[0] {
assert!(should_broadcast);
} else { unreachable!(); }
- self.pending_background_events.lock().unwrap().push(BackgroundEvent::ClosingMonitorUpdate((funding_txo, update)));
+ self.pending_background_events.lock().unwrap().push(
+ BackgroundEvent::MonitorUpdateRegeneratedOnStartup {
+ counterparty_node_id, funding_txo, update
+ });
}
self.finish_force_close_channel(failure);
}
let payment_secret = PaymentSecret(self.entropy_source.get_secure_random_bytes());
- let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
+ let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
let mut payment_secrets = self.pending_inbound_payments.lock().unwrap();
match payment_secrets.entry(payment_hash) {
hash_map::Entry::Vacant(e) => {
/// Gets a fake short channel id for use in receiving [phantom node payments]. These fake scids
/// are used when constructing the phantom invoice's route hints.
///
- /// [phantom node payments]: crate::chain::keysinterface::PhantomKeysManager
+ /// [phantom node payments]: crate::sign::PhantomKeysManager
pub fn get_phantom_scid(&self) -> u64 {
let best_block_height = self.best_block.read().unwrap().height();
let short_to_chan_info = self.short_to_chan_info.read().unwrap();
/// Gets route hints for use in receiving [phantom node payments].
///
- /// [phantom node payments]: crate::chain::keysinterface::PhantomKeysManager
+ /// [phantom node payments]: crate::sign::PhantomKeysManager
pub fn get_phantom_route_hints(&self) -> PhantomRouteHints {
PhantomRouteHints {
channels: self.list_usable_channels(),
#[cfg(feature = "_test_utils")]
pub fn push_pending_event(&self, event: events::Event) {
let mut events = self.pending_events.lock().unwrap();
- events.push(event);
+ events.push_back((event, None));
}
#[cfg(test)]
pub fn pop_pending_event(&self) -> Option<events::Event> {
let mut events = self.pending_events.lock().unwrap();
- if events.is_empty() { None } else { Some(events.remove(0)) }
+ events.pop_front().map(|(e, _)| e)
}
#[cfg(test)]
self.pending_outbound_payments.clear_pending_payments()
}
+ /// When something which was blocking a channel from updating its [`ChannelMonitor`] (e.g. an
+ /// [`Event`] being handled) completes, this should be called to restore the channel to normal
+ /// operation. It will double-check that nothing *else* is also blocking the same channel from
+	/// making progress and then lets any blocked [`ChannelMonitorUpdate`]s fly.
+ fn handle_monitor_update_release(&self, counterparty_node_id: PublicKey, channel_funding_outpoint: OutPoint, mut completed_blocker: Option<RAAMonitorUpdateBlockingAction>) {
+ let mut errors = Vec::new();
+ loop {
+ let per_peer_state = self.per_peer_state.read().unwrap();
+ if let Some(peer_state_mtx) = per_peer_state.get(&counterparty_node_id) {
+ let mut peer_state_lck = peer_state_mtx.lock().unwrap();
+ let peer_state = &mut *peer_state_lck;
+
+ if let Some(blocker) = completed_blocker.take() {
+ // Only do this on the first iteration of the loop.
+ if let Some(blockers) = peer_state.actions_blocking_raa_monitor_updates
+ .get_mut(&channel_funding_outpoint.to_channel_id())
+ {
+ blockers.retain(|iter| iter != &blocker);
+ }
+ }
+
+ if self.raa_monitor_updates_held(&peer_state.actions_blocking_raa_monitor_updates,
+ channel_funding_outpoint, counterparty_node_id) {
+ // Check that, while holding the peer lock, we don't have anything else
+ // blocking monitor updates for this channel. If we do, release the monitor
+ // update(s) when those blockers complete.
+ log_trace!(self.logger, "Delaying monitor unlock for channel {} as another channel's mon update needs to complete first",
+ log_bytes!(&channel_funding_outpoint.to_channel_id()[..]));
+ break;
+ }
+
+ if let hash_map::Entry::Occupied(mut chan) = peer_state.channel_by_id.entry(channel_funding_outpoint.to_channel_id()) {
+ debug_assert_eq!(chan.get().get_funding_txo().unwrap(), channel_funding_outpoint);
+ if let Some((monitor_update, further_update_exists)) = chan.get_mut().unblock_next_blocked_monitor_update() {
+ log_debug!(self.logger, "Unlocking monitor updating for channel {} and updating monitor",
+ log_bytes!(&channel_funding_outpoint.to_channel_id()[..]));
+ let update_res = self.chain_monitor.update_channel(channel_funding_outpoint, monitor_update);
+ let update_id = monitor_update.update_id;
+ if let Err(e) = handle_new_monitor_update!(self, update_res, update_id,
+ peer_state_lck, peer_state, per_peer_state, chan)
+ {
+ errors.push((e, counterparty_node_id));
+ }
+ if further_update_exists {
+ // If there are more `ChannelMonitorUpdate`s to process, restart at the
+ // top of the loop.
+ continue;
+ }
+ } else {
+ log_trace!(self.logger, "Unlocked monitor updating for channel {} without monitors to update",
+ log_bytes!(&channel_funding_outpoint.to_channel_id()[..]));
+ }
+ }
+ } else {
+ log_debug!(self.logger,
+ "Got a release post-RAA monitor update for peer {} but the channel is gone",
+ log_pubkey!(counterparty_node_id));
+ }
+ break;
+ }
+ for (err, counterparty_node_id) in errors {
+ let res = Err::<(), _>(err);
+ let _ = handle_error!(self, res, counterparty_node_id);
+ }
+ }
+
+ fn handle_post_event_actions(&self, actions: Vec<EventCompletionAction>) {
+ for action in actions {
+ match action {
+ EventCompletionAction::ReleaseRAAChannelMonitorUpdate {
+ channel_funding_outpoint, counterparty_node_id
+ } => {
+ self.handle_monitor_update_release(counterparty_node_id, channel_funding_outpoint, None);
+ }
+ }
+ }
+ }
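+	// Note: completion actions ride along with their events in the `pending_events` queue and
+	// are processed here only after the corresponding event has been handled.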
+
/// Processes any events asynchronously in the order they were generated since the last call
/// using the given event handler.
///
fn get_and_clear_pending_msg_events(&self) -> Vec<MessageSendEvent> {
let events = RefCell::new(Vec::new());
PersistenceNotifierGuard::optionally_notify(&self.total_consistency_lock, &self.persistence_notifier, || {
- let mut result = NotifyOption::SkipPersist;
+ let mut result = self.process_background_events();
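+			// Seed `result` with the outcome of background-event processing so that a persist
+			// required there is not masked by a later `SkipPersist`.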
// TODO: This behavior should be documented. It's unintuitive that we query
// ChannelMonitors when clearing other events.
}
fn block_disconnected(&self, header: &BlockHeader, height: u32) {
- let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
+ let _persistence_guard = PersistenceNotifierGuard::optionally_notify(&self.total_consistency_lock,
+ &self.persistence_notifier, || -> NotifyOption { NotifyOption::DoPersist });
let new_height = height - 1;
{
let mut best_block = self.best_block.write().unwrap();
let block_hash = header.block_hash();
log_trace!(self.logger, "{} transactions included in block {} at height {} provided", txdata.len(), block_hash, height);
- let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
+ let _persistence_guard = PersistenceNotifierGuard::optionally_notify(&self.total_consistency_lock,
+ &self.persistence_notifier, || -> NotifyOption { NotifyOption::DoPersist });
self.do_chain_event(Some(height), |channel| channel.transactions_confirmed(&block_hash, height, txdata, self.genesis_hash.clone(), &self.node_signer, &self.default_configuration, &self.logger)
.map(|(a, b)| (a, Vec::new(), b)));
let block_hash = header.block_hash();
log_trace!(self.logger, "New best block: {} at height {}", block_hash, height);
- let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
-
+ let _persistence_guard = PersistenceNotifierGuard::optionally_notify(&self.total_consistency_lock,
+ &self.persistence_notifier, || -> NotifyOption { NotifyOption::DoPersist });
*self.best_block.write().unwrap() = BestBlock::new(block_hash, height);
self.do_chain_event(Some(height), |channel| channel.best_block_updated(height, header.time, self.genesis_hash.clone(), &self.node_signer, &self.default_configuration, &self.logger));
}
fn transaction_unconfirmed(&self, txid: &Txid) {
- let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
+ let _persistence_guard = PersistenceNotifierGuard::optionally_notify(&self.total_consistency_lock,
+ &self.persistence_notifier, || -> NotifyOption { NotifyOption::DoPersist });
self.do_chain_event(None, |channel| {
if let Some(funding_txo) = channel.get_funding_txo() {
if funding_txo.txid == *txid {
L::Target: Logger,
{
fn handle_open_channel(&self, counterparty_node_id: &PublicKey, msg: &msgs::OpenChannel) {
- let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
+ let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
let _ = handle_error!(self, self.internal_open_channel(counterparty_node_id, msg), *counterparty_node_id);
}
+ fn handle_open_channel_v2(&self, counterparty_node_id: &PublicKey, msg: &msgs::OpenChannelV2) {
+ let _: Result<(), _> = handle_error!(self, Err(MsgHandleErrInternal::send_err_msg_no_close(
+ "Dual-funded channels not supported".to_owned(),
+ msg.temporary_channel_id.clone())), *counterparty_node_id);
+ }
+
fn handle_accept_channel(&self, counterparty_node_id: &PublicKey, msg: &msgs::AcceptChannel) {
- let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
+ let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
let _ = handle_error!(self, self.internal_accept_channel(counterparty_node_id, msg), *counterparty_node_id);
}
+ fn handle_accept_channel_v2(&self, counterparty_node_id: &PublicKey, msg: &msgs::AcceptChannelV2) {
+ let _: Result<(), _> = handle_error!(self, Err(MsgHandleErrInternal::send_err_msg_no_close(
+ "Dual-funded channels not supported".to_owned(),
+ msg.temporary_channel_id.clone())), *counterparty_node_id);
+ }
+
fn handle_funding_created(&self, counterparty_node_id: &PublicKey, msg: &msgs::FundingCreated) {
- let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
+ let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
let _ = handle_error!(self, self.internal_funding_created(counterparty_node_id, msg), *counterparty_node_id);
}
fn handle_funding_signed(&self, counterparty_node_id: &PublicKey, msg: &msgs::FundingSigned) {
- let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
+ let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
let _ = handle_error!(self, self.internal_funding_signed(counterparty_node_id, msg), *counterparty_node_id);
}
fn handle_channel_ready(&self, counterparty_node_id: &PublicKey, msg: &msgs::ChannelReady) {
- let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
+ let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
let _ = handle_error!(self, self.internal_channel_ready(counterparty_node_id, msg), *counterparty_node_id);
}
fn handle_shutdown(&self, counterparty_node_id: &PublicKey, msg: &msgs::Shutdown) {
- let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
+ let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
let _ = handle_error!(self, self.internal_shutdown(counterparty_node_id, msg), *counterparty_node_id);
}
fn handle_closing_signed(&self, counterparty_node_id: &PublicKey, msg: &msgs::ClosingSigned) {
- let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
+ let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
let _ = handle_error!(self, self.internal_closing_signed(counterparty_node_id, msg), *counterparty_node_id);
}
fn handle_update_add_htlc(&self, counterparty_node_id: &PublicKey, msg: &msgs::UpdateAddHTLC) {
- let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
+ let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
let _ = handle_error!(self, self.internal_update_add_htlc(counterparty_node_id, msg), *counterparty_node_id);
}
fn handle_update_fulfill_htlc(&self, counterparty_node_id: &PublicKey, msg: &msgs::UpdateFulfillHTLC) {
- let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
+ let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
let _ = handle_error!(self, self.internal_update_fulfill_htlc(counterparty_node_id, msg), *counterparty_node_id);
}
fn handle_update_fail_htlc(&self, counterparty_node_id: &PublicKey, msg: &msgs::UpdateFailHTLC) {
- let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
+ let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
let _ = handle_error!(self, self.internal_update_fail_htlc(counterparty_node_id, msg), *counterparty_node_id);
}
fn handle_update_fail_malformed_htlc(&self, counterparty_node_id: &PublicKey, msg: &msgs::UpdateFailMalformedHTLC) {
- let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
+ let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
let _ = handle_error!(self, self.internal_update_fail_malformed_htlc(counterparty_node_id, msg), *counterparty_node_id);
}
fn handle_commitment_signed(&self, counterparty_node_id: &PublicKey, msg: &msgs::CommitmentSigned) {
- let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
+ let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
let _ = handle_error!(self, self.internal_commitment_signed(counterparty_node_id, msg), *counterparty_node_id);
}
fn handle_revoke_and_ack(&self, counterparty_node_id: &PublicKey, msg: &msgs::RevokeAndACK) {
- let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
+ let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
let _ = handle_error!(self, self.internal_revoke_and_ack(counterparty_node_id, msg), *counterparty_node_id);
}
fn handle_update_fee(&self, counterparty_node_id: &PublicKey, msg: &msgs::UpdateFee) {
- let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
+ let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
let _ = handle_error!(self, self.internal_update_fee(counterparty_node_id, msg), *counterparty_node_id);
}
fn handle_announcement_signatures(&self, counterparty_node_id: &PublicKey, msg: &msgs::AnnouncementSignatures) {
- let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
+ let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
let _ = handle_error!(self, self.internal_announcement_signatures(counterparty_node_id, msg), *counterparty_node_id);
}
fn handle_channel_update(&self, counterparty_node_id: &PublicKey, msg: &msgs::ChannelUpdate) {
PersistenceNotifierGuard::optionally_notify(&self.total_consistency_lock, &self.persistence_notifier, || {
+ let force_persist = self.process_background_events();
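+			// A persist required by background-event processing must not be masked by a
+			// `SkipPersist` from the channel update handling below.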
if let Ok(persist) = handle_error!(self, self.internal_channel_update(counterparty_node_id, msg), *counterparty_node_id) {
- persist
+ if force_persist == NotifyOption::DoPersist { NotifyOption::DoPersist } else { persist }
} else {
NotifyOption::SkipPersist
}
}
fn handle_channel_reestablish(&self, counterparty_node_id: &PublicKey, msg: &msgs::ChannelReestablish) {
- let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
+ let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
let _ = handle_error!(self, self.internal_channel_reestablish(counterparty_node_id, msg), *counterparty_node_id);
}
fn peer_disconnected(&self, counterparty_node_id: &PublicKey) {
- let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
+ let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
let mut failed_channels = Vec::new();
let mut per_peer_state = self.per_peer_state.write().unwrap();
let remove_peer = {
});
pending_msg_events.retain(|msg| {
match msg {
+ // V1 Channel Establishment
&events::MessageSendEvent::SendAcceptChannel { .. } => false,
&events::MessageSendEvent::SendOpenChannel { .. } => false,
&events::MessageSendEvent::SendFundingCreated { .. } => false,
&events::MessageSendEvent::SendFundingSigned { .. } => false,
+ // V2 Channel Establishment
+ &events::MessageSendEvent::SendAcceptChannelV2 { .. } => false,
+ &events::MessageSendEvent::SendOpenChannelV2 { .. } => false,
+ // Common Channel Establishment
&events::MessageSendEvent::SendChannelReady { .. } => false,
&events::MessageSendEvent::SendAnnouncementSignatures { .. } => false,
+ // Interactive Transaction Construction
+ &events::MessageSendEvent::SendTxAddInput { .. } => false,
+ &events::MessageSendEvent::SendTxAddOutput { .. } => false,
+ &events::MessageSendEvent::SendTxRemoveInput { .. } => false,
+ &events::MessageSendEvent::SendTxRemoveOutput { .. } => false,
+ &events::MessageSendEvent::SendTxComplete { .. } => false,
+ &events::MessageSendEvent::SendTxSignatures { .. } => false,
+ &events::MessageSendEvent::SendTxInitRbf { .. } => false,
+ &events::MessageSendEvent::SendTxAckRbf { .. } => false,
+ &events::MessageSendEvent::SendTxAbort { .. } => false,
+ // Channel Operations
&events::MessageSendEvent::UpdateHTLCs { .. } => false,
&events::MessageSendEvent::SendRevokeAndACK { .. } => false,
&events::MessageSendEvent::SendClosingSigned { .. } => false,
&events::MessageSendEvent::SendShutdown { .. } => false,
&events::MessageSendEvent::SendChannelReestablish { .. } => false,
+ &events::MessageSendEvent::HandleError { .. } => false,
+ // Gossip
&events::MessageSendEvent::SendChannelAnnouncement { .. } => false,
&events::MessageSendEvent::BroadcastChannelAnnouncement { .. } => true,
&events::MessageSendEvent::BroadcastChannelUpdate { .. } => true,
&events::MessageSendEvent::BroadcastNodeAnnouncement { .. } => true,
&events::MessageSendEvent::SendChannelUpdate { .. } => false,
- &events::MessageSendEvent::HandleError { .. } => false,
&events::MessageSendEvent::SendChannelRangeQuery { .. } => false,
&events::MessageSendEvent::SendShortIdsQuery { .. } => false,
&events::MessageSendEvent::SendReplyChannelRange { .. } => false,
return Err(());
}
- let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
+ let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
// If we have too many peers connected which don't have funded channels, disconnect the
// peer immediately (as long as it doesn't have funded channels). If we have a bunch of
latest_features: init_msg.features.clone(),
pending_msg_events: Vec::new(),
monitor_update_blocked_actions: BTreeMap::new(),
+ actions_blocking_raa_monitor_updates: BTreeMap::new(),
is_connected: true,
}));
},
}
fn handle_error(&self, counterparty_node_id: &PublicKey, msg: &msgs::ErrorMessage) {
- let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
+ let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
if msg.channel_id == [0; 32] {
let channel_ids: Vec<[u8; 32]> = {
fn provided_init_features(&self, _their_init_features: &PublicKey) -> InitFeatures {
provided_init_features(&self.default_configuration)
}
+
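+	// `ChannelManager` only ever operates on the single chain it was constructed with, so the
+	// set of supported chains is exactly our genesis hash.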
+ fn get_genesis_hashes(&self) -> Option<Vec<ChainHash>> {
+ Some(vec![ChainHash::from(&self.genesis_hash[..])])
+ }
+
+ fn handle_tx_add_input(&self, counterparty_node_id: &PublicKey, msg: &msgs::TxAddInput) {
+ let _: Result<(), _> = handle_error!(self, Err(MsgHandleErrInternal::send_err_msg_no_close(
+ "Dual-funded channels not supported".to_owned(),
+ msg.channel_id.clone())), *counterparty_node_id);
+ }
+
+ fn handle_tx_add_output(&self, counterparty_node_id: &PublicKey, msg: &msgs::TxAddOutput) {
+ let _: Result<(), _> = handle_error!(self, Err(MsgHandleErrInternal::send_err_msg_no_close(
+ "Dual-funded channels not supported".to_owned(),
+ msg.channel_id.clone())), *counterparty_node_id);
+ }
+
+ fn handle_tx_remove_input(&self, counterparty_node_id: &PublicKey, msg: &msgs::TxRemoveInput) {
+ let _: Result<(), _> = handle_error!(self, Err(MsgHandleErrInternal::send_err_msg_no_close(
+ "Dual-funded channels not supported".to_owned(),
+ msg.channel_id.clone())), *counterparty_node_id);
+ }
+
+ fn handle_tx_remove_output(&self, counterparty_node_id: &PublicKey, msg: &msgs::TxRemoveOutput) {
+ let _: Result<(), _> = handle_error!(self, Err(MsgHandleErrInternal::send_err_msg_no_close(
+ "Dual-funded channels not supported".to_owned(),
+ msg.channel_id.clone())), *counterparty_node_id);
+ }
+
+ fn handle_tx_complete(&self, counterparty_node_id: &PublicKey, msg: &msgs::TxComplete) {
+ let _: Result<(), _> = handle_error!(self, Err(MsgHandleErrInternal::send_err_msg_no_close(
+ "Dual-funded channels not supported".to_owned(),
+ msg.channel_id.clone())), *counterparty_node_id);
+ }
+
+ fn handle_tx_signatures(&self, counterparty_node_id: &PublicKey, msg: &msgs::TxSignatures) {
+ let _: Result<(), _> = handle_error!(self, Err(MsgHandleErrInternal::send_err_msg_no_close(
+ "Dual-funded channels not supported".to_owned(),
+ msg.channel_id.clone())), *counterparty_node_id);
+ }
+
+ fn handle_tx_init_rbf(&self, counterparty_node_id: &PublicKey, msg: &msgs::TxInitRbf) {
+ let _: Result<(), _> = handle_error!(self, Err(MsgHandleErrInternal::send_err_msg_no_close(
+ "Dual-funded channels not supported".to_owned(),
+ msg.channel_id.clone())), *counterparty_node_id);
+ }
+
+ fn handle_tx_ack_rbf(&self, counterparty_node_id: &PublicKey, msg: &msgs::TxAckRbf) {
+ let _: Result<(), _> = handle_error!(self, Err(MsgHandleErrInternal::send_err_msg_no_close(
+ "Dual-funded channels not supported".to_owned(),
+ msg.channel_id.clone())), *counterparty_node_id);
+ }
+
+ fn handle_tx_abort(&self, counterparty_node_id: &PublicKey, msg: &msgs::TxAbort) {
+ let _: Result<(), _> = handle_error!(self, Err(MsgHandleErrInternal::send_err_msg_no_close(
+ "Dual-funded channels not supported".to_owned(),
+ msg.channel_id.clone())), *counterparty_node_id);
+ }
}
/// Fetches the set of [`NodeFeatures`] flags which are provided by or required by
// should also add the corresponding (optional) bit to the [`ChannelMessageHandler`] impl for
// [`ErroringMessageHandler`].
let mut features = InitFeatures::empty();
- features.set_data_loss_protect_optional();
+ features.set_data_loss_protect_required();
features.set_upfront_shutdown_script_optional();
features.set_variable_length_onion_required();
features.set_static_remote_key_required();
return Err(DecodeError::InvalidValue);
}
if let Some(params) = payment_params.as_mut() {
- if params.final_cltv_expiry_delta == 0 {
- params.final_cltv_expiry_delta = path.final_cltv_expiry_delta().ok_or(DecodeError::InvalidValue)?;
+ if let Payee::Clear { ref mut final_cltv_expiry_delta, .. } = params.payee {
+ if final_cltv_expiry_delta == &0 {
+ *final_cltv_expiry_delta = path.final_cltv_expiry_delta().ok_or(DecodeError::InvalidValue)?;
+ }
}
}
Ok(HTLCSource::OutboundRoute {
}
let events = self.pending_events.lock().unwrap();
- (events.len() as u64).write(writer)?;
- for event in events.iter() {
- event.write(writer)?;
- }
-
- let background_events = self.pending_background_events.lock().unwrap();
- (background_events.len() as u64).write(writer)?;
- for event in background_events.iter() {
- match event {
- BackgroundEvent::ClosingMonitorUpdate((funding_txo, monitor_update)) => {
- 0u8.write(writer)?;
- funding_txo.write(writer)?;
- monitor_update.write(writer)?;
- },
+	// LDK versions prior to 0.0.115 don't support post-event actions, so if there are no
+	// actions at all we skip writing the required TLV, keeping the data readable by older
+	// versions. Otherwise, pre-0.0.115 versions will refuse to read the new ChannelManager.
+ let events_not_backwards_compatible = events.iter().any(|(_, action)| action.is_some());
+ if events_not_backwards_compatible {
+		// If we're going to write an even TLV that will overwrite our events anyway, we might
+		// as well save the space and not write any events here.
+ 0u64.write(writer)?;
+ } else {
+ (events.len() as u64).write(writer)?;
+ for (event, _) in events.iter() {
+ event.write(writer)?;
}
}
+	// LDK versions prior to 0.0.116 wrote the `pending_background_events`
+	// `MonitorUpdateRegeneratedOnStartup`s here. However, there was never a reason to do so:
+	// the closing monitor updates were always effectively replayed on startup (either directly
+	// by calling `broadcast_latest_holder_commitment_txn` on a `ChannelMonitor` during
+	// deserialization or, in 0.0.115, by regenerating the monitor update itself).
+ 0u64.write(writer)?;
+
// Prior to 0.0.111 we tracked node_announcement serials here, however that now happens in
// `PeerManager`, and thus we simply write the `highest_seen_timestamp` twice, which is
// likely to be identical.
(5, self.our_network_pubkey, required),
(6, monitor_update_blocked_actions_per_peer, option),
(7, self.fake_scid_rand_bytes, required),
+ (8, if events_not_backwards_compatible { Some(&*events) } else { None }, option),
(9, htlc_purposes, vec_type),
(11, self.probing_cookie_secret, required),
(13, htlc_onion_fields, optional_vec),
}
}
+impl Writeable for VecDeque<(Event, Option<EventCompletionAction>)> {
+ fn write<W: Writer>(&self, w: &mut W) -> Result<(), io::Error> {
+ (self.len() as u64).write(w)?;
+ for (event, action) in self.iter() {
+ event.write(w)?;
+ action.write(w)?;
+ #[cfg(debug_assertions)] {
+ // Events are MaybeReadable, in some cases indicating that they shouldn't actually
+ // be persisted and are regenerated on restart. However, if such an event has a
+ // post-event-handling action we'll write nothing for the event and would have to
+ // either forget the action or fail on deserialization (which we do below). Thus,
+ // check that the event is sane here.
+ let event_encoded = event.encode();
+ let event_read: Option<Event> =
+ MaybeReadable::read(&mut &event_encoded[..]).unwrap();
+ if action.is_some() { assert!(event_read.is_some()); }
+ }
+ }
+ Ok(())
+ }
+}
+impl Readable for VecDeque<(Event, Option<EventCompletionAction>)> {
+ fn read<R: Read>(reader: &mut R) -> Result<Self, DecodeError> {
+ let len: u64 = Readable::read(reader)?;
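+		// Cap the up-front allocation so that a corrupted length prefix cannot force a huge
+		// allocation; the deque still grows past this as elements are actually read.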
+ const MAX_ALLOC_SIZE: u64 = 1024 * 16;
+ let mut events: Self = VecDeque::with_capacity(cmp::min(
+ MAX_ALLOC_SIZE/mem::size_of::<(events::Event, Option<EventCompletionAction>)>() as u64,
+ len) as usize);
+ for _ in 0..len {
+ let ev_opt = MaybeReadable::read(reader)?;
+ let action = Readable::read(reader)?;
+ if let Some(ev) = ev_opt {
+ events.push_back((ev, action));
+ } else if action.is_some() {
+ return Err(DecodeError::InvalidValue);
+ }
+ }
+ Ok(events)
+ }
+}
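+
+// A hedged sketch of the round-trip property the `debug_assertions` block above enforces:
+// any event paired with a completion action must survive write-then-read, e.g.
+//
+//     let mut buf = Vec::new();
+//     events.write(&mut buf)?;
+//     let read_back: VecDeque<(Event, Option<EventCompletionAction>)> =
+//         Readable::read(&mut &buf[..])?;
+//     debug_assert_eq!(read_back.len(), events.len());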
+
/// Arguments for the creation of a ChannelManager that are not deserialized.
///
/// At a high-level, the process for deserializing a ChannelManager and resuming normal operation
let mut peer_channels: HashMap<PublicKey, HashMap<[u8; 32], Channel<<SP::Target as SignerProvider>::Signer>>> = HashMap::with_capacity(cmp::min(channel_count as usize, 128));
let mut id_to_peer = HashMap::with_capacity(cmp::min(channel_count as usize, 128));
let mut short_to_chan_info = HashMap::with_capacity(cmp::min(channel_count as usize, 128));
- let mut channel_closures = Vec::new();
+ let mut channel_closures = VecDeque::new();
let mut pending_background_events = Vec::new();
for _ in 0..channel_count {
let mut channel: Channel<<SP::Target as SignerProvider>::Signer> = Channel::read(reader, (
let funding_txo = channel.get_funding_txo().ok_or(DecodeError::InvalidValue)?;
funding_txo_set.insert(funding_txo.clone());
if let Some(ref mut monitor) = args.channel_monitors.get_mut(&funding_txo) {
- if channel.get_cur_holder_commitment_transaction_number() < monitor.get_cur_holder_commitment_number() ||
- channel.get_revoked_counterparty_commitment_transaction_number() < monitor.get_min_seen_secret() ||
- channel.get_cur_counterparty_commitment_transaction_number() < monitor.get_cur_counterparty_commitment_number() ||
- channel.get_latest_monitor_update_id() > monitor.get_latest_update_id() {
+ if channel.get_latest_complete_monitor_update_id() > monitor.get_latest_update_id() {
// If the channel is ahead of the monitor, return InvalidValue:
log_error!(args.logger, "A ChannelMonitor is stale compared to the current ChannelManager! This indicates a potentially-critical violation of the chain::Watch API!");
log_error!(args.logger, " The ChannelMonitor for channel {} is at update_id {} but the ChannelManager is at update_id {}.",
- log_bytes!(channel.channel_id()), monitor.get_latest_update_id(), channel.get_latest_monitor_update_id());
+ log_bytes!(channel.channel_id()), monitor.get_latest_update_id(), channel.get_latest_complete_monitor_update_id());
log_error!(args.logger, " The chain::Watch API *requires* that monitors are persisted durably before returning,");
log_error!(args.logger, " client applications must ensure that ChannelMonitor data is always available and the latest to avoid funds loss!");
log_error!(args.logger, " Without the latest ChannelMonitor we cannot continue without risking funds.");
log_error!(args.logger, " The ChannelMonitor for channel {} is at update_id {} but the ChannelManager is at update_id {}.",
log_bytes!(channel.channel_id()), monitor.get_latest_update_id(), channel.get_latest_monitor_update_id());
let (monitor_update, mut new_failed_htlcs) = channel.force_shutdown(true);
- if let Some(monitor_update) = monitor_update {
- pending_background_events.push(BackgroundEvent::ClosingMonitorUpdate(monitor_update));
+ if let Some((counterparty_node_id, funding_txo, update)) = monitor_update {
+ pending_background_events.push(BackgroundEvent::MonitorUpdateRegeneratedOnStartup {
+ counterparty_node_id, funding_txo, update
+ });
}
failed_htlcs.append(&mut new_failed_htlcs);
- channel_closures.push(events::Event::ChannelClosed {
+ channel_closures.push_back((events::Event::ChannelClosed {
channel_id: channel.channel_id(),
user_channel_id: channel.get_user_id(),
reason: ClosureReason::OutdatedChannelManager
- });
+ }, None));
for (channel_htlc_source, payment_hash) in channel.inflight_htlc_sources() {
let mut found_htlc = false;
for (monitor_htlc_source, _) in monitor.get_all_current_outbound_htlcs() {
}
}
} else {
- log_info!(args.logger, "Successfully loaded channel {}", log_bytes!(channel.channel_id()));
+ log_info!(args.logger, "Successfully loaded channel {} at update_id {} against monitor at update id {}",
+ log_bytes!(channel.channel_id()), channel.get_latest_monitor_update_id(),
+ monitor.get_latest_update_id());
+ channel.complete_all_mon_updates_through(monitor.get_latest_update_id());
if let Some(short_channel_id) = channel.get_short_channel_id() {
short_to_chan_info.insert(short_channel_id, (channel.get_counterparty_node_id(), channel.channel_id()));
}
// was in-progress, we never broadcasted the funding transaction and can still
// safely discard the channel.
let _ = channel.force_shutdown(false);
- channel_closures.push(events::Event::ChannelClosed {
+ channel_closures.push_back((events::Event::ChannelClosed {
channel_id: channel.channel_id(),
user_channel_id: channel.get_user_id(),
reason: ClosureReason::DisconnectedPeer,
- });
+ }, None));
} else {
log_error!(args.logger, "Missing ChannelMonitor for channel {} needed by ChannelManager.", log_bytes!(channel.channel_id()));
log_error!(args.logger, " The chain::Watch API *requires* that monitors are persisted durably before returning,");
for (funding_txo, _) in args.channel_monitors.iter() {
if !funding_txo_set.contains(funding_txo) {
+ log_info!(args.logger, "Queueing monitor update to ensure missing channel {} is force closed",
+ log_bytes!(funding_txo.to_channel_id()));
let monitor_update = ChannelMonitorUpdate {
update_id: CLOSED_CHANNEL_UPDATE_ID,
updates: vec![ChannelMonitorUpdateStep::ChannelForceClosed { should_broadcast: true }],
};
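+			// (Only the funding outpoint is known for a fully-missing channel, so we use the
+			// variant which does not carry a counterparty node id.)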
- pending_background_events.push(BackgroundEvent::ClosingMonitorUpdate((*funding_txo, monitor_update)));
+ pending_background_events.push(BackgroundEvent::ClosingMonitorUpdateRegeneratedOnStartup((*funding_txo, monitor_update)));
}
}
latest_features: Readable::read(reader)?,
pending_msg_events: Vec::new(),
monitor_update_blocked_actions: BTreeMap::new(),
+ actions_blocking_raa_monitor_updates: BTreeMap::new(),
is_connected: false,
};
per_peer_state.insert(peer_pubkey, Mutex::new(peer_state));
}
let event_count: u64 = Readable::read(reader)?;
- let mut pending_events_read: Vec<events::Event> = Vec::with_capacity(cmp::min(event_count as usize, MAX_ALLOC_SIZE/mem::size_of::<events::Event>()));
+ let mut pending_events_read: VecDeque<(events::Event, Option<EventCompletionAction>)> =
+ VecDeque::with_capacity(cmp::min(event_count as usize, MAX_ALLOC_SIZE/mem::size_of::<(events::Event, Option<EventCompletionAction>)>()));
for _ in 0..event_count {
match MaybeReadable::read(reader)? {
- Some(event) => pending_events_read.push(event),
+ Some(event) => pending_events_read.push_back((event, None)),
None => continue,
}
}
for _ in 0..background_event_count {
match <u8 as Readable>::read(reader)? {
0 => {
- let (funding_txo, monitor_update): (OutPoint, ChannelMonitorUpdate) = (Readable::read(reader)?, Readable::read(reader)?);
- if pending_background_events.iter().find(|e| {
- let BackgroundEvent::ClosingMonitorUpdate((pending_funding_txo, pending_monitor_update)) = e;
- *pending_funding_txo == funding_txo && *pending_monitor_update == monitor_update
- }).is_none() {
- pending_background_events.push(BackgroundEvent::ClosingMonitorUpdate((funding_txo, monitor_update)));
- }
+				// LDK versions prior to 0.0.116 wrote pending `MonitorUpdateRegeneratedOnStartup`s here.
+				// However, we never actually needed them (and never did), as we regenerate all
+				// on-startup monitor updates.
+ let _: OutPoint = Readable::read(reader)?;
+ let _: ChannelMonitorUpdate = Readable::read(reader)?;
}
_ => return Err(DecodeError::InvalidValue),
}
}
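+	// Channels may have in-flight `ChannelMonitorUpdate`s which were never marked complete
+	// before we were serialized; replay any unblocked ones now so the monitors catch back up.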
+ for (node_id, peer_mtx) in per_peer_state.iter() {
+ let peer_state = peer_mtx.lock().unwrap();
+ for (_, chan) in peer_state.channel_by_id.iter() {
+ for update in chan.uncompleted_unblocked_mon_updates() {
+ if let Some(funding_txo) = chan.get_funding_txo() {
+ log_trace!(args.logger, "Replaying ChannelMonitorUpdate {} for channel {}",
+ update.update_id, log_bytes!(funding_txo.to_channel_id()));
+ pending_background_events.push(
+ BackgroundEvent::MonitorUpdateRegeneratedOnStartup {
+ counterparty_node_id: *node_id, funding_txo, update: update.clone(),
+ });
+ } else {
+ return Err(DecodeError::InvalidValue);
+ }
+ }
+ }
+ }
+
let _last_node_announcement_serial: u32 = Readable::read(reader)?; // Only used < 0.0.111
let highest_seen_timestamp: u32 = Readable::read(reader)?;
let mut claimable_htlc_purposes = None;
let mut claimable_htlc_onion_fields = None;
let mut pending_claiming_payments = Some(HashMap::new());
- let mut monitor_update_blocked_actions_per_peer = Some(Vec::new());
+ let mut monitor_update_blocked_actions_per_peer: Option<Vec<(_, BTreeMap<_, Vec<_>>)>> = Some(Vec::new());
+ let mut events_override = None;
read_tlv_fields!(reader, {
(1, pending_outbound_payments_no_retry, option),
(2, pending_intercepted_htlcs, option),
(5, received_network_pubkey, option),
(6, monitor_update_blocked_actions_per_peer, option),
(7, fake_scid_rand_bytes, option),
+ (8, events_override, option),
(9, claimable_htlc_purposes, vec_type),
(11, probing_cookie_secret, option),
(13, claimable_htlc_onion_fields, optional_vec),
probing_cookie_secret = Some(args.entropy_source.get_secure_random_bytes());
}
+ if let Some(events) = events_override {
+ pending_events_read = events;
+ }
+
if !channel_closures.is_empty() {
pending_events_read.append(&mut channel_closures);
}
if pending_forward_matches_htlc(&htlc_info) {
log_info!(args.logger, "Removing pending intercepted HTLC with hash {} as it was forwarded to the closed channel {}",
log_bytes!(htlc.payment_hash.0), log_bytes!(monitor.get_funding_txo().0.to_channel_id()));
- pending_events_read.retain(|event| {
+ pending_events_read.retain(|(event, _)| {
if let Event::HTLCIntercepted { intercept_id: ev_id, .. } = event {
intercepted_id != ev_id
} else { true }
// shut down before the timer hit. Either way, set the time_forwardable to a small
// constant as enough time has likely passed that we should simply handle the forwards
// now, or at least after the user gets a chance to reconnect to our peers.
- pending_events_read.push(events::Event::PendingHTLCsForwardable {
+ pending_events_read.push_back((events::Event::PendingHTLCsForwardable {
time_forwardable: Duration::from_secs(2),
- });
+ }, None));
}
let inbound_pmt_key_material = args.node_signer.get_inbound_payment_key_material();
previous_hop_monitor.provide_payment_preimage(&payment_hash, &payment_preimage, &args.tx_broadcaster, &bounded_fee_estimator, &args.logger);
}
}
- pending_events_read.push(events::Event::PaymentClaimed {
+ pending_events_read.push_back((events::Event::PaymentClaimed {
receiver_node_id,
payment_hash,
purpose: payment.purpose,
amount_msat: claimable_amt_msat,
- });
+ }, None));
}
}
}
for (node_id, monitor_update_blocked_actions) in monitor_update_blocked_actions_per_peer.unwrap() {
- if let Some(peer_state) = per_peer_state.get_mut(&node_id) {
+ if let Some(peer_state) = per_peer_state.get(&node_id) {
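+			// The RAA monitor update blockers are not themselves serialized; rebuild them here
+			// from any blocked actions which reference a downstream channel.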
+ for (_, actions) in monitor_update_blocked_actions.iter() {
+ for action in actions.iter() {
+ if let MonitorUpdateCompletionAction::EmitEventAndFreeOtherChannel {
+ downstream_counterparty_and_funding_outpoint:
+ Some((blocked_node_id, blocked_channel_outpoint, blocking_action)), ..
+ } = action {
+ if let Some(blocked_peer_state) = per_peer_state.get(&blocked_node_id) {
+ blocked_peer_state.lock().unwrap().actions_blocking_raa_monitor_updates
+ .entry(blocked_channel_outpoint.to_channel_id())
+ .or_insert_with(Vec::new).push(blocking_action.clone());
+ }
+ }
+ }
+ }
peer_state.lock().unwrap().monitor_update_blocked_actions = monitor_update_blocked_actions;
} else {
log_error!(args.logger, "Got blocked actions without a per-peer-state for {}", node_id);
pending_events_processor: AtomicBool::new(false),
pending_background_events: Mutex::new(pending_background_events),
total_consistency_lock: RwLock::new(()),
+ #[cfg(debug_assertions)]
+ background_events_processed_since_startup: AtomicBool::new(false),
persistence_notifier: Notifier::new(),
entropy_source: args.entropy_source,
use crate::util::errors::APIError;
use crate::util::test_utils;
use crate::util::config::ChannelConfig;
- use crate::chain::keysinterface::EntropySource;
+ use crate::sign::EntropySource;
#[test]
fn test_notify_limits() {
};
let route = find_route(
&nodes[0].node.get_our_node_id(), &route_params, &nodes[0].network_graph,
- None, nodes[0].logger, &scorer, &random_seed_bytes
+ None, nodes[0].logger, &scorer, &(), &random_seed_bytes
).unwrap();
nodes[0].node.send_spontaneous_payment(&route, Some(payment_preimage),
RecipientOnionFields::spontaneous_empty(), PaymentId(payment_preimage.0)).unwrap();
let payment_preimage = PaymentPreimage([42; 32]);
let route = find_route(
&nodes[0].node.get_our_node_id(), &route_params, &nodes[0].network_graph,
- None, nodes[0].logger, &scorer, &random_seed_bytes
+ None, nodes[0].logger, &scorer, &(), &random_seed_bytes
).unwrap();
let payment_hash = nodes[0].node.send_spontaneous_payment(&route, Some(payment_preimage),
RecipientOnionFields::spontaneous_empty(), PaymentId(payment_preimage.0)).unwrap();
let random_seed_bytes = chanmon_cfgs[1].keys_manager.get_secure_random_bytes();
let route = find_route(
&payer_pubkey, &route_params, &network_graph, Some(&first_hops.iter().collect::<Vec<_>>()),
- nodes[0].logger, &scorer, &random_seed_bytes
+ nodes[0].logger, &scorer, &(), &random_seed_bytes
).unwrap();
let test_preimage = PaymentPreimage([42; 32]);
let random_seed_bytes = chanmon_cfgs[1].keys_manager.get_secure_random_bytes();
let route = find_route(
&payer_pubkey, &route_params, &network_graph, Some(&first_hops.iter().collect::<Vec<_>>()),
- nodes[0].logger, &scorer, &random_seed_bytes
+ nodes[0].logger, &scorer, &(), &random_seed_bytes
).unwrap();
let test_preimage = PaymentPreimage([42; 32]);
&SecretKey::from_slice(&nodes[1].keys_manager.get_secure_random_bytes()).unwrap());
peer_pks.push(random_pk);
nodes[1].node.peer_connected(&random_pk, &msgs::Init {
- features: nodes[0].node.init_features(), remote_network_address: None }, true).unwrap();
+ features: nodes[0].node.init_features(), networks: None, remote_network_address: None
+ }, true).unwrap();
}
let last_random_pk = PublicKey::from_secret_key(&nodes[0].node.secp_ctx,
&SecretKey::from_slice(&nodes[1].keys_manager.get_secure_random_bytes()).unwrap());
nodes[1].node.peer_connected(&last_random_pk, &msgs::Init {
- features: nodes[0].node.init_features(), remote_network_address: None }, true).unwrap_err();
+ features: nodes[0].node.init_features(), networks: None, remote_network_address: None
+ }, true).unwrap_err();
// Also importantly, because nodes[0] isn't "protected", we will refuse a reconnection from
// them if we have too many un-channel'd peers.
if let Event::ChannelClosed { .. } = ev { } else { panic!(); }
}
nodes[1].node.peer_connected(&last_random_pk, &msgs::Init {
- features: nodes[0].node.init_features(), remote_network_address: None }, true).unwrap();
+ features: nodes[0].node.init_features(), networks: None, remote_network_address: None
+ }, true).unwrap();
nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init {
- features: nodes[0].node.init_features(), remote_network_address: None }, true).unwrap_err();
+ features: nodes[0].node.init_features(), networks: None, remote_network_address: None
+ }, true).unwrap_err();
// but of course if the connection is outbound its allowed...
nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init {
- features: nodes[0].node.init_features(), remote_network_address: None }, false).unwrap();
+ features: nodes[0].node.init_features(), networks: None, remote_network_address: None
+ }, false).unwrap();
nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
// Now nodes[0] is disconnected but still has a pending, un-funded channel lying around.
// "protected" and can connect again.
mine_transaction(&nodes[1], funding_tx.as_ref().unwrap());
nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init {
- features: nodes[0].node.init_features(), remote_network_address: None }, true).unwrap();
+ features: nodes[0].node.init_features(), networks: None, remote_network_address: None
+ }, true).unwrap();
get_event_msg!(nodes[1], MessageSendEvent::SendChannelReestablish, nodes[0].node.get_our_node_id());
// Further, because the first channel was funded, we can open another channel with
let random_pk = PublicKey::from_secret_key(&nodes[0].node.secp_ctx,
&SecretKey::from_slice(&nodes[1].keys_manager.get_secure_random_bytes()).unwrap());
nodes[1].node.peer_connected(&random_pk, &msgs::Init {
- features: nodes[0].node.init_features(), remote_network_address: None }, true).unwrap();
+ features: nodes[0].node.init_features(), networks: None, remote_network_address: None
+ }, true).unwrap();
nodes[1].node.handle_open_channel(&random_pk, &open_channel_msg);
let events = nodes[1].node.get_and_clear_pending_events();
let last_random_pk = PublicKey::from_secret_key(&nodes[0].node.secp_ctx,
&SecretKey::from_slice(&nodes[1].keys_manager.get_secure_random_bytes()).unwrap());
nodes[1].node.peer_connected(&last_random_pk, &msgs::Init {
- features: nodes[0].node.init_features(), remote_network_address: None }, true).unwrap();
+ features: nodes[0].node.init_features(), networks: None, remote_network_address: None
+ }, true).unwrap();
nodes[1].node.handle_open_channel(&last_random_pk, &open_channel_msg);
let events = nodes[1].node.get_and_clear_pending_events();
match events[0] {
}
}
-#[cfg(all(any(test, feature = "_test_utils"), feature = "_bench_unstable"))]
+#[cfg(ldk_bench)]
pub mod bench {
use crate::chain::Listen;
use crate::chain::chainmonitor::{ChainMonitor, Persist};
- use crate::chain::keysinterface::{KeysManager, InMemorySigner};
+ use crate::sign::{KeysManager, InMemorySigner};
use crate::events::{Event, MessageSendEvent, MessageSendEventsProvider};
use crate::ln::channelmanager::{BestBlock, ChainParameters, ChannelManager, PaymentHash, PaymentPreimage, PaymentId, RecipientOnionFields, Retry};
use crate::ln::functional_test_utils::*;
use crate::sync::{Arc, Mutex};
- use test::Bencher;
+ use criterion::Criterion;
type Manager<'a, P> = ChannelManager<
&'a ChainMonitor<InMemorySigner, &'a test_utils::TestChainSource,
fn chain_monitor(&self) -> Option<&test_utils::TestChainMonitor> { None }
}
- #[cfg(test)]
- #[bench]
- fn bench_sends(bench: &mut Bencher) {
- bench_two_sends(bench, test_utils::TestPersister::new(), test_utils::TestPersister::new());
+ pub fn bench_sends(bench: &mut Criterion) {
+ bench_two_sends(bench, "bench_sends", test_utils::TestPersister::new(), test_utils::TestPersister::new());
}
- pub fn bench_two_sends<P: Persist<InMemorySigner>>(bench: &mut Bencher, persister_a: P, persister_b: P) {
+ pub fn bench_two_sends<P: Persist<InMemorySigner>>(bench: &mut Criterion, bench_name: &str, persister_a: P, persister_b: P) {
// Do a simple benchmark of sending a payment back and forth between two nodes.
// Note that this is unrealistic as each payment send will require at least two fsync
// calls per node.
});
let node_b_holder = ANodeHolder { node: &node_b };
- node_a.peer_connected(&node_b.get_our_node_id(), &Init { features: node_b.init_features(), remote_network_address: None }, true).unwrap();
- node_b.peer_connected(&node_a.get_our_node_id(), &Init { features: node_a.init_features(), remote_network_address: None }, false).unwrap();
+ node_a.peer_connected(&node_b.get_our_node_id(), &Init {
+ features: node_b.init_features(), networks: None, remote_network_address: None
+ }, true).unwrap();
+ node_b.peer_connected(&node_a.get_our_node_id(), &Init {
+ features: node_a.init_features(), networks: None, remote_network_address: None
+ }, false).unwrap();
node_a.create_channel(node_b.get_our_node_id(), 8_000_000, 100_000_000, 42, None).unwrap();
node_b.handle_open_channel(&node_a.get_our_node_id(), &get_event_msg!(node_a_holder, MessageSendEvent::SendOpenChannel, node_b.get_our_node_id()));
node_a.handle_accept_channel(&node_b.get_our_node_id(), &get_event_msg!(node_b_holder, MessageSendEvent::SendAcceptChannel, node_a.get_our_node_id()));
assert_eq!(&tx_broadcaster.txn_broadcasted.lock().unwrap()[..], &[tx.clone()]);
- let block = Block {
- header: BlockHeader { version: 0x20000000, prev_blockhash: BestBlock::from_network(network).block_hash(), merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42 },
- txdata: vec![tx],
- };
+ let block = create_dummy_block(BestBlock::from_network(network).block_hash(), 42, vec![tx]);
Listen::block_connected(&node_a, &block, 1);
Listen::block_connected(&node_b, &block, 1);
macro_rules! send_payment {
($node_a: expr, $node_b: expr) => {
let payment_params = PaymentParameters::from_node_id($node_b.get_our_node_id(), TEST_FINAL_CLTV)
- .with_features($node_b.invoice_features());
+ .with_bolt11_features($node_b.invoice_features()).unwrap();
let mut payment_preimage = PaymentPreimage([0; 32]);
payment_preimage.0[0..8].copy_from_slice(&payment_count.to_le_bytes());
payment_count += 1;
}
}
- bench.iter(|| {
+ bench.bench_function(bench_name, |b| b.iter(|| {
send_payment!(node_a, node_b);
send_payment!(node_b, node_a);
- });
+ }));
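+		// When built with the `ldk_bench` cfg (typically enabled via RUSTFLAGS="--cfg=ldk_bench"),
+		// this registers under `bench_name` in the criterion harness; the harness wiring itself
+		// lives outside this module.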
}
}