diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs
index 9007188f..7bf437ee 100644
--- a/lightning/src/ln/channelmanager.rs
+++ b/lightning/src/ln/channelmanager.rs
@@ -19,7 +19,7 @@
 use bitcoin::blockdata::block::BlockHeader;
 use bitcoin::blockdata::transaction::Transaction;
-use bitcoin::blockdata::constants::genesis_block;
+use bitcoin::blockdata::constants::{genesis_block, ChainHash};
 use bitcoin::network::constants::Network;
 
 use bitcoin::hashes::Hash;
@@ -40,7 +40,7 @@ use crate::events::{Event, EventHandler, EventsProvider, MessageSendEvent, Messa
 // Since this struct is returned in `list_channels` methods, expose it here in case users want to
 // construct one themselves.
 use crate::ln::{inbound_payment, PaymentHash, PaymentPreimage, PaymentSecret};
-use crate::ln::channel::{Channel, ChannelError, ChannelUpdateStatus, UpdateFulfillCommitFetch};
+use crate::ln::channel::{Channel, ChannelError, ChannelUpdateStatus, ShutdownResult, UpdateFulfillCommitFetch};
 use crate::ln::features::{ChannelFeatures, ChannelTypeFeatures, InitFeatures, NodeFeatures};
 #[cfg(any(feature = "_test_utils", test))]
 use crate::ln::features::InvoiceFeatures;
@@ -56,7 +56,7 @@ use crate::ln::outbound_payment;
 use crate::ln::outbound_payment::{OutboundPayments, PaymentAttempts, PendingOutboundPayment};
 use crate::ln::wire::Encode;
 use crate::sign::{EntropySource, KeysManager, NodeSigner, Recipient, SignerProvider, ChannelSigner, WriteableEcdsaChannelSigner};
-use crate::util::config::{UserConfig, ChannelConfig};
+use crate::util::config::{UserConfig, ChannelConfig, ChannelConfigUpdate};
 use crate::util::wakers::{Future, Notifier};
 use crate::util::scid_utils::fake_scid;
 use crate::util::string::UntrustedString;
@@ -112,6 +112,8 @@ pub(super) enum PendingHTLCRouting {
 		phantom_shared_secret: Option<[u8; 32]>,
 	},
 	ReceiveKeysend {
+		/// This was added in 0.0.116 and will break deserialization on downgrades.
+		payment_data: Option<msgs::FinalOnionHopData>,
 		payment_preimage: PaymentPreimage,
 		payment_metadata: Option<Vec<u8>>,
 		incoming_cltv_expiry: u32, // Used to track when we should expire pending HTLCs that go unclaimed
@@ -359,8 +361,6 @@ pub enum FailureCode {
 	IncorrectOrUnknownPaymentDetails = 0x4000 | 15,
 }
 
-type ShutdownResult = (Option<(OutPoint, ChannelMonitorUpdate)>, Vec<(HTLCSource, PaymentHash, PublicKey, [u8; 32])>);
-
 /// Error type returned across the peer_state mutex boundary. When an Err is generated for a
 /// Channel, we generally end up with a ChannelError::Close for which we have to close the channel
 /// immediately (ie with no further calls on it made). Thus, this step happens inside a
@@ -497,15 +497,34 @@ struct ClaimablePayments {
 	pending_claiming_payments: HashMap<PaymentHash, ClaimingPayment>,
 }
 
-/// Events which we process internally but cannot be procsesed immediately at the generation site
-/// for some reason. They are handled in timer_tick_occurred, so may be processed with
-/// quite some time lag.
+/// Events which we process internally but cannot be processed immediately at the generation site
+/// usually because we're running pre-full-init. They are handled immediately once we detect we are
+/// running normally, and specifically must be processed before any other non-background
+/// [`ChannelMonitorUpdate`]s are applied.
 enum BackgroundEvent {
-	/// Handle a ChannelMonitorUpdate
+	/// Handle a ChannelMonitorUpdate which closes the channel. This is only separated from
+	/// [`Self::MonitorUpdateRegeneratedOnStartup`] as the maybe-non-closing variant needs a public
+	/// key to handle channel resumption, whereas if the channel has been force-closed we do not
+	/// need the counterparty node_id.
+	///
+	/// Note that any such events are lost on shutdown, so in general they must be updates which
+	/// are regenerated on startup.
+	ClosingMonitorUpdateRegeneratedOnStartup((OutPoint, ChannelMonitorUpdate)),
+	/// Handle a ChannelMonitorUpdate which may or may not close the channel and may unblock the
+	/// channel to continue normal operation.
+	///
+	/// In general this should be used rather than
+	/// [`Self::ClosingMonitorUpdateRegeneratedOnStartup`], however in cases where the
+	/// `counterparty_node_id` is not available as the channel has closed from a [`ChannelMonitor`]
+	/// error the other variant is acceptable.
 	///
 	/// Note that any such events are lost on shutdown, so in general they must be updates which
 	/// are regenerated on startup.
-	MonitorUpdateRegeneratedOnStartup((OutPoint, ChannelMonitorUpdate)),
+	MonitorUpdateRegeneratedOnStartup {
+		counterparty_node_id: PublicKey,
+		funding_txo: OutPoint,
+		update: ChannelMonitorUpdate
+	},
 }
 
 #[derive(Debug)]
@@ -515,13 +534,31 @@ pub(crate) enum MonitorUpdateCompletionAction {
 	/// this payment. Note that this is only best-effort. On restart it's possible such a duplicate
 	/// event can be generated.
 	PaymentClaimed { payment_hash: PaymentHash },
-	/// Indicates an [`events::Event`] should be surfaced to the user.
-	EmitEvent { event: events::Event },
+	/// Indicates an [`events::Event`] should be surfaced to the user and possibly resume the
+	/// operation of another channel.
+	///
+	/// This is usually generated when we've forwarded an HTLC and want to block the outbound edge
+	/// from completing a monitor update which removes the payment preimage until the inbound edge
+	/// completes a monitor update containing the payment preimage. In that case, after the inbound
+	/// edge completes, we will surface an [`Event::PaymentForwarded`] as well as unblock the
+	/// outbound edge.
+	EmitEventAndFreeOtherChannel {
+		event: events::Event,
+		downstream_counterparty_and_funding_outpoint: Option<(PublicKey, OutPoint, RAAMonitorUpdateBlockingAction)>,
+	},
 }
 
 impl_writeable_tlv_based_enum_upgradable!(MonitorUpdateCompletionAction,
 	(0, PaymentClaimed) => { (0, payment_hash, required) },
-	(2, EmitEvent) => { (0, event, upgradable_required) },
+	(2, EmitEventAndFreeOtherChannel) => {
+		(0, event, upgradable_required),
+		// LDK prior to 0.0.116 did not have this field as the monitor update application order was
+		// required by clients. If we downgrade to something prior to 0.0.116 this may result in
+		// monitor updates which aren't properly blocked or resumed, however that's fine - we don't
+		// support async monitor updates even in LDK 0.0.116 and once we do we'll require no
+		// downgrades to prior versions.
+		(1, downstream_counterparty_and_funding_outpoint, option),
+	},
 );
 
 #[derive(Clone, Debug, PartialEq, Eq)]
@@ -538,6 +575,36 @@ impl_writeable_tlv_based_enum!(EventCompletionAction,
 	};
 );
 
+#[derive(Clone, PartialEq, Eq, Debug)]
+/// If something is blocked on the completion of an RAA-generated [`ChannelMonitorUpdate`] we track
+/// the blocked action here. See enum variants for more info.
+pub(crate) enum RAAMonitorUpdateBlockingAction {
+	/// A forwarded payment was claimed. We block the downstream channel completing its monitor
+	/// update which removes the HTLC preimage until the upstream channel has gotten the preimage
+	/// durably to disk.
+	ForwardedPaymentInboundClaim {
+		/// The upstream channel ID (i.e. the inbound edge).
+		channel_id: [u8; 32],
+		/// The HTLC ID on the inbound edge.
+		htlc_id: u64,
+	},
+}
+
+impl RAAMonitorUpdateBlockingAction {
+	#[allow(unused)]
+	fn from_prev_hop_data(prev_hop: &HTLCPreviousHopData) -> Self {
+		Self::ForwardedPaymentInboundClaim {
+			channel_id: prev_hop.outpoint.to_channel_id(),
+			htlc_id: prev_hop.htlc_id,
+		}
+	}
+}
+
+impl_writeable_tlv_based_enum!(RAAMonitorUpdateBlockingAction,
+	(0, ForwardedPaymentInboundClaim) => { (0, channel_id, required), (2, htlc_id, required) }
+;);
+
+
 /// State we hold per-peer.
 pub(super) struct PeerState<Signer: ChannelSigner> {
 	/// `temporary_channel_id` or `channel_id` -> `channel`.
@@ -566,6 +633,11 @@ pub(super) struct PeerState<Signer: ChannelSigner> {
 	/// to funding appearing on-chain), the downstream `ChannelMonitor` set is required to ensure
 	/// duplicates do not occur, so such channels should fail without a monitor update completing.
 	monitor_update_blocked_actions: BTreeMap<[u8; 32], Vec<MonitorUpdateCompletionAction>>,
+	/// If another channel's [`ChannelMonitorUpdate`] needs to complete before a channel we have
+	/// with this peer can complete an RAA [`ChannelMonitorUpdate`] (e.g. because the RAA update
+	/// will remove a preimage that needs to be durably in an upstream channel first), we put an
+	/// entry here to note that the channel with the key's ID is blocked on a set of actions.
+	actions_blocking_raa_monitor_updates: BTreeMap<[u8; 32], Vec<RAAMonitorUpdateBlockingAction>>,
 	/// The peer is currently connected (i.e. we've seen a
 	/// [`ChannelMessageHandler::peer_connected`] and no corresponding
 	/// [`ChannelMessageHandler::peer_disconnected`]).
@@ -645,40 +717,44 @@ pub type SimpleArcChannelManager<M, T, F, L> = ChannelManager<
 /// This is not exported to bindings users as Arcs don't make sense in bindings
 pub type SimpleRefChannelManager<'a, 'b, 'c, 'd, 'e, 'f, 'g, 'h, M, T, F, L> = ChannelManager<&'a M, &'b T, &'c KeysManager, &'c KeysManager, &'c KeysManager, &'d F, &'e DefaultRouter<&'f NetworkGraph<&'g L>, &'g L, &'h Mutex<ProbabilisticScorer<&'f NetworkGraph<&'g L>, &'g L>>, ProbabilisticScoringFeeParameters, ProbabilisticScorer<&'f NetworkGraph<&'g L>, &'g L>>, &'g L>;
 
+macro_rules! define_test_pub_trait { ($vis: vis) => {
 /// A trivial trait which describes any [`ChannelManager`] used in testing.
-#[cfg(any(test, feature = "_test_utils"))]
-pub trait AChannelManager {
-	type Watch: chain::Watch<Self::Signer>;
+$vis trait AChannelManager {
+	type Watch: chain::Watch<Self::Signer> + ?Sized;
 	type M: Deref<Target = Self::Watch>;
-	type Broadcaster: BroadcasterInterface;
+	type Broadcaster: BroadcasterInterface + ?Sized;
 	type T: Deref<Target = Self::Broadcaster>;
-	type EntropySource: EntropySource;
+	type EntropySource: EntropySource + ?Sized;
 	type ES: Deref<Target = Self::EntropySource>;
-	type NodeSigner: NodeSigner;
+	type NodeSigner: NodeSigner + ?Sized;
 	type NS: Deref<Target = Self::NodeSigner>;
-	type Signer: WriteableEcdsaChannelSigner;
-	type SignerProvider: SignerProvider<Signer = Self::Signer>;
+	type Signer: WriteableEcdsaChannelSigner + Sized;
+	type SignerProvider: SignerProvider<Signer = Self::Signer> + ?Sized;
 	type SP: Deref<Target = Self::SignerProvider>;
-	type FeeEstimator: FeeEstimator;
+	type FeeEstimator: FeeEstimator + ?Sized;
 	type F: Deref<Target = Self::FeeEstimator>;
-	type Router: Router;
+	type Router: Router + ?Sized;
 	type R: Deref<Target = Self::Router>;
-	type Logger: Logger;
+	type Logger: Logger + ?Sized;
 	type L: Deref<Target = Self::Logger>;
 	fn get_cm(&self) -> &ChannelManager<Self::M, Self::T, Self::ES, Self::NS, Self::SP, Self::F, Self::R, Self::L>;
 }
+} }
 
 #[cfg(any(test, feature = "_test_utils"))]
+define_test_pub_trait!(pub);
+#[cfg(not(any(test, feature = "_test_utils")))]
+define_test_pub_trait!(pub(crate));
 impl<M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Deref, F: Deref, R: Deref, L: Deref> AChannelManager
 for ChannelManager<M, T, ES, NS, SP, F, R, L>
 where
-	M::Target: chain::Watch<<SP::Target as SignerProvider>::Signer> + Sized,
-	T::Target: BroadcasterInterface + Sized,
-	ES::Target: EntropySource + Sized,
-	NS::Target: NodeSigner + Sized,
-	SP::Target: SignerProvider + Sized,
-	F::Target: FeeEstimator + Sized,
-	R::Target: Router + Sized,
-	L::Target: Logger + Sized,
+	M::Target: chain::Watch<<SP::Target as SignerProvider>::Signer>,
+	T::Target: BroadcasterInterface,
+	ES::Target: EntropySource,
+	NS::Target: NodeSigner,
+	SP::Target: SignerProvider,
+	F::Target: FeeEstimator,
+	R::Target: Router,
+	L::Target: Logger,
 {
 	type Watch = M::Target;
 	type M = M;
@@ -964,7 +1040,18 @@ where
 	pending_events: Mutex<VecDeque<(events::Event, Option<EventCompletionAction>)>>,
 	/// A simple atomic flag to ensure only one task at a time can be processing events asynchronously.
 	pending_events_processor: AtomicBool,
+
+	/// If we are running during init (either directly during the deserialization method or in
+	/// block connection methods which run after deserialization but before normal operation) we
+	/// cannot provide the user with [`ChannelMonitorUpdate`]s through the normal update flow -
+	/// prior to normal operation the user may not have loaded the [`ChannelMonitor`]s into their
+	/// [`ChainMonitor`] and thus attempting to update it will fail or panic.
+	///
+	/// Thus, we place them here to be handled as soon as possible once we are running normally.
+	///
 	/// See `ChannelManager` struct-level documentation for lock order requirements.
+	///
+	/// [`ChainMonitor`]: crate::chain::chainmonitor::ChainMonitor
 	pending_background_events: Mutex<Vec<BackgroundEvent>>,
 	/// Used when we have to take a BIG lock to make sure everything is self-consistent.
 	/// Essentially just when we're serializing ourselves out.
@@ -974,6 +1061,9 @@ where
 	/// Notifier the lock contains sends out a notification when the lock is released.
 	total_consistency_lock: RwLock<()>,
 
+	#[cfg(debug_assertions)]
+	background_events_processed_since_startup: AtomicBool,
+
 	persistence_notifier: Notifier,
 
 	entropy_source: ES,
@@ -1000,6 +1090,7 @@ pub struct ChainParameters {
 }
 
 #[derive(Copy, Clone, PartialEq)]
+#[must_use]
 enum NotifyOption {
 	DoPersist,
 	SkipPersist,
@@ -1023,10 +1114,20 @@ struct PersistenceNotifierGuard<'a, F: Fn() -> NotifyOption> {
 }
 
 impl<'a> PersistenceNotifierGuard<'a, fn() -> NotifyOption> {
 	// We don't care what the concrete F is here, it's unused
-	fn notify_on_drop(lock: &'a RwLock<()>, notifier: &'a Notifier) -> PersistenceNotifierGuard<'a, impl Fn() -> NotifyOption> {
-		PersistenceNotifierGuard::optionally_notify(lock, notifier, || -> NotifyOption { NotifyOption::DoPersist })
+	fn notify_on_drop<C: AChannelManager>(cm: &'a C) -> PersistenceNotifierGuard<'a, impl Fn() -> NotifyOption> {
+		let read_guard = cm.get_cm().total_consistency_lock.read().unwrap();
+		let _ = cm.get_cm().process_background_events(); // We always persist
+
+		PersistenceNotifierGuard {
+			persistence_notifier: &cm.get_cm().persistence_notifier,
+			should_persist: || -> NotifyOption { NotifyOption::DoPersist },
+			_read_guard: read_guard,
+		}
+	}
+
+	/// Note that if any [`ChannelMonitorUpdate`]s are possibly generated,
+	/// [`ChannelManager::process_background_events`] MUST be called first.
 	fn optionally_notify<F: Fn() -> NotifyOption>(lock: &'a RwLock<()>, notifier: &'a Notifier, persist_check: F) -> PersistenceNotifierGuard<'a, F> {
 		let read_guard = lock.read().unwrap();
@@ -1274,8 +1375,14 @@ pub struct ChannelDetails {
 	/// the current state and per-HTLC limit(s). This is intended for use when routing, allowing us
 	/// to use a limit as close as possible to the HTLC limit we can currently send.
 	///
-	/// See also [`ChannelDetails::balance_msat`] and [`ChannelDetails::outbound_capacity_msat`].
+	/// See also [`ChannelDetails::next_outbound_htlc_minimum_msat`],
+	/// [`ChannelDetails::balance_msat`], and [`ChannelDetails::outbound_capacity_msat`].
 	pub next_outbound_htlc_limit_msat: u64,
+	/// The minimum value for sending a single HTLC to the remote peer. This is the equivalent of
+	/// [`ChannelDetails::next_outbound_htlc_limit_msat`] but represents a lower-bound, rather than
+	/// an upper-bound. This is intended for use when routing, allowing us to ensure we pick a
+	/// route which is valid.
+	pub next_outbound_htlc_minimum_msat: u64,
 	/// The available inbound capacity for the remote peer to send HTLCs to us. This does not
 	/// include any pending HTLCs which are not yet fully resolved (and, thus, whose balance is not
 	/// available for inclusion in new inbound HTLCs).
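// ---------------------------------------------------------------------------
// [Editor's example - not part of the patch] With `next_outbound_htlc_minimum_msat`
// added alongside the existing `next_outbound_htlc_limit_msat` above, a caller
// doing manual route selection can bound a candidate HTLC amount from both
// sides. A minimal sketch; the helper name is ours, and only the two public
// `ChannelDetails` fields are taken from the patch:

use lightning::ln::channelmanager::ChannelDetails;

/// Returns true if `amt_msat` fits the advertised bounds for a single
/// outbound HTLC over `chan`.
fn htlc_amount_in_bounds(chan: &ChannelDetails, amt_msat: u64) -> bool {
	amt_msat >= chan.next_outbound_htlc_minimum_msat
		&& amt_msat <= chan.next_outbound_htlc_limit_msat
}
// ---------------------------------------------------------------------------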
@@ -1364,48 +1471,49 @@ impl ChannelDetails { let balance = channel.get_available_balances(); let (to_remote_reserve_satoshis, to_self_reserve_satoshis) = - channel.get_holder_counterparty_selected_channel_reserve_satoshis(); + channel.context.get_holder_counterparty_selected_channel_reserve_satoshis(); ChannelDetails { - channel_id: channel.channel_id(), + channel_id: channel.context.channel_id(), counterparty: ChannelCounterparty { - node_id: channel.get_counterparty_node_id(), + node_id: channel.context.get_counterparty_node_id(), features: latest_features, unspendable_punishment_reserve: to_remote_reserve_satoshis, - forwarding_info: channel.counterparty_forwarding_info(), + forwarding_info: channel.context.counterparty_forwarding_info(), // Ensures that we have actually received the `htlc_minimum_msat` value // from the counterparty through the `OpenChannel` or `AcceptChannel` // message (as they are always the first message from the counterparty). // Else `Channel::get_counterparty_htlc_minimum_msat` could return the // default `0` value set by `Channel::new_outbound`. - outbound_htlc_minimum_msat: if channel.have_received_message() { - Some(channel.get_counterparty_htlc_minimum_msat()) } else { None }, - outbound_htlc_maximum_msat: channel.get_counterparty_htlc_maximum_msat(), + outbound_htlc_minimum_msat: if channel.context.have_received_message() { + Some(channel.context.get_counterparty_htlc_minimum_msat()) } else { None }, + outbound_htlc_maximum_msat: channel.context.get_counterparty_htlc_maximum_msat(), }, - funding_txo: channel.get_funding_txo(), + funding_txo: channel.context.get_funding_txo(), // Note that accept_channel (or open_channel) is always the first message, so // `have_received_message` indicates that type negotiation has completed. 
- channel_type: if channel.have_received_message() { Some(channel.get_channel_type().clone()) } else { None }, - short_channel_id: channel.get_short_channel_id(), - outbound_scid_alias: if channel.is_usable() { Some(channel.outbound_scid_alias()) } else { None }, - inbound_scid_alias: channel.latest_inbound_scid_alias(), - channel_value_satoshis: channel.get_value_satoshis(), - feerate_sat_per_1000_weight: Some(channel.get_feerate_sat_per_1000_weight()), + channel_type: if channel.context.have_received_message() { Some(channel.context.get_channel_type().clone()) } else { None }, + short_channel_id: channel.context.get_short_channel_id(), + outbound_scid_alias: if channel.context.is_usable() { Some(channel.context.outbound_scid_alias()) } else { None }, + inbound_scid_alias: channel.context.latest_inbound_scid_alias(), + channel_value_satoshis: channel.context.get_value_satoshis(), + feerate_sat_per_1000_weight: Some(channel.context.get_feerate_sat_per_1000_weight()), unspendable_punishment_reserve: to_self_reserve_satoshis, balance_msat: balance.balance_msat, inbound_capacity_msat: balance.inbound_capacity_msat, outbound_capacity_msat: balance.outbound_capacity_msat, next_outbound_htlc_limit_msat: balance.next_outbound_htlc_limit_msat, - user_channel_id: channel.get_user_id(), - confirmations_required: channel.minimum_depth(), - confirmations: Some(channel.get_funding_tx_confirmations(best_block_height)), - force_close_spend_delay: channel.get_counterparty_selected_contest_delay(), - is_outbound: channel.is_outbound(), - is_channel_ready: channel.is_usable(), - is_usable: channel.is_live(), - is_public: channel.should_announce(), - inbound_htlc_minimum_msat: Some(channel.get_holder_htlc_minimum_msat()), - inbound_htlc_maximum_msat: channel.get_holder_htlc_maximum_msat(), - config: Some(channel.config()), + next_outbound_htlc_minimum_msat: balance.next_outbound_htlc_minimum_msat, + user_channel_id: channel.context.get_user_id(), + confirmations_required: channel.context.minimum_depth(), + confirmations: Some(channel.context.get_funding_tx_confirmations(best_block_height)), + force_close_spend_delay: channel.context.get_counterparty_selected_contest_delay(), + is_outbound: channel.context.is_outbound(), + is_channel_ready: channel.context.is_usable(), + is_usable: channel.context.is_live(), + is_public: channel.context.should_announce(), + inbound_htlc_minimum_msat: Some(channel.context.get_holder_htlc_minimum_msat()), + inbound_htlc_maximum_msat: channel.context.get_holder_htlc_maximum_msat(), + config: Some(channel.context.config()), } } } @@ -1507,9 +1615,9 @@ macro_rules! handle_error { macro_rules! update_maps_on_chan_removal { ($self: expr, $channel: expr) => {{ - $self.id_to_peer.lock().unwrap().remove(&$channel.channel_id()); + $self.id_to_peer.lock().unwrap().remove(&$channel.context.channel_id()); let mut short_to_chan_info = $self.short_to_chan_info.write().unwrap(); - if let Some(short_id) = $channel.get_short_channel_id() { + if let Some(short_id) = $channel.context.get_short_channel_id() { short_to_chan_info.remove(&short_id); } else { // If the channel was never confirmed on-chain prior to its closure, remove the @@ -1518,10 +1626,10 @@ macro_rules! update_maps_on_chan_removal { // also don't want a counterparty to be able to trivially cause a memory leak by simply // opening a million channels with us which are closed before we ever reach the funding // stage. 
- let alias_removed = $self.outbound_scid_aliases.lock().unwrap().remove(&$channel.outbound_scid_alias()); + let alias_removed = $self.outbound_scid_aliases.lock().unwrap().remove(&$channel.context.outbound_scid_alias()); debug_assert!(alias_removed); } - short_to_chan_info.remove(&$channel.outbound_scid_alias()); + short_to_chan_info.remove(&$channel.context.outbound_scid_alias()); }} } @@ -1539,7 +1647,7 @@ macro_rules! convert_chan_err { log_error!($self.logger, "Closing channel {} due to close-required error: {}", log_bytes!($channel_id[..]), msg); update_maps_on_chan_removal!($self, $channel); let shutdown_res = $channel.force_shutdown(true); - (true, MsgHandleErrInternal::from_finish_shutdown(msg, *$channel_id, $channel.get_user_id(), + (true, MsgHandleErrInternal::from_finish_shutdown(msg, *$channel_id, $channel.context.get_user_id(), shutdown_res, $self.get_channel_update_for_broadcast(&$channel).ok())) }, } @@ -1589,18 +1697,18 @@ macro_rules! remove_channel { macro_rules! send_channel_ready { ($self: ident, $pending_msg_events: expr, $channel: expr, $channel_ready_msg: expr) => {{ $pending_msg_events.push(events::MessageSendEvent::SendChannelReady { - node_id: $channel.get_counterparty_node_id(), + node_id: $channel.context.get_counterparty_node_id(), msg: $channel_ready_msg, }); // Note that we may send a `channel_ready` multiple times for a channel if we reconnect, so // we allow collisions, but we shouldn't ever be updating the channel ID pointed to. let mut short_to_chan_info = $self.short_to_chan_info.write().unwrap(); - let outbound_alias_insert = short_to_chan_info.insert($channel.outbound_scid_alias(), ($channel.get_counterparty_node_id(), $channel.channel_id())); - assert!(outbound_alias_insert.is_none() || outbound_alias_insert.unwrap() == ($channel.get_counterparty_node_id(), $channel.channel_id()), + let outbound_alias_insert = short_to_chan_info.insert($channel.context.outbound_scid_alias(), ($channel.context.get_counterparty_node_id(), $channel.context.channel_id())); + assert!(outbound_alias_insert.is_none() || outbound_alias_insert.unwrap() == ($channel.context.get_counterparty_node_id(), $channel.context.channel_id()), "SCIDs should never collide - ensure you weren't behind the chain tip by a full month when creating channels"); - if let Some(real_scid) = $channel.get_short_channel_id() { - let scid_insert = short_to_chan_info.insert(real_scid, ($channel.get_counterparty_node_id(), $channel.channel_id())); - assert!(scid_insert.is_none() || scid_insert.unwrap() == ($channel.get_counterparty_node_id(), $channel.channel_id()), + if let Some(real_scid) = $channel.context.get_short_channel_id() { + let scid_insert = short_to_chan_info.insert(real_scid, ($channel.context.get_counterparty_node_id(), $channel.context.channel_id())); + assert!(scid_insert.is_none() || scid_insert.unwrap() == ($channel.context.get_counterparty_node_id(), $channel.context.channel_id()), "SCIDs should never collide - ensure you weren't behind the chain tip by a full month when creating channels"); } }} @@ -1608,30 +1716,30 @@ macro_rules! send_channel_ready { macro_rules! 
emit_channel_pending_event { ($locked_events: expr, $channel: expr) => { - if $channel.should_emit_channel_pending_event() { + if $channel.context.should_emit_channel_pending_event() { $locked_events.push_back((events::Event::ChannelPending { - channel_id: $channel.channel_id(), - former_temporary_channel_id: $channel.temporary_channel_id(), - counterparty_node_id: $channel.get_counterparty_node_id(), - user_channel_id: $channel.get_user_id(), - funding_txo: $channel.get_funding_txo().unwrap().into_bitcoin_outpoint(), + channel_id: $channel.context.channel_id(), + former_temporary_channel_id: $channel.context.temporary_channel_id(), + counterparty_node_id: $channel.context.get_counterparty_node_id(), + user_channel_id: $channel.context.get_user_id(), + funding_txo: $channel.context.get_funding_txo().unwrap().into_bitcoin_outpoint(), }, None)); - $channel.set_channel_pending_event_emitted(); + $channel.context.set_channel_pending_event_emitted(); } } } macro_rules! emit_channel_ready_event { ($locked_events: expr, $channel: expr) => { - if $channel.should_emit_channel_ready_event() { - debug_assert!($channel.channel_pending_event_emitted()); + if $channel.context.should_emit_channel_ready_event() { + debug_assert!($channel.context.channel_pending_event_emitted()); $locked_events.push_back((events::Event::ChannelReady { - channel_id: $channel.channel_id(), - user_channel_id: $channel.get_user_id(), - counterparty_node_id: $channel.get_counterparty_node_id(), - channel_type: $channel.get_channel_type().clone(), + channel_id: $channel.context.channel_id(), + user_channel_id: $channel.context.get_user_id(), + counterparty_node_id: $channel.context.get_counterparty_node_id(), + channel_type: $channel.context.get_channel_type().clone(), }, None)); - $channel.set_channel_ready_event_emitted(); + $channel.context.set_channel_ready_event_emitted(); } } } @@ -1641,8 +1749,8 @@ macro_rules! handle_monitor_update_completion { let mut updates = $chan.monitor_updating_restored(&$self.logger, &$self.node_signer, $self.genesis_hash, &$self.default_configuration, $self.best_block.read().unwrap().height()); - let counterparty_node_id = $chan.get_counterparty_node_id(); - let channel_update = if updates.channel_ready.is_some() && $chan.is_usable() { + let counterparty_node_id = $chan.context.get_counterparty_node_id(); + let channel_update = if updates.channel_ready.is_some() && $chan.context.is_usable() { // We only send a channel_update in the case where we are just now sending a // channel_ready and the channel is in a usable state. We may re-send a // channel_update later through the announcement_signatures process for public @@ -1657,7 +1765,7 @@ macro_rules! handle_monitor_update_completion { } else { None }; let update_actions = $peer_state.monitor_update_blocked_actions - .remove(&$chan.channel_id()).unwrap_or(Vec::new()); + .remove(&$chan.context.channel_id()).unwrap_or(Vec::new()); let htlc_forwards = $self.handle_channel_resumption( &mut $peer_state.pending_msg_events, $chan, updates.raa, @@ -1668,7 +1776,7 @@ macro_rules! handle_monitor_update_completion { $peer_state.pending_msg_events.push(upd); } - let channel_id = $chan.channel_id(); + let channel_id = $chan.context.channel_id(); core::mem::drop($peer_state_lock); core::mem::drop($per_peer_state_lock); @@ -1690,19 +1798,22 @@ macro_rules! handle_new_monitor_update { // update_maps_on_chan_removal needs to be able to take id_to_peer, so make sure we can in // any case so that it won't deadlock. 
debug_assert_ne!($self.id_to_peer.held_by_thread(), LockHeldState::HeldByThread); + #[cfg(debug_assertions)] { + debug_assert!($self.background_events_processed_since_startup.load(Ordering::Acquire)); + } match $update_res { ChannelMonitorUpdateStatus::InProgress => { log_debug!($self.logger, "ChannelMonitor update for {} in flight, holding messages until the update completes.", - log_bytes!($chan.channel_id()[..])); + log_bytes!($chan.context.channel_id()[..])); Ok(()) }, ChannelMonitorUpdateStatus::PermanentFailure => { log_error!($self.logger, "Closing channel {} due to monitor update ChannelMonitorUpdateStatus::PermanentFailure", - log_bytes!($chan.channel_id()[..])); + log_bytes!($chan.context.channel_id()[..])); update_maps_on_chan_removal!($self, $chan); let res: Result<(), _> = Err(MsgHandleErrInternal::from_finish_shutdown( - "ChannelMonitor storage failure".to_owned(), $chan.channel_id(), - $chan.get_user_id(), $chan.force_shutdown(false), + "ChannelMonitor storage failure".to_owned(), $chan.context.channel_id(), + $chan.context.get_user_id(), $chan.force_shutdown(false), $self.get_channel_update_for_broadcast(&$chan).ok())); $remove; res @@ -1736,6 +1847,10 @@ macro_rules! process_events_body { // persists happen while processing monitor events. let _read_guard = $self.total_consistency_lock.read().unwrap(); + // Because `handle_post_event_actions` may send `ChannelMonitorUpdate`s to the user we must + // ensure any startup-generated background events are handled first. + if $self.process_background_events() == NotifyOption::DoPersist { result = NotifyOption::DoPersist; } + // TODO: This behavior should be documented. It's unintuitive that we query // ChannelMonitors when clearing other events. if $self.process_pending_monitor_events() { @@ -1845,6 +1960,8 @@ where pending_events_processor: AtomicBool::new(false), pending_background_events: Mutex::new(Vec::new()), total_consistency_lock: RwLock::new(()), + #[cfg(debug_assertions)] + background_events_processed_since_startup: AtomicBool::new(false), persistence_notifier: Notifier::new(), entropy_source, @@ -1913,7 +2030,7 @@ where return Err(APIError::APIMisuseError { err: format!("Channel value must be at least 1000 satoshis. It was {}", channel_value_satoshis) }); } - let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier); + let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self); // We want to make sure the lock is actually acquired by PersistenceNotifierGuard. debug_assert!(&self.total_consistency_lock.try_write().is_err()); @@ -1940,7 +2057,7 @@ where }; let res = channel.get_open_channel(self.genesis_hash.clone()); - let temporary_channel_id = channel.channel_id(); + let temporary_channel_id = channel.context.channel_id(); match peer_state.channel_by_id.entry(temporary_channel_id) { hash_map::Entry::Occupied(_) => { if cfg!(fuzzing) { @@ -1999,7 +2116,7 @@ where // Note we use is_live here instead of usable which leads to somewhat confused // internal/external nomenclature, but that's ok cause that's probably what the user // really wanted anyway. - self.list_channels_with_filter(|&(_, ref channel)| channel.is_live()) + self.list_channels_with_filter(|&(_, ref channel)| channel.context.is_live()) } /// Gets the list of channels we have with a given counterparty, in random order. 
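// ---------------------------------------------------------------------------
// [Editor's example - not part of the patch] The recurring mechanical change in
// the hunks below swaps the two-argument guard constructor for the new
// `notify_on_drop(self)` form, which drains any startup-generated background
// events before new monitor updates can be produced. A minimal sketch of the
// pattern, assuming it sits inside the existing `impl ... ChannelManager<...>`
// block (the method name here is hypothetical):

fn example_state_changing_method(&self) {
	// Takes `total_consistency_lock` in read mode, runs
	// `process_background_events()`, and wakes `persistence_notifier`
	// when the guard is dropped.
	let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
	// ... mutate channel state under the lock here ...
}
// ---------------------------------------------------------------------------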
@@ -2054,20 +2171,20 @@ where
 		match channel.unbroadcasted_funding() {
 			Some(transaction) => {
 				pending_events_lock.push_back((events::Event::DiscardFunding {
-					channel_id: channel.channel_id(), transaction
+					channel_id: channel.context.channel_id(), transaction
 				}, None));
 			},
 			None => {},
 		}
 		pending_events_lock.push_back((events::Event::ChannelClosed {
-			channel_id: channel.channel_id(),
-			user_channel_id: channel.get_user_id(),
+			channel_id: channel.context.channel_id(),
+			user_channel_id: channel.context.get_user_id(),
 			reason: closure_reason
 		}, None));
 	}
 
 	fn close_channel_internal(&self, channel_id: &[u8; 32], counterparty_node_id: &PublicKey, target_feerate_sats_per_1000_weight: Option<u32>, override_shutdown_script: Option<ShutdownScript>) -> Result<(), APIError> {
-		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
+		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
 
 		let mut failed_htlcs: Vec<(HTLCSource, PaymentHash)>;
 		let result: Result<(), _> = loop {
@@ -2080,7 +2197,7 @@ where
 			let peer_state = &mut *peer_state_lock;
 			match peer_state.channel_by_id.entry(channel_id.clone()) {
 				hash_map::Entry::Occupied(mut chan_entry) => {
-					let funding_txo_opt = chan_entry.get().get_funding_txo();
+					let funding_txo_opt = chan_entry.get().context.get_funding_txo();
 					let their_features = &peer_state.latest_features;
 					let (shutdown_msg, mut monitor_update_opt, htlcs) = chan_entry.get_mut()
 						.get_shutdown(&self.signer_provider, their_features, target_feerate_sats_per_1000_weight, override_shutdown_script)?;
@@ -2197,7 +2314,7 @@ where
 			let receiver = HTLCDestination::NextHopChannel { node_id: Some(counterparty_node_id), channel_id };
 			self.fail_htlc_backwards_internal(&source, &payment_hash, &reason, receiver);
 		}
-		if let Some((funding_txo, monitor_update)) = monitor_update_option {
+		if let Some((_, funding_txo, monitor_update)) = monitor_update_option {
 			// There isn't anything we can do if we get an update failure - we're already
 			// force-closing. The monitor update on the required in-memory copy should broadcast
 			// the latest local state, which is the best we can do anyway.
Thus, it is safe to @@ -2236,11 +2353,11 @@ where }); } - Ok(chan.get_counterparty_node_id()) + Ok(chan.context.get_counterparty_node_id()) } fn force_close_sending_error(&self, channel_id: &[u8; 32], counterparty_node_id: &PublicKey, broadcast: bool) -> Result<(), APIError> { - let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier); + let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self); match self.force_close_channel_with_peer(channel_id, counterparty_node_id, None, broadcast) { Ok(counterparty_node_id) => { let per_peer_state = self.per_peer_state.read().unwrap(); @@ -2342,20 +2459,7 @@ where }); }, msgs::OnionHopDataFormat::FinalNode { payment_data, keysend_preimage, payment_metadata } => { - if payment_data.is_some() && keysend_preimage.is_some() { - return Err(ReceiveError { - err_code: 0x4000|22, - err_data: Vec::new(), - msg: "We don't support MPP keysend payments", - }); - } else if let Some(data) = payment_data { - PendingHTLCRouting::Receive { - payment_data: data, - payment_metadata, - incoming_cltv_expiry: hop_data.outgoing_cltv_value, - phantom_shared_secret, - } - } else if let Some(payment_preimage) = keysend_preimage { + if let Some(payment_preimage) = keysend_preimage { // We need to check that the sender knows the keysend preimage before processing this // payment further. Otherwise, an intermediary routing hop forwarding non-keysend-HTLC X // could discover the final destination of X, by probing the adjacent nodes on the route @@ -2369,12 +2473,26 @@ where msg: "Payment preimage didn't match payment hash", }); } - + if !self.default_configuration.accept_mpp_keysend && payment_data.is_some() { + return Err(ReceiveError { + err_code: 0x4000|22, + err_data: Vec::new(), + msg: "We don't support MPP keysend payments", + }); + } PendingHTLCRouting::ReceiveKeysend { + payment_data, payment_preimage, payment_metadata, incoming_cltv_expiry: hop_data.outgoing_cltv_value, } + } else if let Some(data) = payment_data { + PendingHTLCRouting::Receive { + payment_data: data, + payment_metadata, + incoming_cltv_expiry: hop_data.outgoing_cltv_value, + phantom_shared_secret, + } } else { return Err(ReceiveError { err_code: 0x4000|0x2000|3, @@ -2532,13 +2650,13 @@ where }, Some(chan) => chan }; - if !chan.should_announce() && !self.default_configuration.accept_forwards_to_priv_channels { + if !chan.context.should_announce() && !self.default_configuration.accept_forwards_to_priv_channels { // Note that the behavior here should be identical to the above block - we // should NOT reveal the existence or non-existence of a private channel if // we don't allow forwards outbound over them. break Some(("Refusing to forward to a private channel based on our config.", 0x4000 | 10, None)); } - if chan.get_channel_type().supports_scid_privacy() && *short_channel_id != chan.outbound_scid_alias() { + if chan.context.get_channel_type().supports_scid_privacy() && *short_channel_id != chan.context.outbound_scid_alias() { // `option_scid_alias` (referred to in LDK as `scid_privacy`) means // "refuse to forward unless the SCID alias was used", so we pretend // we don't have the channel here. @@ -2551,7 +2669,7 @@ where // around to doing the actual forward, but better to fail early if we can and // hopefully an attacker trying to path-trace payments cannot make this occur // on a small/per-node/per-channel scale. 
-				if !chan.is_live() { // channel_disabled
+				if !chan.context.is_live() { // channel_disabled
 					// If the channel_update we're going to return is disabled (i.e. the
 					// peer has been disabled for some time), return `channel_disabled`,
 					// otherwise return `temporary_channel_failure`.
@@ -2561,7 +2679,7 @@ where
 					break Some(("Forwarding channel is not in a ready state.", 0x1000 | 7, chan_update_opt));
 				}
 			}
-			if *outgoing_amt_msat < chan.get_counterparty_htlc_minimum_msat() { // amount_below_minimum
+			if *outgoing_amt_msat < chan.context.get_counterparty_htlc_minimum_msat() { // amount_below_minimum
 				break Some(("HTLC amount was below the htlc_minimum_msat", 0x1000 | 11, chan_update_opt));
 			}
 			if let Err((err, code)) = chan.htlc_satisfies_config(&msg, *outgoing_amt_msat, *outgoing_cltv_value) {
@@ -2647,16 +2765,16 @@ where
 	/// [`channel_update`]: msgs::ChannelUpdate
 	/// [`internal_closing_signed`]: Self::internal_closing_signed
 	fn get_channel_update_for_broadcast(&self, chan: &Channel<<SP::Target as SignerProvider>::Signer>) -> Result<msgs::ChannelUpdate, LightningError> {
-		if !chan.should_announce() {
+		if !chan.context.should_announce() {
 			return Err(LightningError {
 				err: "Cannot broadcast a channel_update for a private channel".to_owned(),
 				action: msgs::ErrorAction::IgnoreError
 			});
 		}
-		if chan.get_short_channel_id().is_none() {
+		if chan.context.get_short_channel_id().is_none() {
 			return Err(LightningError{err: "Channel not yet established".to_owned(), action: msgs::ErrorAction::IgnoreError});
 		}
-		log_trace!(self.logger, "Attempting to generate broadcast channel update for channel {}", log_bytes!(chan.channel_id()));
+		log_trace!(self.logger, "Attempting to generate broadcast channel update for channel {}", log_bytes!(chan.context.channel_id()));
 		self.get_channel_update_for_unicast(chan)
 	}
@@ -2672,8 +2790,8 @@ where
 	/// [`channel_update`]: msgs::ChannelUpdate
 	/// [`internal_closing_signed`]: Self::internal_closing_signed
 	fn get_channel_update_for_unicast(&self, chan: &Channel<<SP::Target as SignerProvider>::Signer>) -> Result<msgs::ChannelUpdate, LightningError> {
-		log_trace!(self.logger, "Attempting to generate channel update for channel {}", log_bytes!(chan.channel_id()));
-		let short_channel_id = match chan.get_short_channel_id().or(chan.latest_inbound_scid_alias()) {
+		log_trace!(self.logger, "Attempting to generate channel update for channel {}", log_bytes!(chan.context.channel_id()));
+		let short_channel_id = match chan.context.get_short_channel_id().or(chan.context.latest_inbound_scid_alias()) {
 			None => return Err(LightningError{err: "Channel not yet established".to_owned(), action: msgs::ErrorAction::IgnoreError}),
 			Some(id) => id,
 		};
@@ -2681,10 +2799,10 @@ where
 		self.get_channel_update_for_onion(short_channel_id, chan)
 	}
 
 	fn get_channel_update_for_onion(&self, short_channel_id: u64, chan: &Channel<<SP::Target as SignerProvider>::Signer>) -> Result<msgs::ChannelUpdate, LightningError> {
-		log_trace!(self.logger, "Generating channel update for channel {}", log_bytes!(chan.channel_id()));
-		let were_node_one = self.our_network_pubkey.serialize()[..] < chan.get_counterparty_node_id().serialize()[..];
+		log_trace!(self.logger, "Generating channel update for channel {}", log_bytes!(chan.context.channel_id()));
+		let were_node_one = self.our_network_pubkey.serialize()[..]
< chan.context.get_counterparty_node_id().serialize()[..]; - let enabled = chan.is_usable() && match chan.channel_update_status() { + let enabled = chan.context.is_usable() && match chan.channel_update_status() { ChannelUpdateStatus::Enabled => true, ChannelUpdateStatus::DisabledStaged(_) => true, ChannelUpdateStatus::Disabled => false, @@ -2694,13 +2812,13 @@ where let unsigned = msgs::UnsignedChannelUpdate { chain_hash: self.genesis_hash, short_channel_id, - timestamp: chan.get_update_time_counter(), + timestamp: chan.context.get_update_time_counter(), flags: (!were_node_one) as u8 | ((!enabled as u8) << 1), - cltv_expiry_delta: chan.get_cltv_expiry_delta(), - htlc_minimum_msat: chan.get_counterparty_htlc_minimum_msat(), - htlc_maximum_msat: chan.get_announced_htlc_max_msat(), - fee_base_msat: chan.get_outbound_forwarding_fee_base_msat(), - fee_proportional_millionths: chan.get_fee_proportional_millionths(), + cltv_expiry_delta: chan.context.get_cltv_expiry_delta(), + htlc_minimum_msat: chan.context.get_counterparty_htlc_minimum_msat(), + htlc_maximum_msat: chan.context.get_announced_htlc_max_msat(), + fee_base_msat: chan.context.get_outbound_forwarding_fee_base_msat(), + fee_proportional_millionths: chan.context.get_fee_proportional_millionths(), excess_data: Vec::new(), }; // Panic on failure to signal LDK should be restarted to retry signing the `ChannelUpdate`. @@ -2748,10 +2866,10 @@ where let mut peer_state_lock = peer_state_mutex.lock().unwrap(); let peer_state = &mut *peer_state_lock; if let hash_map::Entry::Occupied(mut chan) = peer_state.channel_by_id.entry(id) { - if !chan.get().is_live() { + if !chan.get().context.is_live() { return Err(APIError::ChannelUnavailable{err: "Peer for first hop currently disconnected".to_owned()}); } - let funding_txo = chan.get().get_funding_txo().unwrap(); + let funding_txo = chan.get().context.get_funding_txo().unwrap(); let send_res = chan.get_mut().send_htlc_and_commit(htlc_msat, payment_hash.clone(), htlc_cltv, HTLCSource::OutboundRoute { path: path.clone(), @@ -2849,18 +2967,18 @@ where /// [`ChannelMonitorUpdateStatus::InProgress`]: crate::chain::ChannelMonitorUpdateStatus::InProgress pub fn send_payment_with_route(&self, route: &Route, payment_hash: PaymentHash, recipient_onion: RecipientOnionFields, payment_id: PaymentId) -> Result<(), PaymentSendFailure> { let best_block_height = self.best_block.read().unwrap().height(); - let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier); + let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self); self.pending_outbound_payments .send_payment_with_route(route, payment_hash, recipient_onion, payment_id, &self.entropy_source, &self.node_signer, best_block_height, |path, payment_hash, recipient_onion, total_value, cur_height, payment_id, keysend_preimage, session_priv| self.send_payment_along_path(path, payment_hash, recipient_onion, total_value, cur_height, payment_id, keysend_preimage, session_priv)) } - /// Similar to [`ChannelManager::send_payment`], but will automatically find a route based on + /// Similar to [`ChannelManager::send_payment_with_route`], but will automatically find a route based on /// `route_params` and retry failed payment paths based on `retry_strategy`. 
 	pub fn send_payment(&self, payment_hash: PaymentHash, recipient_onion: RecipientOnionFields, payment_id: PaymentId, route_params: RouteParameters, retry_strategy: Retry) -> Result<(), RetryableSendFailure> {
 		let best_block_height = self.best_block.read().unwrap().height();
-		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
+		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
 		self.pending_outbound_payments
 			.send_payment(payment_hash, recipient_onion, payment_id, retry_strategy, route_params,
 				&self.router, self.list_usable_channels(), || self.compute_inflight_htlcs(),
@@ -2873,7 +2991,7 @@ where
 	#[cfg(test)]
 	pub(super) fn test_send_payment_internal(&self, route: &Route, payment_hash: PaymentHash, recipient_onion: RecipientOnionFields, keysend_preimage: Option<PaymentPreimage>, payment_id: PaymentId, recv_value_msat: Option<u64>, onion_session_privs: Vec<[u8; 32]>) -> Result<(), PaymentSendFailure> {
 		let best_block_height = self.best_block.read().unwrap().height();
-		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
+		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
 		self.pending_outbound_payments.test_send_payment_internal(route, payment_hash, recipient_onion, keysend_preimage, payment_id, recv_value_msat, onion_session_privs, &self.node_signer, best_block_height,
 			|path, payment_hash, recipient_onion, total_value, cur_height, payment_id, keysend_preimage, session_priv|
 			self.send_payment_along_path(path, payment_hash, recipient_onion, total_value, cur_height, payment_id, keysend_preimage, session_priv))
@@ -2908,7 +3026,7 @@ where
 	/// [`Event::PaymentFailed`]: events::Event::PaymentFailed
 	/// [`Event::PaymentSent`]: events::Event::PaymentSent
 	pub fn abandon_payment(&self, payment_id: PaymentId) {
-		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
+		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
 		self.pending_outbound_payments.abandon_payment(payment_id, PaymentFailureReason::UserAbandoned, &self.pending_events);
 	}
@@ -2927,7 +3045,7 @@ where
 	/// [`send_payment`]: Self::send_payment
 	pub fn send_spontaneous_payment(&self, route: &Route, payment_preimage: Option<PaymentPreimage>, recipient_onion: RecipientOnionFields, payment_id: PaymentId) -> Result<PaymentHash, PaymentSendFailure> {
 		let best_block_height = self.best_block.read().unwrap().height();
-		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
+		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
 		self.pending_outbound_payments.send_spontaneous_payment_with_route(
 			route, payment_preimage, recipient_onion, payment_id, &self.entropy_source,
 			&self.node_signer, best_block_height,
@@ -2944,7 +3062,7 @@ where
 	/// [`PaymentParameters::for_keysend`]: crate::routing::router::PaymentParameters::for_keysend
 	pub fn send_spontaneous_payment_with_retry(&self, payment_preimage: Option<PaymentPreimage>, recipient_onion: RecipientOnionFields, payment_id: PaymentId, route_params: RouteParameters, retry_strategy: Retry) -> Result<PaymentHash, RetryableSendFailure> {
 		let best_block_height = self.best_block.read().unwrap().height();
-		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
+		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
self.pending_outbound_payments.send_spontaneous_payment(payment_preimage, recipient_onion, payment_id, retry_strategy, route_params, &self.router, self.list_usable_channels(), || self.compute_inflight_htlcs(), &self.entropy_source, &self.node_signer, best_block_height, @@ -2958,7 +3076,7 @@ where /// us to easily discern them from real payments. pub fn send_probe(&self, path: Path) -> Result<(PaymentHash, PaymentId), PaymentSendFailure> { let best_block_height = self.best_block.read().unwrap().height(); - let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier); + let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self); self.pending_outbound_payments.send_probe(path, self.probing_cookie_secret, &self.entropy_source, &self.node_signer, best_block_height, |path, payment_hash, recipient_onion, total_value, cur_height, payment_id, keysend_preimage, session_priv| self.send_payment_along_path(path, payment_hash, recipient_onion, total_value, cur_height, payment_id, keysend_preimage, session_priv)) @@ -2988,7 +3106,7 @@ where let funding_res = chan.get_outbound_funding_created(funding_transaction, funding_txo, &self.logger) .map_err(|e| if let ChannelError::Close(msg) = e { - MsgHandleErrInternal::from_finish_shutdown(msg, chan.channel_id(), chan.get_user_id(), chan.force_shutdown(true), None) + MsgHandleErrInternal::from_finish_shutdown(msg, chan.context.channel_id(), chan.context.get_user_id(), chan.force_shutdown(true), None) } else { unreachable!(); }); match funding_res { Ok(funding_msg) => (funding_msg, chan), @@ -2996,7 +3114,7 @@ where mem::drop(peer_state_lock); mem::drop(per_peer_state); - let _ = handle_error!(self, funding_res, chan.get_counterparty_node_id()); + let _ = handle_error!(self, funding_res, chan.context.get_counterparty_node_id()); return Err(APIError::ChannelUnavailable { err: "Signer refused to sign the initial commitment transaction".to_owned() }); @@ -3013,16 +3131,16 @@ where }; peer_state.pending_msg_events.push(events::MessageSendEvent::SendFundingCreated { - node_id: chan.get_counterparty_node_id(), + node_id: chan.context.get_counterparty_node_id(), msg, }); - match peer_state.channel_by_id.entry(chan.channel_id()) { + match peer_state.channel_by_id.entry(chan.context.channel_id()) { hash_map::Entry::Occupied(_) => { panic!("Generated duplicate funding txid?"); }, hash_map::Entry::Vacant(e) => { let mut id_to_peer = self.id_to_peer.lock().unwrap(); - if id_to_peer.insert(chan.channel_id(), chan.get_counterparty_node_id()).is_some() { + if id_to_peer.insert(chan.context.channel_id(), chan.context.get_counterparty_node_id()).is_some() { panic!("id_to_peer map already contained funding txid, which shouldn't be possible"); } e.insert(chan); @@ -3069,7 +3187,7 @@ where /// [`Event::FundingGenerationReady`]: crate::events::Event::FundingGenerationReady /// [`Event::ChannelClosed`]: crate::events::Event::ChannelClosed pub fn funding_transaction_generated(&self, temporary_channel_id: &[u8; 32], counterparty_node_id: &PublicKey, funding_transaction: Transaction) -> Result<(), APIError> { - let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier); + let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self); for inp in funding_transaction.input.iter() { if inp.witness.is_empty() { @@ -3098,9 +3216,9 @@ where } let mut output_index = None; - let expected_spk = chan.get_funding_redeemscript().to_v0_p2wsh(); + let 
expected_spk = chan.context.get_funding_redeemscript().to_v0_p2wsh(); for (idx, outp) in tx.output.iter().enumerate() { - if outp.script_pubkey == expected_spk && outp.value == chan.get_value_satoshis() { + if outp.script_pubkey == expected_spk && outp.value == chan.context.get_value_satoshis() { if output_index.is_some() { return Err(APIError::APIMisuseError { err: "Multiple outputs matched the expected script and value".to_owned() @@ -3118,7 +3236,7 @@ where }) } - /// Atomically updates the [`ChannelConfig`] for the given channels. + /// Atomically applies partial updates to the [`ChannelConfig`] of the given channels. /// /// Once the updates are applied, each eligible channel (advertised with a known short channel /// ID and a change in [`forwarding_fee_proportional_millionths`], [`forwarding_fee_base_msat`], @@ -3140,18 +3258,16 @@ where /// [`ChannelUpdate`]: msgs::ChannelUpdate /// [`ChannelUnavailable`]: APIError::ChannelUnavailable /// [`APIMisuseError`]: APIError::APIMisuseError - pub fn update_channel_config( - &self, counterparty_node_id: &PublicKey, channel_ids: &[[u8; 32]], config: &ChannelConfig, + pub fn update_partial_channel_config( + &self, counterparty_node_id: &PublicKey, channel_ids: &[[u8; 32]], config_update: &ChannelConfigUpdate, ) -> Result<(), APIError> { - if config.cltv_expiry_delta < MIN_CLTV_EXPIRY_DELTA { + if config_update.cltv_expiry_delta.map(|delta| delta < MIN_CLTV_EXPIRY_DELTA).unwrap_or(false) { return Err(APIError::APIMisuseError { err: format!("The chosen CLTV expiry delta is below the minimum of {}", MIN_CLTV_EXPIRY_DELTA), }); } - let _persistence_guard = PersistenceNotifierGuard::notify_on_drop( - &self.total_consistency_lock, &self.persistence_notifier, - ); + let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self); let per_peer_state = self.per_peer_state.read().unwrap(); let peer_state_mutex = per_peer_state.get(counterparty_node_id) .ok_or_else(|| APIError::ChannelUnavailable { err: format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id) })?; @@ -3166,14 +3282,16 @@ where } for channel_id in channel_ids { let channel = peer_state.channel_by_id.get_mut(channel_id).unwrap(); - if !channel.update_config(config) { + let mut config = channel.context.config(); + config.apply(config_update); + if !channel.context.update_config(&config) { continue; } if let Ok(msg) = self.get_channel_update_for_broadcast(channel) { peer_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate { msg }); } else if let Ok(msg) = self.get_channel_update_for_unicast(channel) { peer_state.pending_msg_events.push(events::MessageSendEvent::SendChannelUpdate { - node_id: channel.get_counterparty_node_id(), + node_id: channel.context.get_counterparty_node_id(), msg, }); } @@ -3181,6 +3299,34 @@ where Ok(()) } + /// Atomically updates the [`ChannelConfig`] for the given channels. + /// + /// Once the updates are applied, each eligible channel (advertised with a known short channel + /// ID and a change in [`forwarding_fee_proportional_millionths`], [`forwarding_fee_base_msat`], + /// or [`cltv_expiry_delta`]) has a [`BroadcastChannelUpdate`] event message generated + /// containing the new [`ChannelUpdate`] message which should be broadcast to the network. + /// + /// Returns [`ChannelUnavailable`] when a channel is not found or an incorrect + /// `counterparty_node_id` is provided. 
+ /// + /// Returns [`APIMisuseError`] when a [`cltv_expiry_delta`] update is to be applied with a value + /// below [`MIN_CLTV_EXPIRY_DELTA`]. + /// + /// If an error is returned, none of the updates should be considered applied. + /// + /// [`forwarding_fee_proportional_millionths`]: ChannelConfig::forwarding_fee_proportional_millionths + /// [`forwarding_fee_base_msat`]: ChannelConfig::forwarding_fee_base_msat + /// [`cltv_expiry_delta`]: ChannelConfig::cltv_expiry_delta + /// [`BroadcastChannelUpdate`]: events::MessageSendEvent::BroadcastChannelUpdate + /// [`ChannelUpdate`]: msgs::ChannelUpdate + /// [`ChannelUnavailable`]: APIError::ChannelUnavailable + /// [`APIMisuseError`]: APIError::APIMisuseError + pub fn update_channel_config( + &self, counterparty_node_id: &PublicKey, channel_ids: &[[u8; 32]], config: &ChannelConfig, + ) -> Result<(), APIError> { + return self.update_partial_channel_config(counterparty_node_id, channel_ids, &(*config).into()); + } + /// Attempts to forward an intercepted HTLC over the provided channel id and with the provided /// amount to forward. Should only be called in response to an [`HTLCIntercepted`] event. /// @@ -3204,7 +3350,7 @@ where // TODO: when we move to deciding the best outbound channel at forward time, only take // `next_node_id` and not `next_hop_channel_id` pub fn forward_intercepted_htlc(&self, intercept_id: InterceptId, next_hop_channel_id: &[u8; 32], next_node_id: PublicKey, amt_to_forward_msat: u64) -> Result<(), APIError> { - let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier); + let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self); let next_hop_scid = { let peer_state_lock = self.per_peer_state.read().unwrap(); @@ -3214,12 +3360,12 @@ where let peer_state = &mut *peer_state_lock; match peer_state.channel_by_id.get(next_hop_channel_id) { Some(chan) => { - if !chan.is_usable() { + if !chan.context.is_usable() { return Err(APIError::ChannelUnavailable { err: format!("Channel with id {} not fully established", log_bytes!(*next_hop_channel_id)) }) } - chan.get_short_channel_id().unwrap_or(chan.outbound_scid_alias()) + chan.context.get_short_channel_id().unwrap_or(chan.context.outbound_scid_alias()) }, None => return Err(APIError::ChannelUnavailable { err: format!("Channel with id {} not found for the passed counterparty node_id {}", log_bytes!(*next_hop_channel_id), next_node_id) @@ -3260,7 +3406,7 @@ where /// /// [`HTLCIntercepted`]: events::Event::HTLCIntercepted pub fn fail_intercepted_htlc(&self, intercept_id: InterceptId) -> Result<(), APIError> { - let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier); + let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self); let payment = self.pending_intercepted_htlcs.lock().unwrap().remove(&intercept_id) .ok_or_else(|| APIError::APIMisuseError { @@ -3289,7 +3435,7 @@ where /// Should only really ever be called in response to a PendingHTLCsForwardable event. /// Will likely generate further events. 
pub fn process_pending_htlc_forwards(&self) { - let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier); + let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self); let mut new_events = VecDeque::new(); let mut failed_forwards = Vec::new(); @@ -3445,7 +3591,7 @@ where let (failure_code, data) = self.get_htlc_temp_fail_err_and_data(0x1000|7, short_chan_id, chan.get()); failed_forwards.push((htlc_source, payment_hash, HTLCFailReason::reason(failure_code, data), - HTLCDestination::NextHopChannel { node_id: Some(chan.get().get_counterparty_node_id()), channel_id: forward_chan_id } + HTLCDestination::NextHopChannel { node_id: Some(chan.get().context.get_counterparty_node_id()), channel_id: forward_chan_id } )); continue; } @@ -3490,16 +3636,19 @@ where (incoming_cltv_expiry, OnionPayload::Invoice { _legacy_hop_data }, Some(payment_data), phantom_shared_secret, onion_fields) }, - PendingHTLCRouting::ReceiveKeysend { payment_preimage, payment_metadata, incoming_cltv_expiry } => { - let onion_fields = RecipientOnionFields { payment_secret: None, payment_metadata }; + PendingHTLCRouting::ReceiveKeysend { payment_data, payment_preimage, payment_metadata, incoming_cltv_expiry } => { + let onion_fields = RecipientOnionFields { + payment_secret: payment_data.as_ref().map(|data| data.payment_secret), + payment_metadata + }; (incoming_cltv_expiry, OnionPayload::Spontaneous(payment_preimage), - None, None, onion_fields) + payment_data, None, onion_fields) }, _ => { panic!("short_channel_id == 0 should imply any pending_forward entries are of type Receive"); } }; - let mut claimable_htlc = ClaimableHTLC { + let claimable_htlc = ClaimableHTLC { prev_hop: HTLCPreviousHopData { short_channel_id: prev_short_channel_id, outpoint: prev_funding_outpoint, @@ -3549,13 +3698,11 @@ where } macro_rules! check_total_value { - ($payment_data: expr, $payment_preimage: expr) => {{ + ($purpose: expr) => {{ let mut payment_claimable_generated = false; - let purpose = || { - events::PaymentPurpose::InvoicePayment { - payment_preimage: $payment_preimage, - payment_secret: $payment_data.payment_secret, - } + let is_keysend = match $purpose { + events::PaymentPurpose::SpontaneousPayment(_) => true, + events::PaymentPurpose::InvoicePayment { .. 
@@ -3549,13 +3698,11 @@ where
 				}

 				macro_rules! check_total_value {
-					($payment_data: expr, $payment_preimage: expr) => {{
+					($purpose: expr) => {{
 						let mut payment_claimable_generated = false;
-						let purpose = || {
-							events::PaymentPurpose::InvoicePayment {
-								payment_preimage: $payment_preimage,
-								payment_secret: $payment_data.payment_secret,
-							}
+						let is_keysend = match $purpose {
+							events::PaymentPurpose::SpontaneousPayment(_) => true,
+							events::PaymentPurpose::InvoicePayment { .. } => false,
 						};
 						let mut claimable_payments = self.claimable_payments.lock().unwrap();
 						if claimable_payments.pending_claiming_payments.contains_key(&payment_hash) {
@@ -3567,9 +3714,18 @@ where
 							.or_insert_with(|| {
 								committed_to_claimable = true;
 								ClaimablePayment {
-									purpose: purpose(), htlcs: Vec::new(), onion_fields: None,
+									purpose: $purpose.clone(), htlcs: Vec::new(), onion_fields: None,
 								}
 							});
+						if $purpose != claimable_payment.purpose {
+							let log_keysend = |keysend| if keysend { "keysend" } else { "non-keysend" };
+							log_trace!(self.logger, "Failing new {} HTLC with payment_hash {} as we already had an existing {} HTLC with the same payment hash", log_keysend(is_keysend), log_bytes!(payment_hash.0), log_keysend(!is_keysend));
+							fail_htlc!(claimable_htlc, payment_hash);
+						}
+						if !self.default_configuration.accept_mpp_keysend && is_keysend && !claimable_payment.htlcs.is_empty() {
+							log_trace!(self.logger, "Failing new keysend HTLC with payment_hash {} as we already had an existing keysend HTLC with the same payment hash and our config states we don't accept MPP keysend", log_bytes!(payment_hash.0));
+							fail_htlc!(claimable_htlc, payment_hash);
+						}
 						if let Some(earlier_fields) = &mut claimable_payment.onion_fields {
 							if earlier_fields.check_merge(&mut onion_fields).is_err() {
 								fail_htlc!(claimable_htlc, payment_hash);
@@ -3578,38 +3734,27 @@ where
 							claimable_payment.onion_fields = Some(onion_fields);
 						}
 						let ref mut htlcs = &mut claimable_payment.htlcs;
-						if htlcs.len() == 1 {
-							if let OnionPayload::Spontaneous(_) = htlcs[0].onion_payload {
-								log_trace!(self.logger, "Failing new HTLC with payment_hash {} as we already had an existing keysend HTLC with the same payment hash", log_bytes!(payment_hash.0));
-								fail_htlc!(claimable_htlc, payment_hash);
-							}
-						}
 						let mut total_value = claimable_htlc.sender_intended_value;
 						let mut earliest_expiry = claimable_htlc.cltv_expiry;
 						for htlc in htlcs.iter() {
 							total_value += htlc.sender_intended_value;
 							earliest_expiry = cmp::min(earliest_expiry, htlc.cltv_expiry);
-							match &htlc.onion_payload {
-								OnionPayload::Invoice { .. } => {
-									if htlc.total_msat != $payment_data.total_msat {
-										log_trace!(self.logger, "Failing HTLCs with payment_hash {} as the HTLCs had inconsistent total values (eg {} and {})",
-											log_bytes!(payment_hash.0), $payment_data.total_msat, htlc.total_msat);
-										total_value = msgs::MAX_VALUE_MSAT;
-									}
-									if total_value >= msgs::MAX_VALUE_MSAT { break; }
-								},
-								_ => unreachable!(),
+							if htlc.total_msat != claimable_htlc.total_msat {
+								log_trace!(self.logger, "Failing HTLCs with payment_hash {} as the HTLCs had inconsistent total values (eg {} and {})",
+									log_bytes!(payment_hash.0), claimable_htlc.total_msat, htlc.total_msat);
+								total_value = msgs::MAX_VALUE_MSAT;
 							}
+							if total_value >= msgs::MAX_VALUE_MSAT { break; }
 						}
 						// The condition determining whether an MPP is complete must
 						// match exactly the condition used in `timer_tick_occurred`
 						if total_value >= msgs::MAX_VALUE_MSAT {
 							fail_htlc!(claimable_htlc, payment_hash);
-						} else if total_value - claimable_htlc.sender_intended_value >= $payment_data.total_msat {
+						} else if total_value - claimable_htlc.sender_intended_value >= claimable_htlc.total_msat {
 							log_trace!(self.logger, "Failing HTLC with payment_hash {} as payment is already claimable",
 								log_bytes!(payment_hash.0));
 							fail_htlc!(claimable_htlc, payment_hash);
-						} else if total_value >= $payment_data.total_msat {
+						} else if total_value >= claimable_htlc.total_msat {
 							#[allow(unused_assignments)] {
 								committed_to_claimable = true;
 							}
@@ -3620,7 +3765,7 @@ where
 							new_events.push_back((events::Event::PaymentClaimable {
 								receiver_node_id: Some(receiver_node_id),
 								payment_hash,
-								purpose: purpose(),
+								purpose: $purpose,
 								amount_msat,
 								via_channel_id: Some(prev_channel_id),
 								via_user_channel_id: Some(prev_user_channel_id),
@@ -3668,49 +3813,23 @@ where
 										fail_htlc!(claimable_htlc, payment_hash);
 									}
 								}
-								check_total_value!(payment_data, payment_preimage);
+								let purpose = events::PaymentPurpose::InvoicePayment {
+									payment_preimage: payment_preimage.clone(),
+									payment_secret: payment_data.payment_secret,
+								};
+								check_total_value!(purpose);
 							},
 							OnionPayload::Spontaneous(preimage) => {
-								let mut claimable_payments = self.claimable_payments.lock().unwrap();
-								if claimable_payments.pending_claiming_payments.contains_key(&payment_hash) {
-									fail_htlc!(claimable_htlc, payment_hash);
-								}
-								match claimable_payments.claimable_payments.entry(payment_hash) {
-									hash_map::Entry::Vacant(e) => {
-										let amount_msat = claimable_htlc.value;
-										claimable_htlc.total_value_received = Some(amount_msat);
-										let claim_deadline = Some(claimable_htlc.cltv_expiry - HTLC_FAIL_BACK_BUFFER);
-										let purpose = events::PaymentPurpose::SpontaneousPayment(preimage);
-										e.insert(ClaimablePayment {
-											purpose: purpose.clone(),
-											onion_fields: Some(onion_fields.clone()),
-											htlcs: vec![claimable_htlc],
-										});
-										let prev_channel_id = prev_funding_outpoint.to_channel_id();
-										new_events.push_back((events::Event::PaymentClaimable {
-											receiver_node_id: Some(receiver_node_id),
-											payment_hash,
-											amount_msat,
-											purpose,
-											via_channel_id: Some(prev_channel_id),
-											via_user_channel_id: Some(prev_user_channel_id),
-											claim_deadline,
-											onion_fields: Some(onion_fields),
-										}, None));
-									},
-									hash_map::Entry::Occupied(_) => {
-										log_trace!(self.logger, "Failing new keysend HTLC with payment_hash {} for a duplicative payment hash", log_bytes!(payment_hash.0));
-										fail_htlc!(claimable_htlc, payment_hash);
-									}
-								}
+								let purpose = events::PaymentPurpose::SpontaneousPayment(preimage);
+								check_total_value!(purpose);
 							}
 						}
 					},
 					hash_map::Entry::Occupied(inbound_payment) => {
-						if payment_data.is_none() {
+						if let OnionPayload::Spontaneous(_) = claimable_htlc.onion_payload {
 							log_trace!(self.logger, "Failing new keysend HTLC with payment_hash {} because we already have an inbound payment with the same payment hash", log_bytes!(payment_hash.0));
 							fail_htlc!(claimable_htlc, payment_hash);
-						};
+						}
 						let payment_data = payment_data.unwrap();
 						if inbound_payment.get().payment_secret != payment_data.payment_secret {
 							log_trace!(self.logger, "Failing new HTLC with payment_hash {} as it didn't match our expected payment secret.", log_bytes!(payment_hash.0));
 							fail_htlc!(claimable_htlc, payment_hash);
@@ -3720,7 +3839,11 @@ where
 								log_bytes!(payment_hash.0), payment_data.total_msat, inbound_payment.get().min_value_msat.unwrap());
 							fail_htlc!(claimable_htlc, payment_hash);
 						} else {
-							let payment_claimable_generated = check_total_value!(payment_data, inbound_payment.get().payment_preimage);
+							let purpose = events::PaymentPurpose::InvoicePayment {
+								payment_preimage: inbound_payment.get().payment_preimage,
+								payment_secret: payment_data.payment_secret,
+							};
+							let payment_claimable_generated = check_total_value!(purpose);
 							if payment_claimable_generated {
 								inbound_payment.remove_entry();
 							}
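
// The MPP bookkeeping above reduces to a running sum compared against the
// sender's claimed total, with an oversized sentinel doubling as the
// "inconsistent set" marker. In isolation (sentinel value and types are
// illustrative stand-ins, not the `msgs` constants):

const MAX_VALUE_MSAT: u64 = u64::MAX / 2; // sentinel; the real bound lives in `msgs`

struct Htlc { sender_intended_value: u64, total_msat: u64 }

/// Ok(true): payment now complete. Ok(false): still waiting on more HTLCs.
/// Err(()): the set is inconsistent, or was already claimable without this HTLC.
fn mpp_status(prev_htlcs: &[Htlc], new_htlc: &Htlc) -> Result<bool, ()> {
	let mut total_value = new_htlc.sender_intended_value;
	for htlc in prev_htlcs {
		total_value = total_value.saturating_add(htlc.sender_intended_value);
		// Every HTLC in one MPP must claim the same total; poison the sum if not.
		if htlc.total_msat != new_htlc.total_msat { total_value = MAX_VALUE_MSAT; }
		if total_value >= MAX_VALUE_MSAT { break; }
	}
	if total_value >= MAX_VALUE_MSAT {
		Err(()) // inconsistent totals across the set
	} else if total_value - new_htlc.sender_intended_value >= new_htlc.total_msat {
		Err(()) // already claimable before this HTLC arrived
	} else {
		Ok(total_value >= new_htlc.total_msat)
	}
}
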
@@ -3760,52 +3883,80 @@ where
 		events.append(&mut new_events);
 	}

-	/// Free the background events, generally called from timer_tick_occurred.
-	///
-	/// Exposed for testing to allow us to process events quickly without generating accidental
-	/// BroadcastChannelUpdate events in timer_tick_occurred.
+	/// Free the background events, generally called from [`PersistenceNotifierGuard`] constructors.
 	///
 	/// Expects the caller to have a total_consistency_lock read lock.
-	fn process_background_events(&self) -> bool {
+	fn process_background_events(&self) -> NotifyOption {
+		debug_assert_ne!(self.total_consistency_lock.held_by_thread(), LockHeldState::NotHeldByThread);
+
+		#[cfg(debug_assertions)]
+		self.background_events_processed_since_startup.store(true, Ordering::Release);
+
 		let mut background_events = Vec::new();
 		mem::swap(&mut *self.pending_background_events.lock().unwrap(), &mut background_events);
 		if background_events.is_empty() {
-			return false;
+			return NotifyOption::SkipPersist;
 		}

 		for event in background_events.drain(..) {
 			match event {
-				BackgroundEvent::MonitorUpdateRegeneratedOnStartup((funding_txo, update)) => {
+				BackgroundEvent::ClosingMonitorUpdateRegeneratedOnStartup((funding_txo, update)) => {
 					// The channel has already been closed, so no use bothering to care about the
 					// monitor updating completing.
 					let _ = self.chain_monitor.update_channel(funding_txo, &update);
 				},
+				BackgroundEvent::MonitorUpdateRegeneratedOnStartup { counterparty_node_id, funding_txo, update } => {
+					let update_res = self.chain_monitor.update_channel(funding_txo, &update);
+
+					let res = {
+						let per_peer_state = self.per_peer_state.read().unwrap();
+						if let Some(peer_state_mutex) = per_peer_state.get(&counterparty_node_id) {
+							let mut peer_state_lock = peer_state_mutex.lock().unwrap();
+							let peer_state = &mut *peer_state_lock;
+							match peer_state.channel_by_id.entry(funding_txo.to_channel_id()) {
+								hash_map::Entry::Occupied(mut chan) => {
+									handle_new_monitor_update!(self, update_res, update.update_id, peer_state_lock, peer_state, per_peer_state, chan)
+								},
+								hash_map::Entry::Vacant(_) => Ok(()),
+							}
+						} else { Ok(()) }
+					};
+					// TODO: If this channel has since closed, we're likely providing a payment
+					// preimage update, which we must ensure is durable! We currently don't,
+					// however, ensure that.
+					if res.is_err() {
+						log_error!(self.logger,
+							"Failed to provide ChannelMonitorUpdate to closed channel! This likely lost us a payment preimage!");
+					}
+					let _ = handle_error!(self, res, counterparty_node_id);
+				},
 			}
 		}
-		true
+		NotifyOption::DoPersist
 	}

 	#[cfg(any(test, feature = "_test_utils"))]
 	/// Process background events, for functional testing
 	pub fn test_process_background_events(&self) {
-		self.process_background_events();
+		let _lck = self.total_consistency_lock.read().unwrap();
+		let _ = self.process_background_events();
 	}

 	fn update_channel_fee(&self, chan_id: &[u8; 32], chan: &mut Channel<<SP::Target as SignerProvider>::Signer>, new_feerate: u32) -> NotifyOption {
-		if !chan.is_outbound() { return NotifyOption::SkipPersist; }
+		if !chan.context.is_outbound() { return NotifyOption::SkipPersist; }
 		// If the feerate has decreased by less than half, don't bother
-		if new_feerate <= chan.get_feerate_sat_per_1000_weight() && new_feerate * 2 > chan.get_feerate_sat_per_1000_weight() {
+		if new_feerate <= chan.context.get_feerate_sat_per_1000_weight() && new_feerate * 2 > chan.context.get_feerate_sat_per_1000_weight() {
 			log_trace!(self.logger, "Channel {} does not qualify for a feerate change from {} to {}.",
-				log_bytes!(chan_id[..]), chan.get_feerate_sat_per_1000_weight(), new_feerate);
+				log_bytes!(chan_id[..]), chan.context.get_feerate_sat_per_1000_weight(), new_feerate);
 			return NotifyOption::SkipPersist;
 		}
-		if !chan.is_live() {
+		if !chan.context.is_live() {
 			log_trace!(self.logger, "Channel {} does not qualify for a feerate change from {} to {} as it cannot currently be updated (probably the peer is disconnected).",
-				log_bytes!(chan_id[..]), chan.get_feerate_sat_per_1000_weight(), new_feerate);
+				log_bytes!(chan_id[..]), chan.context.get_feerate_sat_per_1000_weight(), new_feerate);
 			return NotifyOption::SkipPersist;
 		}
 		log_trace!(self.logger, "Channel {} qualifies for a feerate change from {} to {}.",
-			log_bytes!(chan_id[..]), chan.get_feerate_sat_per_1000_weight(), new_feerate);
+			log_bytes!(chan_id[..]), chan.context.get_feerate_sat_per_1000_weight(), new_feerate);

 		chan.queue_update_fee(new_feerate, &self.logger);
 		NotifyOption::DoPersist
 	}
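
// The "decreased by less than half" guard in `update_channel_fee` is plain
// hysteresis: always chase feerate increases, but only chase a decrease once
// it amounts to at least a halving, avoiding commitment-transaction churn on
// small fee moves. As a free function:

fn feerate_update_qualifies(current_sat_per_kw: u32, new_sat_per_kw: u32) -> bool {
	!(new_sat_per_kw <= current_sat_per_kw && new_sat_per_kw * 2 > current_sat_per_kw)
}

#[cfg(test)]
mod feerate_hysteresis_sketch {
	use super::feerate_update_qualifies;
	#[test]
	fn follows_increases_and_halvings_only() {
		assert!(feerate_update_qualifies(1000, 1500)); // increase: update
		assert!(!feerate_update_qualifies(1000, 800)); // shallow decrease: skip
		assert!(feerate_update_qualifies(1000, 500));  // halved or more: update
	}
}
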
@@ -3818,7 +3969,7 @@ where
 	/// it wants to detect). Thus, we have a variant exposed here for its benefit.
 	pub fn maybe_update_chan_fees(&self) {
 		PersistenceNotifierGuard::optionally_notify(&self.total_consistency_lock, &self.persistence_notifier, || {
-			let mut should_persist = NotifyOption::SkipPersist;
+			let mut should_persist = self.process_background_events();

 			let new_feerate = self.fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::Normal);

@@ -3854,8 +4005,7 @@ where
 	/// [`ChannelConfig`]: crate::util::config::ChannelConfig
 	pub fn timer_tick_occurred(&self) {
 		PersistenceNotifierGuard::optionally_notify(&self.total_consistency_lock, &self.persistence_notifier, || {
-			let mut should_persist = NotifyOption::SkipPersist;
-			if self.process_background_events() { should_persist = NotifyOption::DoPersist; }
+			let mut should_persist = self.process_background_events();

 			let new_feerate = self.fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::Normal);

@@ -3880,13 +4030,13 @@ where
 					}

 					match chan.channel_update_status() {
-						ChannelUpdateStatus::Enabled if !chan.is_live() => chan.set_channel_update_status(ChannelUpdateStatus::DisabledStaged(0)),
-						ChannelUpdateStatus::Disabled if chan.is_live() => chan.set_channel_update_status(ChannelUpdateStatus::EnabledStaged(0)),
-						ChannelUpdateStatus::DisabledStaged(_) if chan.is_live()
+						ChannelUpdateStatus::Enabled if !chan.context.is_live() => chan.set_channel_update_status(ChannelUpdateStatus::DisabledStaged(0)),
+						ChannelUpdateStatus::Disabled if chan.context.is_live() => chan.set_channel_update_status(ChannelUpdateStatus::EnabledStaged(0)),
+						ChannelUpdateStatus::DisabledStaged(_) if chan.context.is_live()
 							=> chan.set_channel_update_status(ChannelUpdateStatus::Enabled),
-						ChannelUpdateStatus::EnabledStaged(_) if !chan.is_live()
+						ChannelUpdateStatus::EnabledStaged(_) if !chan.context.is_live()
 							=> chan.set_channel_update_status(ChannelUpdateStatus::Disabled),
-						ChannelUpdateStatus::DisabledStaged(mut n) if !chan.is_live() => {
+						ChannelUpdateStatus::DisabledStaged(mut n) if !chan.context.is_live() => {
 							n += 1;
 							if n >= DISABLE_GOSSIP_TICKS {
 								chan.set_channel_update_status(ChannelUpdateStatus::Disabled);
@@ -3900,7 +4050,7 @@ where
 								chan.set_channel_update_status(ChannelUpdateStatus::DisabledStaged(n));
 							}
 						},
-						ChannelUpdateStatus::EnabledStaged(mut n) if chan.is_live() => {
+						ChannelUpdateStatus::EnabledStaged(mut n) if chan.context.is_live() => {
 							n += 1;
 							if n >= ENABLE_GOSSIP_TICKS {
 								chan.set_channel_update_status(ChannelUpdateStatus::Enabled);
@@ -3917,7 +4067,21 @@ where
 						_ => {},
 					}

-					chan.maybe_expire_prev_config();
+					chan.context.maybe_expire_prev_config();
+
+					if chan.should_disconnect_peer_awaiting_response() {
+						log_debug!(self.logger, "Disconnecting peer {} due to not making any progress on channel {}",
+							counterparty_node_id, log_bytes!(*chan_id));
+						pending_msg_events.push(MessageSendEvent::HandleError {
+							node_id: counterparty_node_id,
+							action: msgs::ErrorAction::DisconnectPeerWithWarning {
+								msg: msgs::WarningMessage {
+									channel_id: *chan_id,
+									data: "Disconnecting due to timeout awaiting response".to_owned(),
+								},
+							},
+						});
+					}

 					true
 				});
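
// The EnabledStaged/DisabledStaged arms above implement a tick-counted
// debounce so a single missed heartbeat doesn't flip the channel's gossiped
// state back and forth. Stripped of channel state (the tick thresholds here
// are illustrative, not the crate's constants):

const DISABLE_GOSSIP_TICKS: u8 = 10;
const ENABLE_GOSSIP_TICKS: u8 = 5;

#[derive(Clone, Copy, PartialEq, Debug)]
enum UpdateStatus { Enabled, EnabledStaged(u8), Disabled, DisabledStaged(u8) }

fn on_timer_tick(status: UpdateStatus, is_live: bool) -> UpdateStatus {
	use UpdateStatus::*;
	match status {
		Enabled if !is_live => DisabledStaged(0),
		Disabled if is_live => EnabledStaged(0),
		// Liveness flipping back mid-stage cancels the pending transition.
		DisabledStaged(_) if is_live => Enabled,
		EnabledStaged(_) if !is_live => Disabled,
		DisabledStaged(n) if !is_live =>
			if n + 1 >= DISABLE_GOSSIP_TICKS { Disabled } else { DisabledStaged(n + 1) },
		EnabledStaged(n) if is_live =>
			if n + 1 >= ENABLE_GOSSIP_TICKS { Enabled } else { EnabledStaged(n + 1) },
		other => other,
	}
}
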
@@ -4027,7 +4191,7 @@ where
 	///
 	/// See [`FailureCode`] for valid failure codes.
 	pub fn fail_htlc_backwards_with_reason(&self, payment_hash: &PaymentHash, failure_code: FailureCode) {
-		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
+		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);

 		let removed_source = self.claimable_payments.lock().unwrap().claimable_payments.remove(payment_hash);
 		if let Some(payment) = removed_source {
@@ -4063,10 +4227,10 @@ where
 		// guess somewhat. If its a public channel, we figure best to just use the real SCID (as
 		// we're not leaking that we have a channel with the counterparty), otherwise we try to use
 		// an inbound SCID alias before the real SCID.
-		let scid_pref = if chan.should_announce() {
-			chan.get_short_channel_id().or(chan.latest_inbound_scid_alias())
+		let scid_pref = if chan.context.should_announce() {
+			chan.context.get_short_channel_id().or(chan.context.latest_inbound_scid_alias())
 		} else {
-			chan.latest_inbound_scid_alias().or(chan.get_short_channel_id())
+			chan.context.latest_inbound_scid_alias().or(chan.context.get_short_channel_id())
 		};
 		if let Some(scid) = scid_pref {
 			self.get_htlc_temp_fail_err_and_data(desired_err_code, scid, chan)
@@ -4204,7 +4368,7 @@ where
 	pub fn claim_funds(&self, payment_preimage: PaymentPreimage) {
 		let payment_hash = PaymentHash(Sha256::hash(&payment_preimage.0).into_inner());

-		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
+		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);

 		let mut sources = {
 			let mut claimable_payments = self.claimable_payments.lock().unwrap();
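
// The scid_pref selection above is a one-line privacy policy: a public channel
// may as well use its real SCID, while a private channel prefers an inbound
// alias so failure messages never reference the real funding output. Standalone:

fn scid_for_failure_msg(
	is_public: bool, real_scid: Option<u64>, inbound_alias: Option<u64>,
) -> Option<u64> {
	if is_public { real_scid.or(inbound_alias) } else { inbound_alias.or(real_scid) }
}
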
@@ -4259,18 +4423,6 @@ where
 						break;
 					}
 					expected_amt_msat = htlc.total_value_received;
-
-					if let OnionPayload::Spontaneous(_) = &htlc.onion_payload {
-						// We don't currently support MPP for spontaneous payments, so just check
-						// that there's one payment here and move on.
-						if sources.len() != 1 {
-							log_error!(self.logger, "Somehow ended up with an MPP spontaneous payment - this should not be reachable!");
-							debug_assert!(false);
-							valid_mpp = false;
-							break;
-						}
-					}
-
 					claimable_amt_msat += htlc.value;
 				}
 				mem::drop(per_peer_state);
@@ -4340,7 +4492,7 @@ where
 			let mut peer_state_lock = peer_state_opt.unwrap();
 			let peer_state = &mut *peer_state_lock;
 			if let hash_map::Entry::Occupied(mut chan) = peer_state.channel_by_id.entry(chan_id) {
-				let counterparty_node_id = chan.get().get_counterparty_node_id();
+				let counterparty_node_id = chan.get().context.get_counterparty_node_id();
 				let fulfill_res = chan.get_mut().get_update_fulfill_htlc_and_commit(prev_hop.htlc_id, payment_preimage, &self.logger);

 				if let UpdateFulfillCommitFetch::NewClaim { htlc_value_msat, monitor_update } = fulfill_res {
@@ -4409,16 +4561,16 @@ where
 					Some(claimed_htlc_value - forwarded_htlc_value)
 				} else { None };

-				let prev_channel_id = Some(prev_outpoint.to_channel_id());
-				let next_channel_id = Some(next_channel_id);
-
-				Some(MonitorUpdateCompletionAction::EmitEvent { event: events::Event::PaymentForwarded {
-					fee_earned_msat,
-					claim_from_onchain_tx: from_onchain,
-					prev_channel_id,
-					next_channel_id,
-					outbound_amount_forwarded_msat: forwarded_htlc_value_msat,
-				}})
+				Some(MonitorUpdateCompletionAction::EmitEventAndFreeOtherChannel {
+					event: events::Event::PaymentForwarded {
+						fee_earned_msat,
+						claim_from_onchain_tx: from_onchain,
+						prev_channel_id: Some(prev_outpoint.to_channel_id()),
+						next_channel_id: Some(next_channel_id),
+						outbound_amount_forwarded_msat: forwarded_htlc_value_msat,
+					},
+					downstream_counterparty_and_funding_outpoint: None,
+				})
 			} else { None }
 		});
 		if let Err((pk, err)) = res {
@@ -4445,8 +4597,13 @@ where
 					}, None));
 				}
 			},
-			MonitorUpdateCompletionAction::EmitEvent { event } => {
+			MonitorUpdateCompletionAction::EmitEventAndFreeOtherChannel {
+				event, downstream_counterparty_and_funding_outpoint
+			} => {
 				self.pending_events.lock().unwrap().push_back((event, None));
+				if let Some((node_id, funding_outpoint, blocker)) = downstream_counterparty_and_funding_outpoint {
+					self.handle_monitor_update_release(node_id, funding_outpoint, Some(blocker));
+				}
 			},
 		}
 	}
@@ -4461,7 +4618,7 @@ where
 		channel_ready: Option<msgs::ChannelReady>, announcement_sigs: Option<msgs::AnnouncementSignatures>)
 	-> Option<(u64, OutPoint, u128, Vec<(PendingHTLCInfo, u64)>)> {
 		log_trace!(self.logger, "Handling channel resumption for channel {} with {} RAA, {} commitment update, {} pending forwards, {}broadcasting funding, {} channel ready, {} announcement",
-			log_bytes!(channel.channel_id()),
+			log_bytes!(channel.context.channel_id()),
 			if raa.is_some() { "an" } else { "no" },
 			if commitment_update.is_some() { "a" } else { "no" },
 			pending_forwards.len(),
 			if funding_broadcastable.is_some() { "" } else { "not " },
@@ -4470,10 +4627,10 @@
 		let mut htlc_forwards = None;

-		let counterparty_node_id = channel.get_counterparty_node_id();
+		let counterparty_node_id = channel.context.get_counterparty_node_id();
 		if !pending_forwards.is_empty() {
-			htlc_forwards = Some((channel.get_short_channel_id().unwrap_or(channel.outbound_scid_alias()),
-				channel.get_funding_txo().unwrap(), channel.get_user_id(), pending_forwards));
+			htlc_forwards = Some((channel.context.get_short_channel_id().unwrap_or(channel.context.outbound_scid_alias()),
+				channel.context.get_funding_txo().unwrap(), channel.context.get_user_id(), pending_forwards));
 		}

 		if let Some(msg) = channel_ready {
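
// In miniature, handling `EmitEventAndFreeOtherChannel` is "queue the event,
// then let whichever channel was waiting on it resume". Stand-in types below;
// `release` models `handle_monitor_update_release`:

use std::collections::VecDeque;

struct Event(&'static str);
struct Blocker(u64);

enum CompletionAction {
	PaymentClaimed { payment_hash: [u8; 32] },
	EmitEventAndFreeOtherChannel { event: Event, downstream: Option<([u8; 33], Blocker)> },
}

fn handle_completion_actions(
	actions: Vec<CompletionAction>, pending_events: &mut VecDeque<Event>,
	release: &mut dyn FnMut([u8; 33], Blocker),
) {
	for action in actions {
		match action {
			CompletionAction::PaymentClaimed { .. } => { /* surface PaymentClaimed */ }
			CompletionAction::EmitEventAndFreeOtherChannel { event, downstream } => {
				// Queue the user-visible event first...
				pending_events.push_back(event);
				// ...then unblock the downstream channel's monitor updates.
				if let Some((node_id, blocker)) = downstream {
					release(node_id, blocker);
				}
			}
		}
	}
}
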
"Broadcasting funding transaction with txid {}", tx.txid()); - self.tx_broadcaster.broadcast_transaction(&tx); + self.tx_broadcaster.broadcast_transactions(&[&tx]); } { @@ -4555,8 +4712,8 @@ where } }; log_trace!(self.logger, "ChannelMonitor updated to {}. Current highest is {}", - highest_applied_update_id, channel.get().get_latest_monitor_update_id()); - if !channel.get().is_awaiting_monitor_update() || channel.get().get_latest_monitor_update_id() != highest_applied_update_id { + highest_applied_update_id, channel.get().context.get_latest_monitor_update_id()); + if !channel.get().is_awaiting_monitor_update() || channel.get().context.get_latest_monitor_update_id() != highest_applied_update_id { return; } handle_monitor_update_completion!(self, highest_applied_update_id, peer_state_lock, peer_state, per_peer_state, channel.get_mut()); @@ -4605,7 +4762,7 @@ where } fn do_accept_inbound_channel(&self, temporary_channel_id: &[u8; 32], counterparty_node_id: &PublicKey, accept_0conf: bool, user_channel_id: u128) -> Result<(), APIError> { - let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier); + let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self); let peers_without_funded_channels = self.peers_without_funded_channels(|peer| !peer.channel_by_id.is_empty()); let per_peer_state = self.per_peer_state.read().unwrap(); @@ -4621,9 +4778,9 @@ where } if accept_0conf { channel.get_mut().set_0conf(); - } else if channel.get().get_channel_type().requires_zero_conf() { + } else if channel.get().context.get_channel_type().requires_zero_conf() { let send_msg_err_event = events::MessageSendEvent::HandleError { - node_id: channel.get().get_counterparty_node_id(), + node_id: channel.get().context.get_counterparty_node_id(), action: msgs::ErrorAction::SendErrorMessage{ msg: msgs::ErrorMessage { channel_id: temporary_channel_id.clone(), data: "No zero confirmation channels accepted".to_owned(), } } @@ -4637,7 +4794,7 @@ where // channels per-peer we can accept channels from a peer with existing ones. 
@@ -4637,7 +4794,7 @@ where
 				// channels per-peer we can accept channels from a peer with existing ones.
 				if is_only_peer_channel && peers_without_funded_channels >= MAX_UNFUNDED_CHANNEL_PEERS {
 					let send_msg_err_event = events::MessageSendEvent::HandleError {
-						node_id: channel.get().get_counterparty_node_id(),
+						node_id: channel.get().context.get_counterparty_node_id(),
 						action: msgs::ErrorAction::SendErrorMessage{
 							msg: msgs::ErrorMessage { channel_id: temporary_channel_id.clone(), data: "Have too many peers with unfunded channels, not accepting new ones".to_owned(), }
 						}
@@ -4649,7 +4806,7 @@
 			}

 			peer_state.pending_msg_events.push(events::MessageSendEvent::SendAcceptChannel {
-				node_id: channel.get().get_counterparty_node_id(),
+				node_id: channel.get().context.get_counterparty_node_id(),
 				msg: channel.get_mut().accept_inbound_channel(user_channel_id),
 			});
 		}
@@ -4688,8 +4845,8 @@
 	) -> usize {
 		let mut num_unfunded_channels = 0;
 		for (_, chan) in peer.channel_by_id.iter() {
-			if !chan.is_outbound() && chan.minimum_depth().unwrap_or(1) != 0 &&
-				chan.get_funding_tx_confirmations(best_block_height) == 0
+			if !chan.context.is_outbound() && chan.context.minimum_depth().unwrap_or(1) != 0 &&
+				chan.context.get_funding_tx_confirmations(best_block_height) == 0
 			{
 				num_unfunded_channels += 1;
 			}
@@ -4754,14 +4911,14 @@
 			},
 			Ok(res) => res
 		};
-		match peer_state.channel_by_id.entry(channel.channel_id()) {
+		match peer_state.channel_by_id.entry(channel.context.channel_id()) {
 			hash_map::Entry::Occupied(_) => {
 				self.outbound_scid_aliases.lock().unwrap().remove(&outbound_scid_alias);
 				return Err(MsgHandleErrInternal::send_err_msg_no_close("temporary_channel_id collision for the same peer!".to_owned(), msg.temporary_channel_id.clone()))
 			},
 			hash_map::Entry::Vacant(entry) => {
 				if !self.default_configuration.manually_accept_inbound_channels {
-					if channel.get_channel_type().requires_zero_conf() {
+					if channel.context.get_channel_type().requires_zero_conf() {
 						return Err(MsgHandleErrInternal::send_err_msg_no_close("No zero confirmation channels accepted".to_owned(), msg.temporary_channel_id.clone()));
 					}
 					peer_state.pending_msg_events.push(events::MessageSendEvent::SendAcceptChannel {
@@ -4775,7 +4932,7 @@ where
 						counterparty_node_id: counterparty_node_id.clone(),
 						funding_satoshis: msg.funding_satoshis,
 						push_msat: msg.push_msat,
-						channel_type: channel.get_channel_type().clone(),
+						channel_type: channel.context.get_channel_type().clone(),
 					}, None));
 				}
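
// `MAX_UNFUNDED_CHANNEL_PEERS` caps how many peers may sit with nothing but
// unfunded inbound channels, bounding the memory an attacker can pin for free.
// The counting rule above, isolated (constant and struct are stand-ins):

const MAX_UNFUNDED_CHANNEL_PEERS: usize = 50; // illustrative cap

struct ChanInfo { is_outbound: bool, minimum_depth: Option<u32>, funding_confs: u32 }

/// An inbound channel counts as "unfunded" until its funding tx confirms,
/// except 0-conf channels (minimum_depth == 0), which are usable immediately
/// and so never count against the DoS limit.
fn num_unfunded_channels(chans: &[ChanInfo]) -> usize {
	chans.iter().filter(|c|
		!c.is_outbound && c.minimum_depth.unwrap_or(1) != 0 && c.funding_confs == 0
	).count()
}
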
@@ -4798,7 +4955,7 @@
 		match peer_state.channel_by_id.entry(msg.temporary_channel_id) {
 			hash_map::Entry::Occupied(mut chan) => {
 				try_chan_entry!(self, chan.get_mut().accept_channel(&msg, &self.default_configuration.channel_handshake_limits, &peer_state.latest_features), chan);
-				(chan.get().get_value_satoshis(), chan.get().get_funding_redeemscript().to_v0_p2wsh(), chan.get().get_user_id())
+				(chan.get().context.get_value_satoshis(), chan.get().context.get_funding_redeemscript().to_v0_p2wsh(), chan.get().context.get_user_id())
 			},
 			hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.temporary_channel_id))
 		}
@@ -4839,14 +4996,14 @@ where
 				Err(MsgHandleErrInternal::send_err_msg_no_close("Already had channel with the new channel_id".to_owned(), funding_msg.channel_id))
 			},
 			hash_map::Entry::Vacant(e) => {
-				match self.id_to_peer.lock().unwrap().entry(chan.channel_id()) {
+				match self.id_to_peer.lock().unwrap().entry(chan.context.channel_id()) {
 					hash_map::Entry::Occupied(_) => {
 						return Err(MsgHandleErrInternal::send_err_msg_no_close(
 							"The funding_created message had the same funding_txid as an existing channel - funding is not possible".to_owned(),
 							funding_msg.channel_id))
 					},
 					hash_map::Entry::Vacant(i_e) => {
-						i_e.insert(chan.get_counterparty_node_id());
+						i_e.insert(chan.context.get_counterparty_node_id());
 					}
 				}
@@ -4896,7 +5053,7 @@ where
 			hash_map::Entry::Occupied(mut chan) => {
 				let monitor = try_chan_entry!(self,
 					chan.get_mut().funding_signed(&msg, best_block, &self.signer_provider, &self.logger), chan);
-				let update_res = self.chain_monitor.watch_channel(chan.get().get_funding_txo().unwrap(), monitor);
+				let update_res = self.chain_monitor.watch_channel(chan.get().context.get_funding_txo().unwrap(), monitor);
 				let mut res = handle_new_monitor_update!(self, update_res, 0, peer_state_lock, peer_state, per_peer_state, chan);
 				if let Err(MsgHandleErrInternal { ref mut shutdown_finish, .. }) = res {
 					// We weren't able to watch the channel to begin with, so no updates should be made on
@@ -4926,18 +5083,18 @@
 				let announcement_sigs_opt = try_chan_entry!(self, chan.get_mut().channel_ready(&msg, &self.node_signer,
 					self.genesis_hash.clone(), &self.default_configuration, &self.best_block.read().unwrap(), &self.logger), chan);
 				if let Some(announcement_sigs) = announcement_sigs_opt {
-					log_trace!(self.logger, "Sending announcement_signatures for channel {}", log_bytes!(chan.get().channel_id()));
+					log_trace!(self.logger, "Sending announcement_signatures for channel {}", log_bytes!(chan.get().context.channel_id()));
 					peer_state.pending_msg_events.push(events::MessageSendEvent::SendAnnouncementSignatures {
 						node_id: counterparty_node_id.clone(),
 						msg: announcement_sigs,
 					});
-				} else if chan.get().is_usable() {
+				} else if chan.get().context.is_usable() {
 					// If we're sending an announcement_signatures, we'll send the (public)
 					// channel_update after sending a channel_announcement when we receive our
 					// counterparty's announcement_signatures. Thus, we only bother to send a
 					// channel_update here if the channel is not public, i.e. we're not sending an
 					// announcement_signatures.
-					log_trace!(self.logger, "Sending private initial channel_update for our counterparty on channel {}", log_bytes!(chan.get().channel_id()));
+					log_trace!(self.logger, "Sending private initial channel_update for our counterparty on channel {}", log_bytes!(chan.get().context.channel_id()));
 					if let Ok(msg) = self.get_channel_update_for_unicast(chan.get()) {
 						peer_state.pending_msg_events.push(events::MessageSendEvent::SendChannelUpdate {
 							node_id: counterparty_node_id.clone(),
@@ -4977,7 +5134,7 @@ where
 							if chan_entry.get().sent_shutdown() { " after we initiated shutdown" } else { "" });
 					}

-					let funding_txo_opt = chan_entry.get().get_funding_txo();
+					let funding_txo_opt = chan_entry.get().context.get_funding_txo();
 					let (shutdown, monitor_update_opt, htlcs) = try_chan_entry!(self,
 						chan_entry.get_mut().shutdown(&self.signer_provider, &peer_state.latest_features, &msg), chan_entry);
 					dropped_htlcs = htlcs;
@@ -5045,7 +5202,7 @@ where
 		};
 		if let Some(broadcast_tx) = tx {
 			log_info!(self.logger, "Broadcasting {}", log_tx!(broadcast_tx));
-			self.tx_broadcaster.broadcast_transaction(&broadcast_tx);
+			self.tx_broadcaster.broadcast_transactions(&[&broadcast_tx]);
 		}
 		if let Some(chan) = chan_option {
 			if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
@@ -5183,7 +5340,7 @@ where
 		let peer_state = &mut *peer_state_lock;
 		match peer_state.channel_by_id.entry(msg.channel_id) {
 			hash_map::Entry::Occupied(mut chan) => {
-				let funding_txo = chan.get().get_funding_txo();
+				let funding_txo = chan.get().context.get_funding_txo();
 				let monitor_update_opt = try_chan_entry!(self, chan.get_mut().commitment_signed(&msg, &self.logger), chan);
 				if let Some(monitor_update) = monitor_update_opt {
 					let update_res = self.chain_monitor.update_channel(funding_txo.unwrap(), monitor_update);
@@ -5293,6 +5450,24 @@ where
 		}
 	}

+	/// Checks whether [`ChannelMonitorUpdate`]s generated by the receipt of a remote
+	/// [`msgs::RevokeAndACK`] should be held for the given channel until some other event
+	/// completes. Note that this needs to happen in the same [`PeerState`] mutex as any release of
+	/// the [`ChannelMonitorUpdate`] in question.
+	fn raa_monitor_updates_held(&self,
+		actions_blocking_raa_monitor_updates: &BTreeMap<[u8; 32], Vec<RAAMonitorUpdateBlockingAction>>,
+		channel_funding_outpoint: OutPoint, counterparty_node_id: PublicKey
+	) -> bool {
+		actions_blocking_raa_monitor_updates
+			.get(&channel_funding_outpoint.to_channel_id()).map(|v| !v.is_empty()).unwrap_or(false)
+		|| self.pending_events.lock().unwrap().iter().any(|(_, action)| {
+			action == &Some(EventCompletionAction::ReleaseRAAChannelMonitorUpdate {
+				channel_funding_outpoint,
+				counterparty_node_id,
+			})
+		})
+	}
+
 	fn internal_revoke_and_ack(&self, counterparty_node_id: &PublicKey, msg: &msgs::RevokeAndACK) -> Result<(), MsgHandleErrInternal> {
 		let (htlcs_to_fail, res) = {
 			let per_peer_state = self.per_peer_state.read().unwrap();
@@ -5304,7 +5479,7 @@ where
 			let peer_state = &mut *peer_state_lock;
 			match peer_state.channel_by_id.entry(msg.channel_id) {
 				hash_map::Entry::Occupied(mut chan) => {
-					let funding_txo = chan.get().get_funding_txo();
+					let funding_txo = chan.get().context.get_funding_txo();
 					let (htlcs_to_fail, monitor_update_opt) = try_chan_entry!(self, chan.get_mut().revoke_and_ack(&msg, &self.logger), chan);
 					let res = if let Some(monitor_update) = monitor_update_opt {
 						let update_res = self.chain_monitor.update_channel(funding_txo.unwrap(), monitor_update);
@@ -5350,7 +5525,7 @@ where
 			let peer_state = &mut *peer_state_lock;
 			match peer_state.channel_by_id.entry(msg.channel_id) {
 				hash_map::Entry::Occupied(mut chan) => {
-					if !chan.get().is_usable() {
+					if !chan.get().context.is_usable() {
 						return Err(MsgHandleErrInternal::from_no_close(LightningError{err: "Got an announcement_signatures before we were ready for it".to_owned(), action: msgs::ErrorAction::IgnoreError}));
 					}

@@ -5387,8 +5562,8 @@ where
 			let peer_state = &mut *peer_state_lock;
 			match peer_state.channel_by_id.entry(chan_id) {
 				hash_map::Entry::Occupied(mut chan) => {
-					if chan.get().get_counterparty_node_id() != *counterparty_node_id {
-						if chan.get().should_announce() {
+					if chan.get().context.get_counterparty_node_id() != *counterparty_node_id {
+						if chan.get().context.should_announce() {
 							// If the announcement is about a channel of ours which is public, some
 							// other peer may simply be forwarding all its gossip to us. Don't provide
 							// a scary-looking error message and return Ok instead.
@@ -5396,7 +5571,7 @@ where
 						}
 						return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a channel_update for a channel from the wrong node - it shouldn't know about our private channels!".to_owned(), chan_id));
 					}
-					let were_node_one = self.get_our_node_id().serialize()[..] < chan.get().get_counterparty_node_id().serialize()[..];
+					let were_node_one = self.get_our_node_id().serialize()[..] < chan.get().context.get_counterparty_node_id().serialize()[..];
 					let msg_from_node_one = msg.contents.flags & 1 == 0;
 					if were_node_one == msg_from_node_one {
 						return Ok(NotifyOption::SkipPersist);
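
// `raa_monitor_updates_held` answers one question: is anything (a registered
// blocker, or a still-unhandled event) holding this channel's RAA-driven
// monitor updates? A stripped-down version with stand-in types:

use std::collections::BTreeMap;

type ChannelId = [u8; 32];
#[derive(PartialEq)] struct Blocker(u64);
#[derive(PartialEq)] struct ReleaseOnCompletion { channel_id: ChannelId }

fn raa_monitor_updates_held(
	blockers: &BTreeMap<ChannelId, Vec<Blocker>>,
	pending_event_actions: &[Option<ReleaseOnCompletion>],
	channel_id: ChannelId,
) -> bool {
	// Held if an explicit blocker is registered for this channel...
	blockers.get(&channel_id).map(|v| !v.is_empty()).unwrap_or(false)
		// ...or a queued event will release it once the user handles the event.
		|| pending_event_actions.iter()
			.any(|action| action == &Some(ReleaseOnCompletion { channel_id }))
}
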
@@ -5437,18 +5612,18 @@ where
 							node_id: counterparty_node_id.clone(),
 							msg,
 						});
-					} else if chan.get().is_usable() {
+					} else if chan.get().context.is_usable() {
 						// If the channel is in a usable state (ie the channel is not being shut
 						// down), send a unicast channel_update to our counterparty to make sure
 						// they have the latest channel parameters.
 						if let Ok(msg) = self.get_channel_update_for_unicast(chan.get()) {
 							channel_update = Some(events::MessageSendEvent::SendChannelUpdate {
-								node_id: chan.get().get_counterparty_node_id(),
+								node_id: chan.get().context.get_counterparty_node_id(),
 								msg,
 							});
 						}
 					}
-					let need_lnd_workaround = chan.get_mut().workaround_lnd_bug_4006.take();
+					let need_lnd_workaround = chan.get_mut().context.workaround_lnd_bug_4006.take();
 					htlc_forwards = self.handle_channel_resumption(
 						&mut peer_state.pending_msg_events, chan.get_mut(), responses.raa, responses.commitment_update, responses.order,
 						Vec::new(), None, responses.channel_ready, responses.announcement_sigs);
@@ -5524,9 +5699,9 @@ where
 				};
 				self.issue_channel_close_events(&chan, reason);
 				pending_msg_events.push(events::MessageSendEvent::HandleError {
-					node_id: chan.get_counterparty_node_id(),
+					node_id: chan.context.get_counterparty_node_id(),
 					action: msgs::ErrorAction::SendErrorMessage {
-						msg: msgs::ErrorMessage { channel_id: chan.channel_id(), data: "Channel force-closed".to_owned() }
+						msg: msgs::ErrorMessage { channel_id: chan.context.channel_id(), data: "Channel force-closed".to_owned() }
 					},
 				});
 			}
@@ -5552,13 +5727,8 @@ where
 	/// update events as a separate process method here.
 	#[cfg(fuzzing)]
 	pub fn process_monitor_events(&self) {
-		PersistenceNotifierGuard::optionally_notify(&self.total_consistency_lock, &self.persistence_notifier, || {
-			if self.process_pending_monitor_events() {
-				NotifyOption::DoPersist
-			} else {
-				NotifyOption::SkipPersist
-			}
-		});
+		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
+		self.process_pending_monitor_events();
 	}

 	/// Check the holding cell in each channel and free any pending HTLCs in them if possible.
@@ -5580,8 +5750,8 @@ where
 			let mut peer_state_lock = peer_state_mutex.lock().unwrap();
 			let peer_state: &mut PeerState<_> = &mut *peer_state_lock;
 			for (channel_id, chan) in peer_state.channel_by_id.iter_mut() {
-				let counterparty_node_id = chan.get_counterparty_node_id();
-				let funding_txo = chan.get_funding_txo();
+				let counterparty_node_id = chan.context.get_counterparty_node_id();
+				let funding_txo = chan.context.get_funding_txo();
 				let (monitor_opt, holding_cell_failed_htlcs) =
 					chan.maybe_free_holding_cell_htlcs(&self.logger);
 				if !holding_cell_failed_htlcs.is_empty() {
@@ -5640,7 +5810,7 @@ where
 						if let Some(msg) = msg_opt {
 							has_update = true;
 							pending_msg_events.push(events::MessageSendEvent::SendClosingSigned {
-								node_id: chan.get_counterparty_node_id(), msg,
+								node_id: chan.context.get_counterparty_node_id(), msg,
 							});
 						}
 						if let Some(tx) = tx_opt {
@@ -5655,7 +5825,7 @@ where
 							self.issue_channel_close_events(chan, ClosureReason::CooperativeClosure);

 							log_info!(self.logger, "Broadcasting {}", log_tx!(tx));
-							self.tx_broadcaster.broadcast_transaction(&tx);
+							self.tx_broadcaster.broadcast_transactions(&[&tx]);
 							update_maps_on_chan_removal!(self, chan);
 							false
 						} else { true }
@@ -5663,7 +5833,7 @@ where
 					Err(e) => {
 						has_update = true;
 						let (close_channel, res) = convert_chan_err!(self, e, chan, channel_id);
-						handle_errors.push((chan.get_counterparty_node_id(), Err(res)));
+						handle_errors.push((chan.context.get_counterparty_node_id(), Err(res)));
 						!close_channel
 					}
 				}
@@ -5690,12 +5860,15 @@ where
 			// Channel::force_shutdown tries to make us do) as we may still be in initialization,
 			// so we track the update internally and handle it when the user next calls
 			// timer_tick_occurred, guaranteeing we're running normally.
-			if let Some((funding_txo, update)) = failure.0.take() {
+			if let Some((counterparty_node_id, funding_txo, update)) = failure.0.take() {
 				assert_eq!(update.updates.len(), 1);
 				if let ChannelMonitorUpdateStep::ChannelForceClosed { should_broadcast } = update.updates[0] {
 					assert!(should_broadcast);
 				} else { unreachable!(); }
-				self.pending_background_events.lock().unwrap().push(BackgroundEvent::MonitorUpdateRegeneratedOnStartup((funding_txo, update)));
+				self.pending_background_events.lock().unwrap().push(
+					BackgroundEvent::MonitorUpdateRegeneratedOnStartup {
+						counterparty_node_id, funding_txo, update
+					});
 			}
 			self.finish_force_close_channel(failure);
 		}
@@ -5710,7 +5883,7 @@ where

 		let payment_secret = PaymentSecret(self.entropy_source.get_secure_random_bytes());

-		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
+		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
 		let mut payment_secrets = self.pending_inbound_payments.lock().unwrap();
 		match payment_secrets.entry(payment_hash) {
 			hash_map::Entry::Vacant(e) => {
@@ -5959,27 +6132,39 @@ where
 		self.pending_outbound_payments.clear_pending_payments()
 	}

-	fn handle_monitor_update_release(&self, counterparty_node_id: PublicKey, channel_funding_outpoint: OutPoint) {
+	/// When something which was blocking a channel from updating its [`ChannelMonitor`] (e.g. an
+	/// [`Event`] being handled) completes, this should be called to restore the channel to normal
+	/// operation. It will double-check that nothing *else* is also blocking the same channel from
+	/// making progress and then let any blocked [`ChannelMonitorUpdate`]s fly.
+	fn handle_monitor_update_release(&self, counterparty_node_id: PublicKey, channel_funding_outpoint: OutPoint, mut completed_blocker: Option<RAAMonitorUpdateBlockingAction>) {
 		let mut errors = Vec::new();
 		loop {
 			let per_peer_state = self.per_peer_state.read().unwrap();
 			if let Some(peer_state_mtx) = per_peer_state.get(&counterparty_node_id) {
 				let mut peer_state_lck = peer_state_mtx.lock().unwrap();
 				let peer_state = &mut *peer_state_lck;
-				if self.pending_events.lock().unwrap().iter()
-					.any(|(_ev, action_opt)| action_opt == &Some(EventCompletionAction::ReleaseRAAChannelMonitorUpdate {
-						channel_funding_outpoint, counterparty_node_id
-					}))
-				{
-					// Check that, while holding the peer lock, we don't have another event
-					// blocking any monitor updates for this channel. If we do, let those
-					// events be the ones that ultimately release the monitor update(s).
-					log_trace!(self.logger, "Delaying monitor unlock for channel {} as another event is pending",
+
+				if let Some(blocker) = completed_blocker.take() {
+					// Only do this on the first iteration of the loop.
+					if let Some(blockers) = peer_state.actions_blocking_raa_monitor_updates
+						.get_mut(&channel_funding_outpoint.to_channel_id())
+					{
+						blockers.retain(|iter| iter != &blocker);
+					}
+				}
+
+				if self.raa_monitor_updates_held(&peer_state.actions_blocking_raa_monitor_updates,
+					channel_funding_outpoint, counterparty_node_id) {
+					// Check that, while holding the peer lock, we don't have anything else
+					// blocking monitor updates for this channel. If we do, release the monitor
+					// update(s) when those blockers complete.
+					log_trace!(self.logger, "Delaying monitor unlock for channel {} as another channel's mon update needs to complete first",
 						log_bytes!(&channel_funding_outpoint.to_channel_id()[..]));
 					break;
 				}
+
 				if let hash_map::Entry::Occupied(mut chan) = peer_state.channel_by_id.entry(channel_funding_outpoint.to_channel_id()) {
-					debug_assert_eq!(chan.get().get_funding_txo().unwrap(), channel_funding_outpoint);
+					debug_assert_eq!(chan.get().context.get_funding_txo().unwrap(), channel_funding_outpoint);
 					if let Some((monitor_update, further_update_exists)) = chan.get_mut().unblock_next_blocked_monitor_update() {
 						log_debug!(self.logger, "Unlocking monitor updating for channel {} and updating monitor",
 							log_bytes!(&channel_funding_outpoint.to_channel_id()[..]));
@@ -6019,7 +6204,7 @@
 				EventCompletionAction::ReleaseRAAChannelMonitorUpdate {
 					channel_funding_outpoint, counterparty_node_id
 				} => {
-					self.handle_monitor_update_release(counterparty_node_id, channel_funding_outpoint);
+					self.handle_monitor_update_release(counterparty_node_id, channel_funding_outpoint, None);
 				}
 			}
 		}
@@ -6064,7 +6249,7 @@ where
 	fn get_and_clear_pending_msg_events(&self) -> Vec<MessageSendEvent> {
 		let events = RefCell::new(Vec::new());
 		PersistenceNotifierGuard::optionally_notify(&self.total_consistency_lock, &self.persistence_notifier, || {
-			let mut result = NotifyOption::SkipPersist;
+			let mut result = self.process_background_events();

 			// TODO: This behavior should be documented. It's unintuitive that we query
 			// ChannelMonitors when clearing other events.
@@ -6145,7 +6330,8 @@ where
 	}

 	fn block_disconnected(&self, header: &BlockHeader, height: u32) {
-		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
+		let _persistence_guard = PersistenceNotifierGuard::optionally_notify(&self.total_consistency_lock,
+			&self.persistence_notifier, || -> NotifyOption { NotifyOption::DoPersist });
 		let new_height = height - 1;
 		{
 			let mut best_block = self.best_block.write().unwrap();
@@ -6179,7 +6365,8 @@
 		let block_hash = header.block_hash();
 		log_trace!(self.logger, "{} transactions included in block {} at height {} provided", txdata.len(), block_hash, height);

-		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
+		let _persistence_guard = PersistenceNotifierGuard::optionally_notify(&self.total_consistency_lock,
+			&self.persistence_notifier, || -> NotifyOption { NotifyOption::DoPersist });
 		self.do_chain_event(Some(height), |channel| channel.transactions_confirmed(&block_hash, height, txdata, self.genesis_hash.clone(), &self.node_signer, &self.default_configuration, &self.logger)
 			.map(|(a, b)| (a, Vec::new(), b)));
@@ -6198,8 +6385,8 @@
 		let block_hash = header.block_hash();
 		log_trace!(self.logger, "New best block: {} at height {}", block_hash, height);

-		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
-
+		let _persistence_guard = PersistenceNotifierGuard::optionally_notify(&self.total_consistency_lock,
+			&self.persistence_notifier, || -> NotifyOption { NotifyOption::DoPersist });
 		*self.best_block.write().unwrap() = BestBlock::new(block_hash, height);

 		self.do_chain_event(Some(height), |channel| channel.best_block_updated(height, header.time, self.genesis_hash.clone(), &self.node_signer, &self.default_configuration, &self.logger));
@@ -6233,7 +6420,7 @@
 			let mut peer_state_lock = peer_state_mutex.lock().unwrap();
 			let peer_state = &mut *peer_state_lock;
 			for chan in peer_state.channel_by_id.values() {
-				if let (Some(funding_txo), Some(block_hash)) = (chan.get_funding_txo(), chan.get_funding_tx_confirmed_in()) {
+				if let (Some(funding_txo), Some(block_hash)) = (chan.context.get_funding_txo(), chan.context.get_funding_tx_confirmed_in()) {
 					res.push((funding_txo.txid, Some(block_hash)));
 				}
 			}
@@ -6242,9 +6429,10 @@ where
 	}

 	fn transaction_unconfirmed(&self, txid: &Txid) {
-		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
+		let _persistence_guard = PersistenceNotifierGuard::optionally_notify(&self.total_consistency_lock,
+			&self.persistence_notifier, || -> NotifyOption { NotifyOption::DoPersist });
 		self.do_chain_event(None, |channel| {
-			if let Some(funding_txo) = channel.get_funding_txo() {
+			if let Some(funding_txo) = channel.context.get_funding_txo() {
 				if funding_txo.txid == *txid {
 					channel.funding_transaction_unconfirmed(&self.logger).map(|()| (None, Vec::new(), None))
 				} else { Ok((None, Vec::new(), None)) }
@@ -6287,20 +6475,20 @@ where
 					for (source, payment_hash) in timed_out_pending_htlcs.drain(..) {
 						let (failure_code, data) = self.get_htlc_inbound_temp_fail_err_and_data(0x1000|14 /* expiry_too_soon */, &channel);
 						timed_out_htlcs.push((source, payment_hash, HTLCFailReason::reason(failure_code, data),
-							HTLCDestination::NextHopChannel { node_id: Some(channel.get_counterparty_node_id()), channel_id: channel.channel_id() }));
+							HTLCDestination::NextHopChannel { node_id: Some(channel.context.get_counterparty_node_id()), channel_id: channel.context.channel_id() }));
 					}
 					if let Some(channel_ready) = channel_ready_opt {
 						send_channel_ready!(self, pending_msg_events, channel, channel_ready);
-						if channel.is_usable() {
-							log_trace!(self.logger, "Sending channel_ready with private initial channel_update for our counterparty on channel {}", log_bytes!(channel.channel_id()));
+						if channel.context.is_usable() {
+							log_trace!(self.logger, "Sending channel_ready with private initial channel_update for our counterparty on channel {}", log_bytes!(channel.context.channel_id()));
 							if let Ok(msg) = self.get_channel_update_for_unicast(channel) {
 								pending_msg_events.push(events::MessageSendEvent::SendChannelUpdate {
-									node_id: channel.get_counterparty_node_id(),
+									node_id: channel.context.get_counterparty_node_id(),
 									msg,
 								});
 							}
 						} else {
-							log_trace!(self.logger, "Sending channel_ready WITHOUT channel_update for {}", log_bytes!(channel.channel_id()));
+							log_trace!(self.logger, "Sending channel_ready WITHOUT channel_update for {}", log_bytes!(channel.context.channel_id()));
 						}
 					}

@@ -6310,9 +6498,9 @@ where
 					}

 					if let Some(announcement_sigs) = announcement_sigs {
-						log_trace!(self.logger, "Sending announcement_signatures for channel {}", log_bytes!(channel.channel_id()));
+						log_trace!(self.logger, "Sending announcement_signatures for channel {}", log_bytes!(channel.context.channel_id()));
 						pending_msg_events.push(events::MessageSendEvent::SendAnnouncementSignatures {
-							node_id: channel.get_counterparty_node_id(),
+							node_id: channel.context.get_counterparty_node_id(),
 							msg: announcement_sigs,
 						});
 						if let Some(height) = height_opt {
@@ -6327,7 +6515,7 @@ where
 						}
 					}
 					if channel.is_our_channel_ready() {
-						if let Some(real_scid) = channel.get_short_channel_id() {
+						if let Some(real_scid) = channel.context.get_short_channel_id() {
 							// If we sent a 0conf channel_ready, and now have an SCID, we add it
 							// to the short_to_chan_info map here. Note that we check whether we
 							// can relay using the real SCID at relay-time (i.e.
@@ -6335,8 +6523,8 @@
 							// un-confirmed we force-close the channel, ensuring short_to_chan_info
 							// is always consistent.
 							let mut short_to_chan_info = self.short_to_chan_info.write().unwrap();
-							let scid_insert = short_to_chan_info.insert(real_scid, (channel.get_counterparty_node_id(), channel.channel_id()));
-							assert!(scid_insert.is_none() || scid_insert.unwrap() == (channel.get_counterparty_node_id(), channel.channel_id()),
+							let scid_insert = short_to_chan_info.insert(real_scid, (channel.context.get_counterparty_node_id(), channel.context.channel_id()));
+							assert!(scid_insert.is_none() || scid_insert.unwrap() == (channel.context.get_counterparty_node_id(), channel.context.channel_id()),
 								"SCIDs should never collide - ensure you weren't behind by a full {} blocks when creating channels",
 								fake_scid::MAX_SCID_BLOCKS_FROM_NOW);
 						}
@@ -6354,9 +6542,9 @@ where
 					let reason_message = format!("{}", reason);
 					self.issue_channel_close_events(channel, reason);
 					pending_msg_events.push(events::MessageSendEvent::HandleError {
-						node_id: channel.get_counterparty_node_id(),
+						node_id: channel.context.get_counterparty_node_id(),
 						action: msgs::ErrorAction::SendErrorMessage { msg: msgs::ErrorMessage {
-							channel_id: channel.channel_id(),
+							channel_id: channel.context.channel_id(),
 							data: reason_message,
 						} },
 					});
@@ -6486,7 +6674,7 @@ where
 	L::Target: Logger,
 {
 	fn handle_open_channel(&self, counterparty_node_id: &PublicKey, msg: &msgs::OpenChannel) {
-		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
+		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
 		let _ = handle_error!(self, self.internal_open_channel(counterparty_node_id, msg), *counterparty_node_id);
 	}

@@ -6497,7 +6685,7 @@ where
 	}

 	fn handle_accept_channel(&self, counterparty_node_id: &PublicKey, msg: &msgs::AcceptChannel) {
-		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
+		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
 		let _ = handle_error!(self, self.internal_accept_channel(counterparty_node_id, msg), *counterparty_node_id);
 	}

@@ -6508,74 +6696,75 @@ where
 	}

 	fn handle_funding_created(&self, counterparty_node_id: &PublicKey, msg: &msgs::FundingCreated) {
-		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
+		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
 		let _ = handle_error!(self, self.internal_funding_created(counterparty_node_id, msg), *counterparty_node_id);
 	}

 	fn handle_funding_signed(&self, counterparty_node_id: &PublicKey, msg: &msgs::FundingSigned) {
-		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
+		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
 		let _ = handle_error!(self, self.internal_funding_signed(counterparty_node_id, msg), *counterparty_node_id);
 	}

 	fn handle_channel_ready(&self, counterparty_node_id: &PublicKey, msg: &msgs::ChannelReady) {
-		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
+		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
 		let _ = handle_error!(self, self.internal_channel_ready(counterparty_node_id, msg), *counterparty_node_id);
 	}
 	fn handle_shutdown(&self, counterparty_node_id: &PublicKey, msg: &msgs::Shutdown) {
-		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
+		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
 		let _ = handle_error!(self, self.internal_shutdown(counterparty_node_id, msg), *counterparty_node_id);
 	}

 	fn handle_closing_signed(&self, counterparty_node_id: &PublicKey, msg: &msgs::ClosingSigned) {
-		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
+		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
 		let _ = handle_error!(self, self.internal_closing_signed(counterparty_node_id, msg), *counterparty_node_id);
 	}

 	fn handle_update_add_htlc(&self, counterparty_node_id: &PublicKey, msg: &msgs::UpdateAddHTLC) {
-		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
+		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
 		let _ = handle_error!(self, self.internal_update_add_htlc(counterparty_node_id, msg), *counterparty_node_id);
 	}

 	fn handle_update_fulfill_htlc(&self, counterparty_node_id: &PublicKey, msg: &msgs::UpdateFulfillHTLC) {
-		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
+		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
 		let _ = handle_error!(self, self.internal_update_fulfill_htlc(counterparty_node_id, msg), *counterparty_node_id);
 	}

 	fn handle_update_fail_htlc(&self, counterparty_node_id: &PublicKey, msg: &msgs::UpdateFailHTLC) {
-		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
+		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
 		let _ = handle_error!(self, self.internal_update_fail_htlc(counterparty_node_id, msg), *counterparty_node_id);
 	}

 	fn handle_update_fail_malformed_htlc(&self, counterparty_node_id: &PublicKey, msg: &msgs::UpdateFailMalformedHTLC) {
-		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
+		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
 		let _ = handle_error!(self, self.internal_update_fail_malformed_htlc(counterparty_node_id, msg), *counterparty_node_id);
 	}

 	fn handle_commitment_signed(&self, counterparty_node_id: &PublicKey, msg: &msgs::CommitmentSigned) {
-		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
+		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
 		let _ = handle_error!(self, self.internal_commitment_signed(counterparty_node_id, msg), *counterparty_node_id);
 	}

 	fn handle_revoke_and_ack(&self, counterparty_node_id: &PublicKey, msg: &msgs::RevokeAndACK) {
-		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
+		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
 		let _ = handle_error!(self, self.internal_revoke_and_ack(counterparty_node_id, msg), *counterparty_node_id);
 	}
 	fn handle_update_fee(&self, counterparty_node_id: &PublicKey, msg: &msgs::UpdateFee) {
-		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
+		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
 		let _ = handle_error!(self, self.internal_update_fee(counterparty_node_id, msg), *counterparty_node_id);
 	}

 	fn handle_announcement_signatures(&self, counterparty_node_id: &PublicKey, msg: &msgs::AnnouncementSignatures) {
-		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
+		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
 		let _ = handle_error!(self, self.internal_announcement_signatures(counterparty_node_id, msg), *counterparty_node_id);
 	}

 	fn handle_channel_update(&self, counterparty_node_id: &PublicKey, msg: &msgs::ChannelUpdate) {
 		PersistenceNotifierGuard::optionally_notify(&self.total_consistency_lock, &self.persistence_notifier, || {
+			let force_persist = self.process_background_events();
 			if let Ok(persist) = handle_error!(self, self.internal_channel_update(counterparty_node_id, msg), *counterparty_node_id) {
-				persist
+				if force_persist == NotifyOption::DoPersist { NotifyOption::DoPersist } else { persist }
 			} else {
 				NotifyOption::SkipPersist
 			}
@@ -6583,12 +6772,12 @@ where
 	}

 	fn handle_channel_reestablish(&self, counterparty_node_id: &PublicKey, msg: &msgs::ChannelReestablish) {
-		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
+		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
 		let _ = handle_error!(self, self.internal_channel_reestablish(counterparty_node_id, msg), *counterparty_node_id);
 	}

 	fn peer_disconnected(&self, counterparty_node_id: &PublicKey) {
-		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
+		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
 		let mut failed_channels = Vec::new();
 		let mut per_peer_state = self.per_peer_state.write().unwrap();
 		let remove_peer = {
@@ -6670,7 +6859,7 @@ where
 			return Err(());
 		}

-		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
+		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);

 		// If we have too many peers connected which don't have funded channels, disconnect the
 		// peer immediately (as long as it doesn't have funded channels). If we have a bunch of
@@ -6691,6 +6880,7 @@ where
 					latest_features: init_msg.features.clone(),
 					pending_msg_events: Vec::new(),
 					monitor_update_blocked_actions: BTreeMap::new(),
+					actions_blocking_raa_monitor_updates: BTreeMap::new(),
 					is_connected: true,
 				}));
 			},
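
// The handler bodies above all shrink to `notify_on_drop(self)` because the
// guard is RAII: take the consistency lock, drain background events up front,
// and wake the persister when dropped. A sketch of the pattern using plain
// std types (this is not LDK's actual PersistenceNotifierGuard):

use std::sync::{Condvar, Mutex, RwLock, RwLockReadGuard};

struct Notifier { pending: Mutex<bool>, cv: Condvar }

impl Notifier {
	fn notify(&self) {
		*self.pending.lock().unwrap() = true;
		self.cv.notify_all();
	}
}

/// Holds the consistency lock for its lifetime; wakes the persister on drop.
struct NotifyOnDropGuard<'a> {
	_consistency_read: RwLockReadGuard<'a, ()>,
	notifier: &'a Notifier,
}

fn notify_on_drop<'a>(lock: &'a RwLock<()>, notifier: &'a Notifier) -> NotifyOnDropGuard<'a> {
	let read_guard = lock.read().unwrap();
	// The real constructor also processes pending background events here,
	// which is why the individual handlers no longer need to do so.
	NotifyOnDropGuard { _consistency_read: read_guard, notifier }
}

impl Drop for NotifyOnDropGuard<'_> {
	fn drop(&mut self) {
		// Fires on every exit path of the handler, including early returns.
		self.notifier.notify();
	}
}
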
We can't have had anything pending related to this channel, so we just @@ -6729,13 +6919,13 @@ where false } else { pending_msg_events.push(events::MessageSendEvent::SendChannelReestablish { - node_id: chan.get_counterparty_node_id(), + node_id: chan.context.get_counterparty_node_id(), msg: chan.get_channel_reestablish(&self.logger), }); true } } else { true }; - if retain && chan.get_counterparty_node_id() != *counterparty_node_id { + if retain && chan.context.get_counterparty_node_id() != *counterparty_node_id { if let Some(msg) = chan.get_signed_channel_announcement(&self.node_signer, self.genesis_hash.clone(), self.best_block.read().unwrap().height(), &self.default_configuration) { if let Ok(update_msg) = self.get_channel_update_for_broadcast(chan) { pending_msg_events.push(events::MessageSendEvent::SendChannelAnnouncement { @@ -6753,7 +6943,7 @@ where } fn handle_error(&self, counterparty_node_id: &PublicKey, msg: &msgs::ErrorMessage) { - let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier); + let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self); if msg.channel_id == [0; 32] { let channel_ids: Vec<[u8; 32]> = { @@ -6800,6 +6990,10 @@ where provided_init_features(&self.default_configuration) } + fn get_genesis_hashes(&self) -> Option> { + Some(vec![ChainHash::from(&self.genesis_hash[..])]) + } + fn handle_tx_add_input(&self, counterparty_node_id: &PublicKey, msg: &msgs::TxAddInput) { let _: Result<(), _> = handle_error!(self, Err(MsgHandleErrInternal::send_err_msg_no_close( "Dual-funded channels not supported".to_owned(), @@ -6949,10 +7143,9 @@ impl Writeable for ChannelDetails { (14, user_channel_id_low, required), (16, self.balance_msat, required), (18, self.outbound_capacity_msat, required), - // Note that by the time we get past the required read above, outbound_capacity_msat will be - // filled in, so we can safely unwrap it here. - (19, self.next_outbound_htlc_limit_msat, (default_value, outbound_capacity_msat.0.unwrap() as u64)), + (19, self.next_outbound_htlc_limit_msat, required), (20, self.inbound_capacity_msat, required), + (21, self.next_outbound_htlc_minimum_msat, required), (22, self.confirmations_required, option), (24, self.force_close_spend_delay, option), (26, self.is_outbound, required), @@ -6989,6 +7182,7 @@ impl Readable for ChannelDetails { // filled in, so we can safely unwrap it here. 
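The handle_channel_update hunk above merges the persist requirement returned by process_background_events with the handler's own decision. A minimal standalone sketch of that merge rule, using hypothetical stand-in types rather than LDK's actual guard machinery:

#[derive(Copy, Clone, PartialEq, Eq, Debug)]
enum NotifyOption { DoPersist, SkipPersist }

// DoPersist dominates: once background-event processing has required a
// persist, a later SkipPersist decision must not downgrade it.
fn merge_notify(force_persist: NotifyOption, handler: NotifyOption) -> NotifyOption {
	if force_persist == NotifyOption::DoPersist { NotifyOption::DoPersist } else { handler }
}

fn main() {
	assert_eq!(merge_notify(NotifyOption::DoPersist, NotifyOption::SkipPersist), NotifyOption::DoPersist);
	assert_eq!(merge_notify(NotifyOption::SkipPersist, NotifyOption::SkipPersist), NotifyOption::SkipPersist);
}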
@@ -6583,12 +6772,12 @@ where
 	}
 
 	fn handle_channel_reestablish(&self, counterparty_node_id: &PublicKey, msg: &msgs::ChannelReestablish) {
-		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
+		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
 		let _ = handle_error!(self, self.internal_channel_reestablish(counterparty_node_id, msg), *counterparty_node_id);
 	}
 
 	fn peer_disconnected(&self, counterparty_node_id: &PublicKey) {
-		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
+		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
 		let mut failed_channels = Vec::new();
 		let mut per_peer_state = self.per_peer_state.write().unwrap();
 		let remove_peer = {
@@ -6670,7 +6859,7 @@ where
 			return Err(());
 		}
 
-		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
+		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
 
 		// If we have too many peers connected which don't have funded channels, disconnect the
 		// peer immediately (as long as it doesn't have funded channels). If we have a bunch of
@@ -6691,6 +6880,7 @@ where
 				latest_features: init_msg.features.clone(),
 				pending_msg_events: Vec::new(),
 				monitor_update_blocked_actions: BTreeMap::new(),
+				actions_blocking_raa_monitor_updates: BTreeMap::new(),
 				is_connected: true,
 			}));
 		},
@@ -6720,8 +6910,8 @@ where
 			let peer_state = &mut *peer_state_lock;
 			let pending_msg_events = &mut peer_state.pending_msg_events;
 			peer_state.channel_by_id.retain(|_, chan| {
-				let retain = if chan.get_counterparty_node_id() == *counterparty_node_id {
-					if !chan.have_received_message() {
+				let retain = if chan.context.get_counterparty_node_id() == *counterparty_node_id {
+					if !chan.context.have_received_message() {
 						// If we created this (outbound) channel while we were disconnected from the
 						// peer we probably failed to send the open_channel message, which is now
 						// lost. We can't have had anything pending related to this channel, so we just
@@ -6729,13 +6919,13 @@ where
 						false
 					} else {
 						pending_msg_events.push(events::MessageSendEvent::SendChannelReestablish {
-							node_id: chan.get_counterparty_node_id(),
+							node_id: chan.context.get_counterparty_node_id(),
 							msg: chan.get_channel_reestablish(&self.logger),
 						});
 						true
 					}
 				} else { true };
-				if retain && chan.get_counterparty_node_id() != *counterparty_node_id {
+				if retain && chan.context.get_counterparty_node_id() != *counterparty_node_id {
 					if let Some(msg) = chan.get_signed_channel_announcement(&self.node_signer, self.genesis_hash.clone(), self.best_block.read().unwrap().height(), &self.default_configuration) {
 						if let Ok(update_msg) = self.get_channel_update_for_broadcast(chan) {
 							pending_msg_events.push(events::MessageSendEvent::SendChannelAnnouncement {
@@ -6753,7 +6943,7 @@ where
 	}
 
 	fn handle_error(&self, counterparty_node_id: &PublicKey, msg: &msgs::ErrorMessage) {
-		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
+		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
 
 		if msg.channel_id == [0; 32] {
 			let channel_ids: Vec<[u8; 32]> = {
@@ -6800,6 +6990,10 @@ where
 		provided_init_features(&self.default_configuration)
 	}
 
+	fn get_genesis_hashes(&self) -> Option<Vec<ChainHash>> {
+		Some(vec![ChainHash::from(&self.genesis_hash[..])])
+	}
+
 	fn handle_tx_add_input(&self, counterparty_node_id: &PublicKey, msg: &msgs::TxAddInput) {
 		let _: Result<(), _> = handle_error!(self, Err(MsgHandleErrInternal::send_err_msg_no_close(
 			"Dual-funded channels not supported".to_owned(),
@@ -6949,10 +7143,9 @@ impl Writeable for ChannelDetails {
 			(14, user_channel_id_low, required),
 			(16, self.balance_msat, required),
 			(18, self.outbound_capacity_msat, required),
-			// Note that by the time we get past the required read above, outbound_capacity_msat will be
-			// filled in, so we can safely unwrap it here.
-			(19, self.next_outbound_htlc_limit_msat, (default_value, outbound_capacity_msat.0.unwrap() as u64)),
+			(19, self.next_outbound_htlc_limit_msat, required),
 			(20, self.inbound_capacity_msat, required),
+			(21, self.next_outbound_htlc_minimum_msat, required),
 			(22, self.confirmations_required, option),
 			(24, self.force_close_spend_delay, option),
 			(26, self.is_outbound, required),
@@ -6989,6 +7182,7 @@ impl Readable for ChannelDetails {
 			// filled in, so we can safely unwrap it here.
 			(19, next_outbound_htlc_limit_msat, (default_value, outbound_capacity_msat.0.unwrap() as u64)),
 			(20, inbound_capacity_msat, required),
+			(21, next_outbound_htlc_minimum_msat, (default_value, 0)),
 			(22, confirmations_required, option),
 			(24, force_close_spend_delay, option),
 			(26, is_outbound, required),
@@ -7022,6 +7216,7 @@ impl Readable for ChannelDetails {
 			balance_msat: balance_msat.0.unwrap(),
 			outbound_capacity_msat: outbound_capacity_msat.0.unwrap(),
 			next_outbound_htlc_limit_msat: next_outbound_htlc_limit_msat.0.unwrap(),
+			next_outbound_htlc_minimum_msat: next_outbound_htlc_minimum_msat.0.unwrap(),
 			inbound_capacity_msat: inbound_capacity_msat.0.unwrap(),
 			confirmations_required,
 			confirmations,
@@ -7058,6 +7253,7 @@ impl_writeable_tlv_based_enum!(PendingHTLCRouting,
 		(0, payment_preimage, required),
 		(2, incoming_cltv_expiry, required),
 		(3, payment_metadata, option),
+		(4, payment_data, option), // Added in 0.0.116
 	},
 ;);
@@ -7357,7 +7553,7 @@ where
 			}
 			number_of_channels += peer_state.channel_by_id.len();
 			for (_, channel) in peer_state.channel_by_id.iter() {
-				if !channel.is_funding_initiated() {
+				if !channel.context.is_funding_initiated() {
 					unfunded_channels += 1;
 				}
 			}
@@ -7369,7 +7565,7 @@ where
 				let mut peer_state_lock = peer_state_mutex.lock().unwrap();
 				let peer_state = &mut *peer_state_lock;
 				for (_, channel) in peer_state.channel_by_id.iter() {
-					if channel.is_funding_initiated() {
+					if channel.context.is_funding_initiated() {
 						channel.write(writer)?;
 					}
 				}
@@ -7655,7 +7851,7 @@ where
 	pub default_config: UserConfig,
 
 	/// A map from channel funding outpoints to ChannelMonitors for those channels (ie
-	/// value.get_funding_txo() should be the key).
+	/// value.context.get_funding_txo() should be the key).
 	///
 	/// If a monitor is inconsistent with the channel state during deserialization the channel will
 	/// be force-closed using the data in the ChannelMonitor and the channel will be dropped. This
@@ -7745,14 +7941,14 @@ where
 			let mut channel: Channel<<SP::Target as SignerProvider>::Signer> = Channel::read(reader, (
 				&args.entropy_source, &args.signer_provider, best_block_height, &provided_channel_type_features(&args.default_config)
 			))?;
-			let funding_txo = channel.get_funding_txo().ok_or(DecodeError::InvalidValue)?;
+			let funding_txo = channel.context.get_funding_txo().ok_or(DecodeError::InvalidValue)?;
 			funding_txo_set.insert(funding_txo.clone());
 			if let Some(ref mut monitor) = args.channel_monitors.get_mut(&funding_txo) {
 				if channel.get_latest_complete_monitor_update_id() > monitor.get_latest_update_id() {
 					// If the channel is ahead of the monitor, return InvalidValue:
 					log_error!(args.logger, "A ChannelMonitor is stale compared to the current ChannelManager! This indicates a potentially-critical violation of the chain::Watch API!");
 					log_error!(args.logger, " The ChannelMonitor for channel {} is at update_id {} but the ChannelManager is at update_id {}.",
-						log_bytes!(channel.channel_id()), monitor.get_latest_update_id(), channel.get_latest_complete_monitor_update_id());
+						log_bytes!(channel.context.channel_id()), monitor.get_latest_update_id(), channel.get_latest_complete_monitor_update_id());
 					log_error!(args.logger, " The chain::Watch API *requires* that monitors are persisted durably before returning,");
 					log_error!(args.logger, " client applications must ensure that ChannelMonitor data is always available and the latest to avoid funds loss!");
 					log_error!(args.logger, " Without the latest ChannelMonitor we cannot continue without risking funds.");
@@ -7761,20 +7957,22 @@
 				} else if channel.get_cur_holder_commitment_transaction_number() > monitor.get_cur_holder_commitment_number() ||
 						channel.get_revoked_counterparty_commitment_transaction_number() > monitor.get_min_seen_secret() ||
 						channel.get_cur_counterparty_commitment_transaction_number() > monitor.get_cur_counterparty_commitment_number() ||
-						channel.get_latest_monitor_update_id() < monitor.get_latest_update_id() {
+						channel.context.get_latest_monitor_update_id() < monitor.get_latest_update_id() {
 					// But if the channel is behind the monitor, close the channel:
 					log_error!(args.logger, "A ChannelManager is stale compared to the current ChannelMonitor!");
 					log_error!(args.logger, " The channel will be force-closed and the latest commitment transaction from the ChannelMonitor broadcast.");
 					log_error!(args.logger, " The ChannelMonitor for channel {} is at update_id {} but the ChannelManager is at update_id {}.",
-						log_bytes!(channel.channel_id()), monitor.get_latest_update_id(), channel.get_latest_monitor_update_id());
+						log_bytes!(channel.context.channel_id()), monitor.get_latest_update_id(), channel.context.get_latest_monitor_update_id());
 					let (monitor_update, mut new_failed_htlcs) = channel.force_shutdown(true);
-					if let Some(monitor_update) = monitor_update {
-						pending_background_events.push(BackgroundEvent::MonitorUpdateRegeneratedOnStartup(monitor_update));
+					if let Some((counterparty_node_id, funding_txo, update)) = monitor_update {
+						pending_background_events.push(BackgroundEvent::MonitorUpdateRegeneratedOnStartup {
+							counterparty_node_id, funding_txo, update
+						});
 					}
 					failed_htlcs.append(&mut new_failed_htlcs);
 					channel_closures.push_back((events::Event::ChannelClosed {
-						channel_id: channel.channel_id(),
-						user_channel_id: channel.get_user_id(),
+						channel_id: channel.context.channel_id(),
+						user_channel_id: channel.context.get_user_id(),
 						reason: ClosureReason::OutdatedChannelManager
 					}, None));
 					for (channel_htlc_source, payment_hash) in channel.inflight_htlc_sources() {
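The hunks above triage disagreement between a deserialized ChannelManager and its ChannelMonitor: a manager ahead of the monitor means monitor persistence was not durable and the load must fail outright, while a manager behind the monitor is recoverable by force-closing from the monitor's data. A simplified sketch of that triage over plain update ids (the real check above also compares commitment-transaction numbers and revocation secrets):

#[derive(Debug, PartialEq)]
enum LoadAction { Ok, ForceCloseFromMonitor, FailToLoad }

fn triage(manager_update_id: u64, monitor_update_id: u64) -> LoadAction {
	if manager_update_id > monitor_update_id {
		// The ChannelMonitor lagging the manager violates the chain::Watch
		// durability contract; continuing would risk funds.
		LoadAction::FailToLoad
	} else if manager_update_id < monitor_update_id {
		// The manager is merely stale: broadcast the monitor's latest
		// commitment transaction and fail the channel's pending HTLCs back.
		LoadAction::ForceCloseFromMonitor
	} else {
		LoadAction::Ok
	}
}

fn main() {
	assert_eq!(triage(5, 3), LoadAction::FailToLoad);
	assert_eq!(triage(3, 5), LoadAction::ForceCloseFromMonitor);
	assert_eq!(triage(4, 4), LoadAction::Ok);
}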
@@ -7792,26 +7990,29 @@ where
 							// backwards leg of the HTLC will simply be rejected.
 							log_info!(args.logger, "Failing HTLC with hash {} as it is missing in the ChannelMonitor for channel {} but was present in the (stale) ChannelManager",
-								log_bytes!(channel.channel_id()), log_bytes!(payment_hash.0));
-							failed_htlcs.push((channel_htlc_source.clone(), *payment_hash, channel.get_counterparty_node_id(), channel.channel_id()));
+								log_bytes!(payment_hash.0), log_bytes!(channel.context.channel_id()));
+							failed_htlcs.push((channel_htlc_source.clone(), *payment_hash, channel.context.get_counterparty_node_id(), channel.context.channel_id()));
 						}
 					}
 				} else {
-					log_info!(args.logger, "Successfully loaded channel {}", log_bytes!(channel.channel_id()));
-					if let Some(short_channel_id) = channel.get_short_channel_id() {
-						short_to_chan_info.insert(short_channel_id, (channel.get_counterparty_node_id(), channel.channel_id()));
+					log_info!(args.logger, "Successfully loaded channel {} at update_id {} against monitor at update_id {}",
+						log_bytes!(channel.context.channel_id()), channel.context.get_latest_monitor_update_id(),
+						monitor.get_latest_update_id());
+					channel.complete_all_mon_updates_through(monitor.get_latest_update_id());
+					if let Some(short_channel_id) = channel.context.get_short_channel_id() {
+						short_to_chan_info.insert(short_channel_id, (channel.context.get_counterparty_node_id(), channel.context.channel_id()));
 					}
-					if channel.is_funding_initiated() {
-						id_to_peer.insert(channel.channel_id(), channel.get_counterparty_node_id());
+					if channel.context.is_funding_initiated() {
+						id_to_peer.insert(channel.context.channel_id(), channel.context.get_counterparty_node_id());
 					}
-					match peer_channels.entry(channel.get_counterparty_node_id()) {
+					match peer_channels.entry(channel.context.get_counterparty_node_id()) {
 						hash_map::Entry::Occupied(mut entry) => {
 							let by_id_map = entry.get_mut();
-							by_id_map.insert(channel.channel_id(), channel);
+							by_id_map.insert(channel.context.channel_id(), channel);
 						},
 						hash_map::Entry::Vacant(entry) => {
 							let mut by_id_map = HashMap::new();
-							by_id_map.insert(channel.channel_id(), channel);
+							by_id_map.insert(channel.context.channel_id(), channel);
 							entry.insert(by_id_map);
 						}
 					}
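The Occupied/Vacant match above rebuilds a per-peer channel map during deserialization. A standalone sketch of the same grouping using the HashMap entry API's or_insert_with shorthand (stand-in key types; the two forms are equivalent, the diff simply keeps the explicit match):

use std::collections::HashMap;

type NodeId = [u8; 33];
type ChannelId = [u8; 32];

fn group_by_peer(chans: Vec<(NodeId, ChannelId)>) -> HashMap<NodeId, HashMap<ChannelId, ()>> {
	let mut peer_channels: HashMap<NodeId, HashMap<ChannelId, ()>> = HashMap::new();
	for (node_id, chan_id) in chans {
		// entry().or_insert_with() creates the per-peer map on first use.
		peer_channels.entry(node_id).or_insert_with(HashMap::new).insert(chan_id, ());
	}
	peer_channels
}

fn main() {
	let grouped = group_by_peer(vec![([2u8; 33], [1u8; 32]), ([2u8; 33], [9u8; 32])]);
	assert_eq!(grouped[&[2u8; 33]].len(), 2);
}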
@@ -7822,12 +8023,12 @@ where
 					// safely discard the channel.
 					let _ = channel.force_shutdown(false);
 					channel_closures.push_back((events::Event::ChannelClosed {
-						channel_id: channel.channel_id(),
-						user_channel_id: channel.get_user_id(),
+						channel_id: channel.context.channel_id(),
+						user_channel_id: channel.context.get_user_id(),
 						reason: ClosureReason::DisconnectedPeer,
 					}, None));
 				} else {
-					log_error!(args.logger, "Missing ChannelMonitor for channel {} needed by ChannelManager.", log_bytes!(channel.channel_id()));
+					log_error!(args.logger, "Missing ChannelMonitor for channel {} needed by ChannelManager.", log_bytes!(channel.context.channel_id()));
 					log_error!(args.logger, " The chain::Watch API *requires* that monitors are persisted durably before returning,");
 					log_error!(args.logger, " client applications must ensure that ChannelMonitor data is always available and the latest to avoid funds loss!");
 					log_error!(args.logger, " Without the ChannelMonitor we cannot continue without risking funds.");
@@ -7844,7 +8045,7 @@ where
 				update_id: CLOSED_CHANNEL_UPDATE_ID,
 				updates: vec![ChannelMonitorUpdateStep::ChannelForceClosed { should_broadcast: true }],
 			};
-			pending_background_events.push(BackgroundEvent::MonitorUpdateRegeneratedOnStartup((*funding_txo, monitor_update)));
+			pending_background_events.push(BackgroundEvent::ClosingMonitorUpdateRegeneratedOnStartup((*funding_txo, monitor_update)));
 		}
 	}
 
@@ -7882,6 +8083,7 @@ where
 				latest_features: Readable::read(reader)?,
 				pending_msg_events: Vec::new(),
 				monitor_update_blocked_actions: BTreeMap::new(),
+				actions_blocking_raa_monitor_updates: BTreeMap::new(),
 				is_connected: false,
 			};
 			per_peer_state.insert(peer_pubkey, Mutex::new(peer_state));
@@ -7911,6 +8113,24 @@ where
 		}
 	}
 
+	for (node_id, peer_mtx) in per_peer_state.iter() {
+		let peer_state = peer_mtx.lock().unwrap();
+		for (_, chan) in peer_state.channel_by_id.iter() {
+			for update in chan.uncompleted_unblocked_mon_updates() {
+				if let Some(funding_txo) = chan.context.get_funding_txo() {
+					log_trace!(args.logger, "Replaying ChannelMonitorUpdate {} for channel {}",
+						update.update_id, log_bytes!(funding_txo.to_channel_id()));
+					pending_background_events.push(
+						BackgroundEvent::MonitorUpdateRegeneratedOnStartup {
+							counterparty_node_id: *node_id, funding_txo, update: update.clone(),
+						});
+				} else {
+					return Err(DecodeError::InvalidValue);
+				}
+			}
+		}
+	}
+
 	let _last_node_announcement_serial: u32 = Readable::read(reader)?; // Only used < 0.0.111
 	let highest_seen_timestamp: u32 = Readable::read(reader)?;
@@ -7945,7 +8165,7 @@ where
 	let mut claimable_htlc_purposes = None;
 	let mut claimable_htlc_onion_fields = None;
 	let mut pending_claiming_payments = Some(HashMap::new());
-	let mut monitor_update_blocked_actions_per_peer = Some(Vec::new());
+	let mut monitor_update_blocked_actions_per_peer: Option<Vec<(_, BTreeMap<_, Vec<_>>)>> = Some(Vec::new());
 	let mut events_override = None;
 	read_tlv_fields!(reader, {
 		(1, pending_outbound_payments_no_retry, option),
@@ -8189,25 +8409,25 @@ where
 		let mut peer_state_lock = peer_state_mutex.lock().unwrap();
 		let peer_state = &mut *peer_state_lock;
 		for (chan_id, chan) in peer_state.channel_by_id.iter_mut() {
-			if chan.outbound_scid_alias() == 0 {
+			if chan.context.outbound_scid_alias() == 0 {
 				let mut outbound_scid_alias;
 				loop {
 					outbound_scid_alias = fake_scid::Namespace::OutboundAlias
 						.get_fake_scid(best_block_height, &genesis_hash, fake_scid_rand_bytes.as_ref().unwrap(), &args.entropy_source);
 					if outbound_scid_aliases.insert(outbound_scid_alias) { break; }
 				}
-				chan.set_outbound_scid_alias(outbound_scid_alias);
-			} else if !outbound_scid_aliases.insert(chan.outbound_scid_alias()) {
+				chan.context.set_outbound_scid_alias(outbound_scid_alias);
+			} else if !outbound_scid_aliases.insert(chan.context.outbound_scid_alias()) {
 				// Note that in rare cases it's possible to hit this while reading an older
 				// channel if we just happened to pick a colliding outbound alias above.
-				log_error!(args.logger, "Got duplicate outbound SCID alias; {}", chan.outbound_scid_alias());
+				log_error!(args.logger, "Got duplicate outbound SCID alias; {}", chan.context.outbound_scid_alias());
 				return Err(DecodeError::InvalidValue);
 			}
-			if chan.is_usable() {
-				if short_to_chan_info.insert(chan.outbound_scid_alias(), (chan.get_counterparty_node_id(), *chan_id)).is_some() {
+			if chan.context.is_usable() {
+				if short_to_chan_info.insert(chan.context.outbound_scid_alias(), (chan.context.get_counterparty_node_id(), *chan_id)).is_some() {
 					// Note that in rare cases it's possible to hit this while reading an older
 					// channel if we just happened to pick a colliding outbound alias above.
-					log_error!(args.logger, "Got duplicate outbound SCID alias; {}", chan.outbound_scid_alias());
+					log_error!(args.logger, "Got duplicate outbound SCID alias; {}", chan.context.outbound_scid_alias());
 					return Err(DecodeError::InvalidValue);
 				}
 			}
@@ -8270,7 +8490,21 @@ where
 	}
 
 	for (node_id, monitor_update_blocked_actions) in monitor_update_blocked_actions_per_peer.unwrap() {
-		if let Some(peer_state) = per_peer_state.get_mut(&node_id) {
+		if let Some(peer_state) = per_peer_state.get(&node_id) {
+			for (_, actions) in monitor_update_blocked_actions.iter() {
+				for action in actions.iter() {
+					if let MonitorUpdateCompletionAction::EmitEventAndFreeOtherChannel {
+						downstream_counterparty_and_funding_outpoint:
+							Some((blocked_node_id, blocked_channel_outpoint, blocking_action)), ..
+					} = action {
+						if let Some(blocked_peer_state) = per_peer_state.get(&blocked_node_id) {
+							blocked_peer_state.lock().unwrap().actions_blocking_raa_monitor_updates
+								.entry(blocked_channel_outpoint.to_channel_id())
+								.or_insert_with(Vec::new).push(blocking_action.clone());
+						}
+					}
+				}
+			}
 			peer_state.lock().unwrap().monitor_update_blocked_actions = monitor_update_blocked_actions;
 		} else {
 			log_error!(args.logger, "Got blocked actions without a per-peer-state for {}", node_id);
@@ -8312,6 +8546,8 @@ where
 		pending_events_processor: AtomicBool::new(false),
 		pending_background_events: Mutex::new(pending_background_events),
 		total_consistency_lock: RwLock::new(()),
+		#[cfg(debug_assertions)]
+		background_events_processed_since_startup: AtomicBool::new(false),
 		persistence_notifier: Notifier::new(),
 
 		entropy_source: args.entropy_source,
@@ -8351,7 +8587,7 @@ mod tests {
 	use crate::routing::router::{PaymentParameters, RouteParameters, find_route};
 	use crate::util::errors::APIError;
 	use crate::util::test_utils;
-	use crate::util::config::ChannelConfig;
+	use crate::util::config::{ChannelConfig, ChannelConfigUpdate};
 	use crate::sign::EntropySource;
 
 	#[test]
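The alias-assignment loop above keeps drawing fake SCIDs until one is globally unique, and treats a collision on an already-assigned alias as a fatal DecodeError. A sketch of the retry loop with a stubbed generator (assumed names, not LDK's fake_scid namespace API):

use std::collections::HashSet;

fn assign_alias(taken: &mut HashSet<u64>, mut draw: impl FnMut() -> u64) -> u64 {
	loop {
		let candidate = draw();
		// HashSet::insert returns false if the value was already present,
		// in which case we simply draw again.
		if taken.insert(candidate) { return candidate; }
	}
}

fn main() {
	let mut taken: HashSet<u64> = [42u64].into_iter().collect();
	let mut seq = [42u64, 42, 7].into_iter();
	assert_eq!(assign_alias(&mut taken, || seq.next().unwrap()), 7);
}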
@@ -8560,13 +8796,26 @@ mod tests {
 	#[test]
 	fn test_keysend_dup_payment_hash() {
+		do_test_keysend_dup_payment_hash(false);
+		do_test_keysend_dup_payment_hash(true);
+	}
+
+	fn do_test_keysend_dup_payment_hash(accept_mpp_keysend: bool) {
 		// (1): Test that a keysend payment with a duplicate payment hash to an existing pending
 		//      outbound regular payment fails as expected.
 		// (2): Test that a regular payment with a duplicate payment hash to an existing keysend payment
 		//      fails as expected.
+		// (3): Test that a keysend payment with a duplicate payment hash to an existing keysend
+		//      payment fails as expected. When `accept_mpp_keysend` is false, this tests that we
+		//      reject MPP keysend payments, since in this case where the payment has no payment
+		//      secret, a keysend payment with a duplicate hash is basically an MPP keysend. If
+		//      `accept_mpp_keysend` is true, this tests that we only accept MPP keysends with
+		//      payment secrets and reject otherwise.
 		let chanmon_cfgs = create_chanmon_cfgs(2);
 		let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
-		let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+		let mut mpp_keysend_cfg = test_default_channel_config();
+		mpp_keysend_cfg.accept_mpp_keysend = accept_mpp_keysend;
+		let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, Some(mpp_keysend_cfg)]);
 		let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
 		create_announced_chan_between_nodes(&nodes, 0, 1);
 		let scorer = test_utils::TestScorer::new();
@@ -8655,6 +8904,53 @@ mod tests {
 
 		// Finally, succeed the keysend payment.
 		claim_payment(&nodes[0], &expected_route, payment_preimage);
+
+		// To start (3), send a keysend payment but don't claim it.
+		let payment_id_1 = PaymentId([44; 32]);
+		let payment_hash = nodes[0].node.send_spontaneous_payment(&route, Some(payment_preimage),
+			RecipientOnionFields::spontaneous_empty(), payment_id_1).unwrap();
+		check_added_monitors!(nodes[0], 1);
+		let mut events = nodes[0].node.get_and_clear_pending_msg_events();
+		assert_eq!(events.len(), 1);
+		let event = events.pop().unwrap();
+		let path = vec![&nodes[1]];
+		pass_along_path(&nodes[0], &path, 100_000, payment_hash, None, event, true, Some(payment_preimage));
+
+		// Next, attempt a keysend payment with a duplicate hash and make sure it fails.
+		let route_params = RouteParameters {
+			payment_params: PaymentParameters::for_keysend(expected_route.last().unwrap().node.get_our_node_id(), TEST_FINAL_CLTV, false),
+			final_value_msat: 100_000,
+		};
+		let route = find_route(
+			&nodes[0].node.get_our_node_id(), &route_params, &nodes[0].network_graph,
+			None, nodes[0].logger, &scorer, &(), &random_seed_bytes
+		).unwrap();
+		let payment_id_2 = PaymentId([45; 32]);
+		nodes[0].node.send_spontaneous_payment(&route, Some(payment_preimage),
+			RecipientOnionFields::spontaneous_empty(), payment_id_2).unwrap();
+		check_added_monitors!(nodes[0], 1);
+		let mut events = nodes[0].node.get_and_clear_pending_msg_events();
+		assert_eq!(events.len(), 1);
+		let ev = events.drain(..).next().unwrap();
+		let payment_event = SendEvent::from_event(ev);
+		nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
+		check_added_monitors!(nodes[1], 0);
+		commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
+		expect_pending_htlcs_forwardable!(nodes[1]);
+		expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::FailedPayment { payment_hash }]);
+		check_added_monitors!(nodes[1], 1);
+		let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
+		assert!(updates.update_add_htlcs.is_empty());
+		assert!(updates.update_fulfill_htlcs.is_empty());
+		assert_eq!(updates.update_fail_htlcs.len(), 1);
+		assert!(updates.update_fail_malformed_htlcs.is_empty());
+		assert!(updates.update_fee.is_none());
+		nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fail_htlcs[0]);
+		commitment_signed_dance!(nodes[0], nodes[1], updates.commitment_signed, true, true);
+		expect_payment_failed!(nodes[0], payment_hash, true);
+
+		// Finally, claim the original payment.
+		claim_payment(&nodes[0], &expected_route, payment_preimage);
 	}
 
 	#[test]
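Part (3) above hinges on keysend's construction: the payment hash is the SHA-256 of the sender-chosen preimage, so reusing a preimage necessarily reuses the hash, and without a payment secret the second HTLC is indistinguishable from another part of an MPP payment. A sketch of the derivation using the bitcoin crate's hashes API (the crate this file already depends on; not LDK's internal helpers):

use bitcoin::hashes::{sha256, Hash};

// Derive the keysend payment hash from a (fresh, random) preimage. Drawing a
// new preimage per payment is what keeps payment hashes from colliding.
fn keysend_payment_hash(preimage: [u8; 32]) -> [u8; 32] {
	sha256::Hash::hash(&preimage).into_inner()
}

fn main() {
	let hash = keysend_payment_hash([7u8; 32]);
	assert_eq!(hash.len(), 32);
}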
@@ -8704,10 +9000,13 @@ mod tests {
 	#[test]
 	fn test_keysend_msg_with_secret_err() {
-		// Test that we error as expected if we receive a keysend payment that includes a payment secret.
+		// Test that we error as expected if we receive a keysend payment that includes a payment
+		// secret when we don't support MPP keysend.
+		let mut reject_mpp_keysend_cfg = test_default_channel_config();
+		reject_mpp_keysend_cfg.accept_mpp_keysend = false;
 		let chanmon_cfgs = create_chanmon_cfgs(2);
 		let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
-		let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+		let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, Some(reject_mpp_keysend_cfg)]);
 		let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
 
 		let payer_pubkey = nodes[0].node.get_our_node_id();
@@ -9070,12 +9369,14 @@ mod tests {
 				&SecretKey::from_slice(&nodes[1].keys_manager.get_secure_random_bytes()).unwrap());
 			peer_pks.push(random_pk);
 			nodes[1].node.peer_connected(&random_pk, &msgs::Init {
-				features: nodes[0].node.init_features(), remote_network_address: None }, true).unwrap();
+				features: nodes[0].node.init_features(), networks: None, remote_network_address: None
+			}, true).unwrap();
 		}
 		let last_random_pk = PublicKey::from_secret_key(&nodes[0].node.secp_ctx,
 			&SecretKey::from_slice(&nodes[1].keys_manager.get_secure_random_bytes()).unwrap());
 		nodes[1].node.peer_connected(&last_random_pk, &msgs::Init {
-			features: nodes[0].node.init_features(), remote_network_address: None }, true).unwrap_err();
+			features: nodes[0].node.init_features(), networks: None, remote_network_address: None
+		}, true).unwrap_err();
 
 		// Also importantly, because nodes[0] isn't "protected", we will refuse a reconnection from
 		// them if we have too many un-channel'd peers.
@@ -9086,13 +9387,16 @@ mod tests {
 			if let Event::ChannelClosed { .. } = ev { } else { panic!(); }
 		}
 		nodes[1].node.peer_connected(&last_random_pk, &msgs::Init {
-			features: nodes[0].node.init_features(), remote_network_address: None }, true).unwrap();
+			features: nodes[0].node.init_features(), networks: None, remote_network_address: None
+		}, true).unwrap();
 		nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init {
-			features: nodes[0].node.init_features(), remote_network_address: None }, true).unwrap_err();
+			features: nodes[0].node.init_features(), networks: None, remote_network_address: None
+		}, true).unwrap_err();
 
 		// but of course if the connection is outbound it's allowed...
 		nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init {
-			features: nodes[0].node.init_features(), remote_network_address: None }, false).unwrap();
+			features: nodes[0].node.init_features(), networks: None, remote_network_address: None
+		}, false).unwrap();
 		nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
 
 		// Now nodes[0] is disconnected but still has a pending, un-funded channel lying around.
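The networks field threaded through msgs::Init above lets each peer advertise the chains it operates on. A standalone sketch of the compatibility rule with hypothetical types (per BOLT 1, an absent list imposes no restriction; a present list must share at least one chain with ours):

#[derive(Clone, Copy, PartialEq, Eq, Debug)]
struct ChainHash([u8; 32]);

struct Init { networks: Option<Vec<ChainHash>> }

fn chains_compatible(ours: &[ChainHash], theirs: &Init) -> bool {
	match &theirs.networks {
		// No list advertised: behave as before the field existed.
		None => true,
		Some(list) => list.iter().any(|c| ours.contains(c)),
	}
}

fn main() {
	let ours = [ChainHash([0u8; 32])];
	assert!(chains_compatible(&ours, &Init { networks: None }));
	assert!(!chains_compatible(&ours, &Init { networks: Some(vec![ChainHash([1u8; 32])]) }));
}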
@@ -9116,7 +9420,8 @@ mod tests {
 		// "protected" and can connect again.
 		mine_transaction(&nodes[1], funding_tx.as_ref().unwrap());
 		nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init {
-			features: nodes[0].node.init_features(), remote_network_address: None }, true).unwrap();
+			features: nodes[0].node.init_features(), networks: None, remote_network_address: None
+		}, true).unwrap();
 		get_event_msg!(nodes[1], MessageSendEvent::SendChannelReestablish, nodes[0].node.get_our_node_id());
 
 		// Further, because the first channel was funded, we can open another channel with
@@ -9181,7 +9486,8 @@ mod tests {
 		let random_pk = PublicKey::from_secret_key(&nodes[0].node.secp_ctx,
 			&SecretKey::from_slice(&nodes[1].keys_manager.get_secure_random_bytes()).unwrap());
 		nodes[1].node.peer_connected(&random_pk, &msgs::Init {
-			features: nodes[0].node.init_features(), remote_network_address: None }, true).unwrap();
+			features: nodes[0].node.init_features(), networks: None, remote_network_address: None
+		}, true).unwrap();
 		nodes[1].node.handle_open_channel(&random_pk, &open_channel_msg);
 
 		let events = nodes[1].node.get_and_clear_pending_events();
@@ -9199,7 +9505,8 @@ mod tests {
 		let last_random_pk = PublicKey::from_secret_key(&nodes[0].node.secp_ctx,
 			&SecretKey::from_slice(&nodes[1].keys_manager.get_secure_random_bytes()).unwrap());
 		nodes[1].node.peer_connected(&last_random_pk, &msgs::Init {
-			features: nodes[0].node.init_features(), remote_network_address: None }, true).unwrap();
+			features: nodes[0].node.init_features(), networks: None, remote_network_address: None
+		}, true).unwrap();
 		nodes[1].node.handle_open_channel(&last_random_pk, &open_channel_msg);
 
 		let events = nodes[1].node.get_and_clear_pending_events();
 		match events[0] {
@@ -9262,9 +9569,65 @@ mod tests {
 		check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed);
 	}
+
+	#[test]
+	fn test_update_channel_config() {
+		let chanmon_cfg = create_chanmon_cfgs(2);
+		let node_cfg = create_node_cfgs(2, &chanmon_cfg);
+		let mut user_config = test_default_channel_config();
+		let node_chanmgr = create_node_chanmgrs(2, &node_cfg, &[Some(user_config), Some(user_config)]);
+		let nodes = create_network(2, &node_cfg, &node_chanmgr);
+		let _ = create_announced_chan_between_nodes(&nodes, 0, 1);
+		let channel = &nodes[0].node.list_channels()[0];
+
+		nodes[0].node.update_channel_config(&channel.counterparty.node_id, &[channel.channel_id], &user_config.channel_config).unwrap();
+		let events = nodes[0].node.get_and_clear_pending_msg_events();
+		assert_eq!(events.len(), 0);
+
+		user_config.channel_config.forwarding_fee_base_msat += 10;
+		nodes[0].node.update_channel_config(&channel.counterparty.node_id, &[channel.channel_id], &user_config.channel_config).unwrap();
+		assert_eq!(nodes[0].node.list_channels()[0].config.unwrap().forwarding_fee_base_msat, user_config.channel_config.forwarding_fee_base_msat);
+		let events = nodes[0].node.get_and_clear_pending_msg_events();
+		assert_eq!(events.len(), 1);
+		match &events[0] {
+			MessageSendEvent::BroadcastChannelUpdate { .. } => {},
+			_ => panic!("expected BroadcastChannelUpdate event"),
+		}
+
+		nodes[0].node.update_partial_channel_config(&channel.counterparty.node_id, &[channel.channel_id], &ChannelConfigUpdate::default()).unwrap();
+		let events = nodes[0].node.get_and_clear_pending_msg_events();
+		assert_eq!(events.len(), 0);
+
+		let new_cltv_expiry_delta = user_config.channel_config.cltv_expiry_delta + 6;
+		nodes[0].node.update_partial_channel_config(&channel.counterparty.node_id, &[channel.channel_id], &ChannelConfigUpdate {
+			cltv_expiry_delta: Some(new_cltv_expiry_delta),
+			..Default::default()
+		}).unwrap();
+		assert_eq!(nodes[0].node.list_channels()[0].config.unwrap().cltv_expiry_delta, new_cltv_expiry_delta);
+		let events = nodes[0].node.get_and_clear_pending_msg_events();
+		assert_eq!(events.len(), 1);
+		match &events[0] {
+			MessageSendEvent::BroadcastChannelUpdate { .. } => {},
+			_ => panic!("expected BroadcastChannelUpdate event"),
+		}
+
+		let new_fee = user_config.channel_config.forwarding_fee_proportional_millionths + 100;
+		nodes[0].node.update_partial_channel_config(&channel.counterparty.node_id, &[channel.channel_id], &ChannelConfigUpdate {
+			forwarding_fee_proportional_millionths: Some(new_fee),
+			..Default::default()
+		}).unwrap();
+		assert_eq!(nodes[0].node.list_channels()[0].config.unwrap().cltv_expiry_delta, new_cltv_expiry_delta);
+		assert_eq!(nodes[0].node.list_channels()[0].config.unwrap().forwarding_fee_proportional_millionths, new_fee);
+		let events = nodes[0].node.get_and_clear_pending_msg_events();
+		assert_eq!(events.len(), 1);
+		match &events[0] {
+			MessageSendEvent::BroadcastChannelUpdate { .. } => {},
+			_ => panic!("expected BroadcastChannelUpdate event"),
+		}
+	}
 }
 
-#[cfg(all(any(test, feature = "_test_utils"), feature = "_bench_unstable"))]
+#[cfg(ldk_bench)]
 pub mod bench {
 	use crate::chain::Listen;
 	use crate::chain::chainmonitor::{ChainMonitor, Persist};
@@ -9284,7 +9647,7 @@ pub mod bench {
 
 	use crate::sync::{Arc, Mutex};
 
-	use test::Bencher;
+	use criterion::Criterion;
 
 	type Manager<'a, P> = ChannelManager<
 		&'a ChainMonitor
 
 		fn chain_monitor(&self) -> Option<&test_utils::TestChainMonitor> { None }
 	}
 
-	#[cfg(test)]
-	#[bench]
-	fn bench_sends(bench: &mut Bencher) {
-		bench_two_sends(bench, test_utils::TestPersister::new(), test_utils::TestPersister::new());
+	pub fn bench_sends(bench: &mut Criterion) {
+		bench_two_sends(bench, "bench_sends", test_utils::TestPersister::new(), test_utils::TestPersister::new());
 	}
 
-	pub fn bench_two_sends<P: Persist<InMemorySigner>>(bench: &mut Bencher, persister_a: P, persister_b: P) {
+	pub fn bench_two_sends<P: Persist<InMemorySigner>>(bench: &mut Criterion, bench_name: &str, persister_a: P, persister_b: P) {
 		// Do a simple benchmark of sending a payment back and forth between two nodes.
 		// Note that this is unrealistic as each payment send will require at least two fsync
 		// calls per node.
@@ -9345,8 +9706,12 @@ pub mod bench {
 		});
 		let node_b_holder = ANodeHolder { node: &node_b };
 
-		node_a.peer_connected(&node_b.get_our_node_id(), &Init { features: node_b.init_features(), remote_network_address: None }, true).unwrap();
-		node_b.peer_connected(&node_a.get_our_node_id(), &Init { features: node_a.init_features(), remote_network_address: None }, false).unwrap();
+		node_a.peer_connected(&node_b.get_our_node_id(), &Init {
+			features: node_b.init_features(), networks: None, remote_network_address: None
+		}, true).unwrap();
+		node_b.peer_connected(&node_a.get_our_node_id(), &Init {
+			features: node_a.init_features(), networks: None, remote_network_address: None
+		}, false).unwrap();
 		node_a.create_channel(node_b.get_our_node_id(), 8_000_000, 100_000_000, 42, None).unwrap();
 		node_b.handle_open_channel(&node_a.get_our_node_id(), &get_event_msg!(node_a_holder, MessageSendEvent::SendOpenChannel, node_b.get_our_node_id()));
 		node_a.handle_accept_channel(&node_b.get_our_node_id(), &get_event_msg!(node_b_holder, MessageSendEvent::SendAcceptChannel, node_a.get_our_node_id()));
@@ -9464,9 +9829,9 @@ pub mod bench {
 			}
 		}
 
-		bench.iter(|| {
+		bench.bench_function(bench_name, |b| b.iter(|| {
 			send_payment!(node_a, node_b);
 			send_payment!(node_b, node_a);
-		});
+		}));
 	}
 }
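The benchmark above now runs under Criterion rather than the unstable test::Bencher. A minimal sketch of how such a function is typically wired into a Criterion harness (hypothetical bench body, not LDK's actual setup):

use criterion::{criterion_group, criterion_main, Criterion};

// Stand-in for a bench like bench_sends above: Criterion calls the closure
// passed to iter() repeatedly to produce a stable measurement.
fn bench_sends(bench: &mut Criterion) {
	bench.bench_function("bench_sends", |b| b.iter(|| {
		// A payment send/receive round-trip would go here.
		std::hint::black_box(1 + 1)
	}));
}

criterion_group!(benches, bench_sends);
criterion_main!(benches);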