diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs
index 7b2865bb..cfb462b0 100644
--- a/lightning/src/ln/channelmanager.rs
+++ b/lightning/src/ln/channelmanager.rs
@@ -26,12 +26,10 @@ use bitcoin::network::constants::Network;
 use bitcoin::hashes::Hash;
 use bitcoin::hashes::sha256::Hash as Sha256;
-use bitcoin::hashes::sha256d::Hash as Sha256dHash;
 use bitcoin::hash_types::{BlockHash, Txid};
 
 use bitcoin::secp256k1::{SecretKey,PublicKey};
 use bitcoin::secp256k1::Secp256k1;
-use bitcoin::secp256k1::ecdh::SharedSecret;
 use bitcoin::{LockTime, secp256k1, Sequence};
 
 use crate::chain;
@@ -47,7 +45,7 @@ use crate::ln::features::{ChannelFeatures, ChannelTypeFeatures, InitFeatures, No
 #[cfg(any(feature = "_test_utils", test))]
 use crate::ln::features::InvoiceFeatures;
 use crate::routing::gossip::NetworkGraph;
-use crate::routing::router::{DefaultRouter, InFlightHtlcs, PaymentParameters, Route, RouteHop, RoutePath, Router};
+use crate::routing::router::{DefaultRouter, InFlightHtlcs, PaymentParameters, Route, RouteHop, RouteParameters, RoutePath, Router};
 use crate::routing::scoring::ProbabilisticScorer;
 use crate::ln::msgs;
 use crate::ln::onion_utils;
@@ -55,9 +53,9 @@ use crate::ln::onion_utils::HTLCFailReason;
 use crate::ln::msgs::{ChannelMessageHandler, DecodeError, LightningError, MAX_VALUE_MSAT};
 #[cfg(test)]
 use crate::ln::outbound_payment;
-use crate::ln::outbound_payment::{OutboundPayments, PendingOutboundPayment};
+use crate::ln::outbound_payment::{OutboundPayments, PaymentAttempts, PendingOutboundPayment};
 use crate::ln::wire::Encode;
-use crate::chain::keysinterface::{EntropySource, KeysInterface, KeysManager, NodeSigner, Recipient, Sign, SignerProvider};
+use crate::chain::keysinterface::{EntropySource, KeysManager, NodeSigner, Recipient, SignerProvider, ChannelSigner};
 use crate::util::config::{UserConfig, ChannelConfig};
 use crate::util::events::{Event, EventHandler, EventsProvider, MessageSendEvent, MessageSendEventsProvider, ClosureReason, HTLCDestination};
 use crate::util::events;
@@ -72,13 +70,13 @@ use crate::prelude::*;
 use core::{cmp, mem};
 use core::cell::RefCell;
 use crate::io::Read;
-use crate::sync::{Arc, Mutex, MutexGuard, RwLock, RwLockReadGuard, FairRwLock};
+use crate::sync::{Arc, Mutex, RwLock, RwLockReadGuard, FairRwLock};
 use core::sync::atomic::{AtomicUsize, Ordering};
 use core::time::Duration;
 use core::ops::Deref;
 
 // Re-export this for use in the public API.
-pub use crate::ln::outbound_payment::PaymentSendFailure;
+pub use crate::ln::outbound_payment::{PaymentSendFailure, Retry};
 
 // We hold various information about HTLC relay in the HTLC objects in Channel itself:
 //
@@ -247,6 +245,10 @@ pub(crate) enum HTLCSource {
 		first_hop_htlc_msat: u64,
 		payment_id: PaymentId,
 		payment_secret: Option<PaymentSecret>,
+		/// Note that this is now "deprecated" - we write it for forwards (and read it for
+		/// backwards) compatibility reasons, but prefer to use the data in the
+		/// [`super::outbound_payment`] module, which stores per-payment data once instead of in
+		/// each HTLC.
 		payment_params: Option<PaymentParameters>,
 	},
 }
 
@@ -291,6 +293,25 @@ struct ReceiveError {
 	msg: &'static str,
 }
 
+/// This enum is used to specify which error data to send to peers when failing back an HTLC
+/// using [`ChannelManager::fail_htlc_backwards_with_reason`].
+///
+/// For more info on failure codes, see <https://github.com/lightning/bolts/blob/master/04-onion-routing.md#failure-messages>.
+#[derive(Clone, Copy)]
+pub enum FailureCode {
+	/// We had a temporary error processing the payment. Useful if no other error codes fit
+	/// and you want to indicate that the payer may want to retry.
+	TemporaryNodeFailure = 0x2000 | 2,
+	/// We have a required feature which was not in this onion. For example, you may require
+	/// some additional metadata that was not provided with this payment.
+	RequiredNodeFeatureMissing = 0x4000 | 0x2000 | 3,
+	/// You may wish to use this when a `payment_preimage` is unknown, or the CLTV expiry of
+	/// the HTLC is too close to the current block height for safe handling.
+	/// Using this failure code in [`ChannelManager::fail_htlc_backwards_with_reason`] is
+	/// equivalent to calling [`ChannelManager::fail_htlc_backwards`].
+	IncorrectOrUnknownPaymentDetails = 0x4000 | 15,
+}
+
 type ShutdownResult = (Option<(OutPoint, ChannelMonitorUpdate)>, Vec<(HTLCSource, PaymentHash, PublicKey, [u8; 32])>);
 
 /// Error type returned across the peer_state mutex boundary. When an Err is generated for a
@@ -390,7 +411,7 @@ impl MsgHandleErrInternal {
 /// Event::PendingHTLCsForwardable for the API guidelines indicating how long should be waited).
 /// This provides some limited amount of privacy. Ideally this would range from somewhere like one
 /// second to 30 seconds, but people expect lightning to be, you know, kinda fast, sadly.
-const MIN_HTLC_RELAY_HOLDING_CELL_MILLIS: u64 = 100;
+pub(super) const MIN_HTLC_RELAY_HOLDING_CELL_MILLIS: u64 = 100;
 
 /// For events which result in both a RevokeAndACK and a CommitmentUpdate, by default they should
 /// be sent in the order they appear in the return value, however sometimes the order needs to be
@@ -434,13 +455,6 @@ struct ClaimablePayments {
 	pending_claiming_payments: HashMap<PaymentHash, ClaimingPayment>,
 }
 
-// Note this is only exposed in cfg(test):
-pub(super) struct ChannelHolder {
-	/// Messages to send to peers - pushed to in the same lock that they are generated in (except
-	/// for broadcast messages, where ordering isn't as strict).
-	pub(super) pending_msg_events: Vec<MessageSendEvent>,
-}
-
 /// Events which we process internally but cannot be processed immediately at the generation site
 /// for some reason. They are handled in timer_tick_occurred, so may be processed with
 /// quite some time lag.
@@ -461,7 +475,7 @@ pub(crate) enum MonitorUpdateCompletionAction {
 }
 
 /// State we hold per-peer.
-pub(super) struct PeerState<Signer: Sign> {
+pub(super) struct PeerState<Signer: ChannelSigner> {
 	/// `temporary_channel_id` or `channel_id` -> `channel`.
 	///
 	/// Holds all channels where the peer is the counterparty. Once a channel has been assigned a
@@ -470,6 +484,9 @@ pub(super) struct PeerState<Signer: Sign> {
 	pub(super) channel_by_id: HashMap<[u8; 32], Channel<Signer>>,
 	/// The latest `InitFeatures` we heard from the peer.
 	latest_features: InitFeatures,
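The `FailureCode` enum above lets a recipient choose which BOLT 4 error code is returned when failing back an HTLC. A minimal usage sketch (hypothetical event handling; `channel_manager` and `known_payment_hashes` are assumed to exist in the caller's code):

    // Fail back an HTLC whose payment hash we do not recognize. As documented above,
    // IncorrectOrUnknownPaymentDetails behaves like plain fail_htlc_backwards().
    if let Event::PaymentClaimable { payment_hash, .. } = event {
        if !known_payment_hashes.contains(&payment_hash) {
            channel_manager.fail_htlc_backwards_with_reason(
                &payment_hash, FailureCode::IncorrectOrUnknownPaymentDetails);
        }
    }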
+	/// Messages to send to the peer - pushed to in the same lock that they are generated in (except
+	/// for broadcast messages, where ordering isn't as strict).
+	pub(super) pending_msg_events: Vec<MessageSendEvent>,
 }
 
 /// Stores a PaymentSecret and any other data we may need to validate an inbound payment is
@@ -506,6 +523,8 @@ pub type SimpleArcChannelManager<M, T, F, L> = ChannelManager<
 	Arc<M>,
 	Arc<T>,
 	Arc<KeysManager>,
+	Arc<KeysManager>,
+	Arc<KeysManager>,
 	Arc<F>,
 	Arc<DefaultRouter<
 		Arc<NetworkGraph<Arc<L>>>,
@@ -525,7 +544,7 @@ pub type SimpleArcChannelManager<M, T, F, L> = ChannelManager<
 /// type alias chooses the concrete types of KeysManager and DefaultRouter.
 ///
 /// (C-not exported) as Arcs don't make sense in bindings
-pub type SimpleRefChannelManager<'a, 'b, 'c, 'd, 'e, 'f, 'g, 'h, M, T, F, L> = ChannelManager<&'a M, &'b T, &'c KeysManager, &'d F, &'e DefaultRouter<&'f NetworkGraph<&'g L>, &'g L, &'h Mutex<ProbabilisticScorer<&'f NetworkGraph<&'g L>, &'g L>>>, &'g L>;
+pub type SimpleRefChannelManager<'a, 'b, 'c, 'd, 'e, 'f, 'g, 'h, M, T, F, L> = ChannelManager<&'a M, &'b T, &'c KeysManager, &'c KeysManager, &'c KeysManager, &'d F, &'e DefaultRouter<&'f NetworkGraph<&'g L>, &'g L, &'h Mutex<ProbabilisticScorer<&'f NetworkGraph<&'g L>, &'g L>>>, &'g L>;
 
 /// Manager which keeps track of a number of channels and sends messages to the appropriate
 /// channel, also tracking HTLC preimages and forwarding onion packets appropriately.
@@ -579,35 +598,35 @@
 // |   |
 // |   |__`pending_intercepted_htlcs`
 // |
-// |__`pending_inbound_payments`
-// |   |
-// |   |__`claimable_payments`
+// |__`per_peer_state`
 // |   |
-// |   |__`pending_outbound_payments` // This field's struct contains a map of pending outbounds
+// |   |__`pending_inbound_payments`
 // |   |
-// |   |__`channel_state`
+// |   |__`claimable_payments`
+// |   |
+// |   |__`pending_outbound_payments` // This field's struct contains a map of pending outbounds
 // |   |
-// |   |__`per_peer_state`
+// |   |__`peer_state`
 // |   |
-// |   |__`peer_state`
-// |   |
-// |   |__`id_to_peer`
-// |   |
-// |   |__`short_to_chan_info`
-// |   |
-// |   |__`outbound_scid_aliases`
-// |   |
-// |   |__`best_block`
+// |   |__`id_to_peer`
+// |   |
+// |   |__`short_to_chan_info`
+// |   |
+// |   |__`outbound_scid_aliases`
+// |   |
+// |   |__`best_block`
+// |   |
+// |   |__`pending_events`
 // |   |
-// |   |__`pending_events`
-// |   |
-// |   |__`pending_background_events`
+// |   |__`pending_background_events`
 //
-pub struct ChannelManager<M: Deref, T: Deref, K: Deref, F: Deref, R: Deref, L: Deref>
+pub struct ChannelManager<M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Deref, F: Deref, R: Deref, L: Deref>
 where
-	M::Target: chain::Watch<<K::Target as KeysInterface>::Signer>,
+	M::Target: chain::Watch<<SP::Target as SignerProvider>::Signer>,
 	T::Target: BroadcasterInterface,
-	K::Target: KeysInterface,
+	ES::Target: EntropySource,
+	NS::Target: NodeSigner,
+	SP::Target: SignerProvider,
 	F::Target: FeeEstimator,
 	R::Target: Router,
 	L::Target: Logger,
@@ -627,12 +646,6 @@ where
 	best_block: RwLock<BestBlock>,
 	secp_ctx: Secp256k1<secp256k1::All>,
 
-	/// See `ChannelManager` struct-level documentation for lock order requirements.
-	#[cfg(any(test, feature = "_test_utils"))]
-	pub(super) channel_state: Mutex<ChannelHolder>,
-	#[cfg(not(any(test, feature = "_test_utils")))]
-	channel_state: Mutex<ChannelHolder>,
-
 	/// Storage for PaymentSecrets and any requirements on future inbound payments before we will
 	/// expose them to users via a PaymentClaimable event. HTLCs which do not meet the requirements
 	/// here are failed when we process them as pending-forwardable-HTLCs, and entries are removed
@@ -699,6 +712,9 @@ where
 	/// the corresponding channel for the event, as we only have access to the `channel_id` during
 	/// the handling of the events.
 	///
+	/// Note that no consistency guarantees are made about the existence of a peer with the
+	/// `counterparty_node_id` in our other maps.
+	///
 	/// TODO:
 	/// The `counterparty_node_id` isn't passed with `MonitorEvent`s currently. To pass it, we need
 	/// to make `counterparty_node_id`'s a required field in `ChannelMonitor`s, which unfortunately
@@ -726,7 +742,6 @@ where
 	#[cfg(not(test))]
 	short_to_chan_info: FairRwLock<HashMap<u64, (PublicKey, [u8; 32])>>,
 
-	our_network_key: SecretKey,
 	our_network_pubkey: PublicKey,
 
 	inbound_payment_key: inbound_payment::ExpandedKey,
@@ -763,9 +778,9 @@ where
 	///
 	/// See `ChannelManager` struct-level documentation for lock order requirements.
 	#[cfg(not(any(test, feature = "_test_utils")))]
-	per_peer_state: FairRwLock<HashMap<PublicKey, Mutex<PeerState<<K::Target as KeysInterface>::Signer>>>>,
+	per_peer_state: FairRwLock<HashMap<PublicKey, Mutex<PeerState<<SP::Target as SignerProvider>::Signer>>>>,
 	#[cfg(any(test, feature = "_test_utils"))]
-	pub(super) per_peer_state: FairRwLock<HashMap<PublicKey, Mutex<PeerState<<K::Target as KeysInterface>::Signer>>>>,
+	pub(super) per_peer_state: FairRwLock<HashMap<PublicKey, Mutex<PeerState<<SP::Target as SignerProvider>::Signer>>>>,
 
 	/// See `ChannelManager` struct-level documentation for lock order requirements.
 	pending_events: Mutex<Vec<events::Event>>,
@@ -781,7 +796,9 @@ where
 
 	persistence_notifier: Notifier,
 
-	keys_manager: K,
+	entropy_source: ES,
+	node_signer: NS,
+	signer_provider: SP,
 
 	logger: L,
 }
@@ -880,12 +897,12 @@ pub const MIN_CLTV_EXPIRY_DELTA: u16 = 6*7;
 pub(super) const CLTV_FAR_FAR_AWAY: u32 = 14 * 24 * 6;
 
 /// Minimum CLTV difference between the current block height and received inbound payments.
-/// Invoices generated for payment to us must set their `min_final_cltv_expiry` field to at least
+/// Invoices generated for payment to us must set their `min_final_cltv_expiry_delta` field to at least
 /// this value.
 // Note that we fail if exactly HTLC_FAIL_BACK_BUFFER + 1 was used, so we need to add one for
 // any payments to succeed. Further, we don't want payments to fail if a block was found while
 // a payment was being routed, so we add an extra block to be safe.
-pub const MIN_FINAL_CLTV_EXPIRY: u32 = HTLC_FAIL_BACK_BUFFER + 3;
+pub const MIN_FINAL_CLTV_EXPIRY_DELTA: u16 = HTLC_FAIL_BACK_BUFFER as u16 + 3;
 
 // Check that our CLTV_EXPIRY is at least CLTV_CLAIM_BUFFER + ANTI_REORG_DELAY + LATENCY_GRACE_PERIOD_BLOCKS,
 // ie that if the next-hop peer fails the HTLC within
@@ -1137,6 +1154,36 @@ impl ChannelDetails {
 	}
 }
 
+/// Used by [`ChannelManager::list_recent_payments`] to express the status of recent payments.
+/// These include payments that have yet to find a successful path, or have unresolved HTLCs.
+#[derive(Debug, PartialEq)]
+pub enum RecentPaymentDetails {
+	/// When a payment is still being sent and awaiting successful delivery.
+	Pending {
+		/// Hash of the payment that is currently being sent but has yet to be fulfilled or
+		/// abandoned.
+		payment_hash: PaymentHash,
+		/// Total amount (in msat, excluding fees) across all paths for this payment,
+		/// not just the amount currently inflight.
+		total_msat: u64,
+	},
+	/// When a pending payment is fulfilled, we continue tracking it until all pending HTLCs have
+	/// been resolved. Upon receiving [`Event::PaymentSent`], we delay for a few minutes before the
+	/// payment is removed from tracking.
+	Fulfilled {
+		/// Hash of the payment that was claimed. `None` for serializations of [`ChannelManager`]
+		/// made before LDK version 0.0.104.
+		payment_hash: Option<PaymentHash>,
+	},
+	/// After a payment is explicitly abandoned by calling [`ChannelManager::abandon_payment`], it
+	/// is marked as abandoned until an [`Event::PaymentFailed`] is generated. A payment could also
+	/// be marked as abandoned if pathfinding fails repeatedly or retries have been exhausted.
+	Abandoned {
+		/// Hash of the payment that we have given up trying to send.
+		payment_hash: PaymentHash,
+	},
+}
+
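`RecentPaymentDetails` entries are surfaced by `ChannelManager::list_recent_payments`, added later in this patch. A sketch of the crash-recovery check described in its docs, assuming the caller persisted the `payment_hash` of the payment it was preparing:

    // If the payment is not tracked and no Event::PaymentSent has been seen, the
    // ChannelManager likely never committed to it, so re-sending may be considered.
    let tracked = channel_manager.list_recent_payments().iter().any(|details| match details {
        RecentPaymentDetails::Pending { payment_hash: hash, .. } => *hash == payment_hash,
        RecentPaymentDetails::Fulfilled { payment_hash: hash } => hash.as_ref() == Some(&payment_hash),
        RecentPaymentDetails::Abandoned { payment_hash: hash } => *hash == payment_hash,
    });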
 /// Route hints used in constructing invoices for [phantom node payments].
 ///
 /// [phantom node payments]: crate::chain::keysinterface::PhantomKeysManager
@@ -1156,12 +1203,12 @@ macro_rules! handle_error {
 		match $internal {
 			Ok(msg) => Ok(msg),
 			Err(MsgHandleErrInternal { err, chan_id, shutdown_finish }) => {
-				#[cfg(debug_assertions)]
+				#[cfg(any(feature = "_test_utils", test))]
 				{
 					// In testing, ensure there are no deadlocks where the lock is already held upon
 					// entering the macro.
-					assert!($self.channel_state.try_lock().is_ok());
-					assert!($self.pending_events.try_lock().is_ok());
+					debug_assert!($self.pending_events.try_lock().is_ok());
+					debug_assert!($self.per_peer_state.try_write().is_ok());
 				}
 
 				let mut msg_events = Vec::with_capacity(2);
@@ -1191,7 +1238,31 @@ macro_rules! handle_error {
 				}
 
 				if !msg_events.is_empty() {
-					$self.channel_state.lock().unwrap().pending_msg_events.append(&mut msg_events);
+					let per_peer_state = $self.per_peer_state.read().unwrap();
+					if let Some(peer_state_mutex) = per_peer_state.get(&$counterparty_node_id) {
+						let mut peer_state = peer_state_mutex.lock().unwrap();
+						peer_state.pending_msg_events.append(&mut msg_events);
+					}
+					#[cfg(any(feature = "_test_utils", test))]
+					{
+						if let None = per_peer_state.get(&$counterparty_node_id) {
+							// This shouldn't occur in tests unless an unknown counterparty_node_id
+							// has been passed to our message handling functions.
+							let expected_error_str = format!("Can't find a peer matching the passed counterparty node_id {}", $counterparty_node_id);
+							match err.action {
+								msgs::ErrorAction::SendErrorMessage {
+									msg: msgs::ErrorMessage { ref channel_id, ref data }
+								}
+								=> {
+									assert_eq!(*data, expected_error_str);
+									if let Some((err_channel_id, _user_channel_id)) = chan_id {
+										debug_assert_eq!(*channel_id, err_channel_id);
+									}
+								}
+								_ => debug_assert!(false, "Unexpected event"),
+							}
+						}
+					}
 				}
 
 				// Return error in case the higher-level API needs one
@@ -1391,11 +1462,13 @@ macro_rules! emit_channel_ready_event {
 	}
 }
 
-impl<M: Deref, T: Deref, K: Deref, F: Deref, R: Deref, L: Deref> ChannelManager<M, T, K, F, R, L>
+impl<M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Deref, F: Deref, R: Deref, L: Deref> ChannelManager<M, T, ES, NS, SP, F, R, L>
 where
-	M::Target: chain::Watch<<K::Target as KeysInterface>::Signer>,
+	M::Target: chain::Watch<<SP::Target as SignerProvider>::Signer>,
 	T::Target: BroadcasterInterface,
-	K::Target: KeysInterface,
+	ES::Target: EntropySource,
+	NS::Target: NodeSigner,
+	SP::Target: SignerProvider,
 	F::Target: FeeEstimator,
 	R::Target: Router,
 	L::Target: Logger,
@@ -1410,10 +1483,10 @@ where
 	/// Users need to notify the new ChannelManager when a new block is connected or
 	/// disconnected using its `block_connected` and `block_disconnected` methods, starting
 	/// from after `params.latest_hash`.
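With `KeysInterface` split apart, `new` now takes separate `EntropySource`, `NodeSigner`, and `SignerProvider` arguments. A single `KeysManager` can fill all three roles, mirroring the `SimpleArcChannelManager` alias above (a sketch; the other constructor arguments are assumed to be in scope):

    let keys_manager = Arc::new(KeysManager::new(&seed, starting_time_secs, starting_time_nanos));
    let channel_manager = ChannelManager::new(
        fee_estimator, chain_monitor, tx_broadcaster, router, logger,
        Arc::clone(&keys_manager), // ES: EntropySource
        Arc::clone(&keys_manager), // NS: NodeSigner
        Arc::clone(&keys_manager), // SP: SignerProvider
        user_config, chain_params,
    );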
- pub fn new(fee_est: F, chain_monitor: M, tx_broadcaster: T, router: R, logger: L, keys_manager: K, config: UserConfig, params: ChainParameters) -> Self { + pub fn new(fee_est: F, chain_monitor: M, tx_broadcaster: T, router: R, logger: L, entropy_source: ES, node_signer: NS, signer_provider: SP, config: UserConfig, params: ChainParameters) -> Self { let mut secp_ctx = Secp256k1::new(); - secp_ctx.seeded_randomize(&keys_manager.get_secure_random_bytes()); - let inbound_pmt_key_material = keys_manager.get_inbound_payment_key_material(); + secp_ctx.seeded_randomize(&entropy_source.get_secure_random_bytes()); + let inbound_pmt_key_material = node_signer.get_inbound_payment_key_material(); let expanded_inbound_key = inbound_payment::ExpandedKey::new(&inbound_pmt_key_material); ChannelManager { default_configuration: config.clone(), @@ -1425,9 +1498,6 @@ where best_block: RwLock::new(params.best_block), - channel_state: Mutex::new(ChannelHolder{ - pending_msg_events: Vec::new(), - }), outbound_scid_aliases: Mutex::new(HashSet::new()), pending_inbound_payments: Mutex::new(HashMap::new()), pending_outbound_payments: OutboundPayments::new(), @@ -1437,14 +1507,13 @@ where id_to_peer: Mutex::new(HashMap::new()), short_to_chan_info: FairRwLock::new(HashMap::new()), - our_network_key: keys_manager.get_node_secret(Recipient::Node).unwrap(), - our_network_pubkey: PublicKey::from_secret_key(&secp_ctx, &keys_manager.get_node_secret(Recipient::Node).unwrap()), + our_network_pubkey: node_signer.get_node_id(Recipient::Node).unwrap(), secp_ctx, inbound_payment_key: expanded_inbound_key, - fake_scid_rand_bytes: keys_manager.get_secure_random_bytes(), + fake_scid_rand_bytes: entropy_source.get_secure_random_bytes(), - probing_cookie_secret: keys_manager.get_secure_random_bytes(), + probing_cookie_secret: entropy_source.get_secure_random_bytes(), highest_seen_timestamp: AtomicUsize::new(0), @@ -1455,7 +1524,9 @@ where total_consistency_lock: RwLock::new(()), persistence_notifier: Notifier::new(), - keys_manager, + entropy_source, + node_signer, + signer_provider, logger, } @@ -1474,7 +1545,7 @@ where if cfg!(fuzzing) { // fuzzing chacha20 doesn't use the key at all so we always get the same alias outbound_scid_alias += 1; } else { - outbound_scid_alias = fake_scid::Namespace::OutboundAlias.get_fake_scid(height, &self.genesis_hash, &self.fake_scid_rand_bytes, &self.keys_manager); + outbound_scid_alias = fake_scid::Namespace::OutboundAlias.get_fake_scid(height, &self.genesis_hash, &self.fake_scid_rand_bytes, &self.entropy_source); } if outbound_scid_alias != 0 && self.outbound_scid_aliases.lock().unwrap().insert(outbound_scid_alias) { break; @@ -1515,41 +1586,37 @@ where return Err(APIError::APIMisuseError { err: format!("Channel value must be at least 1000 satoshis. It was {}", channel_value_satoshis) }); } + let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier); + // We want to make sure the lock is actually acquired by PersistenceNotifierGuard. 
+ debug_assert!(&self.total_consistency_lock.try_write().is_err()); + + let per_peer_state = self.per_peer_state.read().unwrap(); + + let peer_state_mutex_opt = per_peer_state.get(&their_network_key); + if let None = peer_state_mutex_opt { + return Err(APIError::APIMisuseError { err: format!("Not connected to node: {}", their_network_key) }); + } + + let mut peer_state = peer_state_mutex_opt.unwrap().lock().unwrap(); let channel = { - let per_peer_state = self.per_peer_state.read().unwrap(); - match per_peer_state.get(&their_network_key) { - Some(peer_state) => { - let outbound_scid_alias = self.create_and_insert_outbound_scid_alias(); - let peer_state = peer_state.lock().unwrap(); - let their_features = &peer_state.latest_features; - let config = if override_config.is_some() { override_config.as_ref().unwrap() } else { &self.default_configuration }; - match Channel::new_outbound(&self.fee_estimator, &self.keys_manager, their_network_key, - their_features, channel_value_satoshis, push_msat, user_channel_id, config, - self.best_block.read().unwrap().height(), outbound_scid_alias) - { - Ok(res) => res, - Err(e) => { - self.outbound_scid_aliases.lock().unwrap().remove(&outbound_scid_alias); - return Err(e); - }, - } + let outbound_scid_alias = self.create_and_insert_outbound_scid_alias(); + let their_features = &peer_state.latest_features; + let config = if override_config.is_some() { override_config.as_ref().unwrap() } else { &self.default_configuration }; + match Channel::new_outbound(&self.fee_estimator, &self.entropy_source, &self.signer_provider, their_network_key, + their_features, channel_value_satoshis, push_msat, user_channel_id, config, + self.best_block.read().unwrap().height(), outbound_scid_alias) + { + Ok(res) => res, + Err(e) => { + self.outbound_scid_aliases.lock().unwrap().remove(&outbound_scid_alias); + return Err(e); }, - None => return Err(APIError::ChannelUnavailable { err: format!("Not connected to node: {}", their_network_key) }), } }; let res = channel.get_open_channel(self.genesis_hash.clone()); - let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier); - // We want to make sure the lock is actually acquired by PersistenceNotifierGuard. 
- debug_assert!(&self.total_consistency_lock.try_write().is_err()); - let temporary_channel_id = channel.channel_id(); - let mut channel_state = self.channel_state.lock().unwrap(); - let per_peer_state = self.per_peer_state.read().unwrap(); - if let Some(peer_state_mutex) = per_peer_state.get(&their_network_key){ - let mut peer_state_lock = peer_state_mutex.lock().unwrap(); - let peer_state = &mut *peer_state_lock; - match peer_state.channel_by_id.entry(temporary_channel_id) { + match peer_state.channel_by_id.entry(temporary_channel_id) { hash_map::Entry::Occupied(_) => { if cfg!(fuzzing) { return Err(APIError::APIMisuseError { err: "Fuzzy bad RNG".to_owned() }); @@ -1558,16 +1625,16 @@ where } }, hash_map::Entry::Vacant(entry) => { entry.insert(channel); } - } - } else { return Err(APIError::ChannelUnavailable { err: format!("Not connected to node: {}", their_network_key) }) } - channel_state.pending_msg_events.push(events::MessageSendEvent::SendOpenChannel { + } + + peer_state.pending_msg_events.push(events::MessageSendEvent::SendOpenChannel { node_id: their_network_key, msg: res, }); Ok(temporary_channel_id) } - fn list_channels_with_filter::Signer>)) -> bool + Copy>(&self, f: Fn) -> Vec { + fn list_channels_with_filter::Signer>)) -> bool + Copy>(&self, f: Fn) -> Vec { let mut res = Vec::new(); // Allocate our best estimate of the number of channels we have in the `res` // Vec. Sadly the `short_to_chan_info` map doesn't cover channels without @@ -1654,8 +1721,36 @@ where self.list_channels_with_filter(|&(_, ref channel)| channel.is_live()) } + /// Returns in an undefined order recent payments that -- if not fulfilled -- have yet to find a + /// successful path, or have unresolved HTLCs. + /// + /// This can be useful for payments that may have been prepared, but ultimately not sent, as a + /// result of a crash. If such a payment exists, is not listed here, and an + /// [`Event::PaymentSent`] has not been received, you may consider retrying the payment. + /// + /// [`Event::PaymentSent`]: events::Event::PaymentSent + pub fn list_recent_payments(&self) -> Vec { + self.pending_outbound_payments.pending_outbound_payments.lock().unwrap().iter() + .filter_map(|(_, pending_outbound_payment)| match pending_outbound_payment { + PendingOutboundPayment::Retryable { payment_hash, total_msat, .. } => { + Some(RecentPaymentDetails::Pending { + payment_hash: *payment_hash, + total_msat: *total_msat, + }) + }, + PendingOutboundPayment::Abandoned { payment_hash, .. } => { + Some(RecentPaymentDetails::Abandoned { payment_hash: *payment_hash }) + }, + PendingOutboundPayment::Fulfilled { payment_hash, .. } => { + Some(RecentPaymentDetails::Fulfilled { payment_hash: *payment_hash }) + }, + PendingOutboundPayment::Legacy { .. 
} => None + }) + .collect() + } + /// Helper function that issues the channel close events - fn issue_channel_close_events(&self, channel: &Channel<::Signer>, closure_reason: ClosureReason) { + fn issue_channel_close_events(&self, channel: &Channel<::Signer>, closure_reason: ClosureReason) { let mut pending_events_lock = self.pending_events.lock().unwrap(); match channel.unbroadcasted_funding() { Some(transaction) => { @@ -1675,51 +1770,48 @@ where let mut failed_htlcs: Vec<(HTLCSource, PaymentHash)>; let result: Result<(), _> = loop { - let mut channel_state_lock = self.channel_state.lock().unwrap(); - let channel_state = &mut *channel_state_lock; let per_peer_state = self.per_peer_state.read().unwrap(); - if let Some(peer_state_mutex) = per_peer_state.get(counterparty_node_id) { - let mut peer_state_lock = peer_state_mutex.lock().unwrap(); - let peer_state = &mut *peer_state_lock; - match peer_state.channel_by_id.entry(channel_id.clone()) { - hash_map::Entry::Occupied(mut chan_entry) => { - if *counterparty_node_id != chan_entry.get().get_counterparty_node_id(){ - return Err(APIError::APIMisuseError { err: "The passed counterparty_node_id doesn't match the channel's counterparty node_id".to_owned() }); - } - let (shutdown_msg, monitor_update, htlcs) = chan_entry.get_mut().get_shutdown(&self.keys_manager, &peer_state.latest_features, target_feerate_sats_per_1000_weight)?; - failed_htlcs = htlcs; - - // Update the monitor with the shutdown script if necessary. - if let Some(monitor_update) = monitor_update { - let update_res = self.chain_monitor.update_channel(chan_entry.get().get_funding_txo().unwrap(), monitor_update); - let (result, is_permanent) = - handle_monitor_update_res!(self, update_res, chan_entry.get_mut(), RAACommitmentOrder::CommitmentFirst, chan_entry.key(), NO_UPDATE); - if is_permanent { - remove_channel!(self, chan_entry); - break result; - } + + let peer_state_mutex_opt = per_peer_state.get(counterparty_node_id); + if let None = peer_state_mutex_opt { + return Err(APIError::APIMisuseError { err: format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id) }); + } + + let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap(); + let peer_state = &mut *peer_state_lock; + match peer_state.channel_by_id.entry(channel_id.clone()) { + hash_map::Entry::Occupied(mut chan_entry) => { + let (shutdown_msg, monitor_update, htlcs) = chan_entry.get_mut().get_shutdown(&self.signer_provider, &peer_state.latest_features, target_feerate_sats_per_1000_weight)?; + failed_htlcs = htlcs; + + // Update the monitor with the shutdown script if necessary. 
+ if let Some(monitor_update) = monitor_update { + let update_res = self.chain_monitor.update_channel(chan_entry.get().get_funding_txo().unwrap(), &monitor_update); + let (result, is_permanent) = + handle_monitor_update_res!(self, update_res, chan_entry.get_mut(), RAACommitmentOrder::CommitmentFirst, chan_entry.key(), NO_UPDATE); + if is_permanent { + remove_channel!(self, chan_entry); + break result; } + } - channel_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown { - node_id: *counterparty_node_id, - msg: shutdown_msg - }); + peer_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown { + node_id: *counterparty_node_id, + msg: shutdown_msg + }); - if chan_entry.get().is_shutdown() { - let channel = remove_channel!(self, chan_entry); - if let Ok(channel_update) = self.get_channel_update_for_broadcast(&channel) { - channel_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate { - msg: channel_update - }); - } - self.issue_channel_close_events(&channel, ClosureReason::HolderForceClosed); + if chan_entry.get().is_shutdown() { + let channel = remove_channel!(self, chan_entry); + if let Ok(channel_update) = self.get_channel_update_for_broadcast(&channel) { + peer_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate { + msg: channel_update + }); } - break Ok(()); - }, - hash_map::Entry::Vacant(_) => return Err(APIError::ChannelUnavailable { err: "No such channel".to_owned() }) - } - } else { - return Err(APIError::ChannelUnavailable { err: format!("Not connected to node: {}", counterparty_node_id) }); + self.issue_channel_close_events(&channel, ClosureReason::HolderForceClosed); + } + break Ok(()); + }, + hash_map::Entry::Vacant(_) => return Err(APIError::ChannelUnavailable{err: format!("Channel with id {} not found for the passed counterparty node_id {}", log_bytes!(*channel_id), counterparty_node_id) }) } }; @@ -1792,7 +1884,7 @@ where // force-closing. The monitor update on the required in-memory copy should broadcast // the latest local state, which is the best we can do anyway. Thus, it is safe to // ignore the result here. - let _ = self.chain_monitor.update_channel(funding_txo, monitor_update); + let _ = self.chain_monitor.update_channel(funding_txo, &monitor_update); } } @@ -1800,33 +1892,30 @@ where /// user closes, which will be re-exposed as the `ChannelClosed` reason. 
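Both the cooperative and force-close paths now look the peer up before the channel, so callers see two distinct failures: an unknown `counterparty_node_id` versus a known peer without the given channel. A sketch against the public close API (hypothetical variables; the error strings are those introduced in this patch):

    match channel_manager.close_channel(&channel_id, &counterparty_node_id) {
        Ok(()) => {},
        // Unknown peer: "Can't find a peer matching the passed counterparty node_id ..."
        Err(APIError::APIMisuseError { err }) => eprintln!("unknown peer: {}", err),
        // Known peer, wrong channel: "Channel with id ... not found for the passed
        // counterparty node_id ..."
        Err(APIError::ChannelUnavailable { err }) => eprintln!("unknown channel: {}", err),
        Err(e) => panic!("unexpected error: {:?}", e),
    }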
fn force_close_channel_with_peer(&self, channel_id: &[u8; 32], peer_node_id: &PublicKey, peer_msg: Option<&String>, broadcast: bool) -> Result { + let per_peer_state = self.per_peer_state.read().unwrap(); + let peer_state_mutex_opt = per_peer_state.get(peer_node_id); let mut chan = { - let per_peer_state = self.per_peer_state.read().unwrap(); - if let Some(peer_state_mutex) = per_peer_state.get(peer_node_id) { - let mut peer_state_lock = peer_state_mutex.lock().unwrap(); - let peer_state = &mut *peer_state_lock; - if let hash_map::Entry::Occupied(chan) = peer_state.channel_by_id.entry(channel_id.clone()) { - if chan.get().get_counterparty_node_id() != *peer_node_id { - return Err(APIError::ChannelUnavailable{err: "No such channel".to_owned()}); - } - if let Some(peer_msg) = peer_msg { - self.issue_channel_close_events(chan.get(),ClosureReason::CounterpartyForceClosed { peer_msg: peer_msg.to_string() }); - } else { - self.issue_channel_close_events(chan.get(),ClosureReason::HolderForceClosed); - } - remove_channel!(self, chan) + if let None = peer_state_mutex_opt { + return Err(APIError::APIMisuseError{ err: format!("Can't find a peer matching the passed counterparty node_id {}", peer_node_id) }); + } + let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap(); + let peer_state = &mut *peer_state_lock; + if let hash_map::Entry::Occupied(chan) = peer_state.channel_by_id.entry(channel_id.clone()) { + if let Some(peer_msg) = peer_msg { + self.issue_channel_close_events(chan.get(),ClosureReason::CounterpartyForceClosed { peer_msg: peer_msg.to_string() }); } else { - return Err(APIError::ChannelUnavailable{err: "No such channel".to_owned()}); + self.issue_channel_close_events(chan.get(),ClosureReason::HolderForceClosed); } + remove_channel!(self, chan) } else { - return Err(APIError::APIMisuseError{ err: format!("Can't find a peer with a node_id matching the passed counterparty_node_id {}", peer_node_id) }); + return Err(APIError::ChannelUnavailable{ err: format!("Channel with id {} not found for the passed counterparty node_id {}", log_bytes!(*channel_id), peer_node_id) }); } }; log_error!(self.logger, "Force-closing channel {}", log_bytes!(channel_id[..])); self.finish_force_close_channel(chan.force_shutdown(broadcast)); if let Ok(update) = self.get_channel_update_for_broadcast(&chan) { - let mut channel_state = self.channel_state.lock().unwrap(); - channel_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate { + let mut peer_state = peer_state_mutex_opt.unwrap().lock().unwrap(); + peer_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate { msg: update }); } @@ -1838,14 +1927,18 @@ where let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier); match self.force_close_channel_with_peer(channel_id, counterparty_node_id, None, broadcast) { Ok(counterparty_node_id) => { - self.channel_state.lock().unwrap().pending_msg_events.push( - events::MessageSendEvent::HandleError { - node_id: counterparty_node_id, - action: msgs::ErrorAction::SendErrorMessage { - msg: msgs::ErrorMessage { channel_id: *channel_id, data: "Channel force-closed".to_owned() } - }, - } - ); + let per_peer_state = self.per_peer_state.read().unwrap(); + if let Some(peer_state_mutex) = per_peer_state.get(&counterparty_node_id) { + let mut peer_state = peer_state_mutex.lock().unwrap(); + peer_state.pending_msg_events.push( + events::MessageSendEvent::HandleError { + node_id: counterparty_node_id, + 
action: msgs::ErrorAction::SendErrorMessage { + msg: msgs::ErrorMessage { channel_id: *channel_id, data: "Channel force-closed".to_owned() } + }, + } + ); + } Ok(()) }, Err(e) => Err(e) @@ -1902,6 +1995,7 @@ where // final_expiry_too_soon // We have to have some headroom to broadcast on chain if we have the preimage, so make sure // we have at least HTLC_FAIL_BACK_BUFFER blocks to go. + // // Also, ensure that, in the case of an unknown preimage for the received payment hash, our // payment logic has enough time to fail the HTLC backward before our onchain logic triggers a // channel closure (see HTLC_FAIL_BACK_BUFFER rationale). @@ -2001,7 +2095,9 @@ where return_malformed_err!("invalid ephemeral pubkey", 0x8000 | 0x4000 | 6); } - let shared_secret = SharedSecret::new(&msg.onion_routing_packet.public_key.unwrap(), &self.our_network_key).secret_bytes(); + let shared_secret = self.node_signer.ecdh( + Recipient::Node, &msg.onion_routing_packet.public_key.unwrap(), None + ).unwrap().secret_bytes(); if msg.onion_routing_packet.version != 0 { //TODO: Spec doesn't indicate if we should only hash hop_data here (and in other @@ -2086,7 +2182,7 @@ where // short_channel_id is non-0 in any ::Forward. if let &PendingHTLCRouting::Forward { ref short_channel_id, .. } = routing { if let Some((err, mut code, chan_update)) = loop { - let id_option = self.short_to_chan_info.read().unwrap().get(&short_channel_id).cloned(); + let id_option = self.short_to_chan_info.read().unwrap().get(short_channel_id).cloned(); let forwarding_chan_info_opt = match id_option { None => { // unknown_next_peer // Note that this is likely a timing oracle for detecting whether an scid is a @@ -2104,11 +2200,11 @@ where }; let chan_update_opt = if let Some((counterparty_node_id, forwarding_id)) = forwarding_chan_info_opt { let per_peer_state = self.per_peer_state.read().unwrap(); - if let None = per_peer_state.get(&counterparty_node_id) { + let peer_state_mutex_opt = per_peer_state.get(&counterparty_node_id); + if let None = peer_state_mutex_opt { break Some(("Don't have available channel for forwarding as requested.", 0x4000 | 10, None)); } - let peer_state_mutex = per_peer_state.get(&counterparty_node_id).unwrap(); - let mut peer_state_lock = peer_state_mutex.lock().unwrap(); + let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap(); let peer_state = &mut *peer_state_lock; let chan = match peer_state.channel_by_id.get_mut(&forwarding_id) { None => { @@ -2220,7 +2316,7 @@ where /// [`MessageSendEvent::BroadcastChannelUpdate`] event. /// /// May be called with peer_state already locked! - fn get_channel_update_for_broadcast(&self, chan: &Channel<::Signer>) -> Result { + fn get_channel_update_for_broadcast(&self, chan: &Channel<::Signer>) -> Result { if !chan.should_announce() { return Err(LightningError { err: "Cannot broadcast a channel_update for a private channel".to_owned(), @@ -2239,7 +2335,7 @@ where /// and thus MUST NOT be called unless the recipient of the resulting message has already /// provided evidence that they know about the existence of the channel. /// May be called with peer_state already locked! 
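The hunk above also replaces the raw `SharedSecret::new(..., &self.our_network_key)` computation with `NodeSigner::ecdh`, keeping the node secret behind the signer abstraction. The shape of the call in isolation (a sketch; `node_signer` and `ephemeral_pubkey` are assumptions standing in for the values used in `decode_update_add_htlc_onion`):

    // Derive the onion shared secret from the sender's ephemeral pubkey. Recipient::Node
    // selects our regular node key; Recipient::PhantomNode (used for phantom receives
    // later in this patch) selects the phantom key. The trailing None is an optional
    // tweak applied to our private key before the ECDH operation.
    let shared_secret: [u8; 32] = node_signer
        .ecdh(Recipient::Node, &ephemeral_pubkey, None)
        .expect("node key must be available")
        .secret_bytes();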
-	fn get_channel_update_for_unicast(&self, chan: &Channel<<K::Target as KeysInterface>::Signer>) -> Result<msgs::ChannelUpdate, LightningError> {
+	fn get_channel_update_for_unicast(&self, chan: &Channel<<SP::Target as SignerProvider>::Signer>) -> Result<msgs::ChannelUpdate, LightningError> {
 		log_trace!(self.logger, "Attempting to generate channel update for channel {}", log_bytes!(chan.channel_id()));
 		let short_channel_id = match chan.get_short_channel_id().or(chan.latest_inbound_scid_alias()) {
 			None => return Err(LightningError{err: "Channel not yet established".to_owned(), action: msgs::ErrorAction::IgnoreError}),
@@ -2248,9 +2344,9 @@ where
 		self.get_channel_update_for_onion(short_channel_id, chan)
 	}
 
-	fn get_channel_update_for_onion(&self, short_channel_id: u64, chan: &Channel<<K::Target as KeysInterface>::Signer>) -> Result<msgs::ChannelUpdate, LightningError> {
+	fn get_channel_update_for_onion(&self, short_channel_id: u64, chan: &Channel<<SP::Target as SignerProvider>::Signer>) -> Result<msgs::ChannelUpdate, LightningError> {
 		log_trace!(self.logger, "Generating channel update for channel {}", log_bytes!(chan.channel_id()));
-		let were_node_one = PublicKey::from_secret_key(&self.secp_ctx, &self.our_network_key).serialize()[..] < chan.get_counterparty_node_id().serialize()[..];
+		let were_node_one = self.our_network_pubkey.serialize()[..] < chan.get_counterparty_node_id().serialize()[..];
 
 		let unsigned = msgs::UnsignedChannelUpdate {
 			chain_hash: self.genesis_hash,
@@ -2264,9 +2360,11 @@ where
 			fee_proportional_millionths: chan.get_fee_proportional_millionths(),
 			excess_data: Vec::new(),
 		};
-
-		let msg_hash = Sha256dHash::hash(&unsigned.encode()[..]);
-		let sig = self.secp_ctx.sign_ecdsa(&hash_to_message!(&msg_hash[..]), &self.our_network_key);
+		// Panic on failure to signal LDK should be restarted to retry signing the `ChannelUpdate`.
+		// If we returned an error and the `node_signer` cannot provide a signature for whatever
+		// reason, we wouldn't be able to receive inbound payments through the corresponding
+		// channel.
+ let sig = self.node_signer.sign_gossip_message(msgs::UnsignedGossipMessage::ChannelUpdate(&unsigned)).unwrap(); Ok(msgs::ChannelUpdate { signature: sig, @@ -2277,7 +2375,7 @@ where // Only public for testing, this should otherwise never be called direcly pub(crate) fn send_payment_along_path(&self, path: &Vec, payment_params: &Option, payment_hash: &PaymentHash, payment_secret: &Option, total_value: u64, cur_height: u32, payment_id: PaymentId, keysend_preimage: &Option, session_priv_bytes: [u8; 32]) -> Result<(), APIError> { log_trace!(self.logger, "Attempting to send payment for path with next hop {}", path.first().unwrap().short_channel_id); - let prng_seed = self.keys_manager.get_secure_random_bytes(); + let prng_seed = self.entropy_source.get_secure_random_bytes(); let session_priv = SecretKey::from_slice(&session_priv_bytes[..]).expect("RNG is busted"); let onion_keys = onion_utils::construct_onion_keys(&self.secp_ctx, &path, &session_priv) @@ -2296,72 +2394,72 @@ where Some((cp_id, chan_id)) => (cp_id.clone(), chan_id.clone()), }; - let mut channel_lock = self.channel_state.lock().unwrap(); - let channel_state = &mut *channel_lock; let per_peer_state = self.per_peer_state.read().unwrap(); - if let Some(peer_state_mutex) = per_peer_state.get(&counterparty_node_id) { - let mut peer_state_lock = peer_state_mutex.lock().unwrap(); - let peer_state = &mut *peer_state_lock; - if let hash_map::Entry::Occupied(mut chan) = peer_state.channel_by_id.entry(id) { - match { - if !chan.get().is_live() { - return Err(APIError::ChannelUnavailable{err: "Peer for first hop currently disconnected/pending monitor update!".to_owned()}); + let peer_state_mutex_opt = per_peer_state.get(&counterparty_node_id); + if let None = peer_state_mutex_opt { + return Err(APIError::InvalidRoute{err: "No peer matching the path's first hop found!" }); + } + let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap(); + let peer_state = &mut *peer_state_lock; + if let hash_map::Entry::Occupied(mut chan) = peer_state.channel_by_id.entry(id) { + match { + if !chan.get().is_live() { + return Err(APIError::ChannelUnavailable{err: "Peer for first hop currently disconnected/pending monitor update!".to_owned()}); + } + break_chan_entry!(self, chan.get_mut().send_htlc_and_commit( + htlc_msat, payment_hash.clone(), htlc_cltv, HTLCSource::OutboundRoute { + path: path.clone(), + session_priv: session_priv.clone(), + first_hop_htlc_msat: htlc_msat, + payment_id, + payment_secret: payment_secret.clone(), + payment_params: payment_params.clone(), + }, onion_packet, &self.logger), + chan) + } { + Some((update_add, commitment_signed, monitor_update)) => { + let update_err = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), &monitor_update); + let chan_id = chan.get().channel_id(); + match (update_err, + handle_monitor_update_res!(self, update_err, chan, + RAACommitmentOrder::CommitmentFirst, false, true)) + { + (ChannelMonitorUpdateStatus::PermanentFailure, Err(e)) => break Err(e), + (ChannelMonitorUpdateStatus::Completed, Ok(())) => {}, + (ChannelMonitorUpdateStatus::InProgress, Err(_)) => { + // Note that MonitorUpdateInProgress here indicates (per function + // docs) that we will resend the commitment update once monitor + // updating completes. Therefore, we must return an error + // indicating that it is unsafe to retry the payment wholesale, + // which we do in the send_payment check for + // MonitorUpdateInProgress, below. 
+ return Err(APIError::MonitorUpdateInProgress); + }, + _ => unreachable!(), } - break_chan_entry!(self, chan.get_mut().send_htlc_and_commit( - htlc_msat, payment_hash.clone(), htlc_cltv, HTLCSource::OutboundRoute { - path: path.clone(), - session_priv: session_priv.clone(), - first_hop_htlc_msat: htlc_msat, - payment_id, - payment_secret: payment_secret.clone(), - payment_params: payment_params.clone(), - }, onion_packet, &self.logger), - chan) - } { - Some((update_add, commitment_signed, monitor_update)) => { - let update_err = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), monitor_update); - let chan_id = chan.get().channel_id(); - match (update_err, - handle_monitor_update_res!(self, update_err, chan, - RAACommitmentOrder::CommitmentFirst, false, true)) - { - (ChannelMonitorUpdateStatus::PermanentFailure, Err(e)) => break Err(e), - (ChannelMonitorUpdateStatus::Completed, Ok(())) => {}, - (ChannelMonitorUpdateStatus::InProgress, Err(_)) => { - // Note that MonitorUpdateInProgress here indicates (per function - // docs) that we will resend the commitment update once monitor - // updating completes. Therefore, we must return an error - // indicating that it is unsafe to retry the payment wholesale, - // which we do in the send_payment check for - // MonitorUpdateInProgress, below. - return Err(APIError::MonitorUpdateInProgress); - }, - _ => unreachable!(), - } - log_debug!(self.logger, "Sending payment along path resulted in a commitment_signed for channel {}", log_bytes!(chan_id)); - channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs { - node_id: path.first().unwrap().pubkey, - updates: msgs::CommitmentUpdate { - update_add_htlcs: vec![update_add], - update_fulfill_htlcs: Vec::new(), - update_fail_htlcs: Vec::new(), - update_fail_malformed_htlcs: Vec::new(), - update_fee: None, - commitment_signed, - }, - }); - }, - None => { }, - } - } else { - // The channel was likely removed after we fetched the id from the - // `short_to_chan_info` map, but before we successfully locked the - // `channel_by_id` map. - // This can occur as no consistency guarantees exists between the two maps. - return Err(APIError::ChannelUnavailable{err: "No channel available with first hop!".to_owned()}); + log_debug!(self.logger, "Sending payment along path resulted in a commitment_signed for channel {}", log_bytes!(chan_id)); + peer_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs { + node_id: path.first().unwrap().pubkey, + updates: msgs::CommitmentUpdate { + update_add_htlcs: vec![update_add], + update_fulfill_htlcs: Vec::new(), + update_fail_htlcs: Vec::new(), + update_fail_malformed_htlcs: Vec::new(), + update_fee: None, + commitment_signed, + }, + }); + }, + None => { }, } - } else { return Err(APIError::InvalidRoute{err: "No peer matching the path's first hop found!" })} + } else { + // The channel was likely removed after we fetched the id from the + // `short_to_chan_info` map, but before we successfully locked the + // `channel_by_id` map. + // This can occur as no consistency guarantees exists between the two maps. + return Err(APIError::ChannelUnavailable{err: "No channel available with first hop!".to_owned()}); + } return Ok(()); }; @@ -2375,9 +2473,14 @@ where /// Sends a payment along a given route. /// - /// Value parameters are provided via the last hop in route, see documentation for RouteHop + /// Value parameters are provided via the last hop in route, see documentation for [`RouteHop`] /// fields for more info. 
/// + /// May generate SendHTLCs message(s) event on success, which should be relayed (e.g. via + /// [`PeerManager::process_events`]). + /// + /// # Avoiding Duplicate Payments + /// /// If a pending payment is currently in-flight with the same [`PaymentId`] provided, this /// method will error with an [`APIError::InvalidRoute`]. Note, however, that once a payment /// is no longer pending (either via [`ChannelManager::abandon_payment`], or handling of an @@ -2390,12 +2493,16 @@ where /// consider using the [`PaymentHash`] as the key for tracking payments. In that case, the /// [`PaymentId`] should be a copy of the [`PaymentHash`] bytes. /// - /// May generate SendHTLCs message(s) event on success, which should be relayed (e.g. via - /// [`PeerManager::process_events`]). + /// Additionally, in the scenario where we begin the process of sending a payment, but crash + /// before `send_payment` returns (or prior to [`ChannelMonitorUpdate`] persistence if you're + /// using [`ChannelMonitorUpdateStatus::InProgress`]), the payment may be lost on restart. See + /// [`ChannelManager::list_recent_payments`] for more information. + /// + /// # Possible Error States on [`PaymentSendFailure`] /// /// Each path may have a different return value, and PaymentSendValue may return a Vec with /// each entry matching the corresponding-index entry in the route paths, see - /// PaymentSendFailure for more info. + /// [`PaymentSendFailure`] for more info. /// /// In general, a path may raise: /// * [`APIError::InvalidRoute`] when an invalid route or forwarding parameter (cltv_delta, fee, @@ -2410,22 +2517,37 @@ where /// irrevocably committed to on our end. In such a case, do NOT retry the payment with a /// different route unless you intend to pay twice! /// - /// payment_secret is unrelated to payment_hash (or PaymentPreimage) and exists to authenticate - /// the sender to the recipient and prevent payment-probing (deanonymization) attacks. For - /// newer nodes, it will be provided to you in the invoice. If you do not have one, the Route - /// must not contain multiple paths as multi-path payments require a recipient-provided - /// payment_secret. + /// # A caution on `payment_secret` + /// + /// `payment_secret` is unrelated to `payment_hash` (or [`PaymentPreimage`]) and exists to + /// authenticate the sender to the recipient and prevent payment-probing (deanonymization) + /// attacks. For newer nodes, it will be provided to you in the invoice. If you do not have one, + /// the [`Route`] must not contain multiple paths as multi-path payments require a + /// recipient-provided `payment_secret`. /// - /// If a payment_secret *is* provided, we assume that the invoice had the payment_secret feature - /// bit set (either as required or as available). If multiple paths are present in the Route, - /// we assume the invoice had the basic_mpp feature set. + /// If a `payment_secret` *is* provided, we assume that the invoice had the payment_secret + /// feature bit set (either as required or as available). If multiple paths are present in the + /// [`Route`], we assume the invoice had the basic_mpp feature set. 
/// /// [`Event::PaymentSent`]: events::Event::PaymentSent /// [`PeerManager::process_events`]: crate::ln::peer_handler::PeerManager::process_events + /// [`ChannelMonitorUpdateStatus::InProgress`]: crate::chain::ChannelMonitorUpdateStatus::InProgress pub fn send_payment(&self, route: &Route, payment_hash: PaymentHash, payment_secret: &Option, payment_id: PaymentId) -> Result<(), PaymentSendFailure> { let best_block_height = self.best_block.read().unwrap().height(); self.pending_outbound_payments - .send_payment_with_route(route, payment_hash, payment_secret, payment_id, &self.keys_manager, best_block_height, + .send_payment_with_route(route, payment_hash, payment_secret, payment_id, &self.entropy_source, &self.node_signer, best_block_height, + |path, payment_params, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv| + self.send_payment_along_path(path, payment_params, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv)) + } + + /// Similar to [`ChannelManager::send_payment`], but will automatically find a route based on + /// `route_params` and retry failed payment paths based on `retry_strategy`. + pub fn send_payment_with_retry(&self, payment_hash: PaymentHash, payment_secret: &Option, payment_id: PaymentId, route_params: RouteParameters, retry_strategy: Retry) -> Result<(), PaymentSendFailure> { + let best_block_height = self.best_block.read().unwrap().height(); + self.pending_outbound_payments + .send_payment(payment_hash, payment_secret, payment_id, retry_strategy, route_params, + &self.router, self.list_usable_channels(), self.compute_inflight_htlcs(), + &self.entropy_source, &self.node_signer, best_block_height, &self.logger, |path, payment_params, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv| self.send_payment_along_path(path, payment_params, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv)) } @@ -2433,7 +2555,7 @@ where #[cfg(test)] fn test_send_payment_internal(&self, route: &Route, payment_hash: PaymentHash, payment_secret: &Option, keysend_preimage: Option, payment_id: PaymentId, recv_value_msat: Option, onion_session_privs: Vec<[u8; 32]>) -> Result<(), PaymentSendFailure> { let best_block_height = self.best_block.read().unwrap().height(); - self.pending_outbound_payments.test_send_payment_internal(route, payment_hash, payment_secret, keysend_preimage, payment_id, recv_value_msat, onion_session_privs, &self.keys_manager, best_block_height, + self.pending_outbound_payments.test_send_payment_internal(route, payment_hash, payment_secret, keysend_preimage, payment_id, recv_value_msat, onion_session_privs, &self.node_signer, best_block_height, |path, payment_params, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv| self.send_payment_along_path(path, payment_params, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv)) } @@ -2441,7 +2563,7 @@ where #[cfg(test)] pub(crate) fn test_add_new_pending_payment(&self, payment_hash: PaymentHash, payment_secret: Option, payment_id: PaymentId, route: &Route) -> Result, PaymentSendFailure> { let best_block_height = self.best_block.read().unwrap().height(); - self.pending_outbound_payments.test_add_new_pending_payment(payment_hash, payment_secret, payment_id, route, &self.keys_manager, best_block_height) + 
self.pending_outbound_payments.test_add_new_pending_payment(payment_hash, payment_secret, payment_id, route, None, &self.entropy_source, best_block_height) } @@ -2457,7 +2579,7 @@ where /// [`abandon_payment`]: [`ChannelManager::abandon_payment`] pub fn retry_payment(&self, route: &Route, payment_id: PaymentId) -> Result<(), PaymentSendFailure> { let best_block_height = self.best_block.read().unwrap().height(); - self.pending_outbound_payments.retry_payment_with_route(route, payment_id, &self.keys_manager, best_block_height, + self.pending_outbound_payments.retry_payment_with_route(route, payment_id, &self.entropy_source, &self.node_signer, best_block_height, |path, payment_params, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv| self.send_payment_along_path(path, payment_params, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv)) } @@ -2506,7 +2628,26 @@ where /// [`send_payment`]: Self::send_payment pub fn send_spontaneous_payment(&self, route: &Route, payment_preimage: Option, payment_id: PaymentId) -> Result { let best_block_height = self.best_block.read().unwrap().height(); - self.pending_outbound_payments.send_spontaneous_payment(route, payment_preimage, payment_id, &self.keys_manager, best_block_height, + self.pending_outbound_payments.send_spontaneous_payment_with_route( + route, payment_preimage, payment_id, &self.entropy_source, &self.node_signer, + best_block_height, + |path, payment_params, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv| + self.send_payment_along_path(path, payment_params, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv)) + } + + /// Similar to [`ChannelManager::send_spontaneous_payment`], but will automatically find a route + /// based on `route_params` and retry failed payment paths based on `retry_strategy`. + /// + /// See [`PaymentParameters::for_keysend`] for help in constructing `route_params` for spontaneous + /// payments. + /// + /// [`PaymentParameters::for_keysend`]: crate::routing::router::PaymentParameters::for_keysend + pub fn send_spontaneous_payment_with_retry(&self, payment_preimage: Option, payment_id: PaymentId, route_params: RouteParameters, retry_strategy: Retry) -> Result { + let best_block_height = self.best_block.read().unwrap().height(); + self.pending_outbound_payments.send_spontaneous_payment(payment_preimage, payment_id, + retry_strategy, route_params, &self.router, self.list_usable_channels(), + self.compute_inflight_htlcs(), &self.entropy_source, &self.node_signer, best_block_height, + &self.logger, |path, payment_params, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv| self.send_payment_along_path(path, payment_params, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv)) } @@ -2516,7 +2657,7 @@ where /// us to easily discern them from real payments. 
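Tying the retry pieces together: with `send_payment_with_retry` the caller supplies `RouteParameters` instead of a pre-built `Route`, and the `ChannelManager`'s `Router` finds (and, per the `Retry` strategy, re-finds) the paths. A sketch with hypothetical invoice-derived values:

    let route_params = RouteParameters {
        payment_params: PaymentParameters::from_node_id(payee_pubkey),
        final_value_msat: 20_000,
        final_cltv_expiry_delta: MIN_FINAL_CLTV_EXPIRY_DELTA as u32,
    };
    channel_manager.send_payment_with_retry(
        payment_hash, &Some(payment_secret), PaymentId(payment_hash.0),
        route_params, Retry::Attempts(3),
    ).expect("failed to queue payment");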
pub fn send_probe(&self, hops: Vec) -> Result<(PaymentHash, PaymentId), PaymentSendFailure> { let best_block_height = self.best_block.read().unwrap().height(); - self.pending_outbound_payments.send_probe(hops, self.probing_cookie_secret, &self.keys_manager, best_block_height, + self.pending_outbound_payments.send_probe(hops, self.probing_cookie_secret, &self.entropy_source, &self.node_signer, best_block_height, |path, payment_params, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv| self.send_payment_along_path(path, payment_params, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv)) } @@ -2530,29 +2671,30 @@ where /// Handles the generation of a funding transaction, optionally (for tests) with a function /// which checks the correctness of the funding transaction given the associated channel. - fn funding_transaction_generated_intern::Signer>, &Transaction) -> Result>( + fn funding_transaction_generated_intern::Signer>, &Transaction) -> Result>( &self, temporary_channel_id: &[u8; 32], counterparty_node_id: &PublicKey, funding_transaction: Transaction, find_funding_output: FundingOutput ) -> Result<(), APIError> { + let per_peer_state = self.per_peer_state.read().unwrap(); + let peer_state_mutex_opt = per_peer_state.get(counterparty_node_id); + if let None = peer_state_mutex_opt { + return Err(APIError::ChannelUnavailable { err: format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id) }) + } + + let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap(); + let peer_state = &mut *peer_state_lock; let (chan, msg) = { let (res, chan) = { - let per_peer_state = self.per_peer_state.read().unwrap(); - if let Some(peer_state_mutex) = per_peer_state.get(counterparty_node_id) { - let mut peer_state_lock = peer_state_mutex.lock().unwrap(); - let peer_state = &mut *peer_state_lock; - match peer_state.channel_by_id.remove(temporary_channel_id) { - Some(mut chan) => { - let funding_txo = find_funding_output(&chan, &funding_transaction)?; - - (chan.get_outbound_funding_created(funding_transaction, funding_txo, &self.logger) - .map_err(|e| if let ChannelError::Close(msg) = e { - MsgHandleErrInternal::from_finish_shutdown(msg, chan.channel_id(), chan.get_user_id(), chan.force_shutdown(true), None) - } else { unreachable!(); }) - , chan) - }, - None => { return Err(APIError::ChannelUnavailable { err: "No such channel".to_owned() }) }, - } - } else { - return Err(APIError::APIMisuseError { err: format!("Can't find a peer with a node_id matching the passed counterparty_node_id {}", counterparty_node_id) }) + match peer_state.channel_by_id.remove(temporary_channel_id) { + Some(mut chan) => { + let funding_txo = find_funding_output(&chan, &funding_transaction)?; + + (chan.get_outbound_funding_created(funding_transaction, funding_txo, &self.logger) + .map_err(|e| if let ChannelError::Close(msg) = e { + MsgHandleErrInternal::from_finish_shutdown(msg, chan.channel_id(), chan.get_user_id(), chan.force_shutdown(true), None) + } else { unreachable!(); }) + , chan) + }, + None => { return Err(APIError::ChannelUnavailable { err: format!("Channel with id {} not found for the passed counterparty node_id {}", log_bytes!(*temporary_channel_id), counterparty_node_id) }) }, } }; match handle_error!(self, res, chan.get_counterparty_node_id()) { @@ -2565,29 +2707,22 @@ where } }; - let mut channel_state = self.channel_state.lock().unwrap(); - 
channel_state.pending_msg_events.push(events::MessageSendEvent::SendFundingCreated { + peer_state.pending_msg_events.push(events::MessageSendEvent::SendFundingCreated { node_id: chan.get_counterparty_node_id(), msg, }); - mem::drop(channel_state); - let per_peer_state = self.per_peer_state.read().unwrap(); - if let Some(peer_state_mutex) = per_peer_state.get(counterparty_node_id) { - let mut peer_state_lock = peer_state_mutex.lock().unwrap(); - let peer_state = &mut *peer_state_lock; - match peer_state.channel_by_id.entry(chan.channel_id()) { - hash_map::Entry::Occupied(_) => { - panic!("Generated duplicate funding txid?"); - }, - hash_map::Entry::Vacant(e) => { - let mut id_to_peer = self.id_to_peer.lock().unwrap(); - if id_to_peer.insert(chan.channel_id(), chan.get_counterparty_node_id()).is_some() { - panic!("id_to_peer map already contained funding txid, which shouldn't be possible"); - } - e.insert(chan); + match peer_state.channel_by_id.entry(chan.channel_id()) { + hash_map::Entry::Occupied(_) => { + panic!("Generated duplicate funding txid?"); + }, + hash_map::Entry::Vacant(e) => { + let mut id_to_peer = self.id_to_peer.lock().unwrap(); + if id_to_peer.insert(chan.channel_id(), chan.get_counterparty_node_id()).is_some() { + panic!("id_to_peer map already contained funding txid, which shouldn't be possible"); } + e.insert(chan); } - } else { return Err(APIError::ChannelUnavailable { err: format!("Peer with counterparty_node_id {} disconnected and closed the channel", counterparty_node_id) }) } + } Ok(()) } @@ -2710,36 +2845,32 @@ where let _persistence_guard = PersistenceNotifierGuard::notify_on_drop( &self.total_consistency_lock, &self.persistence_notifier, ); - { - let mut channel_state_lock = self.channel_state.lock().unwrap(); - let channel_state = &mut *channel_state_lock; - let per_peer_state = self.per_peer_state.read().unwrap(); - if let Some(peer_state_mutex) = per_peer_state.get(counterparty_node_id) { - let mut peer_state_lock = peer_state_mutex.lock().unwrap(); - let peer_state = &mut *peer_state_lock; - for channel_id in channel_ids { - if !peer_state.channel_by_id.contains_key(channel_id) { - return Err(APIError::ChannelUnavailable { - err: format!("Channel with ID {} was not found", log_bytes!(*channel_id)), - }); - } - } - for channel_id in channel_ids { - let channel = peer_state.channel_by_id.get_mut(channel_id).unwrap(); - if !channel.update_config(config) { - continue; - } - if let Ok(msg) = self.get_channel_update_for_broadcast(channel) { - channel_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate { msg }); - } else if let Ok(msg) = self.get_channel_update_for_unicast(channel) { - channel_state.pending_msg_events.push(events::MessageSendEvent::SendChannelUpdate { - node_id: channel.get_counterparty_node_id(), - msg, - }); - } - } - } else { - return Err(APIError::APIMisuseError{ err: format!("Can't find a peer with a node_id matching the passed counterparty_node_id {}", counterparty_node_id) }); + let per_peer_state = self.per_peer_state.read().unwrap(); + let peer_state_mutex_opt = per_peer_state.get(counterparty_node_id); + if let None = peer_state_mutex_opt { + return Err(APIError::APIMisuseError{ err: format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id) }); + } + let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap(); + let peer_state = &mut *peer_state_lock; + for channel_id in channel_ids { + if !peer_state.channel_by_id.contains_key(channel_id) { + return 
Err(APIError::ChannelUnavailable { + err: format!("Channel with ID {} was not found for the passed counterparty_node_id {}", log_bytes!(*channel_id), counterparty_node_id), + }); + } + } + for channel_id in channel_ids { + let channel = peer_state.channel_by_id.get_mut(channel_id).unwrap(); + if !channel.update_config(config) { + continue; + } + if let Ok(msg) = self.get_channel_update_for_broadcast(channel) { + peer_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate { msg }); + } else if let Ok(msg) = self.get_channel_update_for_unicast(channel) { + peer_state.pending_msg_events.push(events::MessageSendEvent::SendChannelUpdate { + node_id: channel.get_counterparty_node_id(), + msg, + }); } } Ok(()) @@ -2785,11 +2916,11 @@ where chan.get_short_channel_id().unwrap_or(chan.outbound_scid_alias()) }, None => return Err(APIError::ChannelUnavailable { - err: format!("Channel with id {} not found", log_bytes!(*next_hop_channel_id)) + err: format!("Channel with id {} not found for the passed counterparty node_id {}", log_bytes!(*next_hop_channel_id), next_node_id) }) } } else { - return Err(APIError::APIMisuseError{ err: format!("Can't find a peer with a node_id matching the passed counterparty_node_id {}", next_node_id) }); + return Err(APIError::APIMisuseError{ err: format!("Can't find a peer matching the passed counterparty node_id {}", next_node_id) }); } }; @@ -2917,9 +3048,9 @@ where } } if let PendingHTLCRouting::Forward { onion_packet, .. } = routing { - let phantom_secret_res = self.keys_manager.get_node_secret(Recipient::PhantomNode); - if phantom_secret_res.is_ok() && fake_scid::is_valid_phantom(&self.fake_scid_rand_bytes, short_chan_id, &self.genesis_hash) { - let phantom_shared_secret = SharedSecret::new(&onion_packet.public_key.unwrap(), &phantom_secret_res.unwrap()).secret_bytes(); + let phantom_pubkey_res = self.node_signer.get_node_id(Recipient::PhantomNode); + if phantom_pubkey_res.is_ok() && fake_scid::is_valid_phantom(&self.fake_scid_rand_bytes, short_chan_id, &self.genesis_hash) { + let phantom_shared_secret = self.node_signer.ecdh(Recipient::PhantomNode, &onion_packet.public_key.unwrap(), None).unwrap().secret_bytes(); let next_hop = match onion_utils::decode_next_payment_hop(phantom_shared_secret, &onion_packet.hop_data, onion_packet.hmac, payment_hash) { Ok(res) => res, Err(onion_utils::OnionDecodeErr::Malformed { err_msg, err_code }) => { @@ -2968,12 +3099,12 @@ where } }; let per_peer_state = self.per_peer_state.read().unwrap(); - if let None = per_peer_state.get(&counterparty_node_id) { + let peer_state_mutex_opt = per_peer_state.get(&counterparty_node_id); + if let None = peer_state_mutex_opt { forwarding_channel_not_found!(); continue; } - let peer_state_mutex = per_peer_state.get(&counterparty_node_id).unwrap(); - let mut peer_state_lock = peer_state_mutex.lock().unwrap(); + let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap(); let peer_state = &mut *peer_state_lock; match peer_state.channel_by_id.entry(forward_chan_id) { hash_map::Entry::Vacant(_) => { @@ -3095,7 +3226,7 @@ where let phantom_shared_secret = claimable_htlc.prev_hop.phantom_shared_secret; let mut receiver_node_id = self.our_network_pubkey; if phantom_shared_secret.is_some() { - receiver_node_id = self.keys_manager.get_node_id(Recipient::PhantomNode) + receiver_node_id = self.node_signer.get_node_id(Recipient::PhantomNode) .expect("Failed to get node_id for phantom node recipient"); } @@ -3175,13 +3306,23 @@ where match claimable_htlc.onion_payload { 
OnionPayload::Invoice { .. } => { let payment_data = payment_data.unwrap(); - let payment_preimage = match inbound_payment::verify(payment_hash, &payment_data, self.highest_seen_timestamp.load(Ordering::Acquire) as u64, &self.inbound_payment_key, &self.logger) { - Ok(payment_preimage) => payment_preimage, + let (payment_preimage, min_final_cltv_expiry_delta) = match inbound_payment::verify(payment_hash, &payment_data, self.highest_seen_timestamp.load(Ordering::Acquire) as u64, &self.inbound_payment_key, &self.logger) { + Ok(result) => result, Err(()) => { + log_trace!(self.logger, "Failing new HTLC with payment_hash {} as payment verification failed", log_bytes!(payment_hash.0)); fail_htlc!(claimable_htlc, payment_hash); continue } }; + if let Some(min_final_cltv_expiry_delta) = min_final_cltv_expiry_delta { + let expected_min_expiry_height = (self.current_best_block().height() + min_final_cltv_expiry_delta as u32) as u64; + if (cltv_expiry as u64) < expected_min_expiry_height { + log_trace!(self.logger, "Failing new HTLC with payment_hash {} as its CLTV expiry was too soon (had {}, earliest expected {})", + log_bytes!(payment_hash.0), cltv_expiry, expected_min_expiry_height); + fail_htlc!(claimable_htlc, payment_hash); + continue; + } + } check_total_value!(payment_data, payment_preimage); }, OnionPayload::Spontaneous(preimage) => { @@ -3244,6 +3385,12 @@ where } } + let best_block_height = self.best_block.read().unwrap().height(); + self.pending_outbound_payments.check_retry_payments(&self.router, || self.list_usable_channels(), + || self.compute_inflight_htlcs(), &self.entropy_source, &self.node_signer, best_block_height, &self.logger, + |path, payment_params, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv| + self.send_payment_along_path(path, payment_params, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv)); + for (htlc_source, payment_hash, failure_reason, destination) in failed_forwards.drain(..) { self.fail_htlc_backwards_internal(&htlc_source, &payment_hash, &failure_reason, destination); } @@ -3278,7 +3425,7 @@ where BackgroundEvent::ClosingMonitorUpdate((funding_txo, update)) => { // The channel has already been closed, so no use bothering to care about the // monitor updating completing. 
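// --- Editor's illustrative note (not part of the diff) ----------------------
// Restating the `min_final_cltv_expiry_delta` check added earlier in this hunk:
// an inbound HTLC must expire no earlier than the current best block height
// plus the delta committed to in the invoice. A hedged, self-contained version
// of that arithmetic (the real code performs the same comparison in u64):
fn htlc_expiry_acceptable(best_block_height: u32, min_final_cltv_expiry_delta: u16, cltv_expiry: u32) -> bool {
	let expected_min_expiry_height = best_block_height as u64 + min_final_cltv_expiry_delta as u64;
	cltv_expiry as u64 >= expected_min_expiry_height
}
// e.g. at height 800_000 with a delta of 144, an HTLC with `cltv_expiry` of
// 800_100 is failed back (800_100 < 800_144), while 800_144 is accepted.
// -----------------------------------------------------------------------------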
- let _ = self.chain_monitor.update_channel(funding_txo, update); + let _ = self.chain_monitor.update_channel(funding_txo, &update); }, } } @@ -3291,7 +3438,7 @@ where self.process_background_events(); } - fn update_channel_fee(&self, chan_id: &[u8; 32], chan: &mut Channel<::Signer>, new_feerate: u32) -> NotifyOption { + fn update_channel_fee(&self, chan_id: &[u8; 32], chan: &mut Channel<::Signer>, new_feerate: u32) -> NotifyOption { if !chan.is_outbound() { return NotifyOption::SkipPersist; } // If the feerate has decreased by less than half, don't bother if new_feerate <= chan.get_feerate() && new_feerate * 2 > chan.get_feerate() { @@ -3358,13 +3505,11 @@ where let mut handle_errors: Vec<(Result<(), _>, _)> = Vec::new(); let mut timed_out_mpp_htlcs = Vec::new(); { - let mut channel_state_lock = self.channel_state.lock().unwrap(); - let channel_state = &mut *channel_state_lock; - let pending_msg_events = &mut channel_state.pending_msg_events; let per_peer_state = self.per_peer_state.read().unwrap(); for (counterparty_node_id, peer_state_mutex) in per_peer_state.iter() { let mut peer_state_lock = peer_state_mutex.lock().unwrap(); let peer_state = &mut *peer_state_lock; + let pending_msg_events = &mut peer_state.pending_msg_events; peer_state.channel_by_id.retain(|chan_id, chan| { let chan_needs_persist = self.update_channel_fee(chan_id, chan, new_feerate); if chan_needs_persist == NotifyOption::DoPersist { should_persist = NotifyOption::DoPersist; } @@ -3468,27 +3613,46 @@ where /// [`events::Event::PaymentClaimed`] events even for payments you intend to fail, especially on /// startup during which time claims that were in-progress at shutdown may be replayed. pub fn fail_htlc_backwards(&self, payment_hash: &PaymentHash) { + self.fail_htlc_backwards_with_reason(payment_hash, &FailureCode::IncorrectOrUnknownPaymentDetails); + } + + /// This is a variant of [`ChannelManager::fail_htlc_backwards`] that allows you to specify the + /// reason for the failure. + /// + /// See [`FailureCode`] for valid failure codes. + pub fn fail_htlc_backwards_with_reason(&self, payment_hash: &PaymentHash, failure_code: &FailureCode) { let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier); let removed_source = self.claimable_payments.lock().unwrap().claimable_htlcs.remove(payment_hash); if let Some((_, mut sources)) = removed_source { for htlc in sources.drain(..) { - let mut htlc_msat_height_data = htlc.value.to_be_bytes().to_vec(); - htlc_msat_height_data.extend_from_slice(&self.best_block.read().unwrap().height().to_be_bytes()); + let reason = self.get_htlc_fail_reason_from_failure_code(failure_code, &htlc); let source = HTLCSource::PreviousHopData(htlc.prev_hop); - let reason = HTLCFailReason::reason(0x4000 | 15, htlc_msat_height_data); let receiver = HTLCDestination::FailedPayment { payment_hash: *payment_hash }; self.fail_htlc_backwards_internal(&source, &payment_hash, &reason, receiver); } } } + /// Gets error data to form an [`HTLCFailReason`] given a [`FailureCode`] and [`ClaimableHTLC`]. 
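// --- Editor's illustrative sketch (not part of the diff) --------------------
// Using the `fail_htlc_backwards_with_reason` API documented above, e.g. when a
// claimable payment lacks metadata your node requires. `MyChannelManager` is a
// hypothetical alias for a concrete `ChannelManager` type; the variant names
// are those of the `FailureCode` enum added in this diff.
fn reject_payment_missing_metadata(
	channel_manager: &MyChannelManager, payment_hash: &lightning::ln::PaymentHash,
) {
	use lightning::ln::channelmanager::FailureCode;
	// Sends `required_node_feature_missing` (0x4000 | 0x2000 | 3) back to the
	// payer. `IncorrectOrUnknownPaymentDetails` would additionally attach the
	// HTLC amount and our current block height as error data, as the helper
	// just below shows.
	channel_manager.fail_htlc_backwards_with_reason(payment_hash, &FailureCode::RequiredNodeFeatureMissing);
}
// -----------------------------------------------------------------------------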
+ fn get_htlc_fail_reason_from_failure_code(&self, failure_code: &FailureCode, htlc: &ClaimableHTLC) -> HTLCFailReason { + match failure_code { + FailureCode::TemporaryNodeFailure => HTLCFailReason::from_failure_code(*failure_code as u16), + FailureCode::RequiredNodeFeatureMissing => HTLCFailReason::from_failure_code(*failure_code as u16), + FailureCode::IncorrectOrUnknownPaymentDetails => { + let mut htlc_msat_height_data = htlc.value.to_be_bytes().to_vec(); + htlc_msat_height_data.extend_from_slice(&self.best_block.read().unwrap().height().to_be_bytes()); + HTLCFailReason::reason(*failure_code as u16, htlc_msat_height_data) + } + } + } + /// Gets an HTLC onion failure code and error data for an `UPDATE` error, given the error code /// that we want to return and a channel. /// /// This is for failures on the channel on which the HTLC was *received*, not failures /// forwarding - fn get_htlc_inbound_temp_fail_err_and_data(&self, desired_err_code: u16, chan: &Channel<::Signer>) -> (u16, Vec) { + fn get_htlc_inbound_temp_fail_err_and_data(&self, desired_err_code: u16, chan: &Channel<::Signer>) -> (u16, Vec) { // We can't be sure what SCID was used when relaying inbound towards us, so we have to // guess somewhat. If its a public channel, we figure best to just use the real SCID (as // we're not leaking that we have a channel with the counterparty), otherwise we try to use @@ -3508,7 +3672,7 @@ /// Gets an HTLC onion failure code and error data for an `UPDATE` error, given the error code /// that we want to return and a channel. - fn get_htlc_temp_fail_err_and_data(&self, desired_err_code: u16, scid: u64, chan: &Channel<::Signer>) -> (u16, Vec) { + fn get_htlc_temp_fail_err_and_data(&self, desired_err_code: u16, scid: u64, chan: &Channel<::Signer>) -> (u16, Vec) { debug_assert_eq!(desired_err_code & 0x1000, 0x1000); if let Ok(upd) = self.get_channel_update_for_onion(scid, chan) { let mut enc = VecWriter(Vec::with_capacity(upd.serialized_length() + 6)); @@ -3561,15 +3725,17 @@ where /// Fails an HTLC backwards to the sender of it to us. /// Note that we do not assume that channels corresponding to failed HTLCs are still available. fn fail_htlc_backwards_internal(&self, source: &HTLCSource, payment_hash: &PaymentHash, onion_error: &HTLCFailReason, destination: HTLCDestination) { - #[cfg(debug_assertions)] + #[cfg(any(feature = "_test_utils", test))] { - // Ensure that the `channel_state` and no peer state channel storage lock is not held - // when calling this function. + // Ensure that no peer state channel storage lock is held when calling this + // function. // This ensures that future code doesn't introduce a lock_order requirement for - // `forward_htlcs` to be locked after the `channel_state` and `per_peer_state` locks, - // which calling this function with the locks aquired would. - assert!(self.channel_state.try_lock().is_ok()); - assert!(self.per_peer_state.try_write().is_ok()); + // `forward_htlcs` to be locked after the `per_peer_state` peer locks, which calling + // this function with any `per_peer_state` peer lock acquired would.
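// --- Editor's illustrative sketch (not part of the diff) --------------------
// The lock order this refactor establishes, which the assertion below guards:
// take the `per_peer_state` read lock, then at most one peer's inner `Mutex`,
// and queue outbound messages on that peer's `pending_msg_events` while still
// holding it. A hedged, std-only restatement of the pattern (`String` stands in
// for `MessageSendEvent`, and the struct is a stand-in for `PeerState`):
use std::collections::HashMap;
use std::sync::{Mutex, RwLock};

struct PeerStateSketch { pending_msg_events: Vec<String> }

fn queue_for_peer(
	per_peer_state: &RwLock<HashMap<[u8; 33], Mutex<PeerStateSketch>>>,
	node_id: [u8; 33], msg: String,
) -> Result<(), ()> {
	let per_peer_state = per_peer_state.read().unwrap(); // 1) map-level read lock
	let peer_state_mutex = per_peer_state.get(&node_id).ok_or(())?; // 2) find the one peer
	let mut peer_state = peer_state_mutex.lock().unwrap(); // 3) that peer's lock only
	peer_state.pending_msg_events.push(msg); // 4) queue under the same lock
	Ok(())
}
// -----------------------------------------------------------------------------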
+ let per_peer_state = self.per_peer_state.read().unwrap(); + for (_, peer) in per_peer_state.iter() { + debug_assert!(peer.try_lock().is_ok()); + } } //TODO: There is a timing attack here where if a node fails an HTLC back to us they can @@ -3644,7 +3810,7 @@ where let mut receiver_node_id = self.our_network_pubkey; for htlc in sources.iter() { if htlc.prev_hop.phantom_shared_secret.is_some() { - let phantom_pubkey = self.keys_manager.get_node_id(Recipient::PhantomNode) + let phantom_pubkey = self.node_signer.get_node_id(Recipient::PhantomNode) .expect("Failed to get node_id for phantom node recipient"); receiver_node_id = phantom_pubkey; break; @@ -3684,7 +3850,6 @@ where let mut expected_amt_msat = None; let mut valid_mpp = true; let mut errs = Vec::new(); - let mut channel_state = Some(self.channel_state.lock().unwrap()); let mut per_peer_state = Some(self.per_peer_state.read().unwrap()); for htlc in sources.iter() { let (counterparty_node_id, chan_id) = match self.short_to_chan_info.read().unwrap().get(&htlc.prev_hop.short_channel_id) { @@ -3731,14 +3896,12 @@ where claimable_amt_msat += htlc.value; } if sources.is_empty() || expected_amt_msat.is_none() { - mem::drop(channel_state); mem::drop(per_peer_state); self.claimable_payments.lock().unwrap().pending_claiming_payments.remove(&payment_hash); log_info!(self.logger, "Attempted to claim an incomplete payment which no longer had any available HTLCs!"); return; } if claimable_amt_msat != expected_amt_msat.unwrap() { - mem::drop(channel_state); mem::drop(per_peer_state); self.claimable_payments.lock().unwrap().pending_claiming_payments.remove(&payment_hash); log_info!(self.logger, "Attempted to claim an incomplete payment, expected {} msat, had {} available to claim.", @@ -3747,9 +3910,8 @@ where } if valid_mpp { for htlc in sources.drain(..) { - if channel_state.is_none() { channel_state = Some(self.channel_state.lock().unwrap()); } if per_peer_state.is_none() { per_peer_state = Some(self.per_peer_state.read().unwrap()); } - if let Err((pk, err)) = self.claim_funds_from_hop(channel_state.take().unwrap(), per_peer_state.take().unwrap(), + if let Err((pk, err)) = self.claim_funds_from_hop(per_peer_state.take().unwrap(), htlc.prev_hop, payment_preimage, |_| Some(MonitorUpdateCompletionAction::PaymentClaimed { payment_hash })) { @@ -3761,7 +3923,6 @@ where } } } - mem::drop(channel_state); mem::drop(per_peer_state); if !valid_mpp { for htlc in sources.drain(..) { @@ -3783,14 +3944,12 @@ where } fn claim_funds_from_hop) -> Option>(&self, - mut channel_state_lock: MutexGuard, - per_peer_state_lock: RwLockReadGuard::Signer>>>>, + per_peer_state_lock: RwLockReadGuard::Signer>>>>, prev_hop: HTLCPreviousHopData, payment_preimage: PaymentPreimage, completion_action: ComplFunc) -> Result<(), (PublicKey, MsgHandleErrInternal)> { //TODO: Delay the claimed_funds relaying just like we do outbound relay! 
let chan_id = prev_hop.outpoint.to_channel_id(); - let channel_state = &mut *channel_state_lock; let counterparty_node_id_opt = match self.short_to_chan_info.read().unwrap().get(&prev_hop.short_channel_id) { Some((cp_id, _dup_chan_id)) => Some(cp_id.clone()), @@ -3805,19 +3964,19 @@ where } else { (false, None) }; if found_channel { - if let hash_map::Entry::Occupied(mut chan) = peer_state_opt.as_mut().unwrap().channel_by_id.entry(chan_id) { + let peer_state = &mut *peer_state_opt.as_mut().unwrap(); + if let hash_map::Entry::Occupied(mut chan) = peer_state.channel_by_id.entry(chan_id) { let counterparty_node_id = chan.get().get_counterparty_node_id(); match chan.get_mut().get_update_fulfill_htlc_and_commit(prev_hop.htlc_id, payment_preimage, &self.logger) { Ok(msgs_monitor_option) => { if let UpdateFulfillCommitFetch::NewClaim { msgs, htlc_value_msat, monitor_update } = msgs_monitor_option { - match self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), monitor_update) { + match self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), &monitor_update) { ChannelMonitorUpdateStatus::Completed => {}, e => { log_given_level!(self.logger, if e == ChannelMonitorUpdateStatus::PermanentFailure { Level::Error } else { Level::Debug }, "Failed to update channel monitor with preimage {:?}: {:?}", payment_preimage, e); let err = handle_monitor_update_res!(self, e, chan, RAACommitmentOrder::CommitmentFirst, false, msgs.is_some()).unwrap_err(); - mem::drop(channel_state_lock); mem::drop(peer_state_opt); mem::drop(per_peer_state_lock); self.handle_monitor_update_completion_actions(completion_action(Some(htlc_value_msat))); @@ -3827,8 +3986,8 @@ where if let Some((msg, commitment_signed)) = msgs { log_debug!(self.logger, "Claiming funds for HTLC with preimage {} resulted in a commitment_signed for channel {}", log_bytes!(payment_preimage.0), log_bytes!(chan.get().channel_id())); - channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs { - node_id: chan.get().get_counterparty_node_id(), + peer_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs { + node_id: counterparty_node_id, updates: msgs::CommitmentUpdate { update_add_htlcs: Vec::new(), update_fulfill_htlcs: vec![msg], @@ -3839,7 +3998,6 @@ where } }); } - mem::drop(channel_state_lock); mem::drop(peer_state_opt); mem::drop(per_peer_state_lock); self.handle_monitor_update_completion_actions(completion_action(Some(htlc_value_msat))); @@ -3849,7 +4007,7 @@ where } }, Err((e, monitor_update)) => { - match self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), monitor_update) { + match self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), &monitor_update) { ChannelMonitorUpdateStatus::Completed => {}, e => { // TODO: This needs to be handled somehow - if we receive a monitor update @@ -3865,7 +4023,6 @@ where if drop { chan.remove_entry(); } - mem::drop(channel_state_lock); mem::drop(peer_state_opt); mem::drop(per_peer_state_lock); self.handle_monitor_update_completion_actions(completion_action(None)); @@ -3886,7 +4043,7 @@ where }; // We update the ChannelMonitor on the backward link, after // receiving an `update_fulfill_htlc` from the forward link. 
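// --- Editor's illustrative note (not part of the diff) ----------------------
// Alongside the lock refactor, this diff consistently switches monitor-update
// call sites to pass the `ChannelMonitorUpdate` by reference, as in the call
// just below. A hedged sketch of the resulting method shape (the real trait is
// `lightning::chain::Watch`; only the borrow is the point here):
use lightning::chain::ChannelMonitorUpdateStatus;
use lightning::chain::channelmonitor::ChannelMonitorUpdate;
use lightning::chain::transaction::OutPoint;

trait WatchUpdateSketch {
	// Borrowing the update lets the caller keep ownership after the monitor
	// has been updated (e.g. to log or persist it on failure).
	fn update_channel(&self, funding_txo: OutPoint, update: &ChannelMonitorUpdate) -> ChannelMonitorUpdateStatus;
}
// -----------------------------------------------------------------------------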
- let update_res = self.chain_monitor.update_channel(prev_hop.outpoint, preimage_update); + let update_res = self.chain_monitor.update_channel(prev_hop.outpoint, &preimage_update); if update_res != ChannelMonitorUpdateStatus::Completed { // TODO: This needs to be handled somehow - if we receive a monitor update // with a preimage we *must* somehow manage to propagate it to the upstream @@ -3895,7 +4052,6 @@ where log_error!(self.logger, "Critical error: failed to update channel monitor with preimage {:?}: {:?}", payment_preimage, update_res); } - mem::drop(channel_state_lock); mem::drop(peer_state_opt); mem::drop(per_peer_state_lock); // Note that we do process the completion action here. This totally could be a @@ -3912,15 +4068,14 @@ where self.pending_outbound_payments.finalize_claims(sources, &self.pending_events); } - fn claim_funds_internal(&self, channel_state_lock: MutexGuard, source: HTLCSource, payment_preimage: PaymentPreimage, forwarded_htlc_value_msat: Option, from_onchain: bool, next_channel_id: [u8; 32]) { + fn claim_funds_internal(&self, source: HTLCSource, payment_preimage: PaymentPreimage, forwarded_htlc_value_msat: Option, from_onchain: bool, next_channel_id: [u8; 32]) { match source { HTLCSource::OutboundRoute { session_priv, payment_id, path, .. } => { - mem::drop(channel_state_lock); self.pending_outbound_payments.claim_htlc(payment_id, payment_preimage, session_priv, path, from_onchain, &self.pending_events, &self.logger); }, HTLCSource::PreviousHopData(hop_data) => { let prev_outpoint = hop_data.outpoint; - let res = self.claim_funds_from_hop(channel_state_lock, self.per_peer_state.read().unwrap(), hop_data, payment_preimage, + let res = self.claim_funds_from_hop(self.per_peer_state.read().unwrap(), hop_data, payment_preimage, |htlc_claim_value_msat| { if let Some(forwarded_htlc_value) = forwarded_htlc_value_msat { let fee_earned_msat = if let Some(claimed_htlc_value) = htlc_claim_value_msat { @@ -3972,7 +4127,7 @@ where /// Handles a channel reentering a functional state, either due to reconnect or a monitor /// update completion. 
fn handle_channel_resumption(&self, pending_msg_events: &mut Vec, - channel: &mut Channel<::Signer>, raa: Option, + channel: &mut Channel<::Signer>, raa: Option, commitment_update: Option, order: RAACommitmentOrder, pending_forwards: Vec<(PendingHTLCInfo, u64)>, funding_broadcastable: Option, channel_ready: Option, announcement_sigs: Option) @@ -4037,8 +4192,6 @@ where let htlc_forwards; let (mut pending_failures, finalized_claims, counterparty_node_id) = { - let mut channel_lock = self.channel_state.lock().unwrap(); - let channel_state = &mut *channel_lock; let counterparty_node_id = match counterparty_node_id { Some(cp_id) => cp_id.clone(), None => { @@ -4053,21 +4206,21 @@ where }; let per_peer_state = self.per_peer_state.read().unwrap(); let mut peer_state_lock; + let peer_state_mutex_opt = per_peer_state.get(&counterparty_node_id); + if let None = peer_state_mutex_opt { return } + peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap(); + let peer_state = &mut *peer_state_lock; let mut channel = { - if let Some(peer_state_mutex) = per_peer_state.get(&counterparty_node_id) { - peer_state_lock = peer_state_mutex.lock().unwrap(); - let peer_state = &mut *peer_state_lock; - match peer_state.channel_by_id.entry(funding_txo.to_channel_id()){ - hash_map::Entry::Occupied(chan) => chan, - hash_map::Entry::Vacant(_) => return, - } - } else { return } + match peer_state.channel_by_id.entry(funding_txo.to_channel_id()){ + hash_map::Entry::Occupied(chan) => chan, + hash_map::Entry::Vacant(_) => return, + } }; if !channel.get().is_awaiting_monitor_update() || channel.get().get_latest_monitor_update_id() != highest_applied_update_id { return; } - let updates = channel.get_mut().monitor_updating_restored(&self.logger, self.get_our_node_id(), self.genesis_hash, self.best_block.read().unwrap().height()); + let updates = channel.get_mut().monitor_updating_restored(&self.logger, &self.node_signer, self.genesis_hash, &self.default_configuration, self.best_block.read().unwrap().height()); let channel_update = if updates.channel_ready.is_some() && channel.get().is_usable() { // We only send a channel_update in the case where we are just now sending a // channel_ready and the channel is in a usable state. 
We may re-send a @@ -4081,9 +4234,9 @@ where }) } else { None } } else { None }; - htlc_forwards = self.handle_channel_resumption(&mut channel_state.pending_msg_events, channel.get_mut(), updates.raa, updates.commitment_update, updates.order, updates.accepted_htlcs, updates.funding_broadcastable, updates.channel_ready, updates.announcement_sigs); + htlc_forwards = self.handle_channel_resumption(&mut peer_state.pending_msg_events, channel.get_mut(), updates.raa, updates.commitment_update, updates.order, updates.accepted_htlcs, updates.funding_broadcastable, updates.channel_ready, updates.announcement_sigs); if let Some(upd) = channel_update { - channel_state.pending_msg_events.push(upd); + peer_state.pending_msg_events.push(upd); } (updates.failed_htlcs, updates.finalized_claimed_htlcs, counterparty_node_id) @@ -4143,50 +4296,45 @@ where fn do_accept_inbound_channel(&self, temporary_channel_id: &[u8; 32], counterparty_node_id: &PublicKey, accept_0conf: bool, user_channel_id: u128) -> Result<(), APIError> { let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier); - let mut channel_state_lock = self.channel_state.lock().unwrap(); - let channel_state = &mut *channel_state_lock; let per_peer_state = self.per_peer_state.read().unwrap(); - if let Some(peer_state_mutex) = per_peer_state.get(counterparty_node_id) { - let mut peer_state_lock = peer_state_mutex.lock().unwrap(); - let peer_state = &mut *peer_state_lock; - match peer_state.channel_by_id.entry(temporary_channel_id.clone()) { - hash_map::Entry::Occupied(mut channel) => { - if !channel.get().inbound_is_awaiting_accept() { - return Err(APIError::APIMisuseError { err: "The channel isn't currently awaiting to be accepted.".to_owned() }); - } - if *counterparty_node_id != channel.get().get_counterparty_node_id() { - return Err(APIError::APIMisuseError { err: "The passed counterparty_node_id doesn't match the channel's counterparty node_id".to_owned() }); - } - if accept_0conf { - channel.get_mut().set_0conf(); - } else if channel.get().get_channel_type().requires_zero_conf() { - let send_msg_err_event = events::MessageSendEvent::HandleError { - node_id: channel.get().get_counterparty_node_id(), - action: msgs::ErrorAction::SendErrorMessage{ - msg: msgs::ErrorMessage { channel_id: temporary_channel_id.clone(), data: "No zero confirmation channels accepted".to_owned(), } - } - }; - channel_state.pending_msg_events.push(send_msg_err_event); - let _ = remove_channel!(self, channel); - return Err(APIError::APIMisuseError { err: "Please use accept_inbound_channel_from_trusted_peer_0conf to accept channels with zero confirmations.".to_owned() }); - } - - channel_state.pending_msg_events.push(events::MessageSendEvent::SendAcceptChannel { - node_id: channel.get().get_counterparty_node_id(), - msg: channel.get_mut().accept_inbound_channel(user_channel_id), - }); + let peer_state_mutex_opt = per_peer_state.get(counterparty_node_id); + if let None = peer_state_mutex_opt { + return Err(APIError::APIMisuseError { err: format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id) }); + } + let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap(); + let peer_state = &mut *peer_state_lock; + match peer_state.channel_by_id.entry(temporary_channel_id.clone()) { + hash_map::Entry::Occupied(mut channel) => { + if !channel.get().inbound_is_awaiting_accept() { + return Err(APIError::APIMisuseError { err: "The channel isn't currently awaiting to be 
accepted.".to_owned() }); } - hash_map::Entry::Vacant(_) => { - return Err(APIError::ChannelUnavailable { err: "Can't accept a channel that doesn't exist".to_owned() }); + if accept_0conf { + channel.get_mut().set_0conf(); + } else if channel.get().get_channel_type().requires_zero_conf() { + let send_msg_err_event = events::MessageSendEvent::HandleError { + node_id: channel.get().get_counterparty_node_id(), + action: msgs::ErrorAction::SendErrorMessage{ + msg: msgs::ErrorMessage { channel_id: temporary_channel_id.clone(), data: "No zero confirmation channels accepted".to_owned(), } + } + }; + peer_state.pending_msg_events.push(send_msg_err_event); + let _ = remove_channel!(self, channel); + return Err(APIError::APIMisuseError { err: "Please use accept_inbound_channel_from_trusted_peer_0conf to accept channels with zero confirmations.".to_owned() }); } + + peer_state.pending_msg_events.push(events::MessageSendEvent::SendAcceptChannel { + node_id: channel.get().get_counterparty_node_id(), + msg: channel.get_mut().accept_inbound_channel(user_channel_id), + }); + } + hash_map::Entry::Vacant(_) => { + return Err(APIError::ChannelUnavailable { err: format!("Channel with id {} not found for the passed counterparty node_id {}", log_bytes!(*temporary_channel_id), counterparty_node_id) }); } - } else { - return Err(APIError::APIMisuseError { err: format!("Can't find a peer with a node_id matching the passed counterparty_node_id {}", counterparty_node_id) }); } Ok(()) } - fn internal_open_channel(&self, counterparty_node_id: &PublicKey, their_features: InitFeatures, msg: &msgs::OpenChannel) -> Result<(), MsgHandleErrInternal> { + fn internal_open_channel(&self, counterparty_node_id: &PublicKey, msg: &msgs::OpenChannel) -> Result<(), MsgHandleErrInternal> { if msg.chain_hash != self.genesis_hash { return Err(MsgHandleErrInternal::send_err_msg_no_close("Unknown genesis block hash".to_owned(), msg.temporary_channel_id.clone())); } @@ -4196,12 +4344,19 @@ where } let mut random_bytes = [0u8; 16]; - random_bytes.copy_from_slice(&self.keys_manager.get_secure_random_bytes()[..16]); + random_bytes.copy_from_slice(&self.entropy_source.get_secure_random_bytes()[..16]); let user_channel_id = u128::from_be_bytes(random_bytes); let outbound_scid_alias = self.create_and_insert_outbound_scid_alias(); - let mut channel = match Channel::new_from_req(&self.fee_estimator, &self.keys_manager, - counterparty_node_id.clone(), &their_features, msg, user_channel_id, &self.default_configuration, + let per_peer_state = self.per_peer_state.read().unwrap(); + let peer_state_mutex_opt = per_peer_state.get(counterparty_node_id); + if let None = peer_state_mutex_opt { + return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), msg.temporary_channel_id.clone())) + } + let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap(); + let peer_state = &mut *peer_state_lock; + let mut channel = match Channel::new_from_req(&self.fee_estimator, &self.entropy_source, &self.signer_provider, + counterparty_node_id.clone(), &self.channel_type_features(), &peer_state.latest_features, msg, user_channel_id, &self.default_configuration, self.best_block.read().unwrap().height(), &self.logger, outbound_scid_alias) { Err(e) => { @@ -4210,66 +4365,54 @@ where }, Ok(res) => res }; - let mut channel_state_lock = self.channel_state.lock().unwrap(); - let channel_state = &mut *channel_state_lock; - let per_peer_state = 
self.per_peer_state.read().unwrap(); - if let Some(peer_state_mutex) = per_peer_state.get(counterparty_node_id) { - let mut peer_state_lock = peer_state_mutex.lock().unwrap(); - let peer_state = &mut *peer_state_lock; - match peer_state.channel_by_id.entry(channel.channel_id()) { - hash_map::Entry::Occupied(_) => { - self.outbound_scid_aliases.lock().unwrap().remove(&outbound_scid_alias); - return Err(MsgHandleErrInternal::send_err_msg_no_close("temporary_channel_id collision for the same peer!".to_owned(), msg.temporary_channel_id.clone())) - }, - hash_map::Entry::Vacant(entry) => { - if !self.default_configuration.manually_accept_inbound_channels { - if channel.get_channel_type().requires_zero_conf() { - return Err(MsgHandleErrInternal::send_err_msg_no_close("No zero confirmation channels accepted".to_owned(), msg.temporary_channel_id.clone())); - } - channel_state.pending_msg_events.push(events::MessageSendEvent::SendAcceptChannel { - node_id: counterparty_node_id.clone(), - msg: channel.accept_inbound_channel(user_channel_id), - }); - } else { - let mut pending_events = self.pending_events.lock().unwrap(); - pending_events.push( - events::Event::OpenChannelRequest { - temporary_channel_id: msg.temporary_channel_id.clone(), - counterparty_node_id: counterparty_node_id.clone(), - funding_satoshis: msg.funding_satoshis, - push_msat: msg.push_msat, - channel_type: channel.get_channel_type().clone(), - } - ); + match peer_state.channel_by_id.entry(channel.channel_id()) { + hash_map::Entry::Occupied(_) => { + self.outbound_scid_aliases.lock().unwrap().remove(&outbound_scid_alias); + return Err(MsgHandleErrInternal::send_err_msg_no_close("temporary_channel_id collision for the same peer!".to_owned(), msg.temporary_channel_id.clone())) + }, + hash_map::Entry::Vacant(entry) => { + if !self.default_configuration.manually_accept_inbound_channels { + if channel.get_channel_type().requires_zero_conf() { + return Err(MsgHandleErrInternal::send_err_msg_no_close("No zero confirmation channels accepted".to_owned(), msg.temporary_channel_id.clone())); } - - entry.insert(channel); + peer_state.pending_msg_events.push(events::MessageSendEvent::SendAcceptChannel { + node_id: counterparty_node_id.clone(), + msg: channel.accept_inbound_channel(user_channel_id), + }); + } else { + let mut pending_events = self.pending_events.lock().unwrap(); + pending_events.push( + events::Event::OpenChannelRequest { + temporary_channel_id: msg.temporary_channel_id.clone(), + counterparty_node_id: counterparty_node_id.clone(), + funding_satoshis: msg.funding_satoshis, + push_msat: msg.push_msat, + channel_type: channel.get_channel_type().clone(), + } + ); } + + entry.insert(channel); } - } else { - return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer with a node_id matching the passed counterparty_node_id {}", counterparty_node_id), msg.temporary_channel_id.clone())) } Ok(()) } - fn internal_accept_channel(&self, counterparty_node_id: &PublicKey, their_features: InitFeatures, msg: &msgs::AcceptChannel) -> Result<(), MsgHandleErrInternal> { + fn internal_accept_channel(&self, counterparty_node_id: &PublicKey, msg: &msgs::AcceptChannel) -> Result<(), MsgHandleErrInternal> { let (value, output_script, user_id) = { let per_peer_state = self.per_peer_state.read().unwrap(); - if let Some(peer_state_mutex) = per_peer_state.get(counterparty_node_id) { - let mut peer_state_lock = peer_state_mutex.lock().unwrap(); - let peer_state = &mut *peer_state_lock; - match 
peer_state.channel_by_id.entry(msg.temporary_channel_id) { - hash_map::Entry::Occupied(mut chan) => { - if chan.get().get_counterparty_node_id() != *counterparty_node_id { - return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.temporary_channel_id)); - } - try_chan_entry!(self, chan.get_mut().accept_channel(&msg, &self.default_configuration.channel_handshake_limits, &their_features), chan); - (chan.get().get_value_satoshis(), chan.get().get_funding_redeemscript().to_v0_p2wsh(), chan.get().get_user_id()) - }, - hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.temporary_channel_id)) - } - } else { - return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer with a node_id matching the passed counterparty_node_id {}", counterparty_node_id), msg.temporary_channel_id)) + let peer_state_mutex_opt = per_peer_state.get(counterparty_node_id); + if let None = peer_state_mutex_opt { + return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), msg.temporary_channel_id)) + } + let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap(); + let peer_state = &mut *peer_state_lock; + match peer_state.channel_by_id.entry(msg.temporary_channel_id) { + hash_map::Entry::Occupied(mut chan) => { + try_chan_entry!(self, chan.get_mut().accept_channel(&msg, &self.default_configuration.channel_handshake_limits, &peer_state.latest_features), chan); + (chan.get().get_value_satoshis(), chan.get().get_funding_redeemscript().to_v0_p2wsh(), chan.get().get_user_id()) + }, + hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! 
No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.temporary_channel_id)) } }; let mut pending_events = self.pending_events.lock().unwrap(); @@ -4284,25 +4427,20 @@ where } fn internal_funding_created(&self, counterparty_node_id: &PublicKey, msg: &msgs::FundingCreated) -> Result<(), MsgHandleErrInternal> { - let mut channel_state_lock = self.channel_state.lock().unwrap(); - let channel_state = &mut *channel_state_lock; let per_peer_state = self.per_peer_state.read().unwrap(); + let peer_state_mutex_opt = per_peer_state.get(counterparty_node_id); + if let None = peer_state_mutex_opt { + return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), msg.temporary_channel_id)) + } let ((funding_msg, monitor, mut channel_ready), mut chan) = { let best_block = *self.best_block.read().unwrap(); - if let Some(peer_state_mutex) = per_peer_state.get(counterparty_node_id) { - let mut peer_state_lock = peer_state_mutex.lock().unwrap(); - let peer_state = &mut *peer_state_lock; - match peer_state.channel_by_id.entry(msg.temporary_channel_id) { - hash_map::Entry::Occupied(mut chan) => { - if chan.get().get_counterparty_node_id() != *counterparty_node_id { - return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.temporary_channel_id)); - } - (try_chan_entry!(self, chan.get_mut().funding_created(msg, best_block, &self.keys_manager, &self.logger), chan), chan.remove()) - }, - hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.temporary_channel_id)) - } - } else { - return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer with a node_id matching the passed counterparty_node_id {}", counterparty_node_id), msg.temporary_channel_id)) + let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap(); + let peer_state = &mut *peer_state_lock; + match peer_state.channel_by_id.entry(msg.temporary_channel_id) { + hash_map::Entry::Occupied(mut chan) => { + (try_chan_entry!(self, chan.get_mut().funding_created(msg, best_block, &self.signer_provider, &self.logger), chan), chan.remove()) + }, + hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.temporary_channel_id)) } }; // Because we have exclusive ownership of the channel here we can release the peer_state @@ -4333,8 +4471,7 @@ where // It's safe to unwrap as we've held the `per_peer_state` read lock since checking that the // peer exists, despite the inner PeerState potentially having no channels after removing // the channel above. 
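// --- Editor's illustrative note (not part of the diff) ----------------------
// Why the unwrap justified by the comment above is sound: an `RwLock` read
// guard pins the map, so an entry observed while the guard is held cannot be
// removed by a writer until the guard drops. A minimal std-only restatement:
use std::collections::HashMap;
use std::sync::RwLock;

fn lookup_then_use(map: &RwLock<HashMap<u32, String>>) {
	let guard = map.read().unwrap();
	if guard.contains_key(&1) {
		// No writer can remove the entry while `guard` is alive, so this
		// unwrap cannot race with a concurrent removal.
		let _v = guard.get(&1).unwrap();
	}
}
// -----------------------------------------------------------------------------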
- let peer_state_mutex = per_peer_state.get(counterparty_node_id).unwrap(); - let mut peer_state_lock = peer_state_mutex.lock().unwrap(); + let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap(); let peer_state = &mut *peer_state_lock; match peer_state.channel_by_id.entry(funding_msg.channel_id) { hash_map::Entry::Occupied(_) => { @@ -4352,12 +4489,12 @@ where i_e.insert(chan.get_counterparty_node_id()); } } - channel_state.pending_msg_events.push(events::MessageSendEvent::SendFundingSigned { + peer_state.pending_msg_events.push(events::MessageSendEvent::SendFundingSigned { node_id: counterparty_node_id.clone(), msg: funding_msg, }); if let Some(msg) = channel_ready { - send_channel_ready!(self, channel_state.pending_msg_events, chan, msg); + send_channel_ready!(self, peer_state.pending_msg_events, chan, msg); } e.insert(chan); } @@ -4368,45 +4505,41 @@ where fn internal_funding_signed(&self, counterparty_node_id: &PublicKey, msg: &msgs::FundingSigned) -> Result<(), MsgHandleErrInternal> { let funding_tx = { let best_block = *self.best_block.read().unwrap(); - let mut channel_lock = self.channel_state.lock().unwrap(); - let channel_state = &mut *channel_lock; let per_peer_state = self.per_peer_state.read().unwrap(); - if let Some(peer_state_mutex) = per_peer_state.get(counterparty_node_id) { - let mut peer_state_lock = peer_state_mutex.lock().unwrap(); - let peer_state = &mut *peer_state_lock; - match peer_state.channel_by_id.entry(msg.channel_id) { - hash_map::Entry::Occupied(mut chan) => { - if chan.get().get_counterparty_node_id() != *counterparty_node_id { - return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id)); - } - let (monitor, funding_tx, channel_ready) = match chan.get_mut().funding_signed(&msg, best_block, &self.keys_manager, &self.logger) { - Ok(update) => update, - Err(e) => try_chan_entry!(self, Err(e), chan), - }; - match self.chain_monitor.watch_channel(chan.get().get_funding_txo().unwrap(), monitor) { - ChannelMonitorUpdateStatus::Completed => {}, - e => { - let mut res = handle_monitor_update_res!(self, e, chan, RAACommitmentOrder::RevokeAndACKFirst, channel_ready.is_some(), OPTIONALLY_RESEND_FUNDING_LOCKED); - if let Err(MsgHandleErrInternal { ref mut shutdown_finish, .. }) = res { - // We weren't able to watch the channel to begin with, so no updates should be made on - // it. Previously, full_stack_target found an (unreachable) panic when the - // monitor update contained within `shutdown_finish` was applied. 
- if let Some((ref mut shutdown_finish, _)) = shutdown_finish { - shutdown_finish.0.take(); - } + let peer_state_mutex_opt = per_peer_state.get(counterparty_node_id); + if let None = peer_state_mutex_opt { + return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), msg.channel_id)) + } + + let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap(); + let peer_state = &mut *peer_state_lock; + match peer_state.channel_by_id.entry(msg.channel_id) { + hash_map::Entry::Occupied(mut chan) => { + let (monitor, funding_tx, channel_ready) = match chan.get_mut().funding_signed(&msg, best_block, &self.signer_provider, &self.logger) { + Ok(update) => update, + Err(e) => try_chan_entry!(self, Err(e), chan), + }; + match self.chain_monitor.watch_channel(chan.get().get_funding_txo().unwrap(), monitor) { + ChannelMonitorUpdateStatus::Completed => {}, + e => { + let mut res = handle_monitor_update_res!(self, e, chan, RAACommitmentOrder::RevokeAndACKFirst, channel_ready.is_some(), OPTIONALLY_RESEND_FUNDING_LOCKED); + if let Err(MsgHandleErrInternal { ref mut shutdown_finish, .. }) = res { + // We weren't able to watch the channel to begin with, so no updates should be made on + // it. Previously, full_stack_target found an (unreachable) panic when the + // monitor update contained within `shutdown_finish` was applied. + if let Some((ref mut shutdown_finish, _)) = shutdown_finish { + shutdown_finish.0.take(); } - return res - }, - } - if let Some(msg) = channel_ready { - send_channel_ready!(self, channel_state.pending_msg_events, chan.get(), msg); - } - funding_tx - }, - hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id)) - } - } else { - return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer with a node_id matching the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id)) + } + return res + }, + } + if let Some(msg) = channel_ready { + send_channel_ready!(self, peer_state.pending_msg_events, chan.get(), msg); + } + funding_tx + }, + hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! 
No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id)) } }; log_info!(self.logger, "Broadcasting funding transaction with txid {}", funding_tx.txid()); @@ -4415,99 +4548,89 @@ where } fn internal_channel_ready(&self, counterparty_node_id: &PublicKey, msg: &msgs::ChannelReady) -> Result<(), MsgHandleErrInternal> { - let mut channel_state_lock = self.channel_state.lock().unwrap(); - let channel_state = &mut *channel_state_lock; let per_peer_state = self.per_peer_state.read().unwrap(); - if let Some(peer_state_mutex) = per_peer_state.get(counterparty_node_id) { - let mut peer_state_lock = peer_state_mutex.lock().unwrap(); - let peer_state = &mut *peer_state_lock; - match peer_state.channel_by_id.entry(msg.channel_id) { - hash_map::Entry::Occupied(mut chan) => { - if chan.get().get_counterparty_node_id() != *counterparty_node_id { - return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id)); - } - let announcement_sigs_opt = try_chan_entry!(self, chan.get_mut().channel_ready(&msg, self.get_our_node_id(), - self.genesis_hash.clone(), &self.best_block.read().unwrap(), &self.logger), chan); - if let Some(announcement_sigs) = announcement_sigs_opt { - log_trace!(self.logger, "Sending announcement_signatures for channel {}", log_bytes!(chan.get().channel_id())); - channel_state.pending_msg_events.push(events::MessageSendEvent::SendAnnouncementSignatures { + let peer_state_mutex_opt = per_peer_state.get(counterparty_node_id); + if let None = peer_state_mutex_opt { + return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), msg.channel_id)); + } + let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap(); + let peer_state = &mut *peer_state_lock; + match peer_state.channel_by_id.entry(msg.channel_id) { + hash_map::Entry::Occupied(mut chan) => { + let announcement_sigs_opt = try_chan_entry!(self, chan.get_mut().channel_ready(&msg, &self.node_signer, + self.genesis_hash.clone(), &self.default_configuration, &self.best_block.read().unwrap(), &self.logger), chan); + if let Some(announcement_sigs) = announcement_sigs_opt { + log_trace!(self.logger, "Sending announcement_signatures for channel {}", log_bytes!(chan.get().channel_id())); + peer_state.pending_msg_events.push(events::MessageSendEvent::SendAnnouncementSignatures { + node_id: counterparty_node_id.clone(), + msg: announcement_sigs, + }); + } else if chan.get().is_usable() { + // If we're sending an announcement_signatures, we'll send the (public) + // channel_update after sending a channel_announcement when we receive our + // counterparty's announcement_signatures. Thus, we only bother to send a + // channel_update here if the channel is not public, i.e. we're not sending an + // announcement_signatures. + log_trace!(self.logger, "Sending private initial channel_update for our counterparty on channel {}", log_bytes!(chan.get().channel_id())); + if let Ok(msg) = self.get_channel_update_for_unicast(chan.get()) { + peer_state.pending_msg_events.push(events::MessageSendEvent::SendChannelUpdate { node_id: counterparty_node_id.clone(), - msg: announcement_sigs, + msg, }); - } else if chan.get().is_usable() { - // If we're sending an announcement_signatures, we'll send the (public) - // channel_update after sending a channel_announcement when we receive our - // counterparty's announcement_signatures. 
Thus, we only bother to send a - // channel_update here if the channel is not public, i.e. we're not sending an - // announcement_signatures. - log_trace!(self.logger, "Sending private initial channel_update for our counterparty on channel {}", log_bytes!(chan.get().channel_id())); - if let Ok(msg) = self.get_channel_update_for_unicast(chan.get()) { - channel_state.pending_msg_events.push(events::MessageSendEvent::SendChannelUpdate { - node_id: counterparty_node_id.clone(), - msg, - }); - } } + } - emit_channel_ready_event!(self, chan.get_mut()); + emit_channel_ready_event!(self, chan.get_mut()); - Ok(()) - }, - hash_map::Entry::Vacant(_) => Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id)) - } - } else { - Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer with a node_id matching the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id)) + Ok(()) + }, + hash_map::Entry::Vacant(_) => Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id)) } } - fn internal_shutdown(&self, counterparty_node_id: &PublicKey, their_features: &InitFeatures, msg: &msgs::Shutdown) -> Result<(), MsgHandleErrInternal> { + fn internal_shutdown(&self, counterparty_node_id: &PublicKey, msg: &msgs::Shutdown) -> Result<(), MsgHandleErrInternal> { let mut dropped_htlcs: Vec<(HTLCSource, PaymentHash)>; let result: Result<(), _> = loop { - let mut channel_state_lock = self.channel_state.lock().unwrap(); - let channel_state = &mut *channel_state_lock; let per_peer_state = self.per_peer_state.read().unwrap(); - if let Some(peer_state_mutex) = per_peer_state.get(counterparty_node_id) { - let mut peer_state_lock = peer_state_mutex.lock().unwrap(); - let peer_state = &mut *peer_state_lock; - match peer_state.channel_by_id.entry(msg.channel_id.clone()) { - hash_map::Entry::Occupied(mut chan_entry) => { - if chan_entry.get().get_counterparty_node_id() != *counterparty_node_id { - return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id)); - } + let peer_state_mutex_opt = per_peer_state.get(counterparty_node_id); + if let None = peer_state_mutex_opt { + return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), msg.channel_id)) + } + let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap(); + let peer_state = &mut *peer_state_lock; + match peer_state.channel_by_id.entry(msg.channel_id.clone()) { + hash_map::Entry::Occupied(mut chan_entry) => { - if !chan_entry.get().received_shutdown() { - log_info!(self.logger, "Received a shutdown message from our counterparty for channel {}{}.", - log_bytes!(msg.channel_id), - if chan_entry.get().sent_shutdown() { " after we initiated shutdown" } else { "" }); - } + if !chan_entry.get().received_shutdown() { + log_info!(self.logger, "Received a shutdown message from our counterparty for channel {}{}.", + log_bytes!(msg.channel_id), + if chan_entry.get().sent_shutdown() { " after we initiated shutdown" } else { "" }); + } - let (shutdown, monitor_update, htlcs) = try_chan_entry!(self, chan_entry.get_mut().shutdown(&self.keys_manager, &their_features, &msg), chan_entry); - dropped_htlcs = htlcs; - - // Update the monitor with the shutdown script if necessary. 
-				if let Some(monitor_update) = monitor_update {
-					let update_res = self.chain_monitor.update_channel(chan_entry.get().get_funding_txo().unwrap(), monitor_update);
-					let (result, is_permanent) =
-						handle_monitor_update_res!(self, update_res, chan_entry.get_mut(), RAACommitmentOrder::CommitmentFirst, chan_entry.key(), NO_UPDATE);
-					if is_permanent {
-						remove_channel!(self, chan_entry);
-						break result;
-					}
+				let (shutdown, monitor_update, htlcs) = try_chan_entry!(self, chan_entry.get_mut().shutdown(&self.signer_provider, &peer_state.latest_features, &msg), chan_entry);
+				dropped_htlcs = htlcs;
+
+				// Update the monitor with the shutdown script if necessary.
+				if let Some(monitor_update) = monitor_update {
+					let update_res = self.chain_monitor.update_channel(chan_entry.get().get_funding_txo().unwrap(), &monitor_update);
+					let (result, is_permanent) =
+						handle_monitor_update_res!(self, update_res, chan_entry.get_mut(), RAACommitmentOrder::CommitmentFirst, chan_entry.key(), NO_UPDATE);
+					if is_permanent {
+						remove_channel!(self, chan_entry);
+						break result;
+					}
 				}
 
-				if let Some(msg) = shutdown {
-					channel_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown {
-						node_id: *counterparty_node_id,
-						msg,
-					});
-				}
+				if let Some(msg) = shutdown {
+					peer_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown {
+						node_id: *counterparty_node_id,
+						msg,
+					});
+				}
 
-				break Ok(());
-			},
-			hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id))
-			}
-		} else {
-			return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer with a node_id matching the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
+				break Ok(());
+			},
+			hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
 			}
 		};
 		for htlc_source in dropped_htlcs.drain(..) {
@@ -4521,38 +4644,33 @@ where
 	}
 
 	fn internal_closing_signed(&self, counterparty_node_id: &PublicKey, msg: &msgs::ClosingSigned) -> Result<(), MsgHandleErrInternal> {
+		let per_peer_state = self.per_peer_state.read().unwrap();
+		let peer_state_mutex_opt = per_peer_state.get(counterparty_node_id);
+		if let None = peer_state_mutex_opt {
+			return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), msg.channel_id))
+		}
 		let (tx, chan_option) = {
-			let mut channel_state_lock = self.channel_state.lock().unwrap();
-			let channel_state = &mut *channel_state_lock;
-			let per_peer_state = self.per_peer_state.read().unwrap();
-			if let Some(peer_state_mutex) = per_peer_state.get(counterparty_node_id) {
-				let mut peer_state_lock = peer_state_mutex.lock().unwrap();
-				let peer_state = &mut *peer_state_lock;
-				match peer_state.channel_by_id.entry(msg.channel_id.clone()) {
-					hash_map::Entry::Occupied(mut chan_entry) => {
-						if chan_entry.get().get_counterparty_node_id() != *counterparty_node_id {
-							return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id));
-						}
-						let (closing_signed, tx) = try_chan_entry!(self, chan_entry.get_mut().closing_signed(&self.fee_estimator, &msg), chan_entry);
-						if let Some(msg) = closing_signed {
-							channel_state.pending_msg_events.push(events::MessageSendEvent::SendClosingSigned {
-								node_id: counterparty_node_id.clone(),
-								msg,
-							});
-						}
-						if tx.is_some() {
-							// We're done with this channel, we've got a signed closing transaction and
-							// will send the closing_signed back to the remote peer upon return. This
-							// also implies there are no pending HTLCs left on the channel, so we can
-							// fully delete it from tracking (the channel monitor is still around to
-							// watch for old state broadcasts)!
-							(tx, Some(remove_channel!(self, chan_entry)))
-						} else { (tx, None) }
-					},
-					hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id))
-				}
-			} else {
-				return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer with a node_id matching the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
+			let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
+			let peer_state = &mut *peer_state_lock;
+			match peer_state.channel_by_id.entry(msg.channel_id.clone()) {
+				hash_map::Entry::Occupied(mut chan_entry) => {
+					let (closing_signed, tx) = try_chan_entry!(self, chan_entry.get_mut().closing_signed(&self.fee_estimator, &msg), chan_entry);
+					if let Some(msg) = closing_signed {
+						peer_state.pending_msg_events.push(events::MessageSendEvent::SendClosingSigned {
+							node_id: counterparty_node_id.clone(),
+							msg,
+						});
+					}
+					if tx.is_some() {
+						// We're done with this channel, we've got a signed closing transaction and
+						// will send the closing_signed back to the remote peer upon return. This
+						// also implies there are no pending HTLCs left on the channel, so we can
+						// fully delete it from tracking (the channel monitor is still around to
+						// watch for old state broadcasts)!
+						(tx, Some(remove_channel!(self, chan_entry)))
+					} else { (tx, None) }
+				},
+				hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
 			}
 		};
 		if let Some(broadcast_tx) = tx {
@@ -4561,8 +4679,9 @@ where
 		}
 		if let Some(chan) = chan_option {
 			if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
-				let mut channel_state = self.channel_state.lock().unwrap();
-				channel_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
+				let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
+				let peer_state = &mut *peer_state_lock;
+				peer_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
 					msg: update
 				});
 			}
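// Every handler rewritten in this patch follows the same locking hierarchy:
// first the `per_peer_state` RwLock read guard, then the individual peer's
// Mutex, and only then `channel_by_id` and `pending_msg_events`. A minimal
// self-contained sketch of that pattern, using simplified stand-in types
// rather than LDK's real `PeerState`/`MessageSendEvent` definitions:
use std::collections::HashMap;
use std::sync::{Mutex, RwLock};

struct PeerState {
	channel_by_id: HashMap<[u8; 32], ()>,  // stand-in for Channel<Signer>
	pending_msg_events: Vec<&'static str>, // stand-in for Vec<MessageSendEvent>
}

struct Manager {
	per_peer_state: RwLock<HashMap<[u8; 33], Mutex<PeerState>>>,
}

impl Manager {
	fn handle_msg(&self, counterparty: &[u8; 33], channel_id: &[u8; 32]) -> Result<(), &'static str> {
		// Outer read lock first: handlers for different peers run in parallel.
		let per_peer_state = self.per_peer_state.read().unwrap();
		// A missing peer errors out before any channel lookup, mirroring the
		// `if let None = peer_state_mutex_opt { return Err(...) }` checks above.
		let peer_state_mutex = per_peer_state.get(counterparty).ok_or("no such peer")?;
		// Then the single per-peer Mutex, never the other way around.
		let mut peer_state = peer_state_mutex.lock().unwrap();
		if !peer_state.channel_by_id.contains_key(channel_id) {
			return Err("no such channel for this peer");
		}
		// Replies are queued under the same per-peer lock they were generated in.
		peer_state.pending_msg_events.push("SendClosingSigned");
		Ok(())
	}
}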
@@ -4583,166 +4702,148 @@ where
 		let pending_forward_info = self.decode_update_add_htlc_onion(msg);
 		let per_peer_state = self.per_peer_state.read().unwrap();
-		if let Some(peer_state_mutex) = per_peer_state.get(counterparty_node_id) {
-			let mut peer_state_lock = peer_state_mutex.lock().unwrap();
-			let peer_state = &mut *peer_state_lock;
-			match peer_state.channel_by_id.entry(msg.channel_id) {
-				hash_map::Entry::Occupied(mut chan) => {
-					if chan.get().get_counterparty_node_id() != *counterparty_node_id {
-						return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id));
+		let peer_state_mutex_opt = per_peer_state.get(counterparty_node_id);
+		if let None = peer_state_mutex_opt {
+			return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), msg.channel_id))
+		}
+		let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
+		let peer_state = &mut *peer_state_lock;
+		match peer_state.channel_by_id.entry(msg.channel_id) {
+			hash_map::Entry::Occupied(mut chan) => {
+
+				let create_pending_htlc_status = |chan: &Channel<<SP::Target as SignerProvider>::Signer>, pending_forward_info: PendingHTLCStatus, error_code: u16| {
+					// If the update_add is completely bogus, the call will Err and we will close,
+					// but if we've sent a shutdown and they haven't acknowledged it yet, we just
+					// want to reject the new HTLC and fail it backwards instead of forwarding.
+					match pending_forward_info {
+						PendingHTLCStatus::Forward(PendingHTLCInfo { ref incoming_shared_secret, .. }) => {
+							let reason = if (error_code & 0x1000) != 0 {
+								let (real_code, error_data) = self.get_htlc_inbound_temp_fail_err_and_data(error_code, chan);
+								HTLCFailReason::reason(real_code, error_data)
+							} else {
+								HTLCFailReason::from_failure_code(error_code)
+							}.get_encrypted_failure_packet(incoming_shared_secret, &None);
+							let msg = msgs::UpdateFailHTLC {
+								channel_id: msg.channel_id,
+								htlc_id: msg.htlc_id,
+								reason
+							};
+							PendingHTLCStatus::Fail(HTLCFailureMsg::Relay(msg))
+						},
+						_ => pending_forward_info
 					}
-
-					let create_pending_htlc_status = |chan: &Channel<<K::Target as KeysInterface>::Signer>, pending_forward_info: PendingHTLCStatus, error_code: u16| {
-						// If the update_add is completely bogus, the call will Err and we will close,
-						// but if we've sent a shutdown and they haven't acknowledged it yet, we just
-						// want to reject the new HTLC and fail it backwards instead of forwarding.
-						match pending_forward_info {
-							PendingHTLCStatus::Forward(PendingHTLCInfo { ref incoming_shared_secret, .. }) => {
-								let reason = if (error_code & 0x1000) != 0 {
-									let (real_code, error_data) = self.get_htlc_inbound_temp_fail_err_and_data(error_code, chan);
-									HTLCFailReason::reason(real_code, error_data)
-								} else {
-									HTLCFailReason::from_failure_code(error_code)
-								}.get_encrypted_failure_packet(incoming_shared_secret, &None);
-								let msg = msgs::UpdateFailHTLC {
-									channel_id: msg.channel_id,
-									htlc_id: msg.htlc_id,
-									reason
-								};
-								PendingHTLCStatus::Fail(HTLCFailureMsg::Relay(msg))
-							},
-							_ => pending_forward_info
-						}
-					};
-					try_chan_entry!(self, chan.get_mut().update_add_htlc(&msg, pending_forward_info, create_pending_htlc_status, &self.logger), chan);
-				},
-				hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id))
-			}
-		} else {
-			return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer with a node_id matching the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
+				};
+				try_chan_entry!(self, chan.get_mut().update_add_htlc(&msg, pending_forward_info, create_pending_htlc_status, &self.logger), chan);
+			},
+			hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
 		}
 		Ok(())
 	}
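// The `error_code & 0x1000` test in the closure above, and the
// `failure_code & 0x8000` test in internal_update_fail_malformed_htlc below,
// check the BOLT 4 failure-code class bits. A sketch of the bit layout
// (constants per BOLT 4; `describe` is an illustrative helper, not part of
// this file):
const BADONION: u16 = 0x8000; // sender supplied a bad onion; sha256_of_onion is returned
const PERM: u16 = 0x4000;     // permanent failure; retrying the same route is useless
const NODE: u16 = 0x2000;     // failure at a node rather than at a channel
const UPDATE: u16 = 0x1000;   // an up-to-date channel_update is enclosed in the error data

fn describe(code: u16) -> &'static str {
	if code & BADONION != 0 {
		// update_fail_malformed_htlc must carry this bit, hence the
		// channel-close path when `(msg.failure_code & 0x8000) == 0`.
		"malformed onion"
	} else if code & UPDATE != 0 {
		// The `(error_code & 0x1000) != 0` branch: the error data must carry a
		// channel_update, which get_htlc_inbound_temp_fail_err_and_data appends
		// before the failure packet is encrypted.
		"channel-level failure with enclosed channel_update"
	} else if code & PERM != 0 {
		"permanent failure"
	} else if code & NODE != 0 {
		"transient node failure"
	} else {
		"other"
	}
}
// E.g. temporary_channel_failure is UPDATE | 7, while the
// incorrect_or_unknown_payment_details code used for unknown payment hashes
// is PERM | 15.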
 
 	fn internal_update_fulfill_htlc(&self, counterparty_node_id: &PublicKey, msg: &msgs::UpdateFulfillHTLC) -> Result<(), MsgHandleErrInternal> {
-		let channel_lock = self.channel_state.lock().unwrap();
 		let (htlc_source, forwarded_htlc_value) = {
 			let per_peer_state = self.per_peer_state.read().unwrap();
-			if let None = per_peer_state.get(counterparty_node_id) {
-				return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer with a node_id matching the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id));
+			let peer_state_mutex_opt = per_peer_state.get(counterparty_node_id);
+			if let None = peer_state_mutex_opt {
+				return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), msg.channel_id));
 			}
-			let peer_state_mutex = per_peer_state.get(counterparty_node_id).unwrap();
-			let mut peer_state_lock = peer_state_mutex.lock().unwrap();
+			let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
 			let peer_state = &mut *peer_state_lock;
 			match peer_state.channel_by_id.entry(msg.channel_id) {
 				hash_map::Entry::Occupied(mut chan) => {
-					if chan.get().get_counterparty_node_id() != *counterparty_node_id {
-						return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id));
-					}
 					try_chan_entry!(self, chan.get_mut().update_fulfill_htlc(&msg), chan)
 				},
-				hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id))
+				hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
 			}
 		};
-		self.claim_funds_internal(channel_lock, htlc_source, msg.payment_preimage.clone(), Some(forwarded_htlc_value), false, msg.channel_id);
+		self.claim_funds_internal(htlc_source, msg.payment_preimage.clone(), Some(forwarded_htlc_value), false, msg.channel_id);
 		Ok(())
 	}
 
 	fn internal_update_fail_htlc(&self, counterparty_node_id: &PublicKey, msg: &msgs::UpdateFailHTLC) -> Result<(), MsgHandleErrInternal> {
 		let per_peer_state = self.per_peer_state.read().unwrap();
-		if let Some(peer_state_mutex) = per_peer_state.get(counterparty_node_id) {
-			let mut peer_state_lock = peer_state_mutex.lock().unwrap();
-			let peer_state = &mut *peer_state_lock;
-			match peer_state.channel_by_id.entry(msg.channel_id) {
-				hash_map::Entry::Occupied(mut chan) => {
-					if chan.get().get_counterparty_node_id() != *counterparty_node_id {
-						return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id));
-					}
-					try_chan_entry!(self, chan.get_mut().update_fail_htlc(&msg, HTLCFailReason::from_msg(msg)), chan);
-				},
-				hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id))
-			}
-		} else {
-			return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer with a node_id matching the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id));
+		let peer_state_mutex_opt = per_peer_state.get(counterparty_node_id);
+		if let None = peer_state_mutex_opt {
+			return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), msg.channel_id));
+		}
+		let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
+		let peer_state = &mut *peer_state_lock;
+		match peer_state.channel_by_id.entry(msg.channel_id) {
+			hash_map::Entry::Occupied(mut chan) => {
+				try_chan_entry!(self, chan.get_mut().update_fail_htlc(&msg, HTLCFailReason::from_msg(msg)), chan);
+			},
+			hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
 		}
 		Ok(())
 	}
 
 	fn internal_update_fail_malformed_htlc(&self, counterparty_node_id: &PublicKey, msg: &msgs::UpdateFailMalformedHTLC) -> Result<(), MsgHandleErrInternal> {
 		let per_peer_state = self.per_peer_state.read().unwrap();
-		if let Some(peer_state_mutex) = per_peer_state.get(counterparty_node_id) {
-			let mut peer_state_lock = peer_state_mutex.lock().unwrap();
-			let peer_state = &mut *peer_state_lock;
-			match peer_state.channel_by_id.entry(msg.channel_id) {
-				hash_map::Entry::Occupied(mut chan) => {
-					if chan.get().get_counterparty_node_id() != *counterparty_node_id {
-						return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id));
-					}
-					if (msg.failure_code & 0x8000) == 0 {
-						let chan_err: ChannelError = ChannelError::Close("Got update_fail_malformed_htlc with BADONION not set".to_owned());
-						try_chan_entry!(self, Err(chan_err), chan);
-					}
-					try_chan_entry!(self, chan.get_mut().update_fail_malformed_htlc(&msg, HTLCFailReason::reason(msg.failure_code, msg.sha256_of_onion.to_vec())), chan);
-					Ok(())
-				},
-				hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id))
-			}
-		} else {
-			return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer with a node_id matching the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
+		let peer_state_mutex_opt = per_peer_state.get(counterparty_node_id);
+		if let None = peer_state_mutex_opt {
+			return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), msg.channel_id))
+		}
+		let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
+		let peer_state = &mut *peer_state_lock;
+		match peer_state.channel_by_id.entry(msg.channel_id) {
+			hash_map::Entry::Occupied(mut chan) => {
+				if (msg.failure_code & 0x8000) == 0 {
+					let chan_err: ChannelError = ChannelError::Close("Got update_fail_malformed_htlc with BADONION not set".to_owned());
+					try_chan_entry!(self, Err(chan_err), chan);
+				}
+				try_chan_entry!(self, chan.get_mut().update_fail_malformed_htlc(&msg, HTLCFailReason::reason(msg.failure_code, msg.sha256_of_onion.to_vec())), chan);
+				Ok(())
+			},
+			hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
 		}
 	}
 
 	fn internal_commitment_signed(&self, counterparty_node_id: &PublicKey, msg: &msgs::CommitmentSigned) -> Result<(), MsgHandleErrInternal> {
-		let mut channel_state_lock = self.channel_state.lock().unwrap();
-		let channel_state = &mut *channel_state_lock;
 		let per_peer_state = self.per_peer_state.read().unwrap();
-		if let Some(peer_state_mutex) = per_peer_state.get(counterparty_node_id) {
-			let mut peer_state_lock = peer_state_mutex.lock().unwrap();
-			let peer_state = &mut *peer_state_lock;
-			match peer_state.channel_by_id.entry(msg.channel_id) {
-				hash_map::Entry::Occupied(mut chan) => {
-					if chan.get().get_counterparty_node_id() != *counterparty_node_id {
-						return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id));
-					}
-					let (revoke_and_ack, commitment_signed, monitor_update) =
-						match chan.get_mut().commitment_signed(&msg, &self.logger) {
-							Err((None, e)) => try_chan_entry!(self, Err(e), chan),
-							Err((Some(update), e)) => {
-								assert!(chan.get().is_awaiting_monitor_update());
-								let _ = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), update);
-								try_chan_entry!(self, Err(e), chan);
-								unreachable!();
-							},
-							Ok(res) => res
-						};
-					let update_res = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), monitor_update);
-					if let Err(e) = handle_monitor_update_res!(self, update_res, chan, RAACommitmentOrder::RevokeAndACKFirst, true, commitment_signed.is_some()) {
-						return Err(e);
-					}
+		let peer_state_mutex_opt = per_peer_state.get(counterparty_node_id);
+		if let None = peer_state_mutex_opt {
+			return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), msg.channel_id))
+		}
+		let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
+		let peer_state = &mut *peer_state_lock;
+		match peer_state.channel_by_id.entry(msg.channel_id) {
+			hash_map::Entry::Occupied(mut chan) => {
+				let (revoke_and_ack, commitment_signed, monitor_update) =
+					match chan.get_mut().commitment_signed(&msg, &self.logger) {
+						Err((None, e)) => try_chan_entry!(self, Err(e), chan),
+						Err((Some(update), e)) => {
+							assert!(chan.get().is_awaiting_monitor_update());
+							let _ = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), &update);
+							try_chan_entry!(self, Err(e), chan);
+							unreachable!();
+						},
+						Ok(res) => res
+					};
+				let update_res = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), &monitor_update);
+				if let Err(e) = handle_monitor_update_res!(self, update_res, chan, RAACommitmentOrder::RevokeAndACKFirst, true, commitment_signed.is_some()) {
+					return Err(e);
+				}
 
-					channel_state.pending_msg_events.push(events::MessageSendEvent::SendRevokeAndACK {
+				peer_state.pending_msg_events.push(events::MessageSendEvent::SendRevokeAndACK {
+					node_id: counterparty_node_id.clone(),
+					msg: revoke_and_ack,
+				});
+				if let Some(msg) = commitment_signed {
+					peer_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
 						node_id: counterparty_node_id.clone(),
-						msg: revoke_and_ack,
+						updates: msgs::CommitmentUpdate {
+							update_add_htlcs: Vec::new(),
+							update_fulfill_htlcs: Vec::new(),
+							update_fail_htlcs: Vec::new(),
+							update_fail_malformed_htlcs: Vec::new(),
+							update_fee: None,
+							commitment_signed: msg,
+						},
 					});
-					if let Some(msg) = commitment_signed {
-						channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
-							node_id: counterparty_node_id.clone(),
-							updates: msgs::CommitmentUpdate {
-								update_add_htlcs: Vec::new(),
-								update_fulfill_htlcs: Vec::new(),
-								update_fail_htlcs: Vec::new(),
-								update_fail_malformed_htlcs: Vec::new(),
-								update_fee: None,
-								commitment_signed: msg,
-							},
-						});
-					}
-					Ok(())
-				},
-				hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id))
-			}
-		} else {
-			return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer with a node_id matching the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
+				}
+				Ok(())
+			},
+			hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
 		}
 	}
 
@@ -4841,56 +4942,51 @@ where
 	fn internal_revoke_and_ack(&self, counterparty_node_id: &PublicKey, msg: &msgs::RevokeAndACK) -> Result<(), MsgHandleErrInternal> {
 		let mut htlcs_to_fail = Vec::new();
 		let res = loop {
-			let mut channel_state_lock = self.channel_state.lock().unwrap();
-			let channel_state = &mut *channel_state_lock;
 			let per_peer_state = self.per_peer_state.read().unwrap();
-			if let Some(peer_state_mutex) = per_peer_state.get(counterparty_node_id) {
-				let mut peer_state_lock = peer_state_mutex.lock().unwrap();
-				let peer_state = &mut *peer_state_lock;
-				match peer_state.channel_by_id.entry(msg.channel_id) {
-					hash_map::Entry::Occupied(mut chan) => {
-						if chan.get().get_counterparty_node_id() != *counterparty_node_id {
-							break Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id));
-						}
-						let was_paused_for_mon_update = chan.get().is_awaiting_monitor_update();
-						let raa_updates = break_chan_entry!(self,
-							chan.get_mut().revoke_and_ack(&msg, &self.logger), chan);
-						htlcs_to_fail = raa_updates.holding_cell_failed_htlcs;
-						let update_res = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), raa_updates.monitor_update);
-						if was_paused_for_mon_update {
-							assert!(update_res != ChannelMonitorUpdateStatus::Completed);
-							assert!(raa_updates.commitment_update.is_none());
-							assert!(raa_updates.accepted_htlcs.is_empty());
-							assert!(raa_updates.failed_htlcs.is_empty());
-							assert!(raa_updates.finalized_claimed_htlcs.is_empty());
-							break Err(MsgHandleErrInternal::ignore_no_close("Existing pending monitor update prevented responses to RAA".to_owned()));
-						}
-						if update_res != ChannelMonitorUpdateStatus::Completed {
-							if let Err(e) = handle_monitor_update_res!(self, update_res, chan,
-									RAACommitmentOrder::CommitmentFirst, false,
-									raa_updates.commitment_update.is_some(), false,
-									raa_updates.accepted_htlcs, raa_updates.failed_htlcs,
-									raa_updates.finalized_claimed_htlcs) {
-								break Err(e);
-							} else { unreachable!(); }
-						}
-						if let Some(updates) = raa_updates.commitment_update {
-							channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
-								node_id: counterparty_node_id.clone(),
-								updates,
-							});
-						}
-						break Ok((raa_updates.accepted_htlcs, raa_updates.failed_htlcs,
-								raa_updates.finalized_claimed_htlcs,
-								chan.get().get_short_channel_id()
-									.unwrap_or(chan.get().outbound_scid_alias()),
-								chan.get().get_funding_txo().unwrap(),
-								chan.get().get_user_id()))
-					},
-					hash_map::Entry::Vacant(_) => break Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id))
-				}
-			} else {
-				break Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer with a node_id matching the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
+			let peer_state_mutex_opt = per_peer_state.get(counterparty_node_id);
+			if let None = peer_state_mutex_opt {
+				break Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), msg.channel_id))
+			}
+			let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
+			let peer_state = &mut *peer_state_lock;
+			match peer_state.channel_by_id.entry(msg.channel_id) {
+				hash_map::Entry::Occupied(mut chan) => {
+					let was_paused_for_mon_update = chan.get().is_awaiting_monitor_update();
+					let raa_updates = break_chan_entry!(self,
+						chan.get_mut().revoke_and_ack(&msg, &self.logger), chan);
+					htlcs_to_fail = raa_updates.holding_cell_failed_htlcs;
+					let update_res = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), &raa_updates.monitor_update);
+					if was_paused_for_mon_update {
+						assert!(update_res != ChannelMonitorUpdateStatus::Completed);
+						assert!(raa_updates.commitment_update.is_none());
+						assert!(raa_updates.accepted_htlcs.is_empty());
+						assert!(raa_updates.failed_htlcs.is_empty());
+						assert!(raa_updates.finalized_claimed_htlcs.is_empty());
+						break Err(MsgHandleErrInternal::ignore_no_close("Existing pending monitor update prevented responses to RAA".to_owned()));
+					}
+					if update_res != ChannelMonitorUpdateStatus::Completed {
+						if let Err(e) = handle_monitor_update_res!(self, update_res, chan,
+								RAACommitmentOrder::CommitmentFirst, false,
+								raa_updates.commitment_update.is_some(), false,
+								raa_updates.accepted_htlcs, raa_updates.failed_htlcs,
+								raa_updates.finalized_claimed_htlcs) {
+							break Err(e);
+						} else { unreachable!(); }
+					}
+					if let Some(updates) = raa_updates.commitment_update {
+						peer_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
+							node_id: counterparty_node_id.clone(),
+							updates,
+						});
+					}
+					break Ok((raa_updates.accepted_htlcs, raa_updates.failed_htlcs,
+							raa_updates.finalized_claimed_htlcs,
+							chan.get().get_short_channel_id()
+								.unwrap_or(chan.get().outbound_scid_alias()),
+							chan.get().get_funding_txo().unwrap(),
+							chan.get().get_user_id()))
+				},
+				hash_map::Entry::Vacant(_) => break Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
 			}
 		};
 		self.fail_holding_cell_htlcs(htlcs_to_fail, msg.channel_id, counterparty_node_id);
@@ -4912,52 +5008,46 @@ where
 	fn internal_update_fee(&self, counterparty_node_id: &PublicKey, msg: &msgs::UpdateFee) -> Result<(), MsgHandleErrInternal> {
 		let per_peer_state = self.per_peer_state.read().unwrap();
-		if let Some(peer_state_mutex) = per_peer_state.get(counterparty_node_id) {
-			let mut peer_state_lock = peer_state_mutex.lock().unwrap();
-			let peer_state = &mut *peer_state_lock;
-			match peer_state.channel_by_id.entry(msg.channel_id) {
-				hash_map::Entry::Occupied(mut chan) => {
-					if chan.get().get_counterparty_node_id() != *counterparty_node_id {
-						return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id));
-					}
-					try_chan_entry!(self, chan.get_mut().update_fee(&self.fee_estimator, &msg, &self.logger), chan);
-				},
-				hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id))
-			}
-		} else {
-			return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer with a node_id matching the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id));
+		let peer_state_mutex_opt = per_peer_state.get(counterparty_node_id);
+		if let None = peer_state_mutex_opt {
+			return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), msg.channel_id));
+		}
+		let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
+		let peer_state = &mut *peer_state_lock;
+		match peer_state.channel_by_id.entry(msg.channel_id) {
+			hash_map::Entry::Occupied(mut chan) => {
+				try_chan_entry!(self, chan.get_mut().update_fee(&self.fee_estimator, &msg, &self.logger), chan);
+			},
+			hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
 		}
 		Ok(())
 	}
 
 	fn internal_announcement_signatures(&self, counterparty_node_id: &PublicKey, msg: &msgs::AnnouncementSignatures) -> Result<(), MsgHandleErrInternal> {
-		let mut channel_state_lock = self.channel_state.lock().unwrap();
-		let channel_state = &mut *channel_state_lock;
 		let per_peer_state = self.per_peer_state.read().unwrap();
-		if let Some(peer_state_mutex) = per_peer_state.get(counterparty_node_id) {
-			let mut peer_state_lock = peer_state_mutex.lock().unwrap();
-			let peer_state = &mut *peer_state_lock;
-			match peer_state.channel_by_id.entry(msg.channel_id) {
-				hash_map::Entry::Occupied(mut chan) => {
-					if chan.get().get_counterparty_node_id() != *counterparty_node_id {
-						return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id));
-					}
-					if !chan.get().is_usable() {
-						return Err(MsgHandleErrInternal::from_no_close(LightningError{err: "Got an announcement_signatures before we were ready for it".to_owned(), action: msgs::ErrorAction::IgnoreError}));
-					}
-
-					channel_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelAnnouncement {
-						msg: try_chan_entry!(self, chan.get_mut().announcement_signatures(
-							self.get_our_node_id(), self.genesis_hash.clone(), self.best_block.read().unwrap().height(), msg), chan),
-						// Note that announcement_signatures fails if the channel cannot be announced,
-						// so get_channel_update_for_broadcast will never fail by the time we get here.
-						update_msg: self.get_channel_update_for_broadcast(chan.get()).unwrap(),
-					});
-				},
-				hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id))
-			}
-		} else {
-			return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer with a node_id matching the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id));
+		let peer_state_mutex_opt = per_peer_state.get(counterparty_node_id);
+		if let None = peer_state_mutex_opt {
+			return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), msg.channel_id));
+		}
+		let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
+		let peer_state = &mut *peer_state_lock;
+		match peer_state.channel_by_id.entry(msg.channel_id) {
+			hash_map::Entry::Occupied(mut chan) => {
+				if !chan.get().is_usable() {
+					return Err(MsgHandleErrInternal::from_no_close(LightningError{err: "Got an announcement_signatures before we were ready for it".to_owned(), action: msgs::ErrorAction::IgnoreError}));
+				}
+
+				peer_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelAnnouncement {
+					msg: try_chan_entry!(self, chan.get_mut().announcement_signatures(
+						&self.node_signer, self.genesis_hash.clone(), self.best_block.read().unwrap().height(),
+						msg, &self.default_configuration
+					), chan),
+					// Note that announcement_signatures fails if the channel cannot be announced,
+					// so get_channel_update_for_broadcast will never fail by the time we get here.
+					update_msg: Some(self.get_channel_update_for_broadcast(chan.get()).unwrap()),
+				});
+			},
+			hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
 		}
 		Ok(())
 	}
 
@@ -4972,33 +5062,33 @@
 			}
 		};
 		let per_peer_state = self.per_peer_state.read().unwrap();
-		if let Some(peer_state_mutex) = per_peer_state.get(&chan_counterparty_node_id) {
-			let mut peer_state_lock = peer_state_mutex.lock().unwrap();
-			let peer_state = &mut *peer_state_lock;
-			match peer_state.channel_by_id.entry(chan_id) {
-				hash_map::Entry::Occupied(mut chan) => {
-					if chan.get().get_counterparty_node_id() != *counterparty_node_id {
-						if chan.get().should_announce() {
-							// If the announcement is about a channel of ours which is public, some
-							// other peer may simply be forwarding all its gossip to us. Don't provide
-							// a scary-looking error message and return Ok instead.
-							return Ok(NotifyOption::SkipPersist);
-						}
-						return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a channel_update for a channel from the wrong node - it shouldn't know about our private channels!".to_owned(), chan_id));
-					}
-					let were_node_one = self.get_our_node_id().serialize()[..] < chan.get().get_counterparty_node_id().serialize()[..];
-					let msg_from_node_one = msg.contents.flags & 1 == 0;
-					if were_node_one == msg_from_node_one {
-						return Ok(NotifyOption::SkipPersist);
-					} else {
-						log_debug!(self.logger, "Received channel_update for channel {}.", log_bytes!(chan_id));
-						try_chan_entry!(self, chan.get_mut().channel_update(&msg), chan);
-					}
-				},
-				hash_map::Entry::Vacant(_) => return Ok(NotifyOption::SkipPersist)
-			}
-		} else {
-			return Ok(NotifyOption::SkipPersist)
+		let peer_state_mutex_opt = per_peer_state.get(&chan_counterparty_node_id);
+		if let None = peer_state_mutex_opt {
+			return Ok(NotifyOption::SkipPersist)
+		}
+		let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
+		let peer_state = &mut *peer_state_lock;
+		match peer_state.channel_by_id.entry(chan_id) {
+			hash_map::Entry::Occupied(mut chan) => {
+				if chan.get().get_counterparty_node_id() != *counterparty_node_id {
+					if chan.get().should_announce() {
+						// If the announcement is about a channel of ours which is public, some
+						// other peer may simply be forwarding all its gossip to us. Don't provide
+						// a scary-looking error message and return Ok instead.
+						return Ok(NotifyOption::SkipPersist);
+					}
+					return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a channel_update for a channel from the wrong node - it shouldn't know about our private channels!".to_owned(), chan_id));
+				}
+				let were_node_one = self.get_our_node_id().serialize()[..] < chan.get().get_counterparty_node_id().serialize()[..];
+				let msg_from_node_one = msg.contents.flags & 1 == 0;
+				if were_node_one == msg_from_node_one {
+					return Ok(NotifyOption::SkipPersist);
+				} else {
+					log_debug!(self.logger, "Received channel_update for channel {}.", log_bytes!(chan_id));
+					try_chan_entry!(self, chan.get_mut().channel_update(&msg), chan);
+				}
+			},
+			hash_map::Entry::Vacant(_) => return Ok(NotifyOption::SkipPersist)
 		}
 		Ok(NotifyOption::DoPersist)
 	}
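// The `were_node_one`/`msg_from_node_one` comparison above implements BOLT 7's
// direction bit: bit 0 of `channel_update.flags` says whether the update was
// produced by `node_1`, the lexicographically-lesser of the two serialized
// node pubkeys. A self-contained sketch of the same test (an illustrative
// helper, not part of this file):
fn counterparty_produced_update(our_pubkey: &[u8; 33], their_pubkey: &[u8; 33], flags: u8) -> bool {
	// `node_1` is whichever serialized pubkey sorts first.
	let we_are_node_one = our_pubkey[..] < their_pubkey[..];
	// Bit 0 of `flags` is the direction: 0 means the update was signed by node_1.
	let update_from_node_one = flags & 1 == 0;
	// If the update is "from" our own side it is just our gossip echoed back
	// over the wire, which is why the handler above returns SkipPersist for it.
	we_are_node_one != update_from_node_one
}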
@@ -5006,55 +5096,50 @@ where
 	fn internal_channel_reestablish(&self, counterparty_node_id: &PublicKey, msg: &msgs::ChannelReestablish) -> Result<(), MsgHandleErrInternal> {
 		let htlc_forwards;
 		let need_lnd_workaround = {
-			let mut channel_state_lock = self.channel_state.lock().unwrap();
-			let channel_state = &mut *channel_state_lock;
 			let per_peer_state = self.per_peer_state.read().unwrap();
-			if let Some(peer_state_mutex) = per_peer_state.get(counterparty_node_id) {
-				let mut peer_state_lock = peer_state_mutex.lock().unwrap();
-				let peer_state = &mut *peer_state_lock;
-				match peer_state.channel_by_id.entry(msg.channel_id) {
-					hash_map::Entry::Occupied(mut chan) => {
-						if chan.get().get_counterparty_node_id() != *counterparty_node_id {
-							return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id));
-						}
-						// Currently, we expect all holding cell update_adds to be dropped on peer
-						// disconnect, so Channel's reestablish will never hand us any holding cell
-						// freed HTLCs to fail backwards. If in the future we no longer drop pending
-						// add-HTLCs on disconnect, we may be handed HTLCs to fail backwards here.
-						let responses = try_chan_entry!(self, chan.get_mut().channel_reestablish(
-							msg, &self.logger, self.our_network_pubkey.clone(), self.genesis_hash,
-							&*self.best_block.read().unwrap()), chan);
-						let mut channel_update = None;
-						if let Some(msg) = responses.shutdown_msg {
-							channel_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown {
-								node_id: counterparty_node_id.clone(),
+			let peer_state_mutex_opt = per_peer_state.get(counterparty_node_id);
+			if let None = peer_state_mutex_opt {
+				return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), msg.channel_id));
+			}
+			let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
+			let peer_state = &mut *peer_state_lock;
+			match peer_state.channel_by_id.entry(msg.channel_id) {
+				hash_map::Entry::Occupied(mut chan) => {
+					// Currently, we expect all holding cell update_adds to be dropped on peer
+					// disconnect, so Channel's reestablish will never hand us any holding cell
+					// freed HTLCs to fail backwards. If in the future we no longer drop pending
+					// add-HTLCs on disconnect, we may be handed HTLCs to fail backwards here.
+					let responses = try_chan_entry!(self, chan.get_mut().channel_reestablish(
+						msg, &self.logger, &self.node_signer, self.genesis_hash,
+						&self.default_configuration, &*self.best_block.read().unwrap()), chan);
+					let mut channel_update = None;
+					if let Some(msg) = responses.shutdown_msg {
+						peer_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown {
+							node_id: counterparty_node_id.clone(),
+							msg,
+						});
+					} else if chan.get().is_usable() {
+						// If the channel is in a usable state (ie the channel is not being shut
+						// down), send a unicast channel_update to our counterparty to make sure
+						// they have the latest channel parameters.
+						if let Ok(msg) = self.get_channel_update_for_unicast(chan.get()) {
+							channel_update = Some(events::MessageSendEvent::SendChannelUpdate {
+								node_id: chan.get().get_counterparty_node_id(),
 								msg,
 							});
-						} else if chan.get().is_usable() {
-							// If the channel is in a usable state (ie the channel is not being shut
-							// down), send a unicast channel_update to our counterparty to make sure
-							// they have the latest channel parameters.
-							if let Ok(msg) = self.get_channel_update_for_unicast(chan.get()) {
-								channel_update = Some(events::MessageSendEvent::SendChannelUpdate {
-									node_id: chan.get().get_counterparty_node_id(),
-									msg,
-								});
-							}
-						}
-						let need_lnd_workaround = chan.get_mut().workaround_lnd_bug_4006.take();
-						htlc_forwards = self.handle_channel_resumption(
-							&mut channel_state.pending_msg_events, chan.get_mut(), responses.raa, responses.commitment_update, responses.order,
-							Vec::new(), None, responses.channel_ready, responses.announcement_sigs);
-						if let Some(upd) = channel_update {
-							channel_state.pending_msg_events.push(upd);
						}
-						need_lnd_workaround
-					},
-					hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id))
-				}
-			} else {
-				return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer with a node_id matching the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id));
+					}
+					let need_lnd_workaround = chan.get_mut().workaround_lnd_bug_4006.take();
+					htlc_forwards = self.handle_channel_resumption(
+						&mut peer_state.pending_msg_events, chan.get_mut(), responses.raa, responses.commitment_update, responses.order,
+						Vec::new(), None, responses.channel_ready, responses.announcement_sigs);
+					if let Some(upd) = channel_update {
+						peer_state.pending_msg_events.push(upd);
+					}
+					need_lnd_workaround
+				},
+				hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
 			}
 		};
 
@@ -5079,7 +5164,7 @@ where
 				MonitorEvent::HTLCEvent(htlc_update) => {
 					if let Some(preimage) = htlc_update.payment_preimage {
 						log_trace!(self.logger, "Claiming HTLC with preimage {} from our monitor", log_bytes!(preimage.0));
-						self.claim_funds_internal(self.channel_state.lock().unwrap(), htlc_update.source, preimage, htlc_update.htlc_value_satoshis.map(|v| v * 1000), true, funding_outpoint.to_channel_id());
+						self.claim_funds_internal(htlc_update.source, preimage, htlc_update.htlc_value_satoshis.map(|v| v * 1000), true, funding_outpoint.to_channel_id());
 					} else {
 						log_trace!(self.logger, "Failing HTLC with hash {} from our monitor", log_bytes!(htlc_update.payment_hash.0));
 						let receiver = HTLCDestination::NextHopChannel { node_id: counterparty_node_id, channel_id: funding_outpoint.to_channel_id() };
@@ -5089,8 +5174,6 @@ where
 				},
 				MonitorEvent::CommitmentTxConfirmed(funding_outpoint) |
 				MonitorEvent::UpdateFailed(funding_outpoint) => {
-					let mut channel_lock = self.channel_state.lock().unwrap();
-					let channel_state = &mut *channel_lock;
 					let counterparty_node_id_opt = match counterparty_node_id {
 						Some(cp_id) => Some(cp_id),
 						None => {
@@ -5103,9 +5186,9 @@ where
 					if let Some(counterparty_node_id) = counterparty_node_id_opt {
 						let per_peer_state = self.per_peer_state.read().unwrap();
 						if let Some(peer_state_mutex) = per_peer_state.get(&counterparty_node_id) {
-							let pending_msg_events = &mut channel_state.pending_msg_events;
 							let mut peer_state_lock = peer_state_mutex.lock().unwrap();
 							let peer_state = &mut *peer_state_lock;
+							let pending_msg_events = &mut peer_state.pending_msg_events;
 							if let hash_map::Entry::Occupied(chan_entry) = peer_state.channel_by_id.entry(funding_outpoint.to_channel_id()) {
 								let mut chan = remove_channel!(self, chan_entry);
 								failed_channels.push(chan.force_shutdown(false));
@@ -5160,14 +5243,12 @@ where
 		let mut failed_htlcs = Vec::new();
 		let mut handle_errors = Vec::new();
 		{
-			let mut channel_state_lock = self.channel_state.lock().unwrap();
-			let channel_state = &mut *channel_state_lock;
-			let pending_msg_events = &mut channel_state.pending_msg_events;
 			let per_peer_state = self.per_peer_state.read().unwrap();
 			for (_cp_id, peer_state_mutex) in per_peer_state.iter() {
 				let mut peer_state_lock = peer_state_mutex.lock().unwrap();
 				let peer_state = &mut *peer_state_lock;
+				let pending_msg_events = &mut peer_state.pending_msg_events;
 				peer_state.channel_by_id.retain(|channel_id, chan| {
 					match chan.maybe_free_holding_cell_htlcs(&self.logger) {
 						Ok((commitment_opt, holding_cell_failed_htlcs)) => {
@@ -5179,7 +5260,7 @@ where
 							}
 							if let Some((commitment_update, monitor_update)) = commitment_opt {
-								match self.chain_monitor.update_channel(chan.get_funding_txo().unwrap(), monitor_update) {
+								match self.chain_monitor.update_channel(chan.get_funding_txo().unwrap(), &monitor_update) {
 									ChannelMonitorUpdateStatus::Completed => {
 										pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
 											node_id: chan.get_counterparty_node_id(),
@@ -5226,14 +5307,12 @@ where
 		let mut handle_errors: Vec<(PublicKey, Result<(), _>)> = Vec::new();
 		let mut has_update = false;
 		{
-			let mut channel_state_lock = self.channel_state.lock().unwrap();
-			let channel_state = &mut *channel_state_lock;
-			let pending_msg_events = &mut channel_state.pending_msg_events;
 			let per_peer_state = self.per_peer_state.read().unwrap();
 			for (_cp_id, peer_state_mutex) in per_peer_state.iter() {
 				let mut peer_state_lock = peer_state_mutex.lock().unwrap();
 				let peer_state = &mut *peer_state_lock;
+				let pending_msg_events = &mut peer_state.pending_msg_events;
 				peer_state.channel_by_id.retain(|channel_id, chan| {
 					match chan.maybe_propose_closing_signed(&self.fee_estimator, &self.logger) {
 						Ok((msg_opt, tx_opt)) => {
@@ -5308,7 +5387,7 @@ where
 			return Err(APIError::APIMisuseError { err: format!("min_value_msat of {} greater than total 21 million bitcoin supply", min_value_msat.unwrap()) });
 		}
 
-		let payment_secret = PaymentSecret(self.keys_manager.get_secure_random_bytes());
+		let payment_secret = PaymentSecret(self.entropy_source.get_secure_random_bytes());
 		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
 		let mut payment_secrets = self.pending_inbound_payments.lock().unwrap();
@@ -5339,7 +5418,8 @@ where
 	/// [`PaymentHash`] and [`PaymentPreimage`] for you.
 	///
 	/// The [`PaymentPreimage`] will ultimately be returned to you in the [`PaymentClaimable`], which
-	/// will have the [`PaymentClaimable::payment_preimage`] field filled in. That should then be
+	/// will have the [`PaymentClaimable::purpose`] be [`PaymentPurpose::InvoicePayment`] with
+	/// its [`PaymentPurpose::InvoicePayment::payment_preimage`] field filled in. That should then be
 	/// passed directly to [`claim_funds`].
 	///
 	/// See [`create_inbound_payment_for_hash`] for detailed documentation on behavior and requirements.
 	///
 	/// Errors if `min_value_msat` is greater than total bitcoin supply.
 	///
+	/// If `min_final_cltv_expiry_delta` is set to some value, then the payment will not be receivable
+	/// on versions of LDK prior to 0.0.114.
+	///
 	/// [`claim_funds`]: Self::claim_funds
 	/// [`PaymentClaimable`]: events::Event::PaymentClaimable
-	/// [`PaymentClaimable::payment_preimage`]: events::Event::PaymentClaimable::payment_preimage
+	/// [`PaymentClaimable::purpose`]: events::Event::PaymentClaimable::purpose
+	/// [`PaymentPurpose::InvoicePayment`]: events::PaymentPurpose::InvoicePayment
+	/// [`PaymentPurpose::InvoicePayment::payment_preimage`]: events::PaymentPurpose::InvoicePayment::payment_preimage
 	/// [`create_inbound_payment_for_hash`]: Self::create_inbound_payment_for_hash
-	pub fn create_inbound_payment(&self, min_value_msat: Option<u64>, invoice_expiry_delta_secs: u32) -> Result<(PaymentHash, PaymentSecret), ()> {
-		inbound_payment::create(&self.inbound_payment_key, min_value_msat, invoice_expiry_delta_secs, &self.keys_manager, self.highest_seen_timestamp.load(Ordering::Acquire) as u64)
+	pub fn create_inbound_payment(&self, min_value_msat: Option<u64>, invoice_expiry_delta_secs: u32,
+		min_final_cltv_expiry_delta: Option<u16>) -> Result<(PaymentHash, PaymentSecret), ()> {
+		inbound_payment::create(&self.inbound_payment_key, min_value_msat, invoice_expiry_delta_secs,
+			&self.entropy_source, self.highest_seen_timestamp.load(Ordering::Acquire) as u64,
+			min_final_cltv_expiry_delta)
 	}
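// For callers, the upshot of the new `min_final_cltv_expiry_delta` parameter
// looks roughly like the following (a sketch; `channel_manager` is an assumed
// in-scope ChannelManager and the values are illustrative):
let (payment_hash, payment_secret) = channel_manager
	.create_inbound_payment(
		Some(10_000), // min_value_msat the payer must send
		3600,         // invoice_expiry_delta_secs: valid for one hour
		None,         // min_final_cltv_expiry_delta: None keeps pre-0.0.114 compatibility
	)
	.expect("10_000 msat is comfortably below the 21 million BTC cap");
// payment_hash and payment_secret then go into the BOLT 11 invoice handed to
// the payer; the preimage is later recovered from the PaymentClaimable
// event's PaymentPurpose::InvoicePayment.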
 
 	/// Legacy version of [`create_inbound_payment`]. Use this method if you wish to share
@@ -5373,7 +5461,7 @@ where
 	/// [`create_inbound_payment`]: Self::create_inbound_payment
 	#[deprecated]
 	pub fn create_inbound_payment_legacy(&self, min_value_msat: Option<u64>, invoice_expiry_delta_secs: u32) -> Result<(PaymentHash, PaymentSecret), APIError> {
-		let payment_preimage = PaymentPreimage(self.keys_manager.get_secure_random_bytes());
+		let payment_preimage = PaymentPreimage(self.entropy_source.get_secure_random_bytes());
 		let payment_hash = PaymentHash(Sha256::hash(&payment_preimage.0).into_inner());
 		let payment_secret = self.set_payment_hash_secret_map(payment_hash, Some(payment_preimage), min_value_msat, invoice_expiry_delta_secs)?;
 		Ok((payment_hash, payment_secret))
@@ -5407,8 +5495,8 @@ where
 	/// If you need exact expiry semantics, you should enforce them upon receipt of
 	/// [`PaymentClaimable`].
 	///
-	/// Note that invoices generated for inbound payments should have their `min_final_cltv_expiry`
-	/// set to at least [`MIN_FINAL_CLTV_EXPIRY`].
+	/// Note that invoices generated for inbound payments should have their `min_final_cltv_expiry_delta`
+	/// set to at least [`MIN_FINAL_CLTV_EXPIRY_DELTA`].
 	///
 	/// Note that a malicious eavesdropper can intuit whether an inbound payment was created by
 	/// `create_inbound_payment` or `create_inbound_payment_for_hash` based on runtime.
 	///
 	/// Errors if `min_value_msat` is greater than total bitcoin supply.
 	///
+	/// If `min_final_cltv_expiry_delta` is set to some value, then the payment will not be receivable
+	/// on versions of LDK prior to 0.0.114.
+	///
 	/// [`create_inbound_payment`]: Self::create_inbound_payment
 	/// [`PaymentClaimable`]: events::Event::PaymentClaimable
-	pub fn create_inbound_payment_for_hash(&self, payment_hash: PaymentHash, min_value_msat: Option<u64>, invoice_expiry_delta_secs: u32) -> Result<PaymentSecret, ()> {
-		inbound_payment::create_from_hash(&self.inbound_payment_key, min_value_msat, payment_hash, invoice_expiry_delta_secs, self.highest_seen_timestamp.load(Ordering::Acquire) as u64)
+	pub fn create_inbound_payment_for_hash(&self, payment_hash: PaymentHash, min_value_msat: Option<u64>,
+		invoice_expiry_delta_secs: u32, min_final_cltv_expiry: Option<u16>) -> Result<PaymentSecret, ()> {
+		inbound_payment::create_from_hash(&self.inbound_payment_key, min_value_msat, payment_hash,
+			invoice_expiry_delta_secs, self.highest_seen_timestamp.load(Ordering::Acquire) as u64,
+			min_final_cltv_expiry)
 	}
 
 	/// Legacy version of [`create_inbound_payment_for_hash`]. Use this method if you wish to share
@@ -5456,7 +5550,7 @@ where
 		let best_block_height = self.best_block.read().unwrap().height();
 		let short_to_chan_info = self.short_to_chan_info.read().unwrap();
 		loop {
-			let scid_candidate = fake_scid::Namespace::Phantom.get_fake_scid(best_block_height, &self.genesis_hash, &self.fake_scid_rand_bytes, &self.keys_manager);
+			let scid_candidate = fake_scid::Namespace::Phantom.get_fake_scid(best_block_height, &self.genesis_hash, &self.fake_scid_rand_bytes, &self.entropy_source);
 			// Ensure the generated scid doesn't conflict with a real channel.
 			match short_to_chan_info.get(&scid_candidate) {
 				Some(_) => continue,
@@ -5486,7 +5580,7 @@ where
 		let best_block_height = self.best_block.read().unwrap().height();
 		let short_to_chan_info = self.short_to_chan_info.read().unwrap();
 		loop {
-			let scid_candidate = fake_scid::Namespace::Intercept.get_fake_scid(best_block_height, &self.genesis_hash, &self.fake_scid_rand_bytes, &self.keys_manager);
+			let scid_candidate = fake_scid::Namespace::Intercept.get_fake_scid(best_block_height, &self.genesis_hash, &self.fake_scid_rand_bytes, &self.entropy_source);
 			// Ensure the generated scid doesn't conflict with a real channel.
 			if short_to_chan_info.contains_key(&scid_candidate) { continue }
 			return scid_candidate
@@ -5522,6 +5616,12 @@ where
 		events.into_inner()
 	}
 
+	#[cfg(feature = "_test_utils")]
+	pub fn push_pending_event(&self, event: events::Event) {
+		let mut events = self.pending_events.lock().unwrap();
+		events.push(event);
+	}
+
 	#[cfg(test)]
 	pub fn pop_pending_event(&self) -> Option<events::Event> {
 		let mut events = self.pending_events.lock().unwrap();
@@ -5572,15 +5672,30 @@ where
 	}
 }
 
-impl<M: Deref, T: Deref, K: Deref, F: Deref, R: Deref, L: Deref> MessageSendEventsProvider for ChannelManager<M, T, K, F, R, L>
+impl<M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Deref, F: Deref, R: Deref, L: Deref> MessageSendEventsProvider for ChannelManager<M, T, ES, NS, SP, F, R, L>
 where
-	M::Target: chain::Watch<<K::Target as KeysInterface>::Signer>,
+	M::Target: chain::Watch<<SP::Target as SignerProvider>::Signer>,
 	T::Target: BroadcasterInterface,
-	K::Target: KeysInterface,
+	ES::Target: EntropySource,
+	NS::Target: NodeSigner,
+	SP::Target: SignerProvider,
 	F::Target: FeeEstimator,
 	R::Target: Router,
 	L::Target: Logger,
 {
+	/// Returns `MessageSendEvent`s strictly ordered per-peer, in the order they were generated.
+	/// The returned array will contain `MessageSendEvent`s for different peers if
+	/// `MessageSendEvent`s to more than one peer exist, but `MessageSendEvent`s to the same peer
+	/// are always placed next to each other.
+	///
+	/// Note that while `MessageSendEvent`s are strictly ordered per-peer, the peer order for
+	/// the chunks of `MessageSendEvent`s for different peers is random. I.e. if the array contains
+	/// `MessageSendEvent`s for both `node_a` and `node_b`, the `MessageSendEvent`s for `node_a`
+	/// will randomly be placed first or last in the returned array.
+	///
+	/// Note that even though `BroadcastChannelAnnouncement` and `BroadcastChannelUpdate`
+	/// `MessageSendEvent`s are intended to be broadcast to all peers, they will be placed among
+	/// the `MessageSendEvent`s to the specific peer they were generated under.
 	fn get_and_clear_pending_msg_events(&self) -> Vec<MessageSendEvent> {
 		let events = RefCell::new(Vec::new());
 		PersistenceNotifierGuard::optionally_notify(&self.total_consistency_lock, &self.persistence_notifier, || {
@@ -5600,8 +5715,16 @@ where
 			}
 
 			let mut pending_events = Vec::new();
-			let mut channel_state = self.channel_state.lock().unwrap();
-			mem::swap(&mut pending_events, &mut channel_state.pending_msg_events);
+			let per_peer_state = self.per_peer_state.read().unwrap();
+			for (_cp_id, peer_state_mutex) in per_peer_state.iter() {
+				let mut peer_state_lock = peer_state_mutex.lock().unwrap();
+				let peer_state = &mut *peer_state_lock;
+				if peer_state.pending_msg_events.len() > 0 {
+					let mut peer_pending_events = Vec::new();
+					mem::swap(&mut peer_pending_events, &mut peer_state.pending_msg_events);
+					pending_events.append(&mut peer_pending_events);
+				}
+			}
 
 			if !pending_events.is_empty() {
 				events.replace(pending_events);
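// Because of the per-peer contiguity guarantee documented above, an event
// consumer can flush per-peer batches in a single linear scan. A generic
// sketch (the `dest_of` accessor is hypothetical; the real MessageSendEvent
// variants carry a `node_id` field, except the `Broadcast*` ones, which map
// to `None` here):
fn flush_per_peer_batches<E, K: PartialEq + Copy>(
	events: Vec<E>, dest_of: impl Fn(&E) -> Option<K>, mut deliver: impl FnMut(Option<K>, Vec<E>),
) {
	let mut batch: Vec<E> = Vec::new();
	let mut current: Option<K> = None;
	for ev in events {
		let dest = dest_of(&ev);
		if dest != current && !batch.is_empty() {
			// Destination changed: hand off everything queued for the previous peer.
			deliver(current, core::mem::take(&mut batch));
		}
		current = dest;
		batch.push(ev);
	}
	if !batch.is_empty() {
		deliver(current, batch);
	}
}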
@@ -5613,11 +5736,13 @@ where
 	}
 }
 
-impl<M: Deref, T: Deref, K: Deref, F: Deref, R: Deref, L: Deref> EventsProvider for ChannelManager<M, T, K, F, R, L>
+impl<M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Deref, F: Deref, R: Deref, L: Deref> EventsProvider for ChannelManager<M, T, ES, NS, SP, F, R, L>
 where
-	M::Target: chain::Watch<<K::Target as KeysInterface>::Signer>,
+	M::Target: chain::Watch<<SP::Target as SignerProvider>::Signer>,
 	T::Target: BroadcasterInterface,
-	K::Target: KeysInterface,
+	ES::Target: EntropySource,
+	NS::Target: NodeSigner,
+	SP::Target: SignerProvider,
 	F::Target: FeeEstimator,
 	R::Target: Router,
 	L::Target: Logger,
@@ -5650,11 +5775,13 @@ where
 	}
 }
 
-impl<M: Deref, T: Deref, K: Deref, F: Deref, R: Deref, L: Deref> chain::Listen for ChannelManager<M, T, K, F, R, L>
+impl<M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Deref, F: Deref, R: Deref, L: Deref> chain::Listen for ChannelManager<M, T, ES, NS, SP, F, R, L>
 where
-	M::Target: chain::Watch<<K::Target as KeysInterface>::Signer>,
+	M::Target: chain::Watch<<SP::Target as SignerProvider>::Signer>,
 	T::Target: BroadcasterInterface,
-	K::Target: KeysInterface,
+	ES::Target: EntropySource,
+	NS::Target: NodeSigner,
+	SP::Target: SignerProvider,
 	F::Target: FeeEstimator,
 	R::Target: Router,
 	L::Target: Logger,
@@ -5684,15 +5811,17 @@ where
 			*best_block = BestBlock::new(header.prev_blockhash, new_height)
 		}
 
-		self.do_chain_event(Some(new_height), |channel| channel.best_block_updated(new_height, header.time, self.genesis_hash.clone(), self.get_our_node_id(), &self.logger));
+		self.do_chain_event(Some(new_height), |channel| channel.best_block_updated(new_height, header.time, self.genesis_hash.clone(), &self.node_signer, &self.default_configuration, &self.logger));
 	}
 }
 
-impl<M: Deref, T: Deref, K: Deref, F: Deref, R: Deref, L: Deref> chain::Confirm for ChannelManager<M, T, K, F, R, L>
+impl<M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Deref, F: Deref, R: Deref, L: Deref> chain::Confirm for ChannelManager<M, T, ES, NS, SP, F, R, L>
 where
-	M::Target: chain::Watch<<K::Target as KeysInterface>::Signer>,
+	M::Target: chain::Watch<<SP::Target as SignerProvider>::Signer>,
 	T::Target: BroadcasterInterface,
-	K::Target: KeysInterface,
+	ES::Target: EntropySource,
+	NS::Target: NodeSigner,
+	SP::Target: SignerProvider,
 	F::Target: FeeEstimator,
 	R::Target: Router,
 	L::Target: Logger,
@@ -5706,13 +5835,13 @@ where
 		log_trace!(self.logger, "{} transactions included in block {} at height {} provided", txdata.len(), block_hash, height);
 
 		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
-		self.do_chain_event(Some(height), |channel| channel.transactions_confirmed(&block_hash, height, txdata, self.genesis_hash.clone(), self.get_our_node_id(), &self.logger)
+		self.do_chain_event(Some(height), |channel| channel.transactions_confirmed(&block_hash, height, txdata, self.genesis_hash.clone(), &self.node_signer, &self.default_configuration, &self.logger)
 			.map(|(a, b)| (a, Vec::new(), b)));
 
 		let last_best_block_height = self.best_block.read().unwrap().height();
 		if height < last_best_block_height {
 			let timestamp = self.highest_seen_timestamp.load(Ordering::Acquire);
-			self.do_chain_event(Some(last_best_block_height), |channel| channel.best_block_updated(last_best_block_height, timestamp as u32, self.genesis_hash.clone(), self.get_our_node_id(), &self.logger));
+			self.do_chain_event(Some(last_best_block_height), |channel| channel.best_block_updated(last_best_block_height, timestamp as u32, self.genesis_hash.clone(), &self.node_signer, &self.default_configuration, &self.logger));
 		}
 	}
 
@@ -5728,7 +5857,7 @@ where
 
 		*self.best_block.write().unwrap() = BestBlock::new(block_hash, height);
 
-		self.do_chain_event(Some(height), |channel| channel.best_block_updated(height, header.time, self.genesis_hash.clone(), self.get_our_node_id(), &self.logger));
+		self.do_chain_event(Some(height), |channel| channel.best_block_updated(height, header.time, self.genesis_hash.clone(), &self.node_signer, &self.default_configuration, &self.logger));
 
 		macro_rules! max_time {
 			($timestamp: expr) => {
@@ -5759,8 +5888,8 @@ where
 			let mut peer_state_lock = peer_state_mutex.lock().unwrap();
 			let peer_state = &mut *peer_state_lock;
 			for chan in peer_state.channel_by_id.values() {
-				if let (Some(funding_txo), block_hash) = (chan.get_funding_txo(), chan.get_funding_tx_confirmed_in()) {
-					res.push((funding_txo.txid, block_hash));
+				if let (Some(funding_txo), Some(block_hash)) = (chan.get_funding_txo(), chan.get_funding_tx_confirmed_in()) {
+					res.push((funding_txo.txid, Some(block_hash)));
 				}
 			}
 		}
@@ -5779,11 +5908,13 @@ where
 	}
 }
 
-impl<M: Deref, T: Deref, K: Deref, F: Deref, R: Deref, L: Deref> ChannelManager<M, T, K, F, R, L>
+impl<M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Deref, F: Deref, R: Deref, L: Deref> ChannelManager<M, T, ES, NS, SP, F, R, L>
 where
-	M::Target: chain::Watch<<K::Target as KeysInterface>::Signer>,
+	M::Target: chain::Watch<<SP::Target as SignerProvider>::Signer>,
 	T::Target: BroadcasterInterface,
-	K::Target: KeysInterface,
+	ES::Target: EntropySource,
+	NS::Target: NodeSigner,
+	SP::Target: SignerProvider,
 	F::Target: FeeEstimator,
 	R::Target: Router,
 	L::Target: Logger,
@@ -5791,7 +5922,7 @@ where
 	/// Calls a function which handles an on-chain event (blocks dis/connected, transactions
 	/// un/confirmed, etc) on each channel, handling any resulting errors or messages generated by
 	/// the function.
-	fn do_chain_event<FN: Fn(&mut Channel<<K::Target as KeysInterface>::Signer>) -> Result<(Option<msgs::ChannelReady>, Vec<(HTLCSource, PaymentHash)>, Option<msgs::AnnouncementSignatures>), ClosureReason>>
+	fn do_chain_event<FN: Fn(&mut Channel<<SP::Target as SignerProvider>::Signer>) -> Result<(Option<msgs::ChannelReady>, Vec<(HTLCSource, PaymentHash)>, Option<msgs::AnnouncementSignatures>), ClosureReason>>
 			(&self, height_opt: Option<u32>, f: FN) {
 		// Note that we MUST NOT end up calling methods on self.chain_monitor here - we're called
 		// during initialization prior to the chain_monitor being fully configured in some cases.
@@ -5800,13 +5931,11 @@ where
 		let mut failed_channels = Vec::new();
 		let mut timed_out_htlcs = Vec::new();
 		{
-			let mut channel_lock = self.channel_state.lock().unwrap();
-			let channel_state = &mut *channel_lock;
-			let pending_msg_events = &mut channel_state.pending_msg_events;
 			let per_peer_state = self.per_peer_state.read().unwrap();
 			for (_cp_id, peer_state_mutex) in per_peer_state.iter() {
 				let mut peer_state_lock = peer_state_mutex.lock().unwrap();
 				let peer_state = &mut *peer_state_lock;
+				let pending_msg_events = &mut peer_state.pending_msg_events;
 				peer_state.channel_by_id.retain(|_, channel| {
 					let res = f(channel);
 					if let Ok((channel_ready_opt, mut timed_out_pending_htlcs, announcement_sigs)) = res {
@@ -5839,12 +5968,12 @@ where
 								msg: announcement_sigs,
 							});
 							if let Some(height) = height_opt {
-								if let Some(announcement) = channel.get_signed_channel_announcement(self.get_our_node_id(), self.genesis_hash, height) {
+								if let Some(announcement) = channel.get_signed_channel_announcement(&self.node_signer, self.genesis_hash, height, &self.default_configuration) {
 									pending_msg_events.push(events::MessageSendEvent::BroadcastChannelAnnouncement {
 										msg: announcement,
 										// Note that announcement_signatures fails if the channel cannot be announced,
 										// so get_channel_update_for_broadcast will never fail by the time we get here.
-										update_msg: self.get_channel_update_for_broadcast(channel).unwrap(),
+										update_msg: Some(self.get_channel_update_for_broadcast(channel).unwrap()),
 									});
 								}
 							}
@@ -5983,26 +6112,62 @@ where
 	pub fn current_best_block(&self) -> BestBlock {
 		self.best_block.read().unwrap().clone()
 	}
+
+	/// Fetches the set of [`NodeFeatures`] flags which are provided by or required by
+	/// [`ChannelManager`].
+	pub fn node_features(&self) -> NodeFeatures {
+		provided_node_features(&self.default_configuration)
+	}
+
+	/// Fetches the set of [`InvoiceFeatures`] flags which are provided by or required by
+	/// [`ChannelManager`].
+	///
+	/// Note that the invoice feature flags can vary depending on whether the invoice is a "phantom invoice"
+	/// or not. Thus, this method is not public.
+	#[cfg(any(feature = "_test_utils", test))]
+	pub fn invoice_features(&self) -> InvoiceFeatures {
+		provided_invoice_features(&self.default_configuration)
+	}
+
+	/// Fetches the set of [`ChannelFeatures`] flags which are provided by or required by
+	/// [`ChannelManager`].
+	pub fn channel_features(&self) -> ChannelFeatures {
+		provided_channel_features(&self.default_configuration)
+	}
+
+	/// Fetches the set of [`ChannelTypeFeatures`] flags which are provided by or required by
+	/// [`ChannelManager`].
+	pub fn channel_type_features(&self) -> ChannelTypeFeatures {
+		provided_channel_type_features(&self.default_configuration)
+	}
+
+	/// Fetches the set of [`InitFeatures`] flags which are provided by or required by
+	/// [`ChannelManager`].
+	pub fn init_features(&self) -> InitFeatures {
+		provided_init_features(&self.default_configuration)
+	}
 }
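// These accessors wrap the crate's provided_* helpers, deriving all the
// feature sets from the manager's own `default_configuration`. Illustrative
// use, again assuming an in-scope `channel_manager` (a sketch, not code from
// this file):
let node_features = channel_manager.node_features();
let channel_type_features = channel_manager.channel_type_features();
let init_features = channel_manager.init_features();
// e.g. init_features is what a node advertises in the `init` handshake when
// a peer connects, and node_features in its node_announcement gossip.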
We believe we {} make future connections to this peer.", log_pubkey!(counterparty_node_id), if no_connection_possible { "cannot" } else { "can" }); if let Some(peer_state_mutex) = per_peer_state.get(counterparty_node_id) { let mut peer_state_lock = peer_state_mutex.lock().unwrap(); let peer_state = &mut *peer_state_lock; + let pending_msg_events = &mut peer_state.pending_msg_events; peer_state.channel_by_id.retain(|_, chan| { - if chan.get_counterparty_node_id() == *counterparty_node_id { - chan.remove_uncommitted_htlcs_and_mark_paused(&self.logger); - if chan.is_shutdown() { - update_maps_on_chan_removal!(self, chan); - self.issue_channel_close_events(chan, ClosureReason::DisconnectedPeer); - return false; - } else { - no_channels_remain = false; - } + chan.remove_uncommitted_htlcs_and_mark_paused(&self.logger); + if chan.is_shutdown() { + update_maps_on_chan_removal!(self, chan); + self.issue_channel_close_events(chan, ClosureReason::DisconnectedPeer); + return false; + } else { + no_channels_remain = false; } true }); + pending_msg_events.retain(|msg| { + match msg { + &events::MessageSendEvent::SendAcceptChannel { .. } => false, + &events::MessageSendEvent::SendOpenChannel { .. } => false, + &events::MessageSendEvent::SendFundingCreated { .. } => false, + &events::MessageSendEvent::SendFundingSigned { .. } => false, + &events::MessageSendEvent::SendChannelReady { .. } => false, + &events::MessageSendEvent::SendAnnouncementSignatures { .. } => false, + &events::MessageSendEvent::UpdateHTLCs { .. } => false, + &events::MessageSendEvent::SendRevokeAndACK { .. } => false, + &events::MessageSendEvent::SendClosingSigned { .. } => false, + &events::MessageSendEvent::SendShutdown { .. } => false, + &events::MessageSendEvent::SendChannelReestablish { .. } => false, + &events::MessageSendEvent::SendChannelAnnouncement { .. } => false, + &events::MessageSendEvent::BroadcastChannelAnnouncement { .. } => true, + &events::MessageSendEvent::BroadcastChannelUpdate { .. } => true, + &events::MessageSendEvent::BroadcastNodeAnnouncement { .. } => true, + &events::MessageSendEvent::SendChannelUpdate { .. } => false, + &events::MessageSendEvent::HandleError { .. } => false, + &events::MessageSendEvent::SendChannelRangeQuery { .. } => false, + &events::MessageSendEvent::SendShortIdsQuery { .. } => false, + &events::MessageSendEvent::SendReplyChannelRange { .. } => false, + &events::MessageSendEvent::SendGossipTimestampFilter { .. } => false, + } + }); } - pending_msg_events.retain(|msg| { - match msg { - &events::MessageSendEvent::SendAcceptChannel { ref node_id, .. } => node_id != counterparty_node_id, - &events::MessageSendEvent::SendOpenChannel { ref node_id, .. } => node_id != counterparty_node_id, - &events::MessageSendEvent::SendFundingCreated { ref node_id, .. } => node_id != counterparty_node_id, - &events::MessageSendEvent::SendFundingSigned { ref node_id, .. } => node_id != counterparty_node_id, - &events::MessageSendEvent::SendChannelReady { ref node_id, .. } => node_id != counterparty_node_id, - &events::MessageSendEvent::SendAnnouncementSignatures { ref node_id, .. } => node_id != counterparty_node_id, - &events::MessageSendEvent::UpdateHTLCs { ref node_id, .. } => node_id != counterparty_node_id, - &events::MessageSendEvent::SendRevokeAndACK { ref node_id, .. } => node_id != counterparty_node_id, - &events::MessageSendEvent::SendClosingSigned { ref node_id, .. } => node_id != counterparty_node_id, - &events::MessageSendEvent::SendShutdown { ref node_id, .. 
} => node_id != counterparty_node_id, - &events::MessageSendEvent::SendChannelReestablish { ref node_id, .. } => node_id != counterparty_node_id, - &events::MessageSendEvent::SendChannelAnnouncement { ref node_id, .. } => node_id != counterparty_node_id, - &events::MessageSendEvent::BroadcastChannelAnnouncement { .. } => true, - &events::MessageSendEvent::BroadcastChannelUpdate { .. } => true, - &events::MessageSendEvent::SendChannelUpdate { ref node_id, .. } => node_id != counterparty_node_id, - &events::MessageSendEvent::HandleError { ref node_id, .. } => node_id != counterparty_node_id, - &events::MessageSendEvent::SendChannelRangeQuery { .. } => false, - &events::MessageSendEvent::SendShortIdsQuery { .. } => false, - &events::MessageSendEvent::SendReplyChannelRange { .. } => false, - &events::MessageSendEvent::SendGossipTimestampFilter { .. } => false, - } - }); - mem::drop(channel_state); } if no_channels_remain { per_peer_state.remove(counterparty_node_id); @@ -6165,6 +6327,7 @@ where e.insert(Mutex::new(PeerState { channel_by_id: HashMap::new(), latest_features: init_msg.features.clone(), + pending_msg_events: Vec::new(), })); }, hash_map::Entry::Occupied(e) => { @@ -6173,14 +6336,12 @@ where } } - let mut channel_state_lock = self.channel_state.lock().unwrap(); - let channel_state = &mut *channel_state_lock; - let pending_msg_events = &mut channel_state.pending_msg_events; let per_peer_state = self.per_peer_state.read().unwrap(); for (_cp_id, peer_state_mutex) in per_peer_state.iter() { let mut peer_state_lock = peer_state_mutex.lock().unwrap(); let peer_state = &mut *peer_state_lock; + let pending_msg_events = &mut peer_state.pending_msg_events; peer_state.channel_by_id.retain(|_, chan| { let retain = if chan.get_counterparty_node_id() == *counterparty_node_id { if !chan.have_received_message() { @@ -6198,7 +6359,7 @@ where } } else { true }; if retain && chan.get_counterparty_node_id() != *counterparty_node_id { - if let Some(msg) = chan.get_signed_channel_announcement(self.get_our_node_id(), self.genesis_hash.clone(), self.best_block.read().unwrap().height()) { + if let Some(msg) = chan.get_signed_channel_announcement(&self.node_signer, self.genesis_hash.clone(), self.best_block.read().unwrap().height(), &self.default_configuration) { if let Ok(update_msg) = self.get_channel_update_for_broadcast(chan) { pending_msg_events.push(events::MessageSendEvent::SendChannelAnnouncement { node_id: *counterparty_node_id, @@ -6218,33 +6379,35 @@ where let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier); if msg.channel_id == [0; 32] { - for chan in self.list_channels() { - if chan.counterparty.node_id == *counterparty_node_id { - // Untrusted messages from peer, we throw away the error if id points to a non-existent channel - let _ = self.force_close_channel_with_peer(&chan.channel_id, counterparty_node_id, Some(&msg.data), true); - } + let channel_ids: Vec<[u8; 32]> = { + let per_peer_state = self.per_peer_state.read().unwrap(); + let peer_state_mutex_opt = per_peer_state.get(counterparty_node_id); + if let None = peer_state_mutex_opt { return; } + let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap(); + let peer_state = &mut *peer_state_lock; + peer_state.channel_by_id.keys().cloned().collect() + }; + for channel_id in channel_ids { + // Untrusted messages from peer, we throw away the error if id points to a non-existent channel + let _ = self.force_close_channel_with_peer(&channel_id, 
counterparty_node_id, Some(&msg.data), true); } } else { { // First check if we can advance the channel type and try again. - let mut channel_state = self.channel_state.lock().unwrap(); let per_peer_state = self.per_peer_state.read().unwrap(); - if let Some(peer_state_mutex) = per_peer_state.get(counterparty_node_id) { - let mut peer_state_lock = peer_state_mutex.lock().unwrap(); - let peer_state = &mut *peer_state_lock; - if let Some(chan) = peer_state.channel_by_id.get_mut(&msg.channel_id) { - if chan.get_counterparty_node_id() != *counterparty_node_id { - return; - } - if let Ok(msg) = chan.maybe_handle_error_without_close(self.genesis_hash) { - channel_state.pending_msg_events.push(events::MessageSendEvent::SendOpenChannel { - node_id: *counterparty_node_id, - msg, - }); - return; - } + let peer_state_mutex_opt = per_peer_state.get(counterparty_node_id); + if let None = peer_state_mutex_opt { return; } + let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap(); + let peer_state = &mut *peer_state_lock; + if let Some(chan) = peer_state.channel_by_id.get_mut(&msg.channel_id) { + if let Ok(msg) = chan.maybe_handle_error_without_close(self.genesis_hash) { + peer_state.pending_msg_events.push(events::MessageSendEvent::SendOpenChannel { + node_id: *counterparty_node_id, + msg, + }); + return; } - } else { return; } + } } // Untrusted messages from peer, we throw away the error if id points to a non-existent channel @@ -6253,18 +6416,18 @@ where } fn provided_node_features(&self) -> NodeFeatures { - provided_node_features() + provided_node_features(&self.default_configuration) } fn provided_init_features(&self, _their_init_features: &PublicKey) -> InitFeatures { - provided_init_features() + provided_init_features(&self.default_configuration) } } /// Fetches the set of [`NodeFeatures`] flags which are provided by or required by /// [`ChannelManager`]. -pub fn provided_node_features() -> NodeFeatures { - provided_init_features().to_context() +pub(crate) fn provided_node_features(config: &UserConfig) -> NodeFeatures { + provided_init_features(config).to_context() } /// Fetches the set of [`InvoiceFeatures`] flags which are provided by or required by @@ -6273,19 +6436,25 @@ pub fn provided_node_features() -> NodeFeatures { /// Note that the invoice feature flags can vary depending on if the invoice is a "phantom invoice" /// or not. Thus, this method is not public. #[cfg(any(feature = "_test_utils", test))] -pub fn provided_invoice_features() -> InvoiceFeatures { - provided_init_features().to_context() +pub(crate) fn provided_invoice_features(config: &UserConfig) -> InvoiceFeatures { + provided_init_features(config).to_context() } /// Fetches the set of [`ChannelFeatures`] flags which are provided by or required by /// [`ChannelManager`]. -pub fn provided_channel_features() -> ChannelFeatures { - provided_init_features().to_context() +pub(crate) fn provided_channel_features(config: &UserConfig) -> ChannelFeatures { + provided_init_features(config).to_context() +} + +/// Fetches the set of [`ChannelTypeFeatures`] flags which are provided by or required by +/// [`ChannelManager`]. +pub(crate) fn provided_channel_type_features(config: &UserConfig) -> ChannelTypeFeatures { + ChannelTypeFeatures::from_init(&provided_init_features(config)) } /// Fetches the set of [`InitFeatures`] flags which are provided by or required by /// [`ChannelManager`]. 
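Because `provided_init_features` now receives the node's `UserConfig` (see the function that follows), an optional feature bit can be gated on a handshake setting. A std-only sketch of that gating pattern; the struct name, helper name, and baseline handling here are illustrative stand-ins rather than LDK's real API:

	struct HandshakeConfig { negotiate_anchors_zero_fee_htlc_tx: bool }

	fn sketch_provided_features(cfg: &HandshakeConfig) -> u64 {
		let mut bits = 0u64; // the always-on defaults would be set here
		if cfg.negotiate_anchors_zero_fee_htlc_tx {
			bits |= 1 << 23; // option_anchors_zero_fee_htlc_tx optional bit, per BOLT 9
		}
		bits
	}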
-pub fn provided_init_features() -> InitFeatures { +pub fn provided_init_features(_config: &UserConfig) -> InitFeatures { // Note that if new features are added here which other peers may (eventually) require, we // should also add the corresponding (optional) bit to the ChannelMessageHandler impl for // ErroringMessageHandler. @@ -6301,6 +6470,12 @@ pub fn provided_init_features() -> InitFeatures { features.set_channel_type_optional(); features.set_scid_privacy_optional(); features.set_zero_conf_optional(); + #[cfg(anchors)] + { // Attributes are not allowed on if expressions on our current MSRV of 1.41. + if _config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx { + features.set_anchors_zero_fee_htlc_tx_optional(); + } + } features } @@ -6363,7 +6538,7 @@ impl Writeable for ChannelDetails { impl Readable for ChannelDetails { fn read(reader: &mut R) -> Result { - init_and_read_tlv_fields!(reader, { + _init_and_read_tlv_fields!(reader, { (1, inbound_scid_alias, option), (2, channel_id, required), (3, channel_type, option), @@ -6692,11 +6867,13 @@ impl_writeable_tlv_based!(PendingInboundPayment, { (8, min_value_msat, required), }); -impl Writeable for ChannelManager +impl Writeable for ChannelManager where - M::Target: chain::Watch<::Signer>, + M::Target: chain::Watch<::Signer>, T::Target: BroadcasterInterface, - K::Target: KeysInterface, + ES::Target: EntropySource, + NS::Target: NodeSigner, + SP::Target: SignerProvider, F::Target: FeeEstimator, R::Target: Router, L::Target: Logger, @@ -6753,6 +6930,8 @@ where } } + let per_peer_state = self.per_peer_state.write().unwrap(); + let pending_inbound_payments = self.pending_inbound_payments.lock().unwrap(); let claimable_payments = self.claimable_payments.lock().unwrap(); let pending_outbound_payments = self.pending_outbound_payments.pending_outbound_payments.lock().unwrap(); @@ -6768,7 +6947,6 @@ where htlc_purposes.push(purpose); } - let per_peer_state = self.per_peer_state.write().unwrap(); (per_peer_state.len() as u64).write(writer)?; for (peer_pubkey, peer_state_mutex) in per_peer_state.iter() { peer_pubkey.write(writer)?; @@ -6901,19 +7079,27 @@ where /// which you've already broadcasted the transaction. /// /// [`ChainMonitor`]: crate::chain::chainmonitor::ChainMonitor -pub struct ChannelManagerReadArgs<'a, M: Deref, T: Deref, K: Deref, F: Deref, R: Deref, L: Deref> +pub struct ChannelManagerReadArgs<'a, M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Deref, F: Deref, R: Deref, L: Deref> where - M::Target: chain::Watch<::Signer>, + M::Target: chain::Watch<::Signer>, T::Target: BroadcasterInterface, - K::Target: KeysInterface, + ES::Target: EntropySource, + NS::Target: NodeSigner, + SP::Target: SignerProvider, F::Target: FeeEstimator, R::Target: Router, L::Target: Logger, { + /// A cryptographically secure source of entropy. + pub entropy_source: ES, + + /// A signer that is able to perform node-scoped cryptographic operations. + pub node_signer: NS, + /// The keys provider which will give us relevant keys. Some keys will be loaded during /// deserialization and KeysInterface::read_chan_signer will be used to read per-Channel /// signing data. - pub keys_manager: K, + pub signer_provider: SP, /// The fee_estimator for use in the ChannelManager in the future. /// @@ -6954,15 +7140,17 @@ where /// this struct. 
///
/// (C-not exported) because we have no HashMap bindings
- pub channel_monitors: HashMap::Signer>>,
+ pub channel_monitors: HashMap::Signer>>,
}
-impl<'a, M: Deref, T: Deref, K: Deref, F: Deref, R: Deref, L: Deref>
- ChannelManagerReadArgs<'a, M, T, K, F, R, L>
+impl<'a, M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Deref, F: Deref, R: Deref, L: Deref>
+ ChannelManagerReadArgs<'a, M, T, ES, NS, SP, F, R, L>
where
- M::Target: chain::Watch<::Signer>,
+ M::Target: chain::Watch<::Signer>,
T::Target: BroadcasterInterface,
- K::Target: KeysInterface,
+ ES::Target: EntropySource,
+ NS::Target: NodeSigner,
+ SP::Target: SignerProvider,
F::Target: FeeEstimator,
R::Target: Router,
L::Target: Logger,
@@ -6970,10 +7158,10 @@ where
/// Simple utility function to create a ChannelManagerReadArgs which creates the monitor
/// HashMap for you. This is primarily useful for C bindings where it is not practical to
/// populate a HashMap directly from C.
- pub fn new(keys_manager: K, fee_estimator: F, chain_monitor: M, tx_broadcaster: T, router: R, logger: L, default_config: UserConfig,
- mut channel_monitors: Vec<&'a mut ChannelMonitor<::Signer>>) -> Self {
+ pub fn new(entropy_source: ES, node_signer: NS, signer_provider: SP, fee_estimator: F, chain_monitor: M, tx_broadcaster: T, router: R, logger: L, default_config: UserConfig,
+ mut channel_monitors: Vec<&'a mut ChannelMonitor<::Signer>>) -> Self {
Self {
- keys_manager, fee_estimator, chain_monitor, tx_broadcaster, router, logger, default_config,
+ entropy_source, node_signer, signer_provider, fee_estimator, chain_monitor, tx_broadcaster, router, logger, default_config,
channel_monitors: channel_monitors.drain(..).map(|monitor| { (monitor.get_funding_txo().0, monitor) }).collect()
}
}
@@ -6981,33 +7169,37 @@ where
// Implement ReadableArgs for an Arc'd ChannelManager to make it a bit easier to work with the
// SimpleArcChannelManager type:
-impl<'a, M: Deref, T: Deref, K: Deref, F: Deref, R: Deref, L: Deref>
- ReadableArgs> for (BlockHash, Arc>)
+impl<'a, M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Deref, F: Deref, R: Deref, L: Deref>
+ ReadableArgs> for (BlockHash, Arc>)
where
- M::Target: chain::Watch<::Signer>,
+ M::Target: chain::Watch<::Signer>,
T::Target: BroadcasterInterface,
- K::Target: KeysInterface,
+ ES::Target: EntropySource,
+ NS::Target: NodeSigner,
+ SP::Target: SignerProvider,
F::Target: FeeEstimator,
R::Target: Router,
L::Target: Logger,
{
- fn read(reader: &mut Reader, args: ChannelManagerReadArgs<'a, M, T, K, F, R, L>) -> Result {
- let (blockhash, chan_manager) = <(BlockHash, ChannelManager)>::read(reader, args)?;
+ fn read(reader: &mut Reader, args: ChannelManagerReadArgs<'a, M, T, ES, NS, SP, F, R, L>) -> Result {
+ let (blockhash, chan_manager) = <(BlockHash, ChannelManager)>::read(reader, args)?;
Ok((blockhash, Arc::new(chan_manager)))
}
}
-impl<'a, M: Deref, T: Deref, K: Deref, F: Deref, R: Deref, L: Deref>
- ReadableArgs> for (BlockHash, ChannelManager)
+impl<'a, M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Deref, F: Deref, R: Deref, L: Deref>
+ ReadableArgs> for (BlockHash, ChannelManager)
where
- M::Target: chain::Watch<::Signer>,
+ M::Target: chain::Watch<::Signer>,
T::Target: BroadcasterInterface,
- K::Target: KeysInterface,
+ ES::Target: EntropySource,
+ NS::Target: NodeSigner,
+ SP::Target: SignerProvider,
F::Target: FeeEstimator,
R::Target: Router,
L::Target: Logger,
{
- fn read(reader: &mut Reader, mut args:
ChannelManagerReadArgs<'a, M, T, ES, NS, SP, F, R, L>) -> Result { let _ver = read_ver_prefix!(reader, SERIALIZATION_VERSION); let genesis_hash: BlockHash = Readable::read(reader)?; @@ -7018,12 +7210,14 @@ where let channel_count: u64 = Readable::read(reader)?; let mut funding_txo_set = HashSet::with_capacity(cmp::min(channel_count as usize, 128)); - let mut peer_channels: HashMap::Signer>>> = HashMap::with_capacity(cmp::min(channel_count as usize, 128)); + let mut peer_channels: HashMap::Signer>>> = HashMap::with_capacity(cmp::min(channel_count as usize, 128)); let mut id_to_peer = HashMap::with_capacity(cmp::min(channel_count as usize, 128)); let mut short_to_chan_info = HashMap::with_capacity(cmp::min(channel_count as usize, 128)); let mut channel_closures = Vec::new(); for _ in 0..channel_count { - let mut channel: Channel<::Signer> = Channel::read(reader, (&args.keys_manager, best_block_height))?; + let mut channel: Channel<::Signer> = Channel::read(reader, ( + &args.entropy_source, &args.signer_provider, best_block_height, &provided_channel_type_features(&args.default_config) + ))?; let funding_txo = channel.get_funding_txo().ok_or(DecodeError::InvalidValue)?; funding_txo_set.insert(funding_txo.clone()); if let Some(ref mut monitor) = args.channel_monitors.get_mut(&funding_txo) { @@ -7116,7 +7310,7 @@ where } } - for (ref funding_txo, ref mut monitor) in args.channel_monitors.iter_mut() { + for (funding_txo, monitor) in args.channel_monitors.iter_mut() { if !funding_txo_set.contains(funding_txo) { log_info!(args.logger, "Broadcasting latest holder commitment transaction for closed channel {}", log_bytes!(funding_txo.to_channel_id())); monitor.broadcast_latest_holder_commitment_txn(&args.tx_broadcaster, &args.logger); @@ -7149,12 +7343,13 @@ where } let peer_count: u64 = Readable::read(reader)?; - let mut per_peer_state = HashMap::with_capacity(cmp::min(peer_count as usize, MAX_ALLOC_SIZE/mem::size_of::<(PublicKey, Mutex::Signer>>)>())); + let mut per_peer_state = HashMap::with_capacity(cmp::min(peer_count as usize, MAX_ALLOC_SIZE/mem::size_of::<(PublicKey, Mutex::Signer>>)>())); for _ in 0..peer_count { let peer_pubkey = Readable::read(reader)?; let peer_state = PeerState { channel_by_id: peer_channels.remove(&peer_pubkey).unwrap_or(HashMap::new()), latest_features: Readable::read(reader)?, + pending_msg_events: Vec::new(), }; per_peer_state.insert(peer_pubkey, Mutex::new(peer_state)); } @@ -7221,11 +7416,11 @@ where (11, probing_cookie_secret, option), }); if fake_scid_rand_bytes.is_none() { - fake_scid_rand_bytes = Some(args.keys_manager.get_secure_random_bytes()); + fake_scid_rand_bytes = Some(args.entropy_source.get_secure_random_bytes()); } if probing_cookie_secret.is_none() { - probing_cookie_secret = Some(args.keys_manager.get_secure_random_bytes()); + probing_cookie_secret = Some(args.entropy_source.get_secure_random_bytes()); } if pending_outbound_payments.is_none() && pending_outbound_payments_no_retry.is_none() { @@ -7265,9 +7460,13 @@ where hash_map::Entry::Vacant(entry) => { let path_fee = path.get_path_fees(); entry.insert(PendingOutboundPayment::Retryable { + retry_strategy: None, + attempts: PaymentAttempts::new(), + payment_params: None, session_privs: [session_priv_bytes].iter().map(|a| *a).collect(), payment_hash: htlc.payment_hash, payment_secret, + keysend_preimage: None, // only used for retries, and we'll never retry on startup pending_amt_msat: path_amt, pending_fee_msat: Some(path_fee), total_msat: path_amt, @@ -7331,7 +7530,7 @@ where }); } - let 
inbound_pmt_key_material = args.keys_manager.get_inbound_payment_key_material(); + let inbound_pmt_key_material = args.node_signer.get_inbound_payment_key_material(); let expanded_inbound_key = inbound_payment::ExpandedKey::new(&inbound_pmt_key_material); let mut claimable_htlcs = HashMap::with_capacity(claimable_htlcs_list.len()); @@ -7356,7 +7555,7 @@ where payment_preimage: match pending_inbound_payments.get(&payment_hash) { Some(inbound_payment) => inbound_payment.payment_preimage, None => match inbound_payment::verify(payment_hash, &hop_data, 0, &expanded_inbound_key, &args.logger) { - Ok(payment_preimage) => payment_preimage, + Ok((payment_preimage, _)) => payment_preimage, Err(()) => { log_error!(args.logger, "Failed to read claimable payment data for HTLC with payment hash {} - was not a pending inbound payment and didn't match our payment key", log_bytes!(payment_hash.0)); return Err(DecodeError::InvalidValue); @@ -7375,17 +7574,16 @@ where } let mut secp_ctx = Secp256k1::new(); - secp_ctx.seeded_randomize(&args.keys_manager.get_secure_random_bytes()); + secp_ctx.seeded_randomize(&args.entropy_source.get_secure_random_bytes()); if !channel_closures.is_empty() { pending_events_read.append(&mut channel_closures); } - let our_network_key = match args.keys_manager.get_node_secret(Recipient::Node) { + let our_network_pubkey = match args.node_signer.get_node_id(Recipient::Node) { Ok(key) => key, Err(()) => return Err(DecodeError::InvalidValue) }; - let our_network_pubkey = PublicKey::from_secret_key(&secp_ctx, &our_network_key); if let Some(network_pubkey) = received_network_pubkey { if network_pubkey != our_network_pubkey { log_error!(args.logger, "Key that was generated does not match the existing key."); @@ -7402,7 +7600,7 @@ where let mut outbound_scid_alias; loop { outbound_scid_alias = fake_scid::Namespace::OutboundAlias - .get_fake_scid(best_block_height, &genesis_hash, fake_scid_rand_bytes.as_ref().unwrap(), &args.keys_manager); + .get_fake_scid(best_block_height, &genesis_hash, fake_scid_rand_bytes.as_ref().unwrap(), &args.entropy_source); if outbound_scid_aliases.insert(outbound_scid_alias) { break; } } chan.set_outbound_scid_alias(outbound_scid_alias); @@ -7433,7 +7631,7 @@ where let mut receiver_node_id = Some(our_network_pubkey); let phantom_shared_secret = claimable_htlcs[0].prev_hop.phantom_shared_secret; if phantom_shared_secret.is_some() { - let phantom_pubkey = args.keys_manager.get_node_id(Recipient::PhantomNode) + let phantom_pubkey = args.node_signer.get_node_id(Recipient::PhantomNode) .expect("Failed to get node_id for phantom node recipient"); receiver_node_id = Some(phantom_pubkey) } @@ -7487,9 +7685,6 @@ where best_block: RwLock::new(BestBlock::new(best_block_hash, best_block_height)), - channel_state: Mutex::new(ChannelHolder { - pending_msg_events: Vec::new(), - }), inbound_payment_key: expanded_inbound_key, pending_inbound_payments: Mutex::new(pending_inbound_payments), pending_outbound_payments: OutboundPayments { pending_outbound_payments: Mutex::new(pending_outbound_payments.unwrap()) }, @@ -7504,7 +7699,6 @@ where probing_cookie_secret: probing_cookie_secret.unwrap(), - our_network_key, our_network_pubkey, secp_ctx, @@ -7517,7 +7711,10 @@ where total_consistency_lock: RwLock::new(()), persistence_notifier: Notifier::new(), - keys_manager: args.keys_manager, + entropy_source: args.entropy_source, + node_signer: args.node_signer, + signer_provider: args.signer_provider, + logger: args.logger, default_configuration: args.default_config, }; @@ -7540,18 
+7737,25 @@ where mod tests { use bitcoin::hashes::Hash; use bitcoin::hashes::sha256::Hash as Sha256; + use bitcoin::hashes::hex::FromHex; + use bitcoin::secp256k1::{PublicKey, Secp256k1, SecretKey}; + use bitcoin::secp256k1::ecdsa::Signature; + use bitcoin::secp256k1::ffi::Signature as FFISignature; + use bitcoin::blockdata::script::Script; + use bitcoin::Txid; use core::time::Duration; use core::sync::atomic::Ordering; use crate::ln::{PaymentPreimage, PaymentHash, PaymentSecret}; - use crate::ln::channelmanager::{self, inbound_payment, PaymentId, PaymentSendFailure}; + use crate::ln::channelmanager::{inbound_payment, PaymentId, PaymentSendFailure, InterceptId}; use crate::ln::functional_test_utils::*; use crate::ln::msgs; - use crate::ln::msgs::ChannelMessageHandler; + use crate::ln::msgs::{ChannelMessageHandler, OptionalField}; use crate::routing::router::{PaymentParameters, RouteParameters, find_route}; use crate::util::errors::APIError; use crate::util::events::{Event, HTLCDestination, MessageSendEvent, MessageSendEventsProvider, ClosureReason}; use crate::util::test_utils; - use crate::chain::keysinterface::{EntropySource, KeysInterface}; + use crate::util::config::ChannelConfig; + use crate::chain::keysinterface::EntropySource; #[test] fn test_notify_limits() { @@ -7568,7 +7772,7 @@ mod tests { assert!(nodes[1].node.await_persistable_update_timeout(Duration::from_millis(1))); assert!(nodes[2].node.await_persistable_update_timeout(Duration::from_millis(1))); - let mut chan = create_announced_chan_between_nodes(&nodes, 0, 1, channelmanager::provided_init_features(), channelmanager::provided_init_features()); + let mut chan = create_announced_chan_between_nodes(&nodes, 0, 1); // We check that the channel info nodes have doesn't change too early, even though we try // to connect messages with new values @@ -7639,7 +7843,7 @@ mod tests { let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); - create_announced_chan_between_nodes(&nodes, 0, 1, channelmanager::provided_init_features(), channelmanager::provided_init_features()); + create_announced_chan_between_nodes(&nodes, 0, 1); // First, send a partial MPP payment. let (route, our_payment_hash, payment_preimage, payment_secret) = get_route_and_payment_hash!(&nodes[0], nodes[1], 100_000); @@ -7761,8 +7965,8 @@ mod tests { let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); - create_announced_chan_between_nodes(&nodes, 0, 1, channelmanager::provided_init_features(), channelmanager::provided_init_features()); - let scorer = test_utils::TestScorer::with_penalty(0); + create_announced_chan_between_nodes(&nodes, 0, 1); + let scorer = test_utils::TestScorer::new(); let random_seed_bytes = chanmon_cfgs[1].keys_manager.get_secure_random_bytes(); // To start (1), send a regular payment but don't claim it. @@ -7771,7 +7975,7 @@ mod tests { // Next, attempt a keysend payment and make sure it fails. 
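The keysend leg of this test exercises the two-argument `PaymentParameters::for_keysend(payee, final_cltv_expiry_delta)` signature used throughout this diff. A condensed sketch of building and routing such a payment with the names from these tests (all surrounding setup assumed):

	let payment_params = PaymentParameters::for_keysend(payee_pubkey, TEST_FINAL_CLTV);
	let route_params = RouteParameters {
		payment_params, final_value_msat: 100_000, final_cltv_expiry_delta: TEST_FINAL_CLTV,
	};
	let route = find_route(
		&payer_pubkey, &route_params, &network_graph, Some(&first_hops.iter().collect::<Vec<_>>()),
		nodes[0].logger, &scorer, &random_seed_bytes,
	).unwrap();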
let route_params = RouteParameters { - payment_params: PaymentParameters::for_keysend(expected_route.last().unwrap().node.get_our_node_id()), + payment_params: PaymentParameters::for_keysend(expected_route.last().unwrap().node.get_our_node_id(), TEST_FINAL_CLTV), final_value_msat: 100_000, final_cltv_expiry_delta: TEST_FINAL_CLTV, }; @@ -7859,18 +8063,18 @@ mod tests { let payer_pubkey = nodes[0].node.get_our_node_id(); let payee_pubkey = nodes[1].node.get_our_node_id(); - nodes[0].node.peer_connected(&payee_pubkey, &msgs::Init { features: channelmanager::provided_init_features(), remote_network_address: None }).unwrap(); - nodes[1].node.peer_connected(&payer_pubkey, &msgs::Init { features: channelmanager::provided_init_features(), remote_network_address: None }).unwrap(); + nodes[0].node.peer_connected(&payee_pubkey, &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }).unwrap(); + nodes[1].node.peer_connected(&payer_pubkey, &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }).unwrap(); - let _chan = create_chan_between_nodes(&nodes[0], &nodes[1], channelmanager::provided_init_features(), channelmanager::provided_init_features()); + let _chan = create_chan_between_nodes(&nodes[0], &nodes[1]); let route_params = RouteParameters { - payment_params: PaymentParameters::for_keysend(payee_pubkey), + payment_params: PaymentParameters::for_keysend(payee_pubkey, 40), final_value_msat: 10_000, final_cltv_expiry_delta: 40, }; let network_graph = nodes[0].network_graph.clone(); let first_hops = nodes[0].node.list_usable_channels(); - let scorer = test_utils::TestScorer::with_penalty(0); + let scorer = test_utils::TestScorer::new(); let random_seed_bytes = chanmon_cfgs[1].keys_manager.get_secure_random_bytes(); let route = find_route( &payer_pubkey, &route_params, &network_graph, Some(&first_hops.iter().collect::>()), @@ -7904,18 +8108,18 @@ mod tests { let payer_pubkey = nodes[0].node.get_our_node_id(); let payee_pubkey = nodes[1].node.get_our_node_id(); - nodes[0].node.peer_connected(&payee_pubkey, &msgs::Init { features: channelmanager::provided_init_features(), remote_network_address: None }).unwrap(); - nodes[1].node.peer_connected(&payer_pubkey, &msgs::Init { features: channelmanager::provided_init_features(), remote_network_address: None }).unwrap(); + nodes[0].node.peer_connected(&payee_pubkey, &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }).unwrap(); + nodes[1].node.peer_connected(&payer_pubkey, &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }).unwrap(); - let _chan = create_chan_between_nodes(&nodes[0], &nodes[1], channelmanager::provided_init_features(), channelmanager::provided_init_features()); + let _chan = create_chan_between_nodes(&nodes[0], &nodes[1]); let route_params = RouteParameters { - payment_params: PaymentParameters::for_keysend(payee_pubkey), + payment_params: PaymentParameters::for_keysend(payee_pubkey, 40), final_value_msat: 10_000, final_cltv_expiry_delta: 40, }; let network_graph = nodes[0].network_graph.clone(); let first_hops = nodes[0].node.list_usable_channels(); - let scorer = test_utils::TestScorer::with_penalty(0); + let scorer = test_utils::TestScorer::new(); let random_seed_bytes = chanmon_cfgs[1].keys_manager.get_secure_random_bytes(); let route = find_route( &payer_pubkey, &route_params, &network_graph, Some(&first_hops.iter().collect::>()), @@ -7947,10 +8151,10 @@ mod tests { let node_chanmgrs = create_node_chanmgrs(4, 
&node_cfgs, &[None, None, None, None]); let nodes = create_network(4, &node_cfgs, &node_chanmgrs); - let chan_1_id = create_announced_chan_between_nodes(&nodes, 0, 1, channelmanager::provided_init_features(), channelmanager::provided_init_features()).0.contents.short_channel_id; - let chan_2_id = create_announced_chan_between_nodes(&nodes, 0, 2, channelmanager::provided_init_features(), channelmanager::provided_init_features()).0.contents.short_channel_id; - let chan_3_id = create_announced_chan_between_nodes(&nodes, 1, 3, channelmanager::provided_init_features(), channelmanager::provided_init_features()).0.contents.short_channel_id; - let chan_4_id = create_announced_chan_between_nodes(&nodes, 2, 3, channelmanager::provided_init_features(), channelmanager::provided_init_features()).0.contents.short_channel_id; + let chan_1_id = create_announced_chan_between_nodes(&nodes, 0, 1).0.contents.short_channel_id; + let chan_2_id = create_announced_chan_between_nodes(&nodes, 0, 2).0.contents.short_channel_id; + let chan_3_id = create_announced_chan_between_nodes(&nodes, 1, 3).0.contents.short_channel_id; + let chan_4_id = create_announced_chan_between_nodes(&nodes, 2, 3).0.contents.short_channel_id; // Marshall an MPP route. let (mut route, payment_hash, _, _) = get_route_and_payment_hash!(&nodes[0], nodes[3], 100000); @@ -8011,9 +8215,9 @@ mod tests { nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 1_000_000, 500_000_000, 42, None).unwrap(); let open_channel = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id()); - nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), channelmanager::provided_init_features(), &open_channel); + nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_channel); let accept_channel = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id()); - nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), channelmanager::provided_init_features(), &accept_channel); + nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &accept_channel); let (temporary_channel_id, tx, _funding_output) = create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 1_000_000, 42); let channel_id = &tx.txid().into_inner(); @@ -8058,9 +8262,9 @@ mod tests { update_nodes_with_chan_announce(&nodes, 0, 1, &announcement, &nodes_0_update, &nodes_1_update); nodes[0].node.close_channel(channel_id, &nodes[1].node.get_our_node_id()).unwrap(); - nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &channelmanager::provided_init_features(), &get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id())); + nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id())); let nodes_1_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id()); - nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &channelmanager::provided_init_features(), &nodes_1_shutdown); + nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &nodes_1_shutdown); let closing_signed_node_0 = get_event_msg!(nodes[0], MessageSendEvent::SendClosingSigned, nodes[1].node.get_our_node_id()); nodes[1].node.handle_closing_signed(&nodes[0].node.get_our_node_id(), &closing_signed_node_0); @@ -8111,13 +8315,237 @@ mod tests { check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure); 
	check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure);
}
+
+	fn check_not_connected_to_peer_error(res_err: Result, expected_public_key: PublicKey) {
+		let expected_message = format!("Not connected to node: {}", expected_public_key);
+		check_api_misuse_error_message(expected_message, res_err)
+	}
+
+	fn check_unknown_peer_error(res_err: Result, expected_public_key: PublicKey) {
+		let expected_message = format!("Can't find a peer matching the passed counterparty node_id {}", expected_public_key);
+		check_api_misuse_error_message(expected_message, res_err)
+	}
+
+	fn check_api_misuse_error_message(expected_err_message: String, res_err: Result) {
+		match res_err {
+			Err(APIError::APIMisuseError { err }) => {
+				assert_eq!(err, expected_err_message);
+			},
+			Ok(_) => panic!("Unexpected Ok"),
+			Err(_) => panic!("Unexpected Error"),
+		}
+	}
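The helpers above pin down the exact error message produced when a caller passes a `counterparty_node_id` that has no entry in `per_peer_state`. A self-contained, std-only sketch of the lookup-then-error pattern the API methods rely on (the types here are simplified stand-ins, not LDK's):

	use std::collections::HashMap;
	use std::sync::{Mutex, RwLock};

	struct PeerState; // stand-in for the real per-peer state
	enum ApiError { MisuseError { err: String } }

	fn ensure_known_peer(
		per_peer_state: &RwLock<HashMap<String, Mutex<PeerState>>>, counterparty_node_id: &str,
	) -> Result<(), ApiError> {
		let peers = per_peer_state.read().unwrap();
		peers.get(counterparty_node_id).map(|_peer_mutex| ()).ok_or_else(|| ApiError::MisuseError {
			err: format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id),
		})
	}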
+
+	#[test]
+	fn test_api_calls_with_unknown_counterparty_node() {
+		// Tests that our API functions and message handlers that expect a `counterparty_node_id`
+		// as input behave as expected if the `counterparty_node_id` is an unknown peer in the
+		// `ChannelManager::per_peer_state` map.
+		let chanmon_cfg = create_chanmon_cfgs(2);
+		let node_cfg = create_node_cfgs(2, &chanmon_cfg);
+		let node_chanmgr = create_node_chanmgrs(2, &node_cfg, &[None, None]);
+		let nodes = create_network(2, &node_cfg, &node_chanmgr);
+
+		// Boilerplate code to produce `open_channel` and `accept_channel` msgs more densely than
+		// creating dummy ones.
+		nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 1_000_000, 500_000_000, 42, None).unwrap();
+		let open_channel_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
+		nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_channel_msg);
+		let accept_channel_msg = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
+
+		// Dummy values
+		let channel_id = [4; 32];
+		let signature = Signature::from(unsafe { FFISignature::new() });
+		let unknown_public_key = PublicKey::from_secret_key(&Secp256k1::signing_only(), &SecretKey::from_slice(&[42; 32]).unwrap());
+		let intercept_id = InterceptId([0; 32]);
+
+		// Dummy msgs
+		let funding_created_msg = msgs::FundingCreated {
+			temporary_channel_id: open_channel_msg.temporary_channel_id,
+			funding_txid: Txid::from_hex("ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap(),
+			funding_output_index: 0,
+			signature: signature,
+		};
+
+		let funding_signed_msg = msgs::FundingSigned {
+			channel_id: channel_id,
+			signature: signature,
+		};
+
+		let channel_ready_msg = msgs::ChannelReady {
+			channel_id: channel_id,
+			next_per_commitment_point: unknown_public_key,
+			short_channel_id_alias: None,
+		};
+
+		let announcement_signatures_msg = msgs::AnnouncementSignatures {
+			channel_id: channel_id,
+			short_channel_id: 0,
+			node_signature: signature,
+			bitcoin_signature: signature,
+		};
+
+		let channel_reestablish_msg = msgs::ChannelReestablish {
+			channel_id: channel_id,
+			next_local_commitment_number: 0,
+			next_remote_commitment_number: 0,
+			data_loss_protect: OptionalField::Absent,
+		};
+
+		let closing_signed_msg = msgs::ClosingSigned {
+			channel_id: channel_id,
+			fee_satoshis: 1000,
+			signature: signature,
+			fee_range: None,
+		};
+
+		let shutdown_msg = msgs::Shutdown {
+			channel_id: channel_id,
+			scriptpubkey: Script::new(),
+		};
+
+		let onion_routing_packet = msgs::OnionPacket {
+			version: 255,
+			public_key: Ok(unknown_public_key),
+			hop_data: [1; 20*65],
+			hmac: [2; 32]
+		};
+
+		let update_add_htlc_msg = msgs::UpdateAddHTLC {
+			channel_id: channel_id,
+			htlc_id: 0,
+			amount_msat: 1000000,
+			payment_hash: PaymentHash([1; 32]),
+			cltv_expiry: 821716,
+			onion_routing_packet
+		};
+
+		let commitment_signed_msg = msgs::CommitmentSigned {
+			channel_id: channel_id,
+			signature: signature,
+			htlc_signatures: Vec::new(),
+		};
+
+		let update_fee_msg = msgs::UpdateFee {
+			channel_id: channel_id,
+			feerate_per_kw: 1000,
+		};
+
+		let malformed_update_msg = msgs::UpdateFailMalformedHTLC{
+			channel_id: channel_id,
+			htlc_id: 0,
+			sha256_of_onion: [1; 32],
+			failure_code: 0x8000,
+		};
+
+		let fulfill_update_msg = msgs::UpdateFulfillHTLC{
+			channel_id: channel_id,
+			htlc_id: 0,
+			payment_preimage: PaymentPreimage([1; 32]),
+		};
+
+		let fail_update_msg = msgs::UpdateFailHTLC{
+			channel_id: channel_id,
+			htlc_id: 0,
+			reason: msgs::OnionErrorPacket { data: Vec::new()},
+		};
+
+		let revoke_and_ack_msg = msgs::RevokeAndACK {
+			channel_id: channel_id,
+			per_commitment_secret: [1; 32],
+			next_per_commitment_point: unknown_public_key,
+		};
+
+		// Test the API functions and message handlers.
+		check_not_connected_to_peer_error(nodes[0].node.create_channel(unknown_public_key, 1_000_000, 500_000_000, 42, None), unknown_public_key);
+
+		nodes[1].node.handle_open_channel(&unknown_public_key, &open_channel_msg);
+
+		nodes[0].node.handle_accept_channel(&unknown_public_key, &accept_channel_msg);
+
+		check_unknown_peer_error(nodes[0].node.accept_inbound_channel(&open_channel_msg.temporary_channel_id, &unknown_public_key, 42), unknown_public_key);
+
+		nodes[1].node.handle_funding_created(&unknown_public_key, &funding_created_msg);
+
+		nodes[0].node.handle_funding_signed(&unknown_public_key, &funding_signed_msg);
+
+		nodes[0].node.handle_channel_ready(&unknown_public_key, &channel_ready_msg);
+
+		nodes[1].node.handle_announcement_signatures(&unknown_public_key, &announcement_signatures_msg);
+
+		check_unknown_peer_error(nodes[0].node.close_channel(&channel_id, &unknown_public_key), unknown_public_key);
+
+		check_unknown_peer_error(nodes[0].node.force_close_broadcasting_latest_txn(&channel_id, &unknown_public_key), unknown_public_key);
+
+		check_unknown_peer_error(nodes[0].node.force_close_without_broadcasting_txn(&channel_id, &unknown_public_key), unknown_public_key);
+
+		check_unknown_peer_error(nodes[0].node.forward_intercepted_htlc(intercept_id, &channel_id, unknown_public_key, 1_000_000), unknown_public_key);
+
+		check_unknown_peer_error(nodes[0].node.update_channel_config(&unknown_public_key, &[channel_id], &ChannelConfig::default()), unknown_public_key);
+
+		nodes[0].node.handle_shutdown(&unknown_public_key, &shutdown_msg);
+
+		nodes[1].node.handle_closing_signed(&unknown_public_key, &closing_signed_msg);
+
+		nodes[0].node.handle_channel_reestablish(&unknown_public_key, &channel_reestablish_msg);
+
+		nodes[1].node.handle_update_add_htlc(&unknown_public_key, &update_add_htlc_msg);
+
+		nodes[1].node.handle_commitment_signed(&unknown_public_key, &commitment_signed_msg);
+
+		nodes[1].node.handle_update_fail_malformed_htlc(&unknown_public_key, &malformed_update_msg);
+
+		nodes[1].node.handle_update_fail_htlc(&unknown_public_key, &fail_update_msg);
+
+		nodes[1].node.handle_update_fulfill_htlc(&unknown_public_key, &fulfill_update_msg);
+
+		nodes[1].node.handle_revoke_and_ack(&unknown_public_key, &revoke_and_ack_msg);
+
+		nodes[1].node.handle_update_fee(&unknown_public_key, &update_fee_msg);
+	}
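The next test drives the anchors fallback: when the peer errors on a not-yet-funded channel, `handle_error` (via `Channel::maybe_handle_error_without_close`, seen earlier in this diff) asks the channel for a downgraded `open_channel` to re-send rather than closing outright. A schematic sketch of that retry shape, with illustrative names rather than LDK's real API:

	struct ChannelType { anchors_zero_fee_htlc_tx: bool }

	// Returns `Some` when the handshake should be retried without the
	// most-preferred feature, or `None` when there is nothing left to strip.
	fn downgrade_for_retry(current: &mut ChannelType) -> Option<&ChannelType> {
		if current.anchors_zero_fee_htlc_tx {
			current.anchors_zero_fee_htlc_tx = false; // retry as a non-anchor channel
			Some(current)
		} else {
			None // no fallback remains; abandon the channel instead
		}
	}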
+
+	#[cfg(anchors)]
+	#[test]
+	fn test_anchors_zero_fee_htlc_tx_fallback() {
+		// Tests that if both nodes support anchors, but the remote node does not want to accept
+		// anchor channels at the moment, an error is sent to the local node such that it can retry
+		// the channel without the anchors feature.
+		let chanmon_cfgs = create_chanmon_cfgs(2);
+		let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+		let mut anchors_config = test_default_channel_config();
+		anchors_config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx = true;
+		anchors_config.manually_accept_inbound_channels = true;
+		let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(anchors_config.clone()), Some(anchors_config.clone())]);
+		let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+		nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, 0, 0, None).unwrap();
+		let open_channel_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
+		assert!(open_channel_msg.channel_type.as_ref().unwrap().supports_anchors_zero_fee_htlc_tx());
+
+		nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_channel_msg);
+		let events = nodes[1].node.get_and_clear_pending_events();
+		match events[0] {
+			Event::OpenChannelRequest { temporary_channel_id, .. } => {
+				nodes[1].node.force_close_broadcasting_latest_txn(&temporary_channel_id, &nodes[0].node.get_our_node_id()).unwrap();
+			}
+			_ => panic!("Unexpected event"),
+		}
+
+		let error_msg = get_err_msg!(nodes[1], nodes[0].node.get_our_node_id());
+		nodes[0].node.handle_error(&nodes[1].node.get_our_node_id(), &error_msg);
+
+		let open_channel_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
+		assert!(!open_channel_msg.channel_type.unwrap().supports_anchors_zero_fee_htlc_tx());
+
+		check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed);
+	}
}

#[cfg(all(any(test, feature = "_test_utils"), feature = "_bench_unstable"))]
pub mod bench {
	use crate::chain::Listen;
	use crate::chain::chainmonitor::{ChainMonitor, Persist};
-	use crate::chain::keysinterface::{EntropySource, KeysManager, KeysInterface, InMemorySigner};
+	use crate::chain::keysinterface::{EntropySource, KeysManager, InMemorySigner};
	use crate::ln::channelmanager::{self, BestBlock, ChainParameters, ChannelManager, PaymentHash, PaymentPreimage, PaymentId};
	use crate::ln::functional_test_utils::*;
	use crate::ln::msgs::{ChannelMessageHandler, Init};
@@ -8140,7 +8568,7 @@ pub mod bench {
	&'a ChainMonitor,
-	&'a test_utils::TestBroadcaster, &'a KeysManager,
+	&'a test_utils::TestBroadcaster, &'a KeysManager, &'a KeysManager, &'a KeysManager,
	&'a test_utils::TestFeeEstimator, &'a test_utils::TestRouter<'a>,
	&'a test_utils::TestLogger>,
}
@@ -8161,7 +8589,8 @@ pub mod bench {
	let tx_broadcaster = test_utils::TestBroadcaster{txn_broadcasted: Mutex::new(Vec::new()), blocks: Arc::new(Mutex::new(Vec::new()))};
	let fee_estimator = test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) };
	let logger_a = test_utils::TestLogger::with_id("node a".to_owned());
-	let router = test_utils::TestRouter::new(Arc::new(NetworkGraph::new(genesis_hash, &logger_a)));
+	let scorer = Mutex::new(test_utils::TestScorer::new());
+	let router = test_utils::TestRouter::new(Arc::new(NetworkGraph::new(genesis_hash, &logger_a)), &scorer);
	let mut config: UserConfig = Default::default();
	config.channel_handshake_config.minimum_depth = 1;
@@ -8169,7 +8598,7 @@ pub mod bench {
	let chain_monitor_a = ChainMonitor::new(None, &tx_broadcaster, &logger_a, &fee_estimator, &persister_a);
	let seed_a = [1u8; 32];
	let
keys_manager_a = KeysManager::new(&seed_a, 42, 42); - let node_a = ChannelManager::new(&fee_estimator, &chain_monitor_a, &tx_broadcaster, &router, &logger_a, &keys_manager_a, config.clone(), ChainParameters { + let node_a = ChannelManager::new(&fee_estimator, &chain_monitor_a, &tx_broadcaster, &router, &logger_a, &keys_manager_a, &keys_manager_a, &keys_manager_a, config.clone(), ChainParameters { network, best_block: BestBlock::from_genesis(network), }); @@ -8179,17 +8608,17 @@ pub mod bench { let chain_monitor_b = ChainMonitor::new(None, &tx_broadcaster, &logger_a, &fee_estimator, &persister_b); let seed_b = [2u8; 32]; let keys_manager_b = KeysManager::new(&seed_b, 42, 42); - let node_b = ChannelManager::new(&fee_estimator, &chain_monitor_b, &tx_broadcaster, &router, &logger_b, &keys_manager_b, config.clone(), ChainParameters { + let node_b = ChannelManager::new(&fee_estimator, &chain_monitor_b, &tx_broadcaster, &router, &logger_b, &keys_manager_b, &keys_manager_b, &keys_manager_b, config.clone(), ChainParameters { network, best_block: BestBlock::from_genesis(network), }); let node_b_holder = NodeHolder { node: &node_b }; - node_a.peer_connected(&node_b.get_our_node_id(), &Init { features: channelmanager::provided_init_features(), remote_network_address: None }).unwrap(); - node_b.peer_connected(&node_a.get_our_node_id(), &Init { features: channelmanager::provided_init_features(), remote_network_address: None }).unwrap(); + node_a.peer_connected(&node_b.get_our_node_id(), &Init { features: node_b.init_features(), remote_network_address: None }).unwrap(); + node_b.peer_connected(&node_a.get_our_node_id(), &Init { features: node_a.init_features(), remote_network_address: None }).unwrap(); node_a.create_channel(node_b.get_our_node_id(), 8_000_000, 100_000_000, 42, None).unwrap(); - node_b.handle_open_channel(&node_a.get_our_node_id(), channelmanager::provided_init_features(), &get_event_msg!(node_a_holder, MessageSendEvent::SendOpenChannel, node_b.get_our_node_id())); - node_a.handle_accept_channel(&node_b.get_our_node_id(), channelmanager::provided_init_features(), &get_event_msg!(node_b_holder, MessageSendEvent::SendAcceptChannel, node_a.get_our_node_id())); + node_b.handle_open_channel(&node_a.get_our_node_id(), &get_event_msg!(node_a_holder, MessageSendEvent::SendOpenChannel, node_b.get_our_node_id())); + node_a.handle_accept_channel(&node_b.get_our_node_id(), &get_event_msg!(node_b_holder, MessageSendEvent::SendAcceptChannel, node_a.get_our_node_id())); let tx; if let Event::FundingGenerationReady { temporary_channel_id, output_script, .. } = get_event!(node_a_holder, Event::FundingGenerationReady) { @@ -8250,9 +8679,9 @@ pub mod bench { macro_rules! 
send_payment { ($node_a: expr, $node_b: expr) => { let usable_channels = $node_a.list_usable_channels(); - let payment_params = PaymentParameters::from_node_id($node_b.get_our_node_id()) - .with_features(channelmanager::provided_invoice_features()); - let scorer = test_utils::TestScorer::with_penalty(0); + let payment_params = PaymentParameters::from_node_id($node_b.get_our_node_id(), TEST_FINAL_CLTV) + .with_features($node_b.invoice_features()); + let scorer = test_utils::TestScorer::new(); let seed = [3u8; 32]; let keys_manager = KeysManager::new(&seed, 42, 42); let random_seed_bytes = keys_manager.get_secure_random_bytes(); @@ -8263,7 +8692,7 @@ pub mod bench { payment_preimage.0[0..8].copy_from_slice(&payment_count.to_le_bytes()); payment_count += 1; let payment_hash = PaymentHash(Sha256::hash(&payment_preimage.0[..]).into_inner()); - let payment_secret = $node_b.create_inbound_payment_for_hash(payment_hash, None, 7200).unwrap(); + let payment_secret = $node_b.create_inbound_payment_for_hash(payment_hash, None, 7200, None).unwrap(); $node_a.send_payment(&route, payment_hash, &Some(payment_secret), PaymentId(payment_hash.0)).unwrap(); let payment_event = SendEvent::from_event($node_a.get_and_clear_pending_msg_events().pop().unwrap());