X-Git-Url: http://git.bitcoin.ninja/index.cgi?a=blobdiff_plain;f=lightning%2Fsrc%2Fln%2Fchannelmanager.rs;h=8620c3cc15fa7be8e4611d9102cc59770550f277;hb=5ce4bfc1f6369cde16a88f2d2af416fc32f71082;hp=84ea7f1a5fa13a9292cf44a9641e3c49fece9359;hpb=e2c5cb5b8ccf566f93f40752a8fad8354c28018e;p=rust-lightning diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 84ea7f1a..8620c3cc 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -26,12 +26,10 @@ use bitcoin::network::constants::Network; use bitcoin::hashes::Hash; use bitcoin::hashes::sha256::Hash as Sha256; -use bitcoin::hashes::sha256d::Hash as Sha256dHash; use bitcoin::hash_types::{BlockHash, Txid}; use bitcoin::secp256k1::{SecretKey,PublicKey}; use bitcoin::secp256k1::Secp256k1; -use bitcoin::secp256k1::ecdh::SharedSecret; use bitcoin::{LockTime, secp256k1, Sequence}; use crate::chain; @@ -47,7 +45,7 @@ use crate::ln::features::{ChannelFeatures, ChannelTypeFeatures, InitFeatures, No #[cfg(any(feature = "_test_utils", test))] use crate::ln::features::InvoiceFeatures; use crate::routing::gossip::NetworkGraph; -use crate::routing::router::{DefaultRouter, InFlightHtlcs, PaymentParameters, Route, RouteHop, RoutePath, Router}; +use crate::routing::router::{DefaultRouter, InFlightHtlcs, PaymentParameters, Route, RouteHop, RouteParameters, RoutePath, Router}; use crate::routing::scoring::ProbabilisticScorer; use crate::ln::msgs; use crate::ln::onion_utils; @@ -55,9 +53,9 @@ use crate::ln::onion_utils::HTLCFailReason; use crate::ln::msgs::{ChannelMessageHandler, DecodeError, LightningError, MAX_VALUE_MSAT}; #[cfg(test)] use crate::ln::outbound_payment; -use crate::ln::outbound_payment::{OutboundPayments, PendingOutboundPayment}; +use crate::ln::outbound_payment::{OutboundPayments, PaymentAttempts, PendingOutboundPayment, Retry}; use crate::ln::wire::Encode; -use crate::chain::keysinterface::{EntropySource, KeysInterface, KeysManager, NodeSigner, Recipient, Sign, SignerProvider}; +use crate::chain::keysinterface::{EntropySource, KeysManager, NodeSigner, Recipient, SignerProvider, ChannelSigner}; use crate::util::config::{UserConfig, ChannelConfig}; use crate::util::events::{Event, EventHandler, EventsProvider, MessageSendEvent, MessageSendEventsProvider, ClosureReason, HTLCDestination}; use crate::util::events; @@ -72,7 +70,7 @@ use crate::prelude::*; use core::{cmp, mem}; use core::cell::RefCell; use crate::io::Read; -use crate::sync::{Arc, Mutex, MutexGuard, RwLock, RwLockReadGuard, FairRwLock}; +use crate::sync::{Arc, Mutex, RwLock, RwLockReadGuard, FairRwLock}; use core::sync::atomic::{AtomicUsize, Ordering}; use core::time::Duration; use core::ops::Deref; @@ -247,6 +245,10 @@ pub(crate) enum HTLCSource { first_hop_htlc_msat: u64, payment_id: PaymentId, payment_secret: Option, + /// Note that this is now "deprecated" - we write it for forwards (and read it for + /// backwards) compatibility reasons, but prefer to use the data in the + /// [`super::outbound_payment`] module, which stores per-payment data once instead of in + /// each HTLC. payment_params: Option, }, } @@ -291,6 +293,25 @@ struct ReceiveError { msg: &'static str, } +/// This enum is used to specify which error data to send to peers when failing back an HTLC +/// using [`ChannelManager::fail_htlc_backwards_with_reason`]. +/// +/// For more info on failure codes, see . +#[derive(Clone, Copy)] +pub enum FailureCode { + /// We had a temporary error processing the payment. 
Useful if no other error codes fit + /// and you want to indicate that the payer may want to retry. + TemporaryNodeFailure = 0x2000 | 2, + /// We have a required feature which was not in this onion. For example, you may require + /// some additional metadata that was not provided with this payment. + RequiredNodeFeatureMissing = 0x4000 | 0x2000 | 3, + /// You may wish to use this when a `payment_preimage` is unknown, or the CLTV expiry of + /// the HTLC is too close to the current block height for safe handling. + /// Using this failure code in [`ChannelManager::fail_htlc_backwards_with_reason`] is + /// equivalent to calling [`ChannelManager::fail_htlc_backwards`]. + IncorrectOrUnknownPaymentDetails = 0x4000 | 15, +} + type ShutdownResult = (Option<(OutPoint, ChannelMonitorUpdate)>, Vec<(HTLCSource, PaymentHash, PublicKey, [u8; 32])>); /// Error type returned across the peer_state mutex boundary. When an Err is generated for a @@ -390,7 +411,7 @@ impl MsgHandleErrInternal { /// Event::PendingHTLCsForwardable for the API guidelines indicating how long should be waited). /// This provides some limited amount of privacy. Ideally this would range from somewhere like one /// second to 30 seconds, but people expect lightning to be, you know, kinda fast, sadly. -const MIN_HTLC_RELAY_HOLDING_CELL_MILLIS: u64 = 100; +pub(super) const MIN_HTLC_RELAY_HOLDING_CELL_MILLIS: u64 = 100; /// For events which result in both a RevokeAndACK and a CommitmentUpdate, by default they should /// be sent in the order they appear in the return value, however sometimes the order needs to be @@ -434,13 +455,6 @@ struct ClaimablePayments { pending_claiming_payments: HashMap, } -// Note this is only exposed in cfg(test): -pub(super) struct ChannelHolder { - /// Messages to send to peers - pushed to in the same lock that they are generated in (except - /// for broadcast messages, where ordering isn't as strict). - pub(super) pending_msg_events: Vec, -} - /// Events which we process internally but cannot be procsesed immediately at the generation site /// for some reason. They are handled in timer_tick_occurred, so may be processed with /// quite some time lag. @@ -461,7 +475,7 @@ pub(crate) enum MonitorUpdateCompletionAction { } /// State we hold per-peer. -pub(super) struct PeerState { +pub(super) struct PeerState { /// `temporary_channel_id` or `channel_id` -> `channel`. /// /// Holds all channels where the peer is the counterparty. Once a channel has been assigned a @@ -470,6 +484,9 @@ pub(super) struct PeerState { pub(super) channel_by_id: HashMap<[u8; 32], Channel>, /// The latest `InitFeatures` we heard from the peer. latest_features: InitFeatures, + /// Messages to send to the peer - pushed to in the same lock that they are generated in (except + /// for broadcast messages, where ordering isn't as strict). + pub(super) pending_msg_events: Vec, } /// Stores a PaymentSecret and any other data we may need to validate an inbound payment is @@ -506,6 +523,8 @@ pub type SimpleArcChannelManager = ChannelManager< Arc, Arc, Arc, + Arc, + Arc, Arc, Arc>>, @@ -525,7 +544,7 @@ pub type SimpleArcChannelManager = ChannelManager< /// type alias chooses the concrete types of KeysManager and DefaultRouter. 
/// /// (C-not exported) as Arcs don't make sense in bindings -pub type SimpleRefChannelManager<'a, 'b, 'c, 'd, 'e, 'f, 'g, 'h, M, T, F, L> = ChannelManager<&'a M, &'b T, &'c KeysManager, &'d F, &'e DefaultRouter<&'f NetworkGraph<&'g L>, &'g L, &'h Mutex, &'g L>>>, &'g L>; +pub type SimpleRefChannelManager<'a, 'b, 'c, 'd, 'e, 'f, 'g, 'h, M, T, F, L> = ChannelManager<&'a M, &'b T, &'c KeysManager, &'c KeysManager, &'c KeysManager, &'d F, &'e DefaultRouter<&'f NetworkGraph<&'g L>, &'g L, &'h Mutex, &'g L>>>, &'g L>; /// Manager which keeps track of a number of channels and sends messages to the appropriate /// channel, also tracking HTLC preimages and forwarding onion packets appropriately. @@ -579,35 +598,35 @@ pub type SimpleRefChannelManager<'a, 'b, 'c, 'd, 'e, 'f, 'g, 'h, M, T, F, L> = C // | | // | |__`pending_intercepted_htlcs` // | -// |__`pending_inbound_payments` +// |__`per_peer_state` // | | -// | |__`claimable_payments` -// | | -// | |__`pending_outbound_payments` // This field's struct contains a map of pending outbounds +// | |__`pending_inbound_payments` +// | | +// | |__`claimable_payments` // | | -// | |__`channel_state` +// | |__`pending_outbound_payments` // This field's struct contains a map of pending outbounds // | | -// | |__`per_peer_state` +// | |__`peer_state` // | | -// | |__`peer_state` -// | | -// | |__`id_to_peer` -// | | -// | |__`short_to_chan_info` -// | | -// | |__`outbound_scid_aliases` -// | | -// | |__`best_block` +// | |__`id_to_peer` +// | | +// | |__`short_to_chan_info` +// | | +// | |__`outbound_scid_aliases` +// | | +// | |__`best_block` +// | | +// | |__`pending_events` // | | -// | |__`pending_events` -// | | -// | |__`pending_background_events` +// | |__`pending_background_events` // -pub struct ChannelManager +pub struct ChannelManager where - M::Target: chain::Watch<::Signer>, + M::Target: chain::Watch<::Signer>, T::Target: BroadcasterInterface, - K::Target: KeysInterface, + ES::Target: EntropySource, + NS::Target: NodeSigner, + SP::Target: SignerProvider, F::Target: FeeEstimator, R::Target: Router, L::Target: Logger, @@ -627,12 +646,6 @@ where best_block: RwLock, secp_ctx: Secp256k1, - /// See `ChannelManager` struct-level documentation for lock order requirements. - #[cfg(any(test, feature = "_test_utils"))] - pub(super) channel_state: Mutex, - #[cfg(not(any(test, feature = "_test_utils")))] - channel_state: Mutex, - /// Storage for PaymentSecrets and any requirements on future inbound payments before we will /// expose them to users via a PaymentClaimable event. HTLCs which do not meet the requirements /// here are failed when we process them as pending-forwardable-HTLCs, and entries are removed @@ -729,7 +742,6 @@ where #[cfg(not(test))] short_to_chan_info: FairRwLock>, - our_network_key: SecretKey, our_network_pubkey: PublicKey, inbound_payment_key: inbound_payment::ExpandedKey, @@ -766,9 +778,9 @@ where /// /// See `ChannelManager` struct-level documentation for lock order requirements. #[cfg(not(any(test, feature = "_test_utils")))] - per_peer_state: FairRwLock::Signer>>>>, + per_peer_state: FairRwLock::Signer>>>>, #[cfg(any(test, feature = "_test_utils"))] - pub(super) per_peer_state: FairRwLock::Signer>>>>, + pub(super) per_peer_state: FairRwLock::Signer>>>>, /// See `ChannelManager` struct-level documentation for lock order requirements. 
pending_events: Mutex>, @@ -784,7 +796,9 @@ where persistence_notifier: Notifier, - keys_manager: K, + entropy_source: ES, + node_signer: NS, + signer_provider: SP, logger: L, } @@ -883,12 +897,12 @@ pub const MIN_CLTV_EXPIRY_DELTA: u16 = 6*7; pub(super) const CLTV_FAR_FAR_AWAY: u32 = 14 * 24 * 6; /// Minimum CLTV difference between the current block height and received inbound payments. -/// Invoices generated for payment to us must set their `min_final_cltv_expiry` field to at least +/// Invoices generated for payment to us must set their `min_final_cltv_expiry_delta` field to at least /// this value. // Note that we fail if exactly HTLC_FAIL_BACK_BUFFER + 1 was used, so we need to add one for // any payments to succeed. Further, we don't want payments to fail if a block was found while // a payment was being routed, so we add an extra block to be safe. -pub const MIN_FINAL_CLTV_EXPIRY: u32 = HTLC_FAIL_BACK_BUFFER + 3; +pub const MIN_FINAL_CLTV_EXPIRY_DELTA: u16 = HTLC_FAIL_BACK_BUFFER as u16 + 3; // Check that our CLTV_EXPIRY is at least CLTV_CLAIM_BUFFER + ANTI_REORG_DELAY + LATENCY_GRACE_PERIOD_BLOCKS, // ie that if the next-hop peer fails the HTLC within @@ -1159,12 +1173,12 @@ macro_rules! handle_error { match $internal { Ok(msg) => Ok(msg), Err(MsgHandleErrInternal { err, chan_id, shutdown_finish }) => { - #[cfg(debug_assertions)] + #[cfg(any(feature = "_test_utils", test))] { // In testing, ensure there are no deadlocks where the lock is already held upon // entering the macro. - assert!($self.channel_state.try_lock().is_ok()); - assert!($self.pending_events.try_lock().is_ok()); + debug_assert!($self.pending_events.try_lock().is_ok()); + debug_assert!($self.per_peer_state.try_write().is_ok()); } let mut msg_events = Vec::with_capacity(2); @@ -1194,7 +1208,31 @@ macro_rules! handle_error { } if !msg_events.is_empty() { - $self.channel_state.lock().unwrap().pending_msg_events.append(&mut msg_events); + let per_peer_state = $self.per_peer_state.read().unwrap(); + if let Some(peer_state_mutex) = per_peer_state.get(&$counterparty_node_id) { + let mut peer_state = peer_state_mutex.lock().unwrap(); + peer_state.pending_msg_events.append(&mut msg_events); + } + #[cfg(any(feature = "_test_utils", test))] + { + if let None = per_peer_state.get(&$counterparty_node_id) { + // This shouldn't occour in tests unless an unkown counterparty_node_id + // has been passed to our message handling functions. + let expected_error_str = format!("Can't find a peer matching the passed counterparty node_id {}", $counterparty_node_id); + match err.action { + msgs::ErrorAction::SendErrorMessage { + msg: msgs::ErrorMessage { ref channel_id, ref data } + } + => { + assert_eq!(*data, expected_error_str); + if let Some((err_channel_id, _user_channel_id)) = chan_id { + debug_assert_eq!(*channel_id, err_channel_id); + } + } + _ => debug_assert!(false, "Unexpected event"), + } + } + } } // Return error in case higher-API need one @@ -1394,11 +1432,13 @@ macro_rules! 
emit_channel_ready_event { } } -impl ChannelManager +impl ChannelManager where - M::Target: chain::Watch<::Signer>, + M::Target: chain::Watch<::Signer>, T::Target: BroadcasterInterface, - K::Target: KeysInterface, + ES::Target: EntropySource, + NS::Target: NodeSigner, + SP::Target: SignerProvider, F::Target: FeeEstimator, R::Target: Router, L::Target: Logger, @@ -1413,10 +1453,10 @@ where /// Users need to notify the new ChannelManager when a new block is connected or /// disconnected using its `block_connected` and `block_disconnected` methods, starting /// from after `params.latest_hash`. - pub fn new(fee_est: F, chain_monitor: M, tx_broadcaster: T, router: R, logger: L, keys_manager: K, config: UserConfig, params: ChainParameters) -> Self { + pub fn new(fee_est: F, chain_monitor: M, tx_broadcaster: T, router: R, logger: L, entropy_source: ES, node_signer: NS, signer_provider: SP, config: UserConfig, params: ChainParameters) -> Self { let mut secp_ctx = Secp256k1::new(); - secp_ctx.seeded_randomize(&keys_manager.get_secure_random_bytes()); - let inbound_pmt_key_material = keys_manager.get_inbound_payment_key_material(); + secp_ctx.seeded_randomize(&entropy_source.get_secure_random_bytes()); + let inbound_pmt_key_material = node_signer.get_inbound_payment_key_material(); let expanded_inbound_key = inbound_payment::ExpandedKey::new(&inbound_pmt_key_material); ChannelManager { default_configuration: config.clone(), @@ -1428,9 +1468,6 @@ where best_block: RwLock::new(params.best_block), - channel_state: Mutex::new(ChannelHolder{ - pending_msg_events: Vec::new(), - }), outbound_scid_aliases: Mutex::new(HashSet::new()), pending_inbound_payments: Mutex::new(HashMap::new()), pending_outbound_payments: OutboundPayments::new(), @@ -1440,14 +1477,13 @@ where id_to_peer: Mutex::new(HashMap::new()), short_to_chan_info: FairRwLock::new(HashMap::new()), - our_network_key: keys_manager.get_node_secret(Recipient::Node).unwrap(), - our_network_pubkey: PublicKey::from_secret_key(&secp_ctx, &keys_manager.get_node_secret(Recipient::Node).unwrap()), + our_network_pubkey: node_signer.get_node_id(Recipient::Node).unwrap(), secp_ctx, inbound_payment_key: expanded_inbound_key, - fake_scid_rand_bytes: keys_manager.get_secure_random_bytes(), + fake_scid_rand_bytes: entropy_source.get_secure_random_bytes(), - probing_cookie_secret: keys_manager.get_secure_random_bytes(), + probing_cookie_secret: entropy_source.get_secure_random_bytes(), highest_seen_timestamp: AtomicUsize::new(0), @@ -1458,7 +1494,9 @@ where total_consistency_lock: RwLock::new(()), persistence_notifier: Notifier::new(), - keys_manager, + entropy_source, + node_signer, + signer_provider, logger, } @@ -1477,7 +1515,7 @@ where if cfg!(fuzzing) { // fuzzing chacha20 doesn't use the key at all so we always get the same alias outbound_scid_alias += 1; } else { - outbound_scid_alias = fake_scid::Namespace::OutboundAlias.get_fake_scid(height, &self.genesis_hash, &self.fake_scid_rand_bytes, &self.keys_manager); + outbound_scid_alias = fake_scid::Namespace::OutboundAlias.get_fake_scid(height, &self.genesis_hash, &self.fake_scid_rand_bytes, &self.entropy_source); } if outbound_scid_alias != 0 && self.outbound_scid_aliases.lock().unwrap().insert(outbound_scid_alias) { break; @@ -1522,7 +1560,6 @@ where // We want to make sure the lock is actually acquired by PersistenceNotifierGuard. 
debug_assert!(&self.total_consistency_lock.try_write().is_err()); - let mut channel_state = self.channel_state.lock().unwrap(); let per_peer_state = self.per_peer_state.read().unwrap(); let peer_state_mutex_opt = per_peer_state.get(&their_network_key); @@ -1535,7 +1572,7 @@ where let outbound_scid_alias = self.create_and_insert_outbound_scid_alias(); let their_features = &peer_state.latest_features; let config = if override_config.is_some() { override_config.as_ref().unwrap() } else { &self.default_configuration }; - match Channel::new_outbound(&self.fee_estimator, &self.keys_manager, their_network_key, + match Channel::new_outbound(&self.fee_estimator, &self.entropy_source, &self.signer_provider, their_network_key, their_features, channel_value_satoshis, push_msat, user_channel_id, config, self.best_block.read().unwrap().height(), outbound_scid_alias) { @@ -1560,14 +1597,14 @@ where hash_map::Entry::Vacant(entry) => { entry.insert(channel); } } - channel_state.pending_msg_events.push(events::MessageSendEvent::SendOpenChannel { + peer_state.pending_msg_events.push(events::MessageSendEvent::SendOpenChannel { node_id: their_network_key, msg: res, }); Ok(temporary_channel_id) } - fn list_channels_with_filter::Signer>)) -> bool + Copy>(&self, f: Fn) -> Vec { + fn list_channels_with_filter::Signer>)) -> bool + Copy>(&self, f: Fn) -> Vec { let mut res = Vec::new(); // Allocate our best estimate of the number of channels we have in the `res` // Vec. Sadly the `short_to_chan_info` map doesn't cover channels without @@ -1655,7 +1692,7 @@ where } /// Helper function that issues the channel close events - fn issue_channel_close_events(&self, channel: &Channel<::Signer>, closure_reason: ClosureReason) { + fn issue_channel_close_events(&self, channel: &Channel<::Signer>, closure_reason: ClosureReason) { let mut pending_events_lock = self.pending_events.lock().unwrap(); match channel.unbroadcasted_funding() { Some(transaction) => { @@ -1675,8 +1712,6 @@ where let mut failed_htlcs: Vec<(HTLCSource, PaymentHash)>; let result: Result<(), _> = loop { - let mut channel_state_lock = self.channel_state.lock().unwrap(); - let channel_state = &mut *channel_state_lock; let per_peer_state = self.per_peer_state.read().unwrap(); let peer_state_mutex_opt = per_peer_state.get(counterparty_node_id); @@ -1688,12 +1723,12 @@ where let peer_state = &mut *peer_state_lock; match peer_state.channel_by_id.entry(channel_id.clone()) { hash_map::Entry::Occupied(mut chan_entry) => { - let (shutdown_msg, monitor_update, htlcs) = chan_entry.get_mut().get_shutdown(&self.keys_manager, &peer_state.latest_features, target_feerate_sats_per_1000_weight)?; + let (shutdown_msg, monitor_update, htlcs) = chan_entry.get_mut().get_shutdown(&self.signer_provider, &peer_state.latest_features, target_feerate_sats_per_1000_weight)?; failed_htlcs = htlcs; // Update the monitor with the shutdown script if necessary. 
if let Some(monitor_update) = monitor_update { - let update_res = self.chain_monitor.update_channel(chan_entry.get().get_funding_txo().unwrap(), monitor_update); + let update_res = self.chain_monitor.update_channel(chan_entry.get().get_funding_txo().unwrap(), &monitor_update); let (result, is_permanent) = handle_monitor_update_res!(self, update_res, chan_entry.get_mut(), RAACommitmentOrder::CommitmentFirst, chan_entry.key(), NO_UPDATE); if is_permanent { @@ -1702,7 +1737,7 @@ where } } - channel_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown { + peer_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown { node_id: *counterparty_node_id, msg: shutdown_msg }); @@ -1710,7 +1745,7 @@ where if chan_entry.get().is_shutdown() { let channel = remove_channel!(self, chan_entry); if let Ok(channel_update) = self.get_channel_update_for_broadcast(&channel) { - channel_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate { + peer_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate { msg: channel_update }); } @@ -1791,7 +1826,7 @@ where // force-closing. The monitor update on the required in-memory copy should broadcast // the latest local state, which is the best we can do anyway. Thus, it is safe to // ignore the result here. - let _ = self.chain_monitor.update_channel(funding_txo, monitor_update); + let _ = self.chain_monitor.update_channel(funding_txo, &monitor_update); } } @@ -1799,13 +1834,13 @@ where /// user closes, which will be re-exposed as the `ChannelClosed` reason. fn force_close_channel_with_peer(&self, channel_id: &[u8; 32], peer_node_id: &PublicKey, peer_msg: Option<&String>, broadcast: bool) -> Result { + let per_peer_state = self.per_peer_state.read().unwrap(); + let peer_state_mutex_opt = per_peer_state.get(peer_node_id); let mut chan = { - let per_peer_state = self.per_peer_state.read().unwrap(); - if let None = per_peer_state.get(peer_node_id) { + if let None = peer_state_mutex_opt { return Err(APIError::APIMisuseError{ err: format!("Can't find a peer matching the passed counterparty node_id {}", peer_node_id) }); } - let peer_state_mutex = per_peer_state.get(peer_node_id).unwrap(); - let mut peer_state_lock = peer_state_mutex.lock().unwrap(); + let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap(); let peer_state = &mut *peer_state_lock; if let hash_map::Entry::Occupied(chan) = peer_state.channel_by_id.entry(channel_id.clone()) { if let Some(peer_msg) = peer_msg { @@ -1821,8 +1856,8 @@ where log_error!(self.logger, "Force-closing channel {}", log_bytes!(channel_id[..])); self.finish_force_close_channel(chan.force_shutdown(broadcast)); if let Ok(update) = self.get_channel_update_for_broadcast(&chan) { - let mut channel_state = self.channel_state.lock().unwrap(); - channel_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate { + let mut peer_state = peer_state_mutex_opt.unwrap().lock().unwrap(); + peer_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate { msg: update }); } @@ -1834,14 +1869,18 @@ where let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier); match self.force_close_channel_with_peer(channel_id, counterparty_node_id, None, broadcast) { Ok(counterparty_node_id) => { - self.channel_state.lock().unwrap().pending_msg_events.push( - events::MessageSendEvent::HandleError { - node_id: counterparty_node_id, - action: msgs::ErrorAction::SendErrorMessage 
{ - msg: msgs::ErrorMessage { channel_id: *channel_id, data: "Channel force-closed".to_owned() } - }, - } - ); + let per_peer_state = self.per_peer_state.read().unwrap(); + if let Some(peer_state_mutex) = per_peer_state.get(&counterparty_node_id) { + let mut peer_state = peer_state_mutex.lock().unwrap(); + peer_state.pending_msg_events.push( + events::MessageSendEvent::HandleError { + node_id: counterparty_node_id, + action: msgs::ErrorAction::SendErrorMessage { + msg: msgs::ErrorMessage { channel_id: *channel_id, data: "Channel force-closed".to_owned() } + }, + } + ); + } Ok(()) }, Err(e) => Err(e) @@ -1898,6 +1937,7 @@ where // final_expiry_too_soon // We have to have some headroom to broadcast on chain if we have the preimage, so make sure // we have at least HTLC_FAIL_BACK_BUFFER blocks to go. + // // Also, ensure that, in the case of an unknown preimage for the received payment hash, our // payment logic has enough time to fail the HTLC backward before our onchain logic triggers a // channel closure (see HTLC_FAIL_BACK_BUFFER rationale). @@ -1997,7 +2037,9 @@ where return_malformed_err!("invalid ephemeral pubkey", 0x8000 | 0x4000 | 6); } - let shared_secret = SharedSecret::new(&msg.onion_routing_packet.public_key.unwrap(), &self.our_network_key).secret_bytes(); + let shared_secret = self.node_signer.ecdh( + Recipient::Node, &msg.onion_routing_packet.public_key.unwrap(), None + ).unwrap().secret_bytes(); if msg.onion_routing_packet.version != 0 { //TODO: Spec doesn't indicate if we should only hash hop_data here (and in other @@ -2082,7 +2124,7 @@ where // short_channel_id is non-0 in any ::Forward. if let &PendingHTLCRouting::Forward { ref short_channel_id, .. } = routing { if let Some((err, mut code, chan_update)) = loop { - let id_option = self.short_to_chan_info.read().unwrap().get(&short_channel_id).cloned(); + let id_option = self.short_to_chan_info.read().unwrap().get(short_channel_id).cloned(); let forwarding_chan_info_opt = match id_option { None => { // unknown_next_peer // Note that this is likely a timing oracle for detecting whether an scid is a @@ -2216,7 +2258,7 @@ where /// [`MessageSendEvent::BroadcastChannelUpdate`] event. /// /// May be called with peer_state already locked! - fn get_channel_update_for_broadcast(&self, chan: &Channel<::Signer>) -> Result { + fn get_channel_update_for_broadcast(&self, chan: &Channel<::Signer>) -> Result { if !chan.should_announce() { return Err(LightningError { err: "Cannot broadcast a channel_update for a private channel".to_owned(), @@ -2235,7 +2277,7 @@ where /// and thus MUST NOT be called unless the recipient of the resulting message has already /// provided evidence that they know about the existence of the channel. /// May be called with peer_state already locked! 
-	fn get_channel_update_for_unicast(&self, chan: &Channel<::Signer>) -> Result {
+	fn get_channel_update_for_unicast(&self, chan: &Channel<::Signer>) -> Result {
 		log_trace!(self.logger, "Attempting to generate channel update for channel {}", log_bytes!(chan.channel_id()));
 		let short_channel_id = match chan.get_short_channel_id().or(chan.latest_inbound_scid_alias()) {
 			None => return Err(LightningError{err: "Channel not yet established".to_owned(), action: msgs::ErrorAction::IgnoreError}),
@@ -2244,9 +2286,9 @@
 		self.get_channel_update_for_onion(short_channel_id, chan)
 	}

-	fn get_channel_update_for_onion(&self, short_channel_id: u64, chan: &Channel<::Signer>) -> Result {
+	fn get_channel_update_for_onion(&self, short_channel_id: u64, chan: &Channel<::Signer>) -> Result {
 		log_trace!(self.logger, "Generating channel update for channel {}", log_bytes!(chan.channel_id()));
-		let were_node_one = PublicKey::from_secret_key(&self.secp_ctx, &self.our_network_key).serialize()[..] < chan.get_counterparty_node_id().serialize()[..];
+		let were_node_one = self.our_network_pubkey.serialize()[..] < chan.get_counterparty_node_id().serialize()[..];

 		let unsigned = msgs::UnsignedChannelUpdate {
 			chain_hash: self.genesis_hash,
@@ -2260,9 +2302,11 @@
 			fee_proportional_millionths: chan.get_fee_proportional_millionths(),
 			excess_data: Vec::new(),
 		};
-
-		let msg_hash = Sha256dHash::hash(&unsigned.encode()[..]);
-		let sig = self.secp_ctx.sign_ecdsa(&hash_to_message!(&msg_hash[..]), &self.our_network_key);
+		// Panic on failure to signal LDK should be restarted to retry signing the `ChannelUpdate`.
+		// If we returned an error and the `node_signer` cannot provide a signature for whatever
+		// reason, we wouldn't be able to receive inbound payments through the corresponding
+		// channel.
+ let sig = self.node_signer.sign_gossip_message(msgs::UnsignedGossipMessage::ChannelUpdate(&unsigned)).unwrap(); Ok(msgs::ChannelUpdate { signature: sig, @@ -2273,7 +2317,7 @@ where // Only public for testing, this should otherwise never be called direcly pub(crate) fn send_payment_along_path(&self, path: &Vec, payment_params: &Option, payment_hash: &PaymentHash, payment_secret: &Option, total_value: u64, cur_height: u32, payment_id: PaymentId, keysend_preimage: &Option, session_priv_bytes: [u8; 32]) -> Result<(), APIError> { log_trace!(self.logger, "Attempting to send payment for path with next hop {}", path.first().unwrap().short_channel_id); - let prng_seed = self.keys_manager.get_secure_random_bytes(); + let prng_seed = self.entropy_source.get_secure_random_bytes(); let session_priv = SecretKey::from_slice(&session_priv_bytes[..]).expect("RNG is busted"); let onion_keys = onion_utils::construct_onion_keys(&self.secp_ctx, &path, &session_priv) @@ -2292,8 +2336,6 @@ where Some((cp_id, chan_id)) => (cp_id.clone(), chan_id.clone()), }; - let mut channel_lock = self.channel_state.lock().unwrap(); - let channel_state = &mut *channel_lock; let per_peer_state = self.per_peer_state.read().unwrap(); let peer_state_mutex_opt = per_peer_state.get(&counterparty_node_id); if let None = peer_state_mutex_opt { @@ -2318,7 +2360,7 @@ where chan) } { Some((update_add, commitment_signed, monitor_update)) => { - let update_err = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), monitor_update); + let update_err = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), &monitor_update); let chan_id = chan.get().channel_id(); match (update_err, handle_monitor_update_res!(self, update_err, chan, @@ -2339,7 +2381,7 @@ where } log_debug!(self.logger, "Sending payment along path resulted in a commitment_signed for channel {}", log_bytes!(chan_id)); - channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs { + peer_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs { node_id: path.first().unwrap().pubkey, updates: msgs::CommitmentUpdate { update_add_htlcs: vec![update_add], @@ -2423,7 +2465,19 @@ where pub fn send_payment(&self, route: &Route, payment_hash: PaymentHash, payment_secret: &Option, payment_id: PaymentId) -> Result<(), PaymentSendFailure> { let best_block_height = self.best_block.read().unwrap().height(); self.pending_outbound_payments - .send_payment_with_route(route, payment_hash, payment_secret, payment_id, &self.keys_manager, best_block_height, + .send_payment_with_route(route, payment_hash, payment_secret, payment_id, &self.entropy_source, &self.node_signer, best_block_height, + |path, payment_params, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv| + self.send_payment_along_path(path, payment_params, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv)) + } + + /// Similar to [`ChannelManager::send_payment`], but will automatically find a route based on + /// `route_params` and retry failed payment paths based on `retry_strategy`. 
+ pub fn send_payment_with_retry(&self, payment_hash: PaymentHash, payment_secret: &Option, payment_id: PaymentId, route_params: RouteParameters, retry_strategy: Retry) -> Result<(), PaymentSendFailure> { + let best_block_height = self.best_block.read().unwrap().height(); + self.pending_outbound_payments + .send_payment(payment_hash, payment_secret, payment_id, retry_strategy, route_params, + &self.router, self.list_usable_channels(), self.compute_inflight_htlcs(), + &self.entropy_source, &self.node_signer, best_block_height, |path, payment_params, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv| self.send_payment_along_path(path, payment_params, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv)) } @@ -2431,7 +2485,7 @@ where #[cfg(test)] fn test_send_payment_internal(&self, route: &Route, payment_hash: PaymentHash, payment_secret: &Option, keysend_preimage: Option, payment_id: PaymentId, recv_value_msat: Option, onion_session_privs: Vec<[u8; 32]>) -> Result<(), PaymentSendFailure> { let best_block_height = self.best_block.read().unwrap().height(); - self.pending_outbound_payments.test_send_payment_internal(route, payment_hash, payment_secret, keysend_preimage, payment_id, recv_value_msat, onion_session_privs, &self.keys_manager, best_block_height, + self.pending_outbound_payments.test_send_payment_internal(route, payment_hash, payment_secret, keysend_preimage, payment_id, recv_value_msat, onion_session_privs, &self.node_signer, best_block_height, |path, payment_params, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv| self.send_payment_along_path(path, payment_params, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv)) } @@ -2439,7 +2493,7 @@ where #[cfg(test)] pub(crate) fn test_add_new_pending_payment(&self, payment_hash: PaymentHash, payment_secret: Option, payment_id: PaymentId, route: &Route) -> Result, PaymentSendFailure> { let best_block_height = self.best_block.read().unwrap().height(); - self.pending_outbound_payments.test_add_new_pending_payment(payment_hash, payment_secret, payment_id, route, &self.keys_manager, best_block_height) + self.pending_outbound_payments.test_add_new_pending_payment(payment_hash, payment_secret, payment_id, route, Retry::Attempts(0), &self.entropy_source, best_block_height) } @@ -2455,7 +2509,7 @@ where /// [`abandon_payment`]: [`ChannelManager::abandon_payment`] pub fn retry_payment(&self, route: &Route, payment_id: PaymentId) -> Result<(), PaymentSendFailure> { let best_block_height = self.best_block.read().unwrap().height(); - self.pending_outbound_payments.retry_payment_with_route(route, payment_id, &self.keys_manager, best_block_height, + self.pending_outbound_payments.retry_payment_with_route(route, payment_id, &self.entropy_source, &self.node_signer, best_block_height, |path, payment_params, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv| self.send_payment_along_path(path, payment_params, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv)) } @@ -2504,7 +2558,7 @@ where /// [`send_payment`]: Self::send_payment pub fn send_spontaneous_payment(&self, route: &Route, payment_preimage: Option, payment_id: PaymentId) -> Result { let best_block_height = self.best_block.read().unwrap().height(); - self.pending_outbound_payments.send_spontaneous_payment(route, 
payment_preimage, payment_id, &self.keys_manager, best_block_height, + self.pending_outbound_payments.send_spontaneous_payment(route, payment_preimage, payment_id, &self.entropy_source, &self.node_signer, best_block_height, |path, payment_params, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv| self.send_payment_along_path(path, payment_params, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv)) } @@ -2514,7 +2568,7 @@ where /// us to easily discern them from real payments. pub fn send_probe(&self, hops: Vec) -> Result<(PaymentHash, PaymentId), PaymentSendFailure> { let best_block_height = self.best_block.read().unwrap().height(); - self.pending_outbound_payments.send_probe(hops, self.probing_cookie_secret, &self.keys_manager, best_block_height, + self.pending_outbound_payments.send_probe(hops, self.probing_cookie_secret, &self.entropy_source, &self.node_signer, best_block_height, |path, payment_params, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv| self.send_payment_along_path(path, payment_params, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv)) } @@ -2528,14 +2582,13 @@ where /// Handles the generation of a funding transaction, optionally (for tests) with a function /// which checks the correctness of the funding transaction given the associated channel. - fn funding_transaction_generated_intern::Signer>, &Transaction) -> Result>( + fn funding_transaction_generated_intern::Signer>, &Transaction) -> Result>( &self, temporary_channel_id: &[u8; 32], counterparty_node_id: &PublicKey, funding_transaction: Transaction, find_funding_output: FundingOutput ) -> Result<(), APIError> { - let mut channel_state = self.channel_state.lock().unwrap(); let per_peer_state = self.per_peer_state.read().unwrap(); let peer_state_mutex_opt = per_peer_state.get(counterparty_node_id); if let None = peer_state_mutex_opt { - return Err(APIError::APIMisuseError { err: format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id) }) + return Err(APIError::ChannelUnavailable { err: format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id) }) } let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap(); @@ -2565,11 +2618,10 @@ where } }; - channel_state.pending_msg_events.push(events::MessageSendEvent::SendFundingCreated { + peer_state.pending_msg_events.push(events::MessageSendEvent::SendFundingCreated { node_id: chan.get_counterparty_node_id(), msg, }); - mem::drop(channel_state); match peer_state.channel_by_id.entry(chan.channel_id()) { hash_map::Entry::Occupied(_) => { panic!("Generated duplicate funding txid?"); @@ -2704,8 +2756,6 @@ where let _persistence_guard = PersistenceNotifierGuard::notify_on_drop( &self.total_consistency_lock, &self.persistence_notifier, ); - let mut channel_state_lock = self.channel_state.lock().unwrap(); - let channel_state = &mut *channel_state_lock; let per_peer_state = self.per_peer_state.read().unwrap(); let peer_state_mutex_opt = per_peer_state.get(counterparty_node_id); if let None = peer_state_mutex_opt { @@ -2726,9 +2776,9 @@ where continue; } if let Ok(msg) = self.get_channel_update_for_broadcast(channel) { - channel_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate { msg }); + peer_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate { msg }); 
} else if let Ok(msg) = self.get_channel_update_for_unicast(channel) { - channel_state.pending_msg_events.push(events::MessageSendEvent::SendChannelUpdate { + peer_state.pending_msg_events.push(events::MessageSendEvent::SendChannelUpdate { node_id: channel.get_counterparty_node_id(), msg, }); @@ -2909,9 +2959,9 @@ where } } if let PendingHTLCRouting::Forward { onion_packet, .. } = routing { - let phantom_secret_res = self.keys_manager.get_node_secret(Recipient::PhantomNode); - if phantom_secret_res.is_ok() && fake_scid::is_valid_phantom(&self.fake_scid_rand_bytes, short_chan_id, &self.genesis_hash) { - let phantom_shared_secret = SharedSecret::new(&onion_packet.public_key.unwrap(), &phantom_secret_res.unwrap()).secret_bytes(); + let phantom_pubkey_res = self.node_signer.get_node_id(Recipient::PhantomNode); + if phantom_pubkey_res.is_ok() && fake_scid::is_valid_phantom(&self.fake_scid_rand_bytes, short_chan_id, &self.genesis_hash) { + let phantom_shared_secret = self.node_signer.ecdh(Recipient::PhantomNode, &onion_packet.public_key.unwrap(), None).unwrap().secret_bytes(); let next_hop = match onion_utils::decode_next_payment_hop(phantom_shared_secret, &onion_packet.hop_data, onion_packet.hmac, payment_hash) { Ok(res) => res, Err(onion_utils::OnionDecodeErr::Malformed { err_msg, err_code }) => { @@ -3087,7 +3137,7 @@ where let phantom_shared_secret = claimable_htlc.prev_hop.phantom_shared_secret; let mut receiver_node_id = self.our_network_pubkey; if phantom_shared_secret.is_some() { - receiver_node_id = self.keys_manager.get_node_id(Recipient::PhantomNode) + receiver_node_id = self.node_signer.get_node_id(Recipient::PhantomNode) .expect("Failed to get node_id for phantom node recipient"); } @@ -3167,13 +3217,23 @@ where match claimable_htlc.onion_payload { OnionPayload::Invoice { .. 
} => { let payment_data = payment_data.unwrap(); - let payment_preimage = match inbound_payment::verify(payment_hash, &payment_data, self.highest_seen_timestamp.load(Ordering::Acquire) as u64, &self.inbound_payment_key, &self.logger) { - Ok(payment_preimage) => payment_preimage, + let (payment_preimage, min_final_cltv_expiry_delta) = match inbound_payment::verify(payment_hash, &payment_data, self.highest_seen_timestamp.load(Ordering::Acquire) as u64, &self.inbound_payment_key, &self.logger) { + Ok(result) => result, Err(()) => { + log_trace!(self.logger, "Failing new HTLC with payment_hash {} as payment verification failed", log_bytes!(payment_hash.0)); fail_htlc!(claimable_htlc, payment_hash); continue } }; + if let Some(min_final_cltv_expiry_delta) = min_final_cltv_expiry_delta { + let expected_min_expiry_height = (self.current_best_block().height() + min_final_cltv_expiry_delta as u32) as u64; + if (cltv_expiry as u64) < expected_min_expiry_height { + log_trace!(self.logger, "Failing new HTLC with payment_hash {} as its CLTV expiry was too soon (had {}, earliest expected {})", + log_bytes!(payment_hash.0), cltv_expiry, expected_min_expiry_height); + fail_htlc!(claimable_htlc, payment_hash); + continue; + } + } check_total_value!(payment_data, payment_preimage); }, OnionPayload::Spontaneous(preimage) => { @@ -3236,6 +3296,12 @@ where } } + let best_block_height = self.best_block.read().unwrap().height(); + self.pending_outbound_payments.check_retry_payments(&self.router, || self.list_usable_channels(), + || self.compute_inflight_htlcs(), &self.entropy_source, &self.node_signer, best_block_height, &self.logger, + |path, payment_params, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv| + self.send_payment_along_path(path, payment_params, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv)); + for (htlc_source, payment_hash, failure_reason, destination) in failed_forwards.drain(..) { self.fail_htlc_backwards_internal(&htlc_source, &payment_hash, &failure_reason, destination); } @@ -3270,7 +3336,7 @@ where BackgroundEvent::ClosingMonitorUpdate((funding_txo, update)) => { // The channel has already been closed, so no use bothering to care about the // monitor updating completing. 
- let _ = self.chain_monitor.update_channel(funding_txo, update); + let _ = self.chain_monitor.update_channel(funding_txo, &update); }, } } @@ -3283,7 +3349,7 @@ where self.process_background_events(); } - fn update_channel_fee(&self, chan_id: &[u8; 32], chan: &mut Channel<::Signer>, new_feerate: u32) -> NotifyOption { + fn update_channel_fee(&self, chan_id: &[u8; 32], chan: &mut Channel<::Signer>, new_feerate: u32) -> NotifyOption { if !chan.is_outbound() { return NotifyOption::SkipPersist; } // If the feerate has decreased by less than half, don't bother if new_feerate <= chan.get_feerate() && new_feerate * 2 > chan.get_feerate() { @@ -3350,13 +3416,11 @@ where let mut handle_errors: Vec<(Result<(), _>, _)> = Vec::new(); let mut timed_out_mpp_htlcs = Vec::new(); { - let mut channel_state_lock = self.channel_state.lock().unwrap(); - let channel_state = &mut *channel_state_lock; - let pending_msg_events = &mut channel_state.pending_msg_events; let per_peer_state = self.per_peer_state.read().unwrap(); for (counterparty_node_id, peer_state_mutex) in per_peer_state.iter() { let mut peer_state_lock = peer_state_mutex.lock().unwrap(); let peer_state = &mut *peer_state_lock; + let pending_msg_events = &mut peer_state.pending_msg_events; peer_state.channel_by_id.retain(|chan_id, chan| { let chan_needs_persist = self.update_channel_fee(chan_id, chan, new_feerate); if chan_needs_persist == NotifyOption::DoPersist { should_persist = NotifyOption::DoPersist; } @@ -3460,27 +3524,46 @@ where /// [`events::Event::PaymentClaimed`] events even for payments you intend to fail, especially on /// startup during which time claims that were in-progress at shutdown may be replayed. pub fn fail_htlc_backwards(&self, payment_hash: &PaymentHash) { + self.fail_htlc_backwards_with_reason(payment_hash, &FailureCode::IncorrectOrUnknownPaymentDetails); + } + + /// This is a variant of [`ChannelManager::fail_htlc_backwards`] that allows you to specify the + /// reason for the failure. + /// + /// See [`FailureCode`] for valid failure codes. + pub fn fail_htlc_backwards_with_reason(&self, payment_hash: &PaymentHash, failure_code: &FailureCode) { let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier); let removed_source = self.claimable_payments.lock().unwrap().claimable_htlcs.remove(payment_hash); if let Some((_, mut sources)) = removed_source { for htlc in sources.drain(..) { - let mut htlc_msat_height_data = htlc.value.to_be_bytes().to_vec(); - htlc_msat_height_data.extend_from_slice(&self.best_block.read().unwrap().height().to_be_bytes()); + let reason = self.get_htlc_fail_reason_from_failure_code(failure_code, &htlc); let source = HTLCSource::PreviousHopData(htlc.prev_hop); - let reason = HTLCFailReason::reason(0x4000 | 15, htlc_msat_height_data); let receiver = HTLCDestination::FailedPayment { payment_hash: *payment_hash }; self.fail_htlc_backwards_internal(&source, &payment_hash, &reason, receiver); } } } + /// Gets error data to form an [`HTLCFailReason`] given a [`FailureCode`] and [`ClaimableHTLC`]. 
+	fn get_htlc_fail_reason_from_failure_code(&self, failure_code: &FailureCode, htlc: &ClaimableHTLC) -> HTLCFailReason {
+		match failure_code {
+			FailureCode::TemporaryNodeFailure => HTLCFailReason::from_failure_code(*failure_code as u16),
+			FailureCode::RequiredNodeFeatureMissing => HTLCFailReason::from_failure_code(*failure_code as u16),
+			FailureCode::IncorrectOrUnknownPaymentDetails => {
+				let mut htlc_msat_height_data = htlc.value.to_be_bytes().to_vec();
+				htlc_msat_height_data.extend_from_slice(&self.best_block.read().unwrap().height().to_be_bytes());
+				HTLCFailReason::reason(*failure_code as u16, htlc_msat_height_data)
+			}
+		}
+	}
+
 	/// Gets an HTLC onion failure code and error data for an `UPDATE` error, given the error code
 	/// that we want to return and a channel.
 	///
 	/// This is for failures on the channel on which the HTLC was *received*, not failures
 	/// forwarding
-	fn get_htlc_inbound_temp_fail_err_and_data(&self, desired_err_code: u16, chan: &Channel<::Signer>) -> (u16, Vec) {
+	fn get_htlc_inbound_temp_fail_err_and_data(&self, desired_err_code: u16, chan: &Channel<::Signer>) -> (u16, Vec) {
 		// We can't be sure what SCID was used when relaying inbound towards us, so we have to
 		// guess somewhat. If its a public channel, we figure best to just use the real SCID (as
 		// we're not leaking that we have a channel with the counterparty), otherwise we try to use
@@ -3500,7 +3583,7 @@

 	/// Gets an HTLC onion failure code and error data for an `UPDATE` error, given the error code
 	/// that we want to return and a channel.
-	fn get_htlc_temp_fail_err_and_data(&self, desired_err_code: u16, scid: u64, chan: &Channel<::Signer>) -> (u16, Vec) {
+	fn get_htlc_temp_fail_err_and_data(&self, desired_err_code: u16, scid: u64, chan: &Channel<::Signer>) -> (u16, Vec) {
 		debug_assert_eq!(desired_err_code & 0x1000, 0x1000);
 		if let Ok(upd) = self.get_channel_update_for_onion(scid, chan) {
 			let mut enc = VecWriter(Vec::with_capacity(upd.serialized_length() + 6));
@@ -3553,15 +3636,17 @@
 	/// Fails an HTLC backwards to the sender of it to us.
 	/// Note that we do not assume that channels corresponding to failed HTLCs are still available.
 	fn fail_htlc_backwards_internal(&self, source: &HTLCSource, payment_hash: &PaymentHash, onion_error: &HTLCFailReason, destination: HTLCDestination) {
-		#[cfg(debug_assertions)]
+		#[cfg(any(feature = "_test_utils", test))]
 		{
-			// Ensure that the `channel_state` and no peer state channel storage lock is not held
-			// when calling this function.
+			// Ensure that no peer state channel storage lock is held when calling this
+			// function.
 			// This ensures that future code doesn't introduce a lock_order requirement for
-			// `forward_htlcs` to be locked after the `channel_state` and `per_peer_state` locks,
-			// which calling this function with the locks aquired would.
-			assert!(self.channel_state.try_lock().is_ok());
-			assert!(self.per_peer_state.try_write().is_ok());
+			// `forward_htlcs` to be locked after the `per_peer_state` peer locks, which calling
+			// this function with any `per_peer_state` peer lock acquired would.
+ let per_peer_state = self.per_peer_state.read().unwrap(); + for (_, peer) in per_peer_state.iter() { + debug_assert!(peer.try_lock().is_ok()); + } } //TODO: There is a timing attack here where if a node fails an HTLC back to us they can @@ -3636,7 +3721,7 @@ where let mut receiver_node_id = self.our_network_pubkey; for htlc in sources.iter() { if htlc.prev_hop.phantom_shared_secret.is_some() { - let phantom_pubkey = self.keys_manager.get_node_id(Recipient::PhantomNode) + let phantom_pubkey = self.node_signer.get_node_id(Recipient::PhantomNode) .expect("Failed to get node_id for phantom node recipient"); receiver_node_id = phantom_pubkey; break; @@ -3676,7 +3761,6 @@ where let mut expected_amt_msat = None; let mut valid_mpp = true; let mut errs = Vec::new(); - let mut channel_state = Some(self.channel_state.lock().unwrap()); let mut per_peer_state = Some(self.per_peer_state.read().unwrap()); for htlc in sources.iter() { let (counterparty_node_id, chan_id) = match self.short_to_chan_info.read().unwrap().get(&htlc.prev_hop.short_channel_id) { @@ -3723,14 +3807,12 @@ where claimable_amt_msat += htlc.value; } if sources.is_empty() || expected_amt_msat.is_none() { - mem::drop(channel_state); mem::drop(per_peer_state); self.claimable_payments.lock().unwrap().pending_claiming_payments.remove(&payment_hash); log_info!(self.logger, "Attempted to claim an incomplete payment which no longer had any available HTLCs!"); return; } if claimable_amt_msat != expected_amt_msat.unwrap() { - mem::drop(channel_state); mem::drop(per_peer_state); self.claimable_payments.lock().unwrap().pending_claiming_payments.remove(&payment_hash); log_info!(self.logger, "Attempted to claim an incomplete payment, expected {} msat, had {} available to claim.", @@ -3739,9 +3821,8 @@ where } if valid_mpp { for htlc in sources.drain(..) { - if channel_state.is_none() { channel_state = Some(self.channel_state.lock().unwrap()); } if per_peer_state.is_none() { per_peer_state = Some(self.per_peer_state.read().unwrap()); } - if let Err((pk, err)) = self.claim_funds_from_hop(channel_state.take().unwrap(), per_peer_state.take().unwrap(), + if let Err((pk, err)) = self.claim_funds_from_hop(per_peer_state.take().unwrap(), htlc.prev_hop, payment_preimage, |_| Some(MonitorUpdateCompletionAction::PaymentClaimed { payment_hash })) { @@ -3753,7 +3834,6 @@ where } } } - mem::drop(channel_state); mem::drop(per_peer_state); if !valid_mpp { for htlc in sources.drain(..) { @@ -3775,14 +3855,12 @@ where } fn claim_funds_from_hop) -> Option>(&self, - mut channel_state_lock: MutexGuard, - per_peer_state_lock: RwLockReadGuard::Signer>>>>, + per_peer_state_lock: RwLockReadGuard::Signer>>>>, prev_hop: HTLCPreviousHopData, payment_preimage: PaymentPreimage, completion_action: ComplFunc) -> Result<(), (PublicKey, MsgHandleErrInternal)> { //TODO: Delay the claimed_funds relaying just like we do outbound relay! 
let chan_id = prev_hop.outpoint.to_channel_id(); - let channel_state = &mut *channel_state_lock; let counterparty_node_id_opt = match self.short_to_chan_info.read().unwrap().get(&prev_hop.short_channel_id) { Some((cp_id, _dup_chan_id)) => Some(cp_id.clone()), @@ -3797,19 +3875,19 @@ where } else { (false, None) }; if found_channel { - if let hash_map::Entry::Occupied(mut chan) = peer_state_opt.as_mut().unwrap().channel_by_id.entry(chan_id) { + let peer_state = &mut *peer_state_opt.as_mut().unwrap(); + if let hash_map::Entry::Occupied(mut chan) = peer_state.channel_by_id.entry(chan_id) { let counterparty_node_id = chan.get().get_counterparty_node_id(); match chan.get_mut().get_update_fulfill_htlc_and_commit(prev_hop.htlc_id, payment_preimage, &self.logger) { Ok(msgs_monitor_option) => { if let UpdateFulfillCommitFetch::NewClaim { msgs, htlc_value_msat, monitor_update } = msgs_monitor_option { - match self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), monitor_update) { + match self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), &monitor_update) { ChannelMonitorUpdateStatus::Completed => {}, e => { log_given_level!(self.logger, if e == ChannelMonitorUpdateStatus::PermanentFailure { Level::Error } else { Level::Debug }, "Failed to update channel monitor with preimage {:?}: {:?}", payment_preimage, e); let err = handle_monitor_update_res!(self, e, chan, RAACommitmentOrder::CommitmentFirst, false, msgs.is_some()).unwrap_err(); - mem::drop(channel_state_lock); mem::drop(peer_state_opt); mem::drop(per_peer_state_lock); self.handle_monitor_update_completion_actions(completion_action(Some(htlc_value_msat))); @@ -3819,8 +3897,8 @@ where if let Some((msg, commitment_signed)) = msgs { log_debug!(self.logger, "Claiming funds for HTLC with preimage {} resulted in a commitment_signed for channel {}", log_bytes!(payment_preimage.0), log_bytes!(chan.get().channel_id())); - channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs { - node_id: chan.get().get_counterparty_node_id(), + peer_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs { + node_id: counterparty_node_id, updates: msgs::CommitmentUpdate { update_add_htlcs: Vec::new(), update_fulfill_htlcs: vec![msg], @@ -3831,7 +3909,6 @@ where } }); } - mem::drop(channel_state_lock); mem::drop(peer_state_opt); mem::drop(per_peer_state_lock); self.handle_monitor_update_completion_actions(completion_action(Some(htlc_value_msat))); @@ -3841,7 +3918,7 @@ where } }, Err((e, monitor_update)) => { - match self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), monitor_update) { + match self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), &monitor_update) { ChannelMonitorUpdateStatus::Completed => {}, e => { // TODO: This needs to be handled somehow - if we receive a monitor update @@ -3857,7 +3934,6 @@ where if drop { chan.remove_entry(); } - mem::drop(channel_state_lock); mem::drop(peer_state_opt); mem::drop(per_peer_state_lock); self.handle_monitor_update_completion_actions(completion_action(None)); @@ -3878,7 +3954,7 @@ where }; // We update the ChannelMonitor on the backward link, after // receiving an `update_fulfill_htlc` from the forward link. 
- let update_res = self.chain_monitor.update_channel(prev_hop.outpoint, preimage_update); + let update_res = self.chain_monitor.update_channel(prev_hop.outpoint, &preimage_update); if update_res != ChannelMonitorUpdateStatus::Completed { // TODO: This needs to be handled somehow - if we receive a monitor update // with a preimage we *must* somehow manage to propagate it to the upstream @@ -3887,7 +3963,6 @@ where log_error!(self.logger, "Critical error: failed to update channel monitor with preimage {:?}: {:?}", payment_preimage, update_res); } - mem::drop(channel_state_lock); mem::drop(peer_state_opt); mem::drop(per_peer_state_lock); // Note that we do process the completion action here. This totally could be a @@ -3904,15 +3979,14 @@ where self.pending_outbound_payments.finalize_claims(sources, &self.pending_events); } - fn claim_funds_internal(&self, channel_state_lock: MutexGuard, source: HTLCSource, payment_preimage: PaymentPreimage, forwarded_htlc_value_msat: Option, from_onchain: bool, next_channel_id: [u8; 32]) { + fn claim_funds_internal(&self, source: HTLCSource, payment_preimage: PaymentPreimage, forwarded_htlc_value_msat: Option, from_onchain: bool, next_channel_id: [u8; 32]) { match source { HTLCSource::OutboundRoute { session_priv, payment_id, path, .. } => { - mem::drop(channel_state_lock); self.pending_outbound_payments.claim_htlc(payment_id, payment_preimage, session_priv, path, from_onchain, &self.pending_events, &self.logger); }, HTLCSource::PreviousHopData(hop_data) => { let prev_outpoint = hop_data.outpoint; - let res = self.claim_funds_from_hop(channel_state_lock, self.per_peer_state.read().unwrap(), hop_data, payment_preimage, + let res = self.claim_funds_from_hop(self.per_peer_state.read().unwrap(), hop_data, payment_preimage, |htlc_claim_value_msat| { if let Some(forwarded_htlc_value) = forwarded_htlc_value_msat { let fee_earned_msat = if let Some(claimed_htlc_value) = htlc_claim_value_msat { @@ -3964,7 +4038,7 @@ where /// Handles a channel reentering a functional state, either due to reconnect or a monitor /// update completion. 
fn handle_channel_resumption(&self, pending_msg_events: &mut Vec, - channel: &mut Channel<::Signer>, raa: Option, + channel: &mut Channel<::Signer>, raa: Option, commitment_update: Option, order: RAACommitmentOrder, pending_forwards: Vec<(PendingHTLCInfo, u64)>, funding_broadcastable: Option, channel_ready: Option, announcement_sigs: Option) @@ -4029,8 +4103,6 @@ where let htlc_forwards; let (mut pending_failures, finalized_claims, counterparty_node_id) = { - let mut channel_lock = self.channel_state.lock().unwrap(); - let channel_state = &mut *channel_lock; let counterparty_node_id = match counterparty_node_id { Some(cp_id) => cp_id.clone(), None => { @@ -4045,11 +4117,11 @@ where }; let per_peer_state = self.per_peer_state.read().unwrap(); let mut peer_state_lock; + let peer_state_mutex_opt = per_peer_state.get(&counterparty_node_id); + if let None = peer_state_mutex_opt { return } + peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap(); + let peer_state = &mut *peer_state_lock; let mut channel = { - if let None = per_peer_state.get(&counterparty_node_id) { return } - let peer_state_mutex = per_peer_state.get(&counterparty_node_id).unwrap(); - peer_state_lock = peer_state_mutex.lock().unwrap(); - let peer_state = &mut *peer_state_lock; match peer_state.channel_by_id.entry(funding_txo.to_channel_id()){ hash_map::Entry::Occupied(chan) => chan, hash_map::Entry::Vacant(_) => return, @@ -4059,7 +4131,7 @@ where return; } - let updates = channel.get_mut().monitor_updating_restored(&self.logger, self.get_our_node_id(), self.genesis_hash, self.best_block.read().unwrap().height()); + let updates = channel.get_mut().monitor_updating_restored(&self.logger, &self.node_signer, self.genesis_hash, &self.default_configuration, self.best_block.read().unwrap().height()); let channel_update = if updates.channel_ready.is_some() && channel.get().is_usable() { // We only send a channel_update in the case where we are just now sending a // channel_ready and the channel is in a usable state. 
We may re-send a @@ -4073,9 +4145,9 @@ where }) } else { None } } else { None }; - htlc_forwards = self.handle_channel_resumption(&mut channel_state.pending_msg_events, channel.get_mut(), updates.raa, updates.commitment_update, updates.order, updates.accepted_htlcs, updates.funding_broadcastable, updates.channel_ready, updates.announcement_sigs); + htlc_forwards = self.handle_channel_resumption(&mut peer_state.pending_msg_events, channel.get_mut(), updates.raa, updates.commitment_update, updates.order, updates.accepted_htlcs, updates.funding_broadcastable, updates.channel_ready, updates.announcement_sigs); if let Some(upd) = channel_update { - channel_state.pending_msg_events.push(upd); + peer_state.pending_msg_events.push(upd); } (updates.failed_htlcs, updates.finalized_claimed_htlcs, counterparty_node_id) @@ -4135,8 +4207,6 @@ where fn do_accept_inbound_channel(&self, temporary_channel_id: &[u8; 32], counterparty_node_id: &PublicKey, accept_0conf: bool, user_channel_id: u128) -> Result<(), APIError> { let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier); - let mut channel_state_lock = self.channel_state.lock().unwrap(); - let channel_state = &mut *channel_state_lock; let per_peer_state = self.per_peer_state.read().unwrap(); let peer_state_mutex_opt = per_peer_state.get(counterparty_node_id); if let None = peer_state_mutex_opt { @@ -4158,12 +4228,12 @@ where msg: msgs::ErrorMessage { channel_id: temporary_channel_id.clone(), data: "No zero confirmation channels accepted".to_owned(), } } }; - channel_state.pending_msg_events.push(send_msg_err_event); + peer_state.pending_msg_events.push(send_msg_err_event); let _ = remove_channel!(self, channel); return Err(APIError::APIMisuseError { err: "Please use accept_inbound_channel_from_trusted_peer_0conf to accept channels with zero confirmations.".to_owned() }); } - channel_state.pending_msg_events.push(events::MessageSendEvent::SendAcceptChannel { + peer_state.pending_msg_events.push(events::MessageSendEvent::SendAcceptChannel { node_id: channel.get().get_counterparty_node_id(), msg: channel.get_mut().accept_inbound_channel(user_channel_id), }); @@ -4175,7 +4245,7 @@ where Ok(()) } - fn internal_open_channel(&self, counterparty_node_id: &PublicKey, their_features: InitFeatures, msg: &msgs::OpenChannel) -> Result<(), MsgHandleErrInternal> { + fn internal_open_channel(&self, counterparty_node_id: &PublicKey, msg: &msgs::OpenChannel) -> Result<(), MsgHandleErrInternal> { if msg.chain_hash != self.genesis_hash { return Err(MsgHandleErrInternal::send_err_msg_no_close("Unknown genesis block hash".to_owned(), msg.temporary_channel_id.clone())); } @@ -4185,12 +4255,19 @@ where } let mut random_bytes = [0u8; 16]; - random_bytes.copy_from_slice(&self.keys_manager.get_secure_random_bytes()[..16]); + random_bytes.copy_from_slice(&self.entropy_source.get_secure_random_bytes()[..16]); let user_channel_id = u128::from_be_bytes(random_bytes); let outbound_scid_alias = self.create_and_insert_outbound_scid_alias(); - let mut channel = match Channel::new_from_req(&self.fee_estimator, &self.keys_manager, - counterparty_node_id.clone(), &their_features, msg, user_channel_id, &self.default_configuration, + let per_peer_state = self.per_peer_state.read().unwrap(); + let peer_state_mutex_opt = per_peer_state.get(counterparty_node_id); + if let None = peer_state_mutex_opt { + return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id 
{}", counterparty_node_id), msg.temporary_channel_id.clone())) + } + let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap(); + let peer_state = &mut *peer_state_lock; + let mut channel = match Channel::new_from_req(&self.fee_estimator, &self.entropy_source, &self.signer_provider, + counterparty_node_id.clone(), &self.channel_type_features(), &peer_state.latest_features, msg, user_channel_id, &self.default_configuration, self.best_block.read().unwrap().height(), &self.logger, outbound_scid_alias) { Err(e) => { @@ -4199,15 +4276,6 @@ where }, Ok(res) => res }; - let mut channel_state_lock = self.channel_state.lock().unwrap(); - let channel_state = &mut *channel_state_lock; - let per_peer_state = self.per_peer_state.read().unwrap(); - let peer_state_mutex_opt = per_peer_state.get(counterparty_node_id); - if let None = peer_state_mutex_opt { - return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), msg.temporary_channel_id.clone())) - } - let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap(); - let peer_state = &mut *peer_state_lock; match peer_state.channel_by_id.entry(channel.channel_id()) { hash_map::Entry::Occupied(_) => { self.outbound_scid_aliases.lock().unwrap().remove(&outbound_scid_alias); @@ -4218,7 +4286,7 @@ where if channel.get_channel_type().requires_zero_conf() { return Err(MsgHandleErrInternal::send_err_msg_no_close("No zero confirmation channels accepted".to_owned(), msg.temporary_channel_id.clone())); } - channel_state.pending_msg_events.push(events::MessageSendEvent::SendAcceptChannel { + peer_state.pending_msg_events.push(events::MessageSendEvent::SendAcceptChannel { node_id: counterparty_node_id.clone(), msg: channel.accept_inbound_channel(user_channel_id), }); @@ -4241,7 +4309,7 @@ where Ok(()) } - fn internal_accept_channel(&self, counterparty_node_id: &PublicKey, their_features: InitFeatures, msg: &msgs::AcceptChannel) -> Result<(), MsgHandleErrInternal> { + fn internal_accept_channel(&self, counterparty_node_id: &PublicKey, msg: &msgs::AcceptChannel) -> Result<(), MsgHandleErrInternal> { let (value, output_script, user_id) = { let per_peer_state = self.per_peer_state.read().unwrap(); let peer_state_mutex_opt = per_peer_state.get(counterparty_node_id); @@ -4252,7 +4320,7 @@ where let peer_state = &mut *peer_state_lock; match peer_state.channel_by_id.entry(msg.temporary_channel_id) { hash_map::Entry::Occupied(mut chan) => { - try_chan_entry!(self, chan.get_mut().accept_channel(&msg, &self.default_configuration.channel_handshake_limits, &their_features), chan); + try_chan_entry!(self, chan.get_mut().accept_channel(&msg, &self.default_configuration.channel_handshake_limits, &peer_state.latest_features), chan); (chan.get().get_value_satoshis(), chan.get().get_funding_redeemscript().to_v0_p2wsh(), chan.get().get_user_id()) }, hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! 
No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.temporary_channel_id)) @@ -4270,8 +4338,6 @@ where } fn internal_funding_created(&self, counterparty_node_id: &PublicKey, msg: &msgs::FundingCreated) -> Result<(), MsgHandleErrInternal> { - let mut channel_state_lock = self.channel_state.lock().unwrap(); - let channel_state = &mut *channel_state_lock; let per_peer_state = self.per_peer_state.read().unwrap(); let peer_state_mutex_opt = per_peer_state.get(counterparty_node_id); if let None = peer_state_mutex_opt { @@ -4283,7 +4349,7 @@ where let peer_state = &mut *peer_state_lock; match peer_state.channel_by_id.entry(msg.temporary_channel_id) { hash_map::Entry::Occupied(mut chan) => { - (try_chan_entry!(self, chan.get_mut().funding_created(msg, best_block, &self.keys_manager, &self.logger), chan), chan.remove()) + (try_chan_entry!(self, chan.get_mut().funding_created(msg, best_block, &self.signer_provider, &self.logger), chan), chan.remove()) }, hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.temporary_channel_id)) } @@ -4334,12 +4400,12 @@ where i_e.insert(chan.get_counterparty_node_id()); } } - channel_state.pending_msg_events.push(events::MessageSendEvent::SendFundingSigned { + peer_state.pending_msg_events.push(events::MessageSendEvent::SendFundingSigned { node_id: counterparty_node_id.clone(), msg: funding_msg, }); if let Some(msg) = channel_ready { - send_channel_ready!(self, channel_state.pending_msg_events, chan, msg); + send_channel_ready!(self, peer_state.pending_msg_events, chan, msg); } e.insert(chan); } @@ -4350,8 +4416,6 @@ where fn internal_funding_signed(&self, counterparty_node_id: &PublicKey, msg: &msgs::FundingSigned) -> Result<(), MsgHandleErrInternal> { let funding_tx = { let best_block = *self.best_block.read().unwrap(); - let mut channel_lock = self.channel_state.lock().unwrap(); - let channel_state = &mut *channel_lock; let per_peer_state = self.per_peer_state.read().unwrap(); let peer_state_mutex_opt = per_peer_state.get(counterparty_node_id); if let None = peer_state_mutex_opt { @@ -4362,7 +4426,7 @@ where let peer_state = &mut *peer_state_lock; match peer_state.channel_by_id.entry(msg.channel_id) { hash_map::Entry::Occupied(mut chan) => { - let (monitor, funding_tx, channel_ready) = match chan.get_mut().funding_signed(&msg, best_block, &self.keys_manager, &self.logger) { + let (monitor, funding_tx, channel_ready) = match chan.get_mut().funding_signed(&msg, best_block, &self.signer_provider, &self.logger) { Ok(update) => update, Err(e) => try_chan_entry!(self, Err(e), chan), }; @@ -4382,7 +4446,7 @@ where }, } if let Some(msg) = channel_ready { - send_channel_ready!(self, channel_state.pending_msg_events, chan.get(), msg); + send_channel_ready!(self, peer_state.pending_msg_events, chan.get(), msg); } funding_tx }, @@ -4395,8 +4459,6 @@ where } fn internal_channel_ready(&self, counterparty_node_id: &PublicKey, msg: &msgs::ChannelReady) -> Result<(), MsgHandleErrInternal> { - let mut channel_state_lock = self.channel_state.lock().unwrap(); - let channel_state = &mut *channel_state_lock; let per_peer_state = self.per_peer_state.read().unwrap(); let peer_state_mutex_opt = per_peer_state.get(counterparty_node_id); if let None = peer_state_mutex_opt { @@ -4406,11 +4468,11 @@ where let peer_state = &mut *peer_state_lock; match 
peer_state.channel_by_id.entry(msg.channel_id) { hash_map::Entry::Occupied(mut chan) => { - let announcement_sigs_opt = try_chan_entry!(self, chan.get_mut().channel_ready(&msg, self.get_our_node_id(), - self.genesis_hash.clone(), &self.best_block.read().unwrap(), &self.logger), chan); + let announcement_sigs_opt = try_chan_entry!(self, chan.get_mut().channel_ready(&msg, &self.node_signer, + self.genesis_hash.clone(), &self.default_configuration, &self.best_block.read().unwrap(), &self.logger), chan); if let Some(announcement_sigs) = announcement_sigs_opt { log_trace!(self.logger, "Sending announcement_signatures for channel {}", log_bytes!(chan.get().channel_id())); - channel_state.pending_msg_events.push(events::MessageSendEvent::SendAnnouncementSignatures { + peer_state.pending_msg_events.push(events::MessageSendEvent::SendAnnouncementSignatures { node_id: counterparty_node_id.clone(), msg: announcement_sigs, }); @@ -4422,7 +4484,7 @@ where // announcement_signatures. log_trace!(self.logger, "Sending private initial channel_update for our counterparty on channel {}", log_bytes!(chan.get().channel_id())); if let Ok(msg) = self.get_channel_update_for_unicast(chan.get()) { - channel_state.pending_msg_events.push(events::MessageSendEvent::SendChannelUpdate { + peer_state.pending_msg_events.push(events::MessageSendEvent::SendChannelUpdate { node_id: counterparty_node_id.clone(), msg, }); @@ -4437,11 +4499,9 @@ where } } - fn internal_shutdown(&self, counterparty_node_id: &PublicKey, their_features: &InitFeatures, msg: &msgs::Shutdown) -> Result<(), MsgHandleErrInternal> { + fn internal_shutdown(&self, counterparty_node_id: &PublicKey, msg: &msgs::Shutdown) -> Result<(), MsgHandleErrInternal> { let mut dropped_htlcs: Vec<(HTLCSource, PaymentHash)>; let result: Result<(), _> = loop { - let mut channel_state_lock = self.channel_state.lock().unwrap(); - let channel_state = &mut *channel_state_lock; let per_peer_state = self.per_peer_state.read().unwrap(); let peer_state_mutex_opt = per_peer_state.get(counterparty_node_id); if let None = peer_state_mutex_opt { @@ -4458,12 +4518,12 @@ where if chan_entry.get().sent_shutdown() { " after we initiated shutdown" } else { "" }); } - let (shutdown, monitor_update, htlcs) = try_chan_entry!(self, chan_entry.get_mut().shutdown(&self.keys_manager, &their_features, &msg), chan_entry); + let (shutdown, monitor_update, htlcs) = try_chan_entry!(self, chan_entry.get_mut().shutdown(&self.signer_provider, &peer_state.latest_features, &msg), chan_entry); dropped_htlcs = htlcs; // Update the monitor with the shutdown script if necessary. 
if let Some(monitor_update) = monitor_update { - let update_res = self.chain_monitor.update_channel(chan_entry.get().get_funding_txo().unwrap(), monitor_update); + let update_res = self.chain_monitor.update_channel(chan_entry.get().get_funding_txo().unwrap(), &monitor_update); let (result, is_permanent) = handle_monitor_update_res!(self, update_res, chan_entry.get_mut(), RAACommitmentOrder::CommitmentFirst, chan_entry.key(), NO_UPDATE); if is_permanent { @@ -4473,7 +4533,7 @@ where } if let Some(msg) = shutdown { - channel_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown { + peer_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown { node_id: *counterparty_node_id, msg, }); @@ -4495,21 +4555,19 @@ where } fn internal_closing_signed(&self, counterparty_node_id: &PublicKey, msg: &msgs::ClosingSigned) -> Result<(), MsgHandleErrInternal> { + let per_peer_state = self.per_peer_state.read().unwrap(); + let peer_state_mutex_opt = per_peer_state.get(counterparty_node_id); + if let None = peer_state_mutex_opt { + return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), msg.channel_id)) + } let (tx, chan_option) = { - let mut channel_state_lock = self.channel_state.lock().unwrap(); - let channel_state = &mut *channel_state_lock; - let per_peer_state = self.per_peer_state.read().unwrap(); - if let None = per_peer_state.get(counterparty_node_id) { - return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), msg.channel_id)) - } - let peer_state_mutex = per_peer_state.get(counterparty_node_id).unwrap(); - let mut peer_state_lock = peer_state_mutex.lock().unwrap(); + let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap(); let peer_state = &mut *peer_state_lock; match peer_state.channel_by_id.entry(msg.channel_id.clone()) { hash_map::Entry::Occupied(mut chan_entry) => { let (closing_signed, tx) = try_chan_entry!(self, chan_entry.get_mut().closing_signed(&self.fee_estimator, &msg), chan_entry); if let Some(msg) = closing_signed { - channel_state.pending_msg_events.push(events::MessageSendEvent::SendClosingSigned { + peer_state.pending_msg_events.push(events::MessageSendEvent::SendClosingSigned { node_id: counterparty_node_id.clone(), msg, }); @@ -4532,8 +4590,9 @@ where } if let Some(chan) = chan_option { if let Ok(update) = self.get_channel_update_for_broadcast(&chan) { - let mut channel_state = self.channel_state.lock().unwrap(); - channel_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate { + let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap(); + let peer_state = &mut *peer_state_lock; + peer_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate { msg: update }); } @@ -4563,7 +4622,7 @@ where match peer_state.channel_by_id.entry(msg.channel_id) { hash_map::Entry::Occupied(mut chan) => { - let create_pending_htlc_status = |chan: &Channel<::Signer>, pending_forward_info: PendingHTLCStatus, error_code: u16| { + let create_pending_htlc_status = |chan: &Channel<::Signer>, pending_forward_info: PendingHTLCStatus, error_code: u16| { // If the update_add is completely bogus, the call will Err and we will close, // but if we've sent a shutdown and they haven't acknowledged it yet, we just // want to reject the new HTLC and fail it backwards instead of forwarding. 
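// [Editor's sketch] Throughout these hunks `chain_monitor.update_channel(...)` now takes the
// monitor update by reference (`&preimage_update`, `&monitor_update`) rather than by value; one
// practical effect is that the caller retains ownership of the update after the call. A tiny
// illustration of that API shape with stand-in types (not the real `chain::Watch` trait):
struct MonitorUpdate { update_id: u64 }

#[derive(PartialEq)]
enum UpdateStatus { Completed, InProgress }

struct ChainMonitor;

impl ChainMonitor {
    // Borrowing the update means the caller can still inspect it once the call returns.
    fn update_channel(&self, _update: &MonitorUpdate) -> UpdateStatus {
        UpdateStatus::InProgress
    }
}

fn main() {
    let chain_monitor = ChainMonitor;
    let preimage_update = MonitorUpdate { update_id: 42 };
    let update_res = chain_monitor.update_channel(&preimage_update);
    if update_res != UpdateStatus::Completed {
        // Ownership of `preimage_update` was retained, so it is still usable for logging here.
        println!("monitor update {} still pending", preimage_update.update_id);
    }
}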
@@ -4593,7 +4652,6 @@ where } fn internal_update_fulfill_htlc(&self, counterparty_node_id: &PublicKey, msg: &msgs::UpdateFulfillHTLC) -> Result<(), MsgHandleErrInternal> { - let channel_lock = self.channel_state.lock().unwrap(); let (htlc_source, forwarded_htlc_value) = { let per_peer_state = self.per_peer_state.read().unwrap(); let peer_state_mutex_opt = per_peer_state.get(counterparty_node_id); @@ -4609,7 +4667,7 @@ where hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id)) } }; - self.claim_funds_internal(channel_lock, htlc_source, msg.payment_preimage.clone(), Some(forwarded_htlc_value), false, msg.channel_id); + self.claim_funds_internal(htlc_source, msg.payment_preimage.clone(), Some(forwarded_htlc_value), false, msg.channel_id); Ok(()) } @@ -4652,8 +4710,6 @@ where } fn internal_commitment_signed(&self, counterparty_node_id: &PublicKey, msg: &msgs::CommitmentSigned) -> Result<(), MsgHandleErrInternal> { - let mut channel_state_lock = self.channel_state.lock().unwrap(); - let channel_state = &mut *channel_state_lock; let per_peer_state = self.per_peer_state.read().unwrap(); let peer_state_mutex_opt = per_peer_state.get(counterparty_node_id); if let None = peer_state_mutex_opt { @@ -4668,23 +4724,23 @@ where Err((None, e)) => try_chan_entry!(self, Err(e), chan), Err((Some(update), e)) => { assert!(chan.get().is_awaiting_monitor_update()); - let _ = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), update); + let _ = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), &update); try_chan_entry!(self, Err(e), chan); unreachable!(); }, Ok(res) => res }; - let update_res = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), monitor_update); + let update_res = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), &monitor_update); if let Err(e) = handle_monitor_update_res!(self, update_res, chan, RAACommitmentOrder::RevokeAndACKFirst, true, commitment_signed.is_some()) { return Err(e); } - channel_state.pending_msg_events.push(events::MessageSendEvent::SendRevokeAndACK { + peer_state.pending_msg_events.push(events::MessageSendEvent::SendRevokeAndACK { node_id: counterparty_node_id.clone(), msg: revoke_and_ack, }); if let Some(msg) = commitment_signed { - channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs { + peer_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs { node_id: counterparty_node_id.clone(), updates: msgs::CommitmentUpdate { update_add_htlcs: Vec::new(), @@ -4797,8 +4853,6 @@ where fn internal_revoke_and_ack(&self, counterparty_node_id: &PublicKey, msg: &msgs::RevokeAndACK) -> Result<(), MsgHandleErrInternal> { let mut htlcs_to_fail = Vec::new(); let res = loop { - let mut channel_state_lock = self.channel_state.lock().unwrap(); - let channel_state = &mut *channel_state_lock; let per_peer_state = self.per_peer_state.read().unwrap(); let peer_state_mutex_opt = per_peer_state.get(counterparty_node_id); if let None = peer_state_mutex_opt { @@ -4812,7 +4866,7 @@ where let raa_updates = break_chan_entry!(self, chan.get_mut().revoke_and_ack(&msg, &self.logger), chan); htlcs_to_fail = raa_updates.holding_cell_failed_htlcs; - let update_res = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), raa_updates.monitor_update); + let update_res = 
self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), &raa_updates.monitor_update); if was_paused_for_mon_update { assert!(update_res != ChannelMonitorUpdateStatus::Completed); assert!(raa_updates.commitment_update.is_none()); @@ -4831,7 +4885,7 @@ where } else { unreachable!(); } } if let Some(updates) = raa_updates.commitment_update { - channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs { + peer_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs { node_id: counterparty_node_id.clone(), updates, }); @@ -4881,8 +4935,6 @@ where } fn internal_announcement_signatures(&self, counterparty_node_id: &PublicKey, msg: &msgs::AnnouncementSignatures) -> Result<(), MsgHandleErrInternal> { - let mut channel_state_lock = self.channel_state.lock().unwrap(); - let channel_state = &mut *channel_state_lock; let per_peer_state = self.per_peer_state.read().unwrap(); let peer_state_mutex_opt = per_peer_state.get(counterparty_node_id); if let None = peer_state_mutex_opt { @@ -4896,9 +4948,11 @@ where return Err(MsgHandleErrInternal::from_no_close(LightningError{err: "Got an announcement_signatures before we were ready for it".to_owned(), action: msgs::ErrorAction::IgnoreError})); } - channel_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelAnnouncement { + peer_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelAnnouncement { msg: try_chan_entry!(self, chan.get_mut().announcement_signatures( - self.get_our_node_id(), self.genesis_hash.clone(), self.best_block.read().unwrap().height(), msg), chan), + &self.node_signer, self.genesis_hash.clone(), self.best_block.read().unwrap().height(), + msg, &self.default_configuration + ), chan), // Note that announcement_signatures fails if the channel cannot be announced, // so get_channel_update_for_broadcast will never fail by the time we get here. update_msg: self.get_channel_update_for_broadcast(chan.get()).unwrap(), @@ -4953,8 +5007,6 @@ where fn internal_channel_reestablish(&self, counterparty_node_id: &PublicKey, msg: &msgs::ChannelReestablish) -> Result<(), MsgHandleErrInternal> { let htlc_forwards; let need_lnd_workaround = { - let mut channel_state_lock = self.channel_state.lock().unwrap(); - let channel_state = &mut *channel_state_lock; let per_peer_state = self.per_peer_state.read().unwrap(); let peer_state_mutex_opt = per_peer_state.get(counterparty_node_id); @@ -4970,11 +5022,11 @@ where // freed HTLCs to fail backwards. If in the future we no longer drop pending // add-HTLCs on disconnect, we may be handed HTLCs to fail backwards here. 
let responses = try_chan_entry!(self, chan.get_mut().channel_reestablish( - msg, &self.logger, self.our_network_pubkey.clone(), self.genesis_hash, - &*self.best_block.read().unwrap()), chan); + msg, &self.logger, &self.node_signer, self.genesis_hash, + &self.default_configuration, &*self.best_block.read().unwrap()), chan); let mut channel_update = None; if let Some(msg) = responses.shutdown_msg { - channel_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown { + peer_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown { node_id: counterparty_node_id.clone(), msg, }); @@ -4991,10 +5043,10 @@ where } let need_lnd_workaround = chan.get_mut().workaround_lnd_bug_4006.take(); htlc_forwards = self.handle_channel_resumption( - &mut channel_state.pending_msg_events, chan.get_mut(), responses.raa, responses.commitment_update, responses.order, + &mut peer_state.pending_msg_events, chan.get_mut(), responses.raa, responses.commitment_update, responses.order, Vec::new(), None, responses.channel_ready, responses.announcement_sigs); if let Some(upd) = channel_update { - channel_state.pending_msg_events.push(upd); + peer_state.pending_msg_events.push(upd); } need_lnd_workaround }, @@ -5023,7 +5075,7 @@ where MonitorEvent::HTLCEvent(htlc_update) => { if let Some(preimage) = htlc_update.payment_preimage { log_trace!(self.logger, "Claiming HTLC with preimage {} from our monitor", log_bytes!(preimage.0)); - self.claim_funds_internal(self.channel_state.lock().unwrap(), htlc_update.source, preimage, htlc_update.htlc_value_satoshis.map(|v| v * 1000), true, funding_outpoint.to_channel_id()); + self.claim_funds_internal(htlc_update.source, preimage, htlc_update.htlc_value_satoshis.map(|v| v * 1000), true, funding_outpoint.to_channel_id()); } else { log_trace!(self.logger, "Failing HTLC with hash {} from our monitor", log_bytes!(htlc_update.payment_hash.0)); let receiver = HTLCDestination::NextHopChannel { node_id: counterparty_node_id, channel_id: funding_outpoint.to_channel_id() }; @@ -5033,8 +5085,6 @@ where }, MonitorEvent::CommitmentTxConfirmed(funding_outpoint) | MonitorEvent::UpdateFailed(funding_outpoint) => { - let mut channel_lock = self.channel_state.lock().unwrap(); - let channel_state = &mut *channel_lock; let counterparty_node_id_opt = match counterparty_node_id { Some(cp_id) => Some(cp_id), None => { @@ -5047,9 +5097,9 @@ where if let Some(counterparty_node_id) = counterparty_node_id_opt { let per_peer_state = self.per_peer_state.read().unwrap(); if let Some(peer_state_mutex) = per_peer_state.get(&counterparty_node_id) { - let pending_msg_events = &mut channel_state.pending_msg_events; let mut peer_state_lock = peer_state_mutex.lock().unwrap(); let peer_state = &mut *peer_state_lock; + let pending_msg_events = &mut peer_state.pending_msg_events; if let hash_map::Entry::Occupied(chan_entry) = peer_state.channel_by_id.entry(funding_outpoint.to_channel_id()) { let mut chan = remove_channel!(self, chan_entry); failed_channels.push(chan.force_shutdown(false)); @@ -5104,14 +5154,12 @@ where let mut failed_htlcs = Vec::new(); let mut handle_errors = Vec::new(); { - let mut channel_state_lock = self.channel_state.lock().unwrap(); - let channel_state = &mut *channel_state_lock; - let pending_msg_events = &mut channel_state.pending_msg_events; let per_peer_state = self.per_peer_state.read().unwrap(); for (_cp_id, peer_state_mutex) in per_peer_state.iter() { let mut peer_state_lock = peer_state_mutex.lock().unwrap(); let peer_state = &mut *peer_state_lock; + let 
pending_msg_events = &mut peer_state.pending_msg_events; peer_state.channel_by_id.retain(|channel_id, chan| { match chan.maybe_free_holding_cell_htlcs(&self.logger) { Ok((commitment_opt, holding_cell_failed_htlcs)) => { @@ -5123,7 +5171,7 @@ where )); } if let Some((commitment_update, monitor_update)) = commitment_opt { - match self.chain_monitor.update_channel(chan.get_funding_txo().unwrap(), monitor_update) { + match self.chain_monitor.update_channel(chan.get_funding_txo().unwrap(), &monitor_update) { ChannelMonitorUpdateStatus::Completed => { pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs { node_id: chan.get_counterparty_node_id(), @@ -5170,14 +5218,12 @@ where let mut handle_errors: Vec<(PublicKey, Result<(), _>)> = Vec::new(); let mut has_update = false; { - let mut channel_state_lock = self.channel_state.lock().unwrap(); - let channel_state = &mut *channel_state_lock; - let pending_msg_events = &mut channel_state.pending_msg_events; let per_peer_state = self.per_peer_state.read().unwrap(); for (_cp_id, peer_state_mutex) in per_peer_state.iter() { let mut peer_state_lock = peer_state_mutex.lock().unwrap(); let peer_state = &mut *peer_state_lock; + let pending_msg_events = &mut peer_state.pending_msg_events; peer_state.channel_by_id.retain(|channel_id, chan| { match chan.maybe_propose_closing_signed(&self.fee_estimator, &self.logger) { Ok((msg_opt, tx_opt)) => { @@ -5252,7 +5298,7 @@ where return Err(APIError::APIMisuseError { err: format!("min_value_msat of {} greater than total 21 million bitcoin supply", min_value_msat.unwrap()) }); } - let payment_secret = PaymentSecret(self.keys_manager.get_secure_random_bytes()); + let payment_secret = PaymentSecret(self.entropy_source.get_secure_random_bytes()); let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier); let mut payment_secrets = self.pending_inbound_payments.lock().unwrap(); @@ -5298,12 +5344,18 @@ where /// /// Errors if `min_value_msat` is greater than total bitcoin supply. /// + /// If `min_final_cltv_expiry_delta` is set to some value, then the payment will not be receivable + /// on versions of LDK prior to 0.0.114. + /// /// [`claim_funds`]: Self::claim_funds /// [`PaymentClaimable`]: events::Event::PaymentClaimable /// [`PaymentClaimable::payment_preimage`]: events::Event::PaymentClaimable::payment_preimage /// [`create_inbound_payment_for_hash`]: Self::create_inbound_payment_for_hash - pub fn create_inbound_payment(&self, min_value_msat: Option, invoice_expiry_delta_secs: u32) -> Result<(PaymentHash, PaymentSecret), ()> { - inbound_payment::create(&self.inbound_payment_key, min_value_msat, invoice_expiry_delta_secs, &self.keys_manager, self.highest_seen_timestamp.load(Ordering::Acquire) as u64) + pub fn create_inbound_payment(&self, min_value_msat: Option, invoice_expiry_delta_secs: u32, + min_final_cltv_expiry_delta: Option) -> Result<(PaymentHash, PaymentSecret), ()> { + inbound_payment::create(&self.inbound_payment_key, min_value_msat, invoice_expiry_delta_secs, + &self.entropy_source, self.highest_seen_timestamp.load(Ordering::Acquire) as u64, + min_final_cltv_expiry_delta) } /// Legacy version of [`create_inbound_payment`]. 
Use this method if you wish to share @@ -5317,7 +5369,7 @@ where /// [`create_inbound_payment`]: Self::create_inbound_payment #[deprecated] pub fn create_inbound_payment_legacy(&self, min_value_msat: Option, invoice_expiry_delta_secs: u32) -> Result<(PaymentHash, PaymentSecret), APIError> { - let payment_preimage = PaymentPreimage(self.keys_manager.get_secure_random_bytes()); + let payment_preimage = PaymentPreimage(self.entropy_source.get_secure_random_bytes()); let payment_hash = PaymentHash(Sha256::hash(&payment_preimage.0).into_inner()); let payment_secret = self.set_payment_hash_secret_map(payment_hash, Some(payment_preimage), min_value_msat, invoice_expiry_delta_secs)?; Ok((payment_hash, payment_secret)) @@ -5351,8 +5403,8 @@ where /// If you need exact expiry semantics, you should enforce them upon receipt of /// [`PaymentClaimable`]. /// - /// Note that invoices generated for inbound payments should have their `min_final_cltv_expiry` - /// set to at least [`MIN_FINAL_CLTV_EXPIRY`]. + /// Note that invoices generated for inbound payments should have their `min_final_cltv_expiry_delta` + /// set to at least [`MIN_FINAL_CLTV_EXPIRY_DELTA`]. /// /// Note that a malicious eavesdropper can intuit whether an inbound payment was created by /// `create_inbound_payment` or `create_inbound_payment_for_hash` based on runtime. @@ -5364,10 +5416,16 @@ where /// /// Errors if `min_value_msat` is greater than total bitcoin supply. /// + /// If `min_final_cltv_expiry_delta` is set to some value, then the payment will not be receivable + /// on versions of LDK prior to 0.0.114. + /// /// [`create_inbound_payment`]: Self::create_inbound_payment /// [`PaymentClaimable`]: events::Event::PaymentClaimable - pub fn create_inbound_payment_for_hash(&self, payment_hash: PaymentHash, min_value_msat: Option, invoice_expiry_delta_secs: u32) -> Result { - inbound_payment::create_from_hash(&self.inbound_payment_key, min_value_msat, payment_hash, invoice_expiry_delta_secs, self.highest_seen_timestamp.load(Ordering::Acquire) as u64) + pub fn create_inbound_payment_for_hash(&self, payment_hash: PaymentHash, min_value_msat: Option, + invoice_expiry_delta_secs: u32, min_final_cltv_expiry: Option) -> Result { + inbound_payment::create_from_hash(&self.inbound_payment_key, min_value_msat, payment_hash, + invoice_expiry_delta_secs, self.highest_seen_timestamp.load(Ordering::Acquire) as u64, + min_final_cltv_expiry) } /// Legacy version of [`create_inbound_payment_for_hash`]. Use this method if you wish to share @@ -5400,7 +5458,7 @@ where let best_block_height = self.best_block.read().unwrap().height(); let short_to_chan_info = self.short_to_chan_info.read().unwrap(); loop { - let scid_candidate = fake_scid::Namespace::Phantom.get_fake_scid(best_block_height, &self.genesis_hash, &self.fake_scid_rand_bytes, &self.keys_manager); + let scid_candidate = fake_scid::Namespace::Phantom.get_fake_scid(best_block_height, &self.genesis_hash, &self.fake_scid_rand_bytes, &self.entropy_source); // Ensure the generated scid doesn't conflict with a real channel. 
match short_to_chan_info.get(&scid_candidate) { Some(_) => continue, @@ -5430,7 +5488,7 @@ where let best_block_height = self.best_block.read().unwrap().height(); let short_to_chan_info = self.short_to_chan_info.read().unwrap(); loop { - let scid_candidate = fake_scid::Namespace::Intercept.get_fake_scid(best_block_height, &self.genesis_hash, &self.fake_scid_rand_bytes, &self.keys_manager); + let scid_candidate = fake_scid::Namespace::Intercept.get_fake_scid(best_block_height, &self.genesis_hash, &self.fake_scid_rand_bytes, &self.entropy_source); // Ensure the generated scid doesn't conflict with a real channel. if short_to_chan_info.contains_key(&scid_candidate) { continue } return scid_candidate @@ -5516,15 +5574,30 @@ where } } -impl MessageSendEventsProvider for ChannelManager +impl MessageSendEventsProvider for ChannelManager where - M::Target: chain::Watch<::Signer>, + M::Target: chain::Watch<::Signer>, T::Target: BroadcasterInterface, - K::Target: KeysInterface, + ES::Target: EntropySource, + NS::Target: NodeSigner, + SP::Target: SignerProvider, F::Target: FeeEstimator, R::Target: Router, L::Target: Logger, { + /// Returns `MessageSendEvent`s strictly ordered per-peer, in the order they were generated. + /// The returned array will contain `MessageSendEvent`s for different peers if + /// `MessageSendEvent`s to more than one peer exist, but `MessageSendEvent`s to the same peer + /// are always placed next to each other. + /// + /// Note that while `MessageSendEvent`s are strictly ordered per-peer, the peer order for + /// the chunks of `MessageSendEvent`s for different peers is random. I.e. if the array contains + /// `MessageSendEvent`s for both `node_a` and `node_b`, the `MessageSendEvent`s for `node_a` + /// will randomly be placed first or last in the returned array. + /// + /// Note that even though `BroadcastChannelAnnouncement` and `BroadcastChannelUpdate` + /// `MessageSendEvent`s are intended to be broadcasted to all peers, they will be placed among + /// the `MessageSendEvent`s to the specific peer they were generated under.
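// [Editor's sketch] The `get_and_clear_pending_msg_events` body shown just below swaps each
// peer's queue out under that peer's lock and appends it to one output `Vec`, which is what
// gives the per-peer ordering guarantee described in the new doc comment above. A self-contained
// model of that drain follows (simplified key and event types, not the LDK ones):
use std::collections::HashMap;
use std::sync::{Mutex, RwLock};

fn drain_all_peers(per_peer_state: &RwLock<HashMap<u64, Mutex<Vec<String>>>>) -> Vec<String> {
    let mut pending_events = Vec::new();
    let per_peer = per_peer_state.read().unwrap();
    for (_peer_id, queue_mutex) in per_peer.iter() {
        let mut queue = queue_mutex.lock().unwrap();
        if !queue.is_empty() {
            // `swap` moves the peer's events out in their original order and leaves an empty
            // Vec behind, so later pushes for that peer start a fresh batch.
            let mut peer_events = Vec::new();
            std::mem::swap(&mut peer_events, &mut *queue);
            pending_events.append(&mut peer_events);
        }
    }
    // Events for one peer stay contiguous; which peer's chunk comes first depends on the
    // HashMap iteration order and is therefore unspecified, matching the doc comment above.
    pending_events
}

fn main() {
    let state = RwLock::new(HashMap::from([
        (1u64, Mutex::new(vec!["a1".to_owned(), "a2".to_owned()])),
        (2u64, Mutex::new(vec!["b1".to_owned()])),
    ]));
    println!("{:?}", drain_all_peers(&state));
}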
fn get_and_clear_pending_msg_events(&self) -> Vec { let events = RefCell::new(Vec::new()); PersistenceNotifierGuard::optionally_notify(&self.total_consistency_lock, &self.persistence_notifier, || { @@ -5544,8 +5617,16 @@ where } let mut pending_events = Vec::new(); - let mut channel_state = self.channel_state.lock().unwrap(); - mem::swap(&mut pending_events, &mut channel_state.pending_msg_events); + let per_peer_state = self.per_peer_state.read().unwrap(); + for (_cp_id, peer_state_mutex) in per_peer_state.iter() { + let mut peer_state_lock = peer_state_mutex.lock().unwrap(); + let peer_state = &mut *peer_state_lock; + if peer_state.pending_msg_events.len() > 0 { + let mut peer_pending_events = Vec::new(); + mem::swap(&mut peer_pending_events, &mut peer_state.pending_msg_events); + pending_events.append(&mut peer_pending_events); + } + } if !pending_events.is_empty() { events.replace(pending_events); @@ -5557,11 +5638,13 @@ where } } -impl EventsProvider for ChannelManager +impl EventsProvider for ChannelManager where - M::Target: chain::Watch<::Signer>, + M::Target: chain::Watch<::Signer>, T::Target: BroadcasterInterface, - K::Target: KeysInterface, + ES::Target: EntropySource, + NS::Target: NodeSigner, + SP::Target: SignerProvider, F::Target: FeeEstimator, R::Target: Router, L::Target: Logger, @@ -5594,11 +5677,13 @@ where } } -impl chain::Listen for ChannelManager +impl chain::Listen for ChannelManager where - M::Target: chain::Watch<::Signer>, + M::Target: chain::Watch<::Signer>, T::Target: BroadcasterInterface, - K::Target: KeysInterface, + ES::Target: EntropySource, + NS::Target: NodeSigner, + SP::Target: SignerProvider, F::Target: FeeEstimator, R::Target: Router, L::Target: Logger, @@ -5628,15 +5713,17 @@ where *best_block = BestBlock::new(header.prev_blockhash, new_height) } - self.do_chain_event(Some(new_height), |channel| channel.best_block_updated(new_height, header.time, self.genesis_hash.clone(), self.get_our_node_id(), &self.logger)); + self.do_chain_event(Some(new_height), |channel| channel.best_block_updated(new_height, header.time, self.genesis_hash.clone(), &self.node_signer, &self.default_configuration, &self.logger)); } } -impl chain::Confirm for ChannelManager +impl chain::Confirm for ChannelManager where - M::Target: chain::Watch<::Signer>, + M::Target: chain::Watch<::Signer>, T::Target: BroadcasterInterface, - K::Target: KeysInterface, + ES::Target: EntropySource, + NS::Target: NodeSigner, + SP::Target: SignerProvider, F::Target: FeeEstimator, R::Target: Router, L::Target: Logger, @@ -5650,13 +5737,13 @@ where log_trace!(self.logger, "{} transactions included in block {} at height {} provided", txdata.len(), block_hash, height); let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier); - self.do_chain_event(Some(height), |channel| channel.transactions_confirmed(&block_hash, height, txdata, self.genesis_hash.clone(), self.get_our_node_id(), &self.logger) + self.do_chain_event(Some(height), |channel| channel.transactions_confirmed(&block_hash, height, txdata, self.genesis_hash.clone(), &self.node_signer, &self.default_configuration, &self.logger) .map(|(a, b)| (a, Vec::new(), b))); let last_best_block_height = self.best_block.read().unwrap().height(); if height < last_best_block_height { let timestamp = self.highest_seen_timestamp.load(Ordering::Acquire); - self.do_chain_event(Some(last_best_block_height), |channel| channel.best_block_updated(last_best_block_height, timestamp as u32, 
self.genesis_hash.clone(), self.get_our_node_id(), &self.logger)); + self.do_chain_event(Some(last_best_block_height), |channel| channel.best_block_updated(last_best_block_height, timestamp as u32, self.genesis_hash.clone(), &self.node_signer, &self.default_configuration, &self.logger)); } } @@ -5672,7 +5759,7 @@ where *self.best_block.write().unwrap() = BestBlock::new(block_hash, height); - self.do_chain_event(Some(height), |channel| channel.best_block_updated(height, header.time, self.genesis_hash.clone(), self.get_our_node_id(), &self.logger)); + self.do_chain_event(Some(height), |channel| channel.best_block_updated(height, header.time, self.genesis_hash.clone(), &self.node_signer, &self.default_configuration, &self.logger)); macro_rules! max_time { ($timestamp: expr) => { @@ -5723,11 +5810,13 @@ where } } -impl ChannelManager +impl ChannelManager where - M::Target: chain::Watch<::Signer>, + M::Target: chain::Watch<::Signer>, T::Target: BroadcasterInterface, - K::Target: KeysInterface, + ES::Target: EntropySource, + NS::Target: NodeSigner, + SP::Target: SignerProvider, F::Target: FeeEstimator, R::Target: Router, L::Target: Logger, @@ -5735,7 +5824,7 @@ where /// Calls a function which handles an on-chain event (blocks dis/connected, transactions /// un/confirmed, etc) on each channel, handling any resulting errors or messages generated by /// the function. - fn do_chain_event::Signer>) -> Result<(Option, Vec<(HTLCSource, PaymentHash)>, Option), ClosureReason>> + fn do_chain_event::Signer>) -> Result<(Option, Vec<(HTLCSource, PaymentHash)>, Option), ClosureReason>> (&self, height_opt: Option, f: FN) { // Note that we MUST NOT end up calling methods on self.chain_monitor here - we're called // during initialization prior to the chain_monitor being fully configured in some cases. @@ -5744,13 +5833,11 @@ where let mut failed_channels = Vec::new(); let mut timed_out_htlcs = Vec::new(); { - let mut channel_lock = self.channel_state.lock().unwrap(); - let channel_state = &mut *channel_lock; - let pending_msg_events = &mut channel_state.pending_msg_events; let per_peer_state = self.per_peer_state.read().unwrap(); for (_cp_id, peer_state_mutex) in per_peer_state.iter() { let mut peer_state_lock = peer_state_mutex.lock().unwrap(); let peer_state = &mut *peer_state_lock; + let pending_msg_events = &mut peer_state.pending_msg_events; peer_state.channel_by_id.retain(|_, channel| { let res = f(channel); if let Ok((channel_ready_opt, mut timed_out_pending_htlcs, announcement_sigs)) = res { @@ -5783,7 +5870,7 @@ where msg: announcement_sigs, }); if let Some(height) = height_opt { - if let Some(announcement) = channel.get_signed_channel_announcement(self.get_our_node_id(), self.genesis_hash, height) { + if let Some(announcement) = channel.get_signed_channel_announcement(&self.node_signer, self.genesis_hash, height, &self.default_configuration) { pending_msg_events.push(events::MessageSendEvent::BroadcastChannelAnnouncement { msg: announcement, // Note that announcement_signatures fails if the channel cannot be announced, @@ -5927,26 +6014,62 @@ where pub fn current_best_block(&self) -> BestBlock { self.best_block.read().unwrap().clone() } + + /// Fetches the set of [`NodeFeatures`] flags which are provided by or required by + /// [`ChannelManager`]. + pub fn node_features(&self) -> NodeFeatures { + provided_node_features(&self.default_configuration) + } + + /// Fetches the set of [`InvoiceFeatures`] flags which are provided by or required by + /// [`ChannelManager`]. 
+ /// + /// Note that the invoice feature flags can vary depending on if the invoice is a "phantom invoice" + /// or not. Thus, this method is not public. + #[cfg(any(feature = "_test_utils", test))] + pub fn invoice_features(&self) -> InvoiceFeatures { + provided_invoice_features(&self.default_configuration) + } + + /// Fetches the set of [`ChannelFeatures`] flags which are provided by or required by + /// [`ChannelManager`]. + pub fn channel_features(&self) -> ChannelFeatures { + provided_channel_features(&self.default_configuration) + } + + /// Fetches the set of [`ChannelTypeFeatures`] flags which are provided by or required by + /// [`ChannelManager`]. + pub fn channel_type_features(&self) -> ChannelTypeFeatures { + provided_channel_type_features(&self.default_configuration) + } + + /// Fetches the set of [`InitFeatures`] flags which are provided by or required by + /// [`ChannelManager`]. + pub fn init_features(&self) -> InitFeatures { + provided_init_features(&self.default_configuration) + } } -impl - ChannelMessageHandler for ChannelManager +impl + ChannelMessageHandler for ChannelManager where - M::Target: chain::Watch<::Signer>, + M::Target: chain::Watch<::Signer>, T::Target: BroadcasterInterface, - K::Target: KeysInterface, + ES::Target: EntropySource, + NS::Target: NodeSigner, + SP::Target: SignerProvider, F::Target: FeeEstimator, R::Target: Router, L::Target: Logger, { - fn handle_open_channel(&self, counterparty_node_id: &PublicKey, their_features: InitFeatures, msg: &msgs::OpenChannel) { + fn handle_open_channel(&self, counterparty_node_id: &PublicKey, msg: &msgs::OpenChannel) { let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier); - let _ = handle_error!(self, self.internal_open_channel(counterparty_node_id, their_features, msg), *counterparty_node_id); + let _ = handle_error!(self, self.internal_open_channel(counterparty_node_id, msg), *counterparty_node_id); } - fn handle_accept_channel(&self, counterparty_node_id: &PublicKey, their_features: InitFeatures, msg: &msgs::AcceptChannel) { + fn handle_accept_channel(&self, counterparty_node_id: &PublicKey, msg: &msgs::AcceptChannel) { let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier); - let _ = handle_error!(self, self.internal_accept_channel(counterparty_node_id, their_features, msg), *counterparty_node_id); + let _ = handle_error!(self, self.internal_accept_channel(counterparty_node_id, msg), *counterparty_node_id); } fn handle_funding_created(&self, counterparty_node_id: &PublicKey, msg: &msgs::FundingCreated) { @@ -5964,9 +6087,9 @@ where let _ = handle_error!(self, self.internal_channel_ready(counterparty_node_id, msg), *counterparty_node_id); } - fn handle_shutdown(&self, counterparty_node_id: &PublicKey, their_features: &InitFeatures, msg: &msgs::Shutdown) { + fn handle_shutdown(&self, counterparty_node_id: &PublicKey, msg: &msgs::Shutdown) { let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier); - let _ = handle_error!(self, self.internal_shutdown(counterparty_node_id, their_features, msg), *counterparty_node_id); + let _ = handle_error!(self, self.internal_shutdown(counterparty_node_id, msg), *counterparty_node_id); } fn handle_closing_signed(&self, counterparty_node_id: &PublicKey, msg: &msgs::ClosingSigned) { @@ -6033,15 +6156,14 @@ where let _persistence_guard = 
PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier); let mut failed_channels = Vec::new(); let mut no_channels_remain = true; - let mut channel_state = self.channel_state.lock().unwrap(); let mut per_peer_state = self.per_peer_state.write().unwrap(); { - let pending_msg_events = &mut channel_state.pending_msg_events; log_debug!(self.logger, "Marking channels with {} disconnected and generating channel_updates. We believe we {} make future connections to this peer.", log_pubkey!(counterparty_node_id), if no_connection_possible { "cannot" } else { "can" }); if let Some(peer_state_mutex) = per_peer_state.get(counterparty_node_id) { let mut peer_state_lock = peer_state_mutex.lock().unwrap(); let peer_state = &mut *peer_state_lock; + let pending_msg_events = &mut peer_state.pending_msg_events; peer_state.channel_by_id.retain(|_, chan| { chan.remove_uncommitted_htlcs_and_mark_paused(&self.logger); if chan.is_shutdown() { @@ -6053,32 +6175,31 @@ where } true }); + pending_msg_events.retain(|msg| { + match msg { + &events::MessageSendEvent::SendAcceptChannel { .. } => false, + &events::MessageSendEvent::SendOpenChannel { .. } => false, + &events::MessageSendEvent::SendFundingCreated { .. } => false, + &events::MessageSendEvent::SendFundingSigned { .. } => false, + &events::MessageSendEvent::SendChannelReady { .. } => false, + &events::MessageSendEvent::SendAnnouncementSignatures { .. } => false, + &events::MessageSendEvent::UpdateHTLCs { .. } => false, + &events::MessageSendEvent::SendRevokeAndACK { .. } => false, + &events::MessageSendEvent::SendClosingSigned { .. } => false, + &events::MessageSendEvent::SendShutdown { .. } => false, + &events::MessageSendEvent::SendChannelReestablish { .. } => false, + &events::MessageSendEvent::SendChannelAnnouncement { .. } => false, + &events::MessageSendEvent::BroadcastChannelAnnouncement { .. } => true, + &events::MessageSendEvent::BroadcastChannelUpdate { .. } => true, + &events::MessageSendEvent::SendChannelUpdate { .. } => false, + &events::MessageSendEvent::HandleError { .. } => false, + &events::MessageSendEvent::SendChannelRangeQuery { .. } => false, + &events::MessageSendEvent::SendShortIdsQuery { .. } => false, + &events::MessageSendEvent::SendReplyChannelRange { .. } => false, + &events::MessageSendEvent::SendGossipTimestampFilter { .. } => false, + } + }); } - pending_msg_events.retain(|msg| { - match msg { - &events::MessageSendEvent::SendAcceptChannel { ref node_id, .. } => node_id != counterparty_node_id, - &events::MessageSendEvent::SendOpenChannel { ref node_id, .. } => node_id != counterparty_node_id, - &events::MessageSendEvent::SendFundingCreated { ref node_id, .. } => node_id != counterparty_node_id, - &events::MessageSendEvent::SendFundingSigned { ref node_id, .. } => node_id != counterparty_node_id, - &events::MessageSendEvent::SendChannelReady { ref node_id, .. } => node_id != counterparty_node_id, - &events::MessageSendEvent::SendAnnouncementSignatures { ref node_id, .. } => node_id != counterparty_node_id, - &events::MessageSendEvent::UpdateHTLCs { ref node_id, .. } => node_id != counterparty_node_id, - &events::MessageSendEvent::SendRevokeAndACK { ref node_id, .. } => node_id != counterparty_node_id, - &events::MessageSendEvent::SendClosingSigned { ref node_id, .. } => node_id != counterparty_node_id, - &events::MessageSendEvent::SendShutdown { ref node_id, .. } => node_id != counterparty_node_id, - &events::MessageSendEvent::SendChannelReestablish { ref node_id, .. 
} => node_id != counterparty_node_id, - &events::MessageSendEvent::SendChannelAnnouncement { ref node_id, .. } => node_id != counterparty_node_id, - &events::MessageSendEvent::BroadcastChannelAnnouncement { .. } => true, - &events::MessageSendEvent::BroadcastChannelUpdate { .. } => true, - &events::MessageSendEvent::SendChannelUpdate { ref node_id, .. } => node_id != counterparty_node_id, - &events::MessageSendEvent::HandleError { ref node_id, .. } => node_id != counterparty_node_id, - &events::MessageSendEvent::SendChannelRangeQuery { .. } => false, - &events::MessageSendEvent::SendShortIdsQuery { .. } => false, - &events::MessageSendEvent::SendReplyChannelRange { .. } => false, - &events::MessageSendEvent::SendGossipTimestampFilter { .. } => false, - } - }); - mem::drop(channel_state); } if no_channels_remain { per_peer_state.remove(counterparty_node_id); @@ -6107,6 +6228,7 @@ where e.insert(Mutex::new(PeerState { channel_by_id: HashMap::new(), latest_features: init_msg.features.clone(), + pending_msg_events: Vec::new(), })); }, hash_map::Entry::Occupied(e) => { @@ -6115,14 +6237,12 @@ where } } - let mut channel_state_lock = self.channel_state.lock().unwrap(); - let channel_state = &mut *channel_state_lock; - let pending_msg_events = &mut channel_state.pending_msg_events; let per_peer_state = self.per_peer_state.read().unwrap(); for (_cp_id, peer_state_mutex) in per_peer_state.iter() { let mut peer_state_lock = peer_state_mutex.lock().unwrap(); let peer_state = &mut *peer_state_lock; + let pending_msg_events = &mut peer_state.pending_msg_events; peer_state.channel_by_id.retain(|_, chan| { let retain = if chan.get_counterparty_node_id() == *counterparty_node_id { if !chan.have_received_message() { @@ -6140,7 +6260,7 @@ where } } else { true }; if retain && chan.get_counterparty_node_id() != *counterparty_node_id { - if let Some(msg) = chan.get_signed_channel_announcement(self.get_our_node_id(), self.genesis_hash.clone(), self.best_block.read().unwrap().height()) { + if let Some(msg) = chan.get_signed_channel_announcement(&self.node_signer, self.genesis_hash.clone(), self.best_block.read().unwrap().height(), &self.default_configuration) { if let Ok(update_msg) = self.get_channel_update_for_broadcast(chan) { pending_msg_events.push(events::MessageSendEvent::SendChannelAnnouncement { node_id: *counterparty_node_id, @@ -6160,16 +6280,21 @@ where let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier); if msg.channel_id == [0; 32] { - for chan in self.list_channels() { - if chan.counterparty.node_id == *counterparty_node_id { - // Untrusted messages from peer, we throw away the error if id points to a non-existent channel - let _ = self.force_close_channel_with_peer(&chan.channel_id, counterparty_node_id, Some(&msg.data), true); - } + let channel_ids: Vec<[u8; 32]> = { + let per_peer_state = self.per_peer_state.read().unwrap(); + let peer_state_mutex_opt = per_peer_state.get(counterparty_node_id); + if let None = peer_state_mutex_opt { return; } + let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap(); + let peer_state = &mut *peer_state_lock; + peer_state.channel_by_id.keys().cloned().collect() + }; + for channel_id in channel_ids { + // Untrusted messages from peer, we throw away the error if id points to a non-existent channel + let _ = self.force_close_channel_with_peer(&channel_id, counterparty_node_id, Some(&msg.data), true); } } else { { // First check if we can advance the channel type and try again. 
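// [Editor's sketch] Because the message queues are now per-peer, the disconnect path above no
// longer compares `node_id`s when pruning: the new `retain` keeps only the broadcast variants in
// that one peer's queue and drops every channel-scoped event (which is regenerated on
// reconnect). A reduced model of that filtering rule with a stand-in event enum:
enum MsgEvent {
    SendOpenChannel,
    SendAcceptChannel,
    UpdateHtlcs,
    SendRevokeAndAck,
    BroadcastChannelAnnouncement,
    BroadcastChannelUpdate,
}

fn prune_events_on_disconnect(pending_msg_events: &mut Vec<MsgEvent>) {
    pending_msg_events.retain(|msg| match msg {
        // Broadcast messages are not addressed to the disconnecting peer, so they survive.
        MsgEvent::BroadcastChannelAnnouncement | MsgEvent::BroadcastChannelUpdate => true,
        // All peer-directed messages are dropped outright; no node_id comparison is needed.
        _ => false,
    });
}

fn main() {
    let mut queue = vec![MsgEvent::SendOpenChannel, MsgEvent::BroadcastChannelUpdate];
    prune_events_on_disconnect(&mut queue);
    assert_eq!(queue.len(), 1);
}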
- let mut channel_state = self.channel_state.lock().unwrap(); let per_peer_state = self.per_peer_state.read().unwrap(); let peer_state_mutex_opt = per_peer_state.get(counterparty_node_id); if let None = peer_state_mutex_opt { return; } @@ -6177,7 +6302,7 @@ where let peer_state = &mut *peer_state_lock; if let Some(chan) = peer_state.channel_by_id.get_mut(&msg.channel_id) { if let Ok(msg) = chan.maybe_handle_error_without_close(self.genesis_hash) { - channel_state.pending_msg_events.push(events::MessageSendEvent::SendOpenChannel { + peer_state.pending_msg_events.push(events::MessageSendEvent::SendOpenChannel { node_id: *counterparty_node_id, msg, }); @@ -6192,18 +6317,18 @@ where } fn provided_node_features(&self) -> NodeFeatures { - provided_node_features() + provided_node_features(&self.default_configuration) } fn provided_init_features(&self, _their_init_features: &PublicKey) -> InitFeatures { - provided_init_features() + provided_init_features(&self.default_configuration) } } /// Fetches the set of [`NodeFeatures`] flags which are provided by or required by /// [`ChannelManager`]. -pub fn provided_node_features() -> NodeFeatures { - provided_init_features().to_context() +pub(crate) fn provided_node_features(config: &UserConfig) -> NodeFeatures { + provided_init_features(config).to_context() } /// Fetches the set of [`InvoiceFeatures`] flags which are provided by or required by @@ -6212,19 +6337,25 @@ pub fn provided_node_features() -> NodeFeatures { /// Note that the invoice feature flags can vary depending on if the invoice is a "phantom invoice" /// or not. Thus, this method is not public. #[cfg(any(feature = "_test_utils", test))] -pub fn provided_invoice_features() -> InvoiceFeatures { - provided_init_features().to_context() +pub(crate) fn provided_invoice_features(config: &UserConfig) -> InvoiceFeatures { + provided_init_features(config).to_context() } /// Fetches the set of [`ChannelFeatures`] flags which are provided by or required by /// [`ChannelManager`]. -pub fn provided_channel_features() -> ChannelFeatures { - provided_init_features().to_context() +pub(crate) fn provided_channel_features(config: &UserConfig) -> ChannelFeatures { + provided_init_features(config).to_context() +} + +/// Fetches the set of [`ChannelTypeFeatures`] flags which are provided by or required by +/// [`ChannelManager`]. +pub(crate) fn provided_channel_type_features(config: &UserConfig) -> ChannelTypeFeatures { + ChannelTypeFeatures::from_init(&provided_init_features(config)) } /// Fetches the set of [`InitFeatures`] flags which are provided by or required by /// [`ChannelManager`]. -pub fn provided_init_features() -> InitFeatures { +pub fn provided_init_features(_config: &UserConfig) -> InitFeatures { // Note that if new features are added here which other peers may (eventually) require, we // should also add the corresponding (optional) bit to the ChannelMessageHandler impl for // ErroringMessageHandler. @@ -6240,6 +6371,12 @@ pub fn provided_init_features() -> InitFeatures { features.set_channel_type_optional(); features.set_scid_privacy_optional(); features.set_zero_conf_optional(); + #[cfg(anchors)] + { // Attributes are not allowed on if expressions on our current MSRV of 1.41. 
+ if _config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx { + features.set_anchors_zero_fee_htlc_tx_optional(); + } + } features } @@ -6302,7 +6439,7 @@ impl Writeable for ChannelDetails { impl Readable for ChannelDetails { fn read(reader: &mut R) -> Result { - init_and_read_tlv_fields!(reader, { + _init_and_read_tlv_fields!(reader, { (1, inbound_scid_alias, option), (2, channel_id, required), (3, channel_type, option), @@ -6631,11 +6768,13 @@ impl_writeable_tlv_based!(PendingInboundPayment, { (8, min_value_msat, required), }); -impl Writeable for ChannelManager +impl Writeable for ChannelManager where - M::Target: chain::Watch<::Signer>, + M::Target: chain::Watch<::Signer>, T::Target: BroadcasterInterface, - K::Target: KeysInterface, + ES::Target: EntropySource, + NS::Target: NodeSigner, + SP::Target: SignerProvider, F::Target: FeeEstimator, R::Target: Router, L::Target: Logger, @@ -6692,6 +6831,8 @@ where } } + let per_peer_state = self.per_peer_state.write().unwrap(); + let pending_inbound_payments = self.pending_inbound_payments.lock().unwrap(); let claimable_payments = self.claimable_payments.lock().unwrap(); let pending_outbound_payments = self.pending_outbound_payments.pending_outbound_payments.lock().unwrap(); @@ -6707,7 +6848,6 @@ where htlc_purposes.push(purpose); } - let per_peer_state = self.per_peer_state.write().unwrap(); (per_peer_state.len() as u64).write(writer)?; for (peer_pubkey, peer_state_mutex) in per_peer_state.iter() { peer_pubkey.write(writer)?; @@ -6840,19 +6980,27 @@ where /// which you've already broadcasted the transaction. /// /// [`ChainMonitor`]: crate::chain::chainmonitor::ChainMonitor -pub struct ChannelManagerReadArgs<'a, M: Deref, T: Deref, K: Deref, F: Deref, R: Deref, L: Deref> +pub struct ChannelManagerReadArgs<'a, M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Deref, F: Deref, R: Deref, L: Deref> where - M::Target: chain::Watch<::Signer>, + M::Target: chain::Watch<::Signer>, T::Target: BroadcasterInterface, - K::Target: KeysInterface, + ES::Target: EntropySource, + NS::Target: NodeSigner, + SP::Target: SignerProvider, F::Target: FeeEstimator, R::Target: Router, L::Target: Logger, { + /// A cryptographically secure source of entropy. + pub entropy_source: ES, + + /// A signer that is able to perform node-scoped cryptographic operations. + pub node_signer: NS, + /// The keys provider which will give us relevant keys. Some keys will be loaded during /// deserialization and KeysInterface::read_chan_signer will be used to read per-Channel /// signing data. - pub keys_manager: K, + pub signer_provider: SP, /// The fee_estimator for use in the ChannelManager in the future. /// @@ -6893,15 +7041,17 @@ where /// this struct. 
 ///
 /// (C-not exported) because we have no HashMap bindings
-	pub channel_monitors: HashMap::Signer>>,
+	pub channel_monitors: HashMap::Signer>>,
 }
 
-impl<'a, M: Deref, T: Deref, K: Deref, F: Deref, R: Deref, L: Deref>
-		ChannelManagerReadArgs<'a, M, T, K, F, R, L>
+impl<'a, M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Deref, F: Deref, R: Deref, L: Deref>
+		ChannelManagerReadArgs<'a, M, T, ES, NS, SP, F, R, L>
 where
-	M::Target: chain::Watch<::Signer>,
+	M::Target: chain::Watch<::Signer>,
 	T::Target: BroadcasterInterface,
-	K::Target: KeysInterface,
+	ES::Target: EntropySource,
+	NS::Target: NodeSigner,
+	SP::Target: SignerProvider,
 	F::Target: FeeEstimator,
 	R::Target: Router,
 	L::Target: Logger,
@@ -6909,10 +7059,10 @@ where
 	/// Simple utility function to create a ChannelManagerReadArgs which creates the monitor
 	/// HashMap for you. This is primarily useful for C bindings where it is not practical to
 	/// populate a HashMap directly from C.
-	pub fn new(keys_manager: K, fee_estimator: F, chain_monitor: M, tx_broadcaster: T, router: R, logger: L, default_config: UserConfig,
-			mut channel_monitors: Vec<&'a mut ChannelMonitor<::Signer>>) -> Self {
+	pub fn new(entropy_source: ES, node_signer: NS, signer_provider: SP, fee_estimator: F, chain_monitor: M, tx_broadcaster: T, router: R, logger: L, default_config: UserConfig,
+			mut channel_monitors: Vec<&'a mut ChannelMonitor<::Signer>>) -> Self {
 		Self {
-			keys_manager, fee_estimator, chain_monitor, tx_broadcaster, router, logger, default_config,
+			entropy_source, node_signer, signer_provider, fee_estimator, chain_monitor, tx_broadcaster, router, logger, default_config,
 			channel_monitors: channel_monitors.drain(..).map(|monitor| { (monitor.get_funding_txo().0, monitor) }).collect()
 		}
 	}
 }
@@ -6920,33 +7070,37 @@ where
 // Implement ReadableArgs for an Arc'd ChannelManager to make it a bit easier to work with the
 // SipmleArcChannelManager type:
-impl<'a, M: Deref, T: Deref, K: Deref, F: Deref, R: Deref, L: Deref>
-	ReadableArgs> for (BlockHash, Arc>)
+impl<'a, M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Deref, F: Deref, R: Deref, L: Deref>
+	ReadableArgs> for (BlockHash, Arc>)
 where
-	M::Target: chain::Watch<::Signer>,
+	M::Target: chain::Watch<::Signer>,
 	T::Target: BroadcasterInterface,
-	K::Target: KeysInterface,
+	ES::Target: EntropySource,
+	NS::Target: NodeSigner,
+	SP::Target: SignerProvider,
 	F::Target: FeeEstimator,
 	R::Target: Router,
 	L::Target: Logger,
 {
-	fn read(reader: &mut Reader, args: ChannelManagerReadArgs<'a, M, T, K, F, R, L>) -> Result {
-		let (blockhash, chan_manager) = <(BlockHash, ChannelManager)>::read(reader, args)?;
+	fn read(reader: &mut Reader, args: ChannelManagerReadArgs<'a, M, T, ES, NS, SP, F, R, L>) -> Result {
+		let (blockhash, chan_manager) = <(BlockHash, ChannelManager)>::read(reader, args)?;
 		Ok((blockhash, Arc::new(chan_manager)))
 	}
 }
 
-impl<'a, M: Deref, T: Deref, K: Deref, F: Deref, R: Deref, L: Deref>
-	ReadableArgs> for (BlockHash, ChannelManager)
+impl<'a, M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Deref, F: Deref, R: Deref, L: Deref>
+	ReadableArgs> for (BlockHash, ChannelManager)
 where
-	M::Target: chain::Watch<::Signer>,
+	M::Target: chain::Watch<::Signer>,
 	T::Target: BroadcasterInterface,
-	K::Target: KeysInterface,
+	ES::Target: EntropySource,
+	NS::Target: NodeSigner,
+	SP::Target: SignerProvider,
 	F::Target: FeeEstimator,
 	R::Target: Router,
 	L::Target: Logger,
 {
-	fn read(reader: &mut Reader, mut args: ChannelManagerReadArgs<'a, M, T, K, F, R, L>) -> Result {
+	fn read(reader: &mut Reader, mut args: ChannelManagerReadArgs<'a, M, T, ES, NS, SP, F, R, L>) -> Result {
 		let _ver = read_ver_prefix!(reader, SERIALIZATION_VERSION);
 
 		let genesis_hash: BlockHash = Readable::read(reader)?;
@@ -6957,12 +7111,14 @@ where
 		let channel_count: u64 = Readable::read(reader)?;
 		let mut funding_txo_set = HashSet::with_capacity(cmp::min(channel_count as usize, 128));
-		let mut peer_channels: HashMap::Signer>>> = HashMap::with_capacity(cmp::min(channel_count as usize, 128));
+		let mut peer_channels: HashMap::Signer>>> = HashMap::with_capacity(cmp::min(channel_count as usize, 128));
 		let mut id_to_peer = HashMap::with_capacity(cmp::min(channel_count as usize, 128));
 		let mut short_to_chan_info = HashMap::with_capacity(cmp::min(channel_count as usize, 128));
 		let mut channel_closures = Vec::new();
 
 		for _ in 0..channel_count {
-			let mut channel: Channel<::Signer> = Channel::read(reader, (&args.keys_manager, best_block_height))?;
+			let mut channel: Channel<::Signer> = Channel::read(reader, (
+				&args.entropy_source, &args.signer_provider, best_block_height, &provided_channel_type_features(&args.default_config)
+			))?;
 			let funding_txo = channel.get_funding_txo().ok_or(DecodeError::InvalidValue)?;
 			funding_txo_set.insert(funding_txo.clone());
 			if let Some(ref mut monitor) = args.channel_monitors.get_mut(&funding_txo) {
@@ -7055,7 +7211,7 @@ where
 			}
 		}
 
-		for (ref funding_txo, ref mut monitor) in args.channel_monitors.iter_mut() {
+		for (funding_txo, monitor) in args.channel_monitors.iter_mut() {
 			if !funding_txo_set.contains(funding_txo) {
 				log_info!(args.logger, "Broadcasting latest holder commitment transaction for closed channel {}", log_bytes!(funding_txo.to_channel_id()));
 				monitor.broadcast_latest_holder_commitment_txn(&args.tx_broadcaster, &args.logger);
@@ -7088,12 +7244,13 @@ where
 		}
 
 		let peer_count: u64 = Readable::read(reader)?;
-		let mut per_peer_state = HashMap::with_capacity(cmp::min(peer_count as usize, MAX_ALLOC_SIZE/mem::size_of::<(PublicKey, Mutex::Signer>>)>()));
+		let mut per_peer_state = HashMap::with_capacity(cmp::min(peer_count as usize, MAX_ALLOC_SIZE/mem::size_of::<(PublicKey, Mutex::Signer>>)>()));
 		for _ in 0..peer_count {
 			let peer_pubkey = Readable::read(reader)?;
 			let peer_state = PeerState {
 				channel_by_id: peer_channels.remove(&peer_pubkey).unwrap_or(HashMap::new()),
 				latest_features: Readable::read(reader)?,
+				pending_msg_events: Vec::new(),
 			};
 			per_peer_state.insert(peer_pubkey, Mutex::new(peer_state));
 		}
@@ -7160,11 +7317,11 @@ where
 			(11, probing_cookie_secret, option),
 		});
 		if fake_scid_rand_bytes.is_none() {
-			fake_scid_rand_bytes = Some(args.keys_manager.get_secure_random_bytes());
+			fake_scid_rand_bytes = Some(args.entropy_source.get_secure_random_bytes());
 		}
 
 		if probing_cookie_secret.is_none() {
-			probing_cookie_secret = Some(args.keys_manager.get_secure_random_bytes());
+			probing_cookie_secret = Some(args.entropy_source.get_secure_random_bytes());
 		}
 
 		if pending_outbound_payments.is_none() && pending_outbound_payments_no_retry.is_none() {
@@ -7204,6 +7361,9 @@ where
 					hash_map::Entry::Vacant(entry) => {
 						let path_fee = path.get_path_fees();
 						entry.insert(PendingOutboundPayment::Retryable {
+							retry_strategy: Retry::Attempts(0),
+							attempts: PaymentAttempts::new(),
+							route_params: None,
 							session_privs: [session_priv_bytes].iter().map(|a| *a).collect(),
 							payment_hash: htlc.payment_hash,
 							payment_secret,
@@ -7270,7 +7430,7 @@ where
 			});
 		}
 
-		let inbound_pmt_key_material = args.keys_manager.get_inbound_payment_key_material();
+		let inbound_pmt_key_material = args.node_signer.get_inbound_payment_key_material();
 		let expanded_inbound_key = inbound_payment::ExpandedKey::new(&inbound_pmt_key_material);
 
 		let mut claimable_htlcs = HashMap::with_capacity(claimable_htlcs_list.len());
@@ -7295,7 +7455,7 @@ where
 						payment_preimage: match pending_inbound_payments.get(&payment_hash) {
 							Some(inbound_payment) => inbound_payment.payment_preimage,
 							None => match inbound_payment::verify(payment_hash, &hop_data, 0, &expanded_inbound_key, &args.logger) {
-								Ok(payment_preimage) => payment_preimage,
+								Ok((payment_preimage, _)) => payment_preimage,
 								Err(()) => {
 									log_error!(args.logger, "Failed to read claimable payment data for HTLC with payment hash {} - was not a pending inbound payment and didn't match our payment key", log_bytes!(payment_hash.0));
 									return Err(DecodeError::InvalidValue);
@@ -7314,17 +7474,16 @@ where
 		}
 
 		let mut secp_ctx = Secp256k1::new();
-		secp_ctx.seeded_randomize(&args.keys_manager.get_secure_random_bytes());
+		secp_ctx.seeded_randomize(&args.entropy_source.get_secure_random_bytes());
 
 		if !channel_closures.is_empty() {
 			pending_events_read.append(&mut channel_closures);
 		}
 
-		let our_network_key = match args.keys_manager.get_node_secret(Recipient::Node) {
+		let our_network_pubkey = match args.node_signer.get_node_id(Recipient::Node) {
 			Ok(key) => key,
 			Err(()) => return Err(DecodeError::InvalidValue)
 		};
-		let our_network_pubkey = PublicKey::from_secret_key(&secp_ctx, &our_network_key);
 		if let Some(network_pubkey) = received_network_pubkey {
 			if network_pubkey != our_network_pubkey {
 				log_error!(args.logger, "Key that was generated does not match the existing key.");
@@ -7341,7 +7500,7 @@ where
 				let mut outbound_scid_alias;
 				loop {
 					outbound_scid_alias = fake_scid::Namespace::OutboundAlias
-						.get_fake_scid(best_block_height, &genesis_hash, fake_scid_rand_bytes.as_ref().unwrap(), &args.keys_manager);
+						.get_fake_scid(best_block_height, &genesis_hash, fake_scid_rand_bytes.as_ref().unwrap(), &args.entropy_source);
 					if outbound_scid_aliases.insert(outbound_scid_alias) { break; }
 				}
 				chan.set_outbound_scid_alias(outbound_scid_alias);
@@ -7372,7 +7531,7 @@ where
 			let mut receiver_node_id = Some(our_network_pubkey);
 			let phantom_shared_secret = claimable_htlcs[0].prev_hop.phantom_shared_secret;
 			if phantom_shared_secret.is_some() {
-				let phantom_pubkey = args.keys_manager.get_node_id(Recipient::PhantomNode)
+				let phantom_pubkey = args.node_signer.get_node_id(Recipient::PhantomNode)
 					.expect("Failed to get node_id for phantom node recipient");
 				receiver_node_id = Some(phantom_pubkey)
 			}
@@ -7426,9 +7585,6 @@ where
 
 			best_block: RwLock::new(BestBlock::new(best_block_hash, best_block_height)),
 
-			channel_state: Mutex::new(ChannelHolder {
-				pending_msg_events: Vec::new(),
-			}),
 			inbound_payment_key: expanded_inbound_key,
 			pending_inbound_payments: Mutex::new(pending_inbound_payments),
 			pending_outbound_payments: OutboundPayments { pending_outbound_payments: Mutex::new(pending_outbound_payments.unwrap()) },
@@ -7443,7 +7599,6 @@ where
 
 			probing_cookie_secret: probing_cookie_secret.unwrap(),
 
-			our_network_key,
 			our_network_pubkey,
 			secp_ctx,
@@ -7456,7 +7611,10 @@ where
 			total_consistency_lock: RwLock::new(()),
 			persistence_notifier: Notifier::new(),
 
-			keys_manager: args.keys_manager,
+			entropy_source: args.entropy_source,
+			node_signer: args.node_signer,
+			signer_provider: args.signer_provider,
+
 			logger: args.logger,
 			default_configuration: args.default_config,
 		};
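// Illustrative sketch (not part of this patch): with `KeysInterface` split into
// `EntropySource`, `NodeSigner` and `SignerProvider`, a `KeysManager` still implements all
// three traits, so the same reference can be handed to each of the new arguments. Assuming
// this runs inside a node's startup path where `keys_manager`, `fee_estimator`,
// `chain_monitor`, `tx_broadcaster`, `router`, `logger`, `user_config` and the reloaded
// `monitors` already exist (all names here are stand-ins, not from this patch), a
// persist-and-reload round trip looks roughly like:
let serialized = channel_manager.encode();
// ... later, on restart, with `monitors: Vec<&mut ChannelMonitor<_>>` read back from disk:
let read_args = ChannelManagerReadArgs::new(
	&keys_manager, &keys_manager, &keys_manager, // entropy_source, node_signer, signer_provider
	&fee_estimator, &chain_monitor, &tx_broadcaster, &router, &logger,
	user_config, monitors,
);
// The ChannelManager type parameters are left for inference from `read_args` in this sketch.
let (_best_block_hash, channel_manager) =
	<(BlockHash, ChannelManager<_, _, _, _, _, _, _, _>)>::read(&mut std::io::Cursor::new(serialized), read_args)
	.expect("the serialized ChannelManager and monitors should be consistent");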
@@ -7488,7 +7646,7 @@ mod tests {
 	use core::time::Duration;
 	use core::sync::atomic::Ordering;
 	use crate::ln::{PaymentPreimage, PaymentHash, PaymentSecret};
-	use crate::ln::channelmanager::{self, inbound_payment, PaymentId, PaymentSendFailure, InterceptId};
+	use crate::ln::channelmanager::{inbound_payment, PaymentId, PaymentSendFailure, InterceptId};
 	use crate::ln::functional_test_utils::*;
 	use crate::ln::msgs;
 	use crate::ln::msgs::{ChannelMessageHandler, OptionalField};
@@ -7497,7 +7655,7 @@ mod tests {
 	use crate::util::events::{Event, HTLCDestination, MessageSendEvent, MessageSendEventsProvider, ClosureReason};
 	use crate::util::test_utils;
 	use crate::util::config::ChannelConfig;
-	use crate::chain::keysinterface::{EntropySource, KeysInterface};
+	use crate::chain::keysinterface::EntropySource;
 
 	#[test]
 	fn test_notify_limits() {
@@ -7514,7 +7672,7 @@ mod tests {
 		assert!(nodes[1].node.await_persistable_update_timeout(Duration::from_millis(1)));
 		assert!(nodes[2].node.await_persistable_update_timeout(Duration::from_millis(1)));
 
-		let mut chan = create_announced_chan_between_nodes(&nodes, 0, 1, channelmanager::provided_init_features(), channelmanager::provided_init_features());
+		let mut chan = create_announced_chan_between_nodes(&nodes, 0, 1);
 
 		// We check that the channel info nodes have doesn't change too early, even though we try
 		// to connect messages with new values
@@ -7585,7 +7743,7 @@ mod tests {
 		let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
 		let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
 		let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
-		create_announced_chan_between_nodes(&nodes, 0, 1, channelmanager::provided_init_features(), channelmanager::provided_init_features());
+		create_announced_chan_between_nodes(&nodes, 0, 1);
 
 		// First, send a partial MPP payment.
 		let (route, our_payment_hash, payment_preimage, payment_secret) = get_route_and_payment_hash!(&nodes[0], nodes[1], 100_000);
@@ -7707,7 +7865,7 @@ mod tests {
 		let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
 		let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
 		let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
-		create_announced_chan_between_nodes(&nodes, 0, 1, channelmanager::provided_init_features(), channelmanager::provided_init_features());
+		create_announced_chan_between_nodes(&nodes, 0, 1);
 		let scorer = test_utils::TestScorer::with_penalty(0);
 		let random_seed_bytes = chanmon_cfgs[1].keys_manager.get_secure_random_bytes();
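// Illustrative sketch (not part of this patch): `PaymentParameters::for_keysend` now takes
// the final CLTV expiry delta alongside the payee's node id, as the hunk below shows.
// Assuming `payee_pubkey` is the destination `PublicKey` and `TEST_FINAL_CLTV` the usual
// test constant, keysend route parameters are built like this:
let route_params = RouteParameters {
	payment_params: PaymentParameters::for_keysend(payee_pubkey, TEST_FINAL_CLTV),
	final_value_msat: 10_000,
	final_cltv_expiry_delta: TEST_FINAL_CLTV,
};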
@@ -7717,7 +7875,7 @@
 		// Next, attempt a keysend payment and make sure it fails.
 		let route_params = RouteParameters {
-			payment_params: PaymentParameters::for_keysend(expected_route.last().unwrap().node.get_our_node_id()),
+			payment_params: PaymentParameters::for_keysend(expected_route.last().unwrap().node.get_our_node_id(), TEST_FINAL_CLTV),
 			final_value_msat: 100_000,
 			final_cltv_expiry_delta: TEST_FINAL_CLTV,
 		};
@@ -7805,12 +7963,12 @@ mod tests {
 		let payer_pubkey = nodes[0].node.get_our_node_id();
 		let payee_pubkey = nodes[1].node.get_our_node_id();
 
-		nodes[0].node.peer_connected(&payee_pubkey, &msgs::Init { features: channelmanager::provided_init_features(), remote_network_address: None }).unwrap();
-		nodes[1].node.peer_connected(&payer_pubkey, &msgs::Init { features: channelmanager::provided_init_features(), remote_network_address: None }).unwrap();
+		nodes[0].node.peer_connected(&payee_pubkey, &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }).unwrap();
+		nodes[1].node.peer_connected(&payer_pubkey, &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }).unwrap();
 
-		let _chan = create_chan_between_nodes(&nodes[0], &nodes[1], channelmanager::provided_init_features(), channelmanager::provided_init_features());
+		let _chan = create_chan_between_nodes(&nodes[0], &nodes[1]);
 		let route_params = RouteParameters {
-			payment_params: PaymentParameters::for_keysend(payee_pubkey),
+			payment_params: PaymentParameters::for_keysend(payee_pubkey, 40),
 			final_value_msat: 10_000,
 			final_cltv_expiry_delta: 40,
 		};
@@ -7850,12 +8008,12 @@ mod tests {
 		let payer_pubkey = nodes[0].node.get_our_node_id();
 		let payee_pubkey = nodes[1].node.get_our_node_id();
 
-		nodes[0].node.peer_connected(&payee_pubkey, &msgs::Init { features: channelmanager::provided_init_features(), remote_network_address: None }).unwrap();
-		nodes[1].node.peer_connected(&payer_pubkey, &msgs::Init { features: channelmanager::provided_init_features(), remote_network_address: None }).unwrap();
+		nodes[0].node.peer_connected(&payee_pubkey, &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }).unwrap();
+		nodes[1].node.peer_connected(&payer_pubkey, &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }).unwrap();
 
-		let _chan = create_chan_between_nodes(&nodes[0], &nodes[1], channelmanager::provided_init_features(), channelmanager::provided_init_features());
+		let _chan = create_chan_between_nodes(&nodes[0], &nodes[1]);
 		let route_params = RouteParameters {
-			payment_params: PaymentParameters::for_keysend(payee_pubkey),
+			payment_params: PaymentParameters::for_keysend(payee_pubkey, 40),
 			final_value_msat: 10_000,
 			final_cltv_expiry_delta: 40,
 		};
@@ -7893,10 +8051,10 @@ mod tests {
 		let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[None, None, None, None]);
 		let nodes = create_network(4, &node_cfgs, &node_chanmgrs);
 
-		let chan_1_id = create_announced_chan_between_nodes(&nodes, 0, 1, channelmanager::provided_init_features(), channelmanager::provided_init_features()).0.contents.short_channel_id;
-		let chan_2_id = create_announced_chan_between_nodes(&nodes, 0, 2, channelmanager::provided_init_features(), channelmanager::provided_init_features()).0.contents.short_channel_id;
-		let chan_3_id = create_announced_chan_between_nodes(&nodes, 1, 3, channelmanager::provided_init_features(), channelmanager::provided_init_features()).0.contents.short_channel_id;
-		let chan_4_id = create_announced_chan_between_nodes(&nodes, 2, 3, channelmanager::provided_init_features(), channelmanager::provided_init_features()).0.contents.short_channel_id;
+		let chan_1_id = create_announced_chan_between_nodes(&nodes, 0, 1).0.contents.short_channel_id;
+		let chan_2_id = create_announced_chan_between_nodes(&nodes, 0, 2).0.contents.short_channel_id;
+		let chan_3_id = create_announced_chan_between_nodes(&nodes, 1, 3).0.contents.short_channel_id;
+		let chan_4_id = create_announced_chan_between_nodes(&nodes, 2, 3).0.contents.short_channel_id;
 
 		// Marshall an MPP route.
 		let (mut route, payment_hash, _, _) = get_route_and_payment_hash!(&nodes[0], nodes[3], 100000);
@@ -7957,9 +8115,9 @@ mod tests {
 
 		nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 1_000_000, 500_000_000, 42, None).unwrap();
 		let open_channel = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
-		nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), channelmanager::provided_init_features(), &open_channel);
+		nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_channel);
 		let accept_channel = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
-		nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), channelmanager::provided_init_features(), &accept_channel);
+		nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &accept_channel);
 
 		let (temporary_channel_id, tx, _funding_output) = create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 1_000_000, 42);
 		let channel_id = &tx.txid().into_inner();
@@ -8004,9 +8162,9 @@ mod tests {
 		update_nodes_with_chan_announce(&nodes, 0, 1, &announcement, &nodes_0_update, &nodes_1_update);
 
 		nodes[0].node.close_channel(channel_id, &nodes[1].node.get_our_node_id()).unwrap();
-		nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &channelmanager::provided_init_features(), &get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id()));
+		nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id()));
 		let nodes_1_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id());
-		nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &channelmanager::provided_init_features(), &nodes_1_shutdown);
+		nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &nodes_1_shutdown);
 
 		let closing_signed_node_0 = get_event_msg!(nodes[0], MessageSendEvent::SendClosingSigned, nodes[1].node.get_our_node_id());
 		nodes[1].node.handle_closing_signed(&nodes[0].node.get_our_node_id(), &closing_signed_node_0);
@@ -8058,23 +8216,6 @@ mod tests {
 		check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure);
 	}
 
-	fn check_unkown_peer_msg_event<'a, 'b: 'a, 'c: 'b>(node: &Node<'a, 'b, 'c>, closing_node_id: PublicKey, closed_channel_id: [u8; 32]){
-		let close_msg_ev = node.node.get_and_clear_pending_msg_events();
-		let expected_error_str = format!("Can't find a peer matching the passed counterparty node_id {}", closing_node_id);
-		match close_msg_ev[0] {
-			MessageSendEvent::HandleError {
-				ref node_id, action: msgs::ErrorAction::SendErrorMessage {
-					msg: msgs::ErrorMessage { ref channel_id, ref data }
-				}
-			} => {
-				assert_eq!(*node_id, closing_node_id);
-				assert_eq!(*data, expected_error_str);
-				assert_eq!(*channel_id, closed_channel_id);
-			}
-			_ => panic!("Unexpected event"),
-		}
-	}
-
 	fn check_not_connected_to_peer_error(res_err: Result, expected_public_key: PublicKey) {
 		let expected_message = format!("Not connected to node: {}", expected_public_key);
 		check_api_misuse_error_message(expected_message, res_err)
@@ -8109,7 +8250,7 @@ mod tests {
 		// creating dummy ones.
 		nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 1_000_000, 500_000_000, 42, None).unwrap();
 		let open_channel_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
-		nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), channelmanager::provided_init_features(), &open_channel_msg);
+		nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_channel_msg);
		let accept_channel_msg = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
 
 		// Dummy values
@@ -8218,24 +8359,19 @@ mod tests {
 		// Test the API functions and message handlers.
 		check_not_connected_to_peer_error(nodes[0].node.create_channel(unkown_public_key, 1_000_000, 500_000_000, 42, None), unkown_public_key);
 
-		nodes[1].node.handle_open_channel(&unkown_public_key, channelmanager::provided_init_features(), &open_channel_msg);
-		check_unkown_peer_msg_event(&nodes[1], unkown_public_key, open_channel_msg.temporary_channel_id);
+		nodes[1].node.handle_open_channel(&unkown_public_key, &open_channel_msg);
 
-		nodes[0].node.handle_accept_channel(&unkown_public_key, channelmanager::provided_init_features(), &accept_channel_msg);
-		check_unkown_peer_msg_event(&nodes[0], unkown_public_key, open_channel_msg.temporary_channel_id);
+		nodes[0].node.handle_accept_channel(&unkown_public_key, &accept_channel_msg);
 
 		check_unkown_peer_error(nodes[0].node.accept_inbound_channel(&open_channel_msg.temporary_channel_id, &unkown_public_key, 42), unkown_public_key);
+
 		nodes[1].node.handle_funding_created(&unkown_public_key, &funding_created_msg);
-		check_unkown_peer_msg_event(&nodes[1], unkown_public_key, open_channel_msg.temporary_channel_id);
 
 		nodes[0].node.handle_funding_signed(&unkown_public_key, &funding_signed_msg);
-		check_unkown_peer_msg_event(&nodes[0], unkown_public_key, channel_id);
 
 		nodes[0].node.handle_channel_ready(&unkown_public_key, &channel_ready_msg);
-		check_unkown_peer_msg_event(&nodes[0], unkown_public_key, channel_id);
 
 		nodes[1].node.handle_announcement_signatures(&unkown_public_key, &announcement_signatures_msg);
-		check_unkown_peer_msg_event(&nodes[1], unkown_public_key, channel_id);
 
 		check_unkown_peer_error(nodes[0].node.close_channel(&channel_id, &unkown_public_key), unkown_public_key);
@@ -8247,35 +8383,61 @@ mod tests {
 		check_unkown_peer_error(nodes[0].node.update_channel_config(&unkown_public_key, &[channel_id], &ChannelConfig::default()), unkown_public_key);
 
-		nodes[0].node.handle_shutdown(&unkown_public_key, &channelmanager::provided_init_features(), &shutdown_msg);
-		check_unkown_peer_msg_event(&nodes[0], unkown_public_key, channel_id);
+		nodes[0].node.handle_shutdown(&unkown_public_key, &shutdown_msg);
 
 		nodes[1].node.handle_closing_signed(&unkown_public_key, &closing_signed_msg);
-		check_unkown_peer_msg_event(&nodes[1], unkown_public_key, channel_id);
 
 		nodes[0].node.handle_channel_reestablish(&unkown_public_key, &channel_reestablish_msg);
-		check_unkown_peer_msg_event(&nodes[0], unkown_public_key, channel_id);
 
 		nodes[1].node.handle_update_add_htlc(&unkown_public_key, &update_add_htlc_msg);
-		check_unkown_peer_msg_event(&nodes[1], unkown_public_key, channel_id);
 
 		nodes[1].node.handle_commitment_signed(&unkown_public_key, &commitment_signed_msg);
-		check_unkown_peer_msg_event(&nodes[1], unkown_public_key, channel_id);
 
 		nodes[1].node.handle_update_fail_malformed_htlc(&unkown_public_key, &malformed_update_msg);
-		check_unkown_peer_msg_event(&nodes[1], unkown_public_key, channel_id);
 
 		nodes[1].node.handle_update_fail_htlc(&unkown_public_key, &fail_update_msg);
-		check_unkown_peer_msg_event(&nodes[1], unkown_public_key, channel_id);
 
 		nodes[1].node.handle_update_fulfill_htlc(&unkown_public_key, &fulfill_update_msg);
-		check_unkown_peer_msg_event(&nodes[1], unkown_public_key, channel_id);
 
 		nodes[1].node.handle_revoke_and_ack(&unkown_public_key, &revoke_and_ack_msg);
-		check_unkown_peer_msg_event(&nodes[1], unkown_public_key, channel_id);
 
 		nodes[1].node.handle_update_fee(&unkown_public_key, &update_fee_msg);
-		check_unkown_peer_msg_event(&nodes[1], unkown_public_key, channel_id);
+	}
+
+	#[cfg(anchors)]
+	#[test]
+	fn test_anchors_zero_fee_htlc_tx_fallback() {
+		// Tests that if both nodes support anchors, but the remote node does not want to accept
+		// anchor channels at the moment, an error is sent to the local node such that it can retry
+		// the channel without the anchors feature.
+		let chanmon_cfgs = create_chanmon_cfgs(2);
+		let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+		let mut anchors_config = test_default_channel_config();
+		anchors_config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx = true;
+		anchors_config.manually_accept_inbound_channels = true;
+		let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(anchors_config.clone()), Some(anchors_config.clone())]);
+		let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+		nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, 0, 0, None).unwrap();
+		let open_channel_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
+		assert!(open_channel_msg.channel_type.as_ref().unwrap().supports_anchors_zero_fee_htlc_tx());
+
+		nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_channel_msg);
+		let events = nodes[1].node.get_and_clear_pending_events();
+		match events[0] {
+			Event::OpenChannelRequest { temporary_channel_id, .. } => {
+				nodes[1].node.force_close_broadcasting_latest_txn(&temporary_channel_id, &nodes[0].node.get_our_node_id()).unwrap();
+			}
+			_ => panic!("Unexpected event"),
+		}
+
+		let error_msg = get_err_msg!(nodes[1], nodes[0].node.get_our_node_id());
+		nodes[0].node.handle_error(&nodes[1].node.get_our_node_id(), &error_msg);
+
+		let open_channel_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
+		assert!(!open_channel_msg.channel_type.unwrap().supports_anchors_zero_fee_htlc_tx());
+
+		check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed);
 	}
 }
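// Illustrative sketch (not part of this patch): the test above drives the feature-negotiation
// fallback end to end. The configuration side of it, with the `anchors` cfg flag enabled,
// reduces to setting the new handshake knob and observing that `provided_init_features`
// advertises the optional feature bit (the `supports_*` accessor name is assumed to be the
// one generated for this feature):
let mut config = UserConfig::default();
config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx = true;
#[cfg(anchors)]
{
	assert!(provided_init_features(&config).supports_anchors_zero_fee_htlc_tx());
}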
@@ -8283,7 +8445,7 @@ mod tests {
 	use crate::chain::Listen;
 	use crate::chain::chainmonitor::{ChainMonitor, Persist};
-	use crate::chain::keysinterface::{EntropySource, KeysManager, KeysInterface, InMemorySigner};
+	use crate::chain::keysinterface::{EntropySource, KeysManager, InMemorySigner};
 	use crate::ln::channelmanager::{self, BestBlock, ChainParameters, ChannelManager, PaymentHash, PaymentPreimage, PaymentId};
 	use crate::ln::functional_test_utils::*;
 	use crate::ln::msgs::{ChannelMessageHandler, Init};
@@ -8306,7 +8468,7 @@
 			&'a ChainMonitor,
-			&'a test_utils::TestBroadcaster, &'a KeysManager,
+			&'a test_utils::TestBroadcaster, &'a KeysManager, &'a KeysManager, &'a KeysManager,
 			&'a test_utils::TestFeeEstimator, &'a test_utils::TestRouter<'a>,
 			&'a test_utils::TestLogger>,
 	}
@@ -8335,7 +8497,7 @@
 		let chain_monitor_a = ChainMonitor::new(None, &tx_broadcaster, &logger_a, &fee_estimator, &persister_a);
 		let seed_a = [1u8; 32];
 		let keys_manager_a = KeysManager::new(&seed_a, 42, 42);
-		let node_a = ChannelManager::new(&fee_estimator, &chain_monitor_a, &tx_broadcaster, &router, &logger_a, &keys_manager_a, config.clone(), ChainParameters {
+		let node_a = ChannelManager::new(&fee_estimator, &chain_monitor_a, &tx_broadcaster, &router, &logger_a, &keys_manager_a, &keys_manager_a, &keys_manager_a, config.clone(), ChainParameters {
 			network,
 			best_block: BestBlock::from_genesis(network),
 		});
@@ -8345,17 +8507,17 @@
 		let chain_monitor_b = ChainMonitor::new(None, &tx_broadcaster, &logger_a, &fee_estimator, &persister_b);
 		let seed_b = [2u8; 32];
 		let keys_manager_b = KeysManager::new(&seed_b, 42, 42);
-		let node_b = ChannelManager::new(&fee_estimator, &chain_monitor_b, &tx_broadcaster, &router, &logger_b, &keys_manager_b, config.clone(), ChainParameters {
+		let node_b = ChannelManager::new(&fee_estimator, &chain_monitor_b, &tx_broadcaster, &router, &logger_b, &keys_manager_b, &keys_manager_b, &keys_manager_b, config.clone(), ChainParameters {
 			network,
 			best_block: BestBlock::from_genesis(network),
 		});
 		let node_b_holder = NodeHolder { node: &node_b };
 
-		node_a.peer_connected(&node_b.get_our_node_id(), &Init { features: channelmanager::provided_init_features(), remote_network_address: None }).unwrap();
-		node_b.peer_connected(&node_a.get_our_node_id(), &Init { features: channelmanager::provided_init_features(), remote_network_address: None }).unwrap();
+		node_a.peer_connected(&node_b.get_our_node_id(), &Init { features: node_b.init_features(), remote_network_address: None }).unwrap();
+		node_b.peer_connected(&node_a.get_our_node_id(), &Init { features: node_a.init_features(), remote_network_address: None }).unwrap();
 		node_a.create_channel(node_b.get_our_node_id(), 8_000_000, 100_000_000, 42, None).unwrap();
-		node_b.handle_open_channel(&node_a.get_our_node_id(), channelmanager::provided_init_features(), &get_event_msg!(node_a_holder, MessageSendEvent::SendOpenChannel, node_b.get_our_node_id()));
-		node_a.handle_accept_channel(&node_b.get_our_node_id(), channelmanager::provided_init_features(), &get_event_msg!(node_b_holder, MessageSendEvent::SendAcceptChannel, node_a.get_our_node_id()));
+		node_b.handle_open_channel(&node_a.get_our_node_id(), &get_event_msg!(node_a_holder, MessageSendEvent::SendOpenChannel, node_b.get_our_node_id()));
+		node_a.handle_accept_channel(&node_b.get_our_node_id(), &get_event_msg!(node_b_holder, MessageSendEvent::SendAcceptChannel, node_a.get_our_node_id()));
 
 		let tx;
 		if let Event::FundingGenerationReady { temporary_channel_id, output_script, .. } = get_event!(node_a_holder, Event::FundingGenerationReady) {
@@ -8416,8 +8578,8 @@
 	macro_rules! send_payment {
 		($node_a: expr, $node_b: expr) => {
 			let usable_channels = $node_a.list_usable_channels();
-			let payment_params = PaymentParameters::from_node_id($node_b.get_our_node_id())
-				.with_features(channelmanager::provided_invoice_features());
+			let payment_params = PaymentParameters::from_node_id($node_b.get_our_node_id(), TEST_FINAL_CLTV)
+				.with_features($node_b.invoice_features());
 			let scorer = test_utils::TestScorer::with_penalty(0);
 			let seed = [3u8; 32];
 			let keys_manager = KeysManager::new(&seed, 42, 42);
@@ -8429,7 +8591,7 @@
 			payment_preimage.0[0..8].copy_from_slice(&payment_count.to_le_bytes());
 			payment_count += 1;
 			let payment_hash = PaymentHash(Sha256::hash(&payment_preimage.0[..]).into_inner());
-			let payment_secret = $node_b.create_inbound_payment_for_hash(payment_hash, None, 7200).unwrap();
+			let payment_secret = $node_b.create_inbound_payment_for_hash(payment_hash, None, 7200, None).unwrap();
 
 			$node_a.send_payment(&route, payment_hash, &Some(payment_secret), PaymentId(payment_hash.0)).unwrap();
 			let payment_event = SendEvent::from_event($node_a.get_and_clear_pending_msg_events().pop().unwrap());
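// Illustrative sketch (not part of this patch): outside the bench macro, the same change
// means a sender now derives payment parameters from the recipient's advertised invoice
// features and supplies the final CLTV expiry delta up front. Assuming `recipient` is a
// reference to the counterparty's ChannelManager and `TEST_FINAL_CLTV` the usual test
// constant:
let payment_params = PaymentParameters::from_node_id(recipient.get_our_node_id(), TEST_FINAL_CLTV)
	.with_features(recipient.invoice_features());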