X-Git-Url: http://git.bitcoin.ninja/index.cgi?a=blobdiff_plain;f=lightning%2Fsrc%2Fln%2Fchannelmanager.rs;h=918e2583782743f810bd54e5fd61c6b9ba81aa0e;hb=72183bd93299e1eec7c73146c56103ed0290a19d;hp=ec316c6ec7d2c412cbdba1ea30e1aa868ed62be3;hpb=1b70a1e9733bc135a02a3df618852b9d542afcce;p=rust-lightning diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index ec316c6e..918e2583 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -57,7 +57,7 @@ use crate::ln::msgs::{ChannelMessageHandler, DecodeError, LightningError, MAX_VA use crate::ln::outbound_payment; use crate::ln::outbound_payment::{OutboundPayments, PendingOutboundPayment}; use crate::ln::wire::Encode; -use crate::chain::keysinterface::{EntropySource, KeysInterface, KeysManager, NodeSigner, Recipient, Sign, SignerProvider}; +use crate::chain::keysinterface::{EntropySource, KeysManager, NodeSigner, Recipient, Sign, SignerProvider}; use crate::util::config::{UserConfig, ChannelConfig}; use crate::util::events::{Event, EventHandler, EventsProvider, MessageSendEvent, MessageSendEventsProvider, ClosureReason, HTLCDestination}; use crate::util::events; @@ -72,7 +72,7 @@ use crate::prelude::*; use core::{cmp, mem}; use core::cell::RefCell; use crate::io::Read; -use crate::sync::{Arc, Mutex, MutexGuard, RwLock, RwLockReadGuard, FairRwLock}; +use crate::sync::{Arc, Mutex, RwLock, RwLockReadGuard, FairRwLock}; use core::sync::atomic::{AtomicUsize, Ordering}; use core::time::Duration; use core::ops::Deref; @@ -434,13 +434,6 @@ struct ClaimablePayments { pending_claiming_payments: HashMap, } -// Note this is only exposed in cfg(test): -pub(super) struct ChannelHolder { - /// Messages to send to peers - pushed to in the same lock that they are generated in (except - /// for broadcast messages, where ordering isn't as strict). - pub(super) pending_msg_events: Vec, -} - /// Events which we process internally but cannot be procsesed immediately at the generation site /// for some reason. They are handled in timer_tick_occurred, so may be processed with /// quite some time lag. @@ -470,6 +463,9 @@ pub(super) struct PeerState { pub(super) channel_by_id: HashMap<[u8; 32], Channel>, /// The latest `InitFeatures` we heard from the peer. latest_features: InitFeatures, + /// Messages to send to the peer - pushed to in the same lock that they are generated in (except + /// for broadcast messages, where ordering isn't as strict). + pub(super) pending_msg_events: Vec, } /// Stores a PaymentSecret and any other data we may need to validate an inbound payment is @@ -506,6 +502,8 @@ pub type SimpleArcChannelManager = ChannelManager< Arc, Arc, Arc, + Arc, + Arc, Arc, Arc>>, @@ -525,7 +523,7 @@ pub type SimpleArcChannelManager = ChannelManager< /// type alias chooses the concrete types of KeysManager and DefaultRouter. 
/// /// (C-not exported) as Arcs don't make sense in bindings -pub type SimpleRefChannelManager<'a, 'b, 'c, 'd, 'e, 'f, 'g, 'h, M, T, F, L> = ChannelManager<&'a M, &'b T, &'c KeysManager, &'d F, &'e DefaultRouter<&'f NetworkGraph<&'g L>, &'g L, &'h Mutex, &'g L>>>, &'g L>; +pub type SimpleRefChannelManager<'a, 'b, 'c, 'd, 'e, 'f, 'g, 'h, M, T, F, L> = ChannelManager<&'a M, &'b T, &'c KeysManager, &'c KeysManager, &'c KeysManager, &'d F, &'e DefaultRouter<&'f NetworkGraph<&'g L>, &'g L, &'h Mutex, &'g L>>>, &'g L>; /// Manager which keeps track of a number of channels and sends messages to the appropriate /// channel, also tracking HTLC preimages and forwarding onion packets appropriately. @@ -585,29 +583,29 @@ pub type SimpleRefChannelManager<'a, 'b, 'c, 'd, 'e, 'f, 'g, 'h, M, T, F, L> = C // | | // | |__`pending_outbound_payments` // This field's struct contains a map of pending outbounds // | | -// | |__`channel_state` +// | |__`per_peer_state` // | | -// | |__`per_peer_state` +// | |__`peer_state` // | | -// | |__`peer_state` -// | | -// | |__`id_to_peer` -// | | -// | |__`short_to_chan_info` -// | | -// | |__`outbound_scid_aliases` -// | | -// | |__`best_block` +// | |__`id_to_peer` +// | | +// | |__`short_to_chan_info` +// | | +// | |__`outbound_scid_aliases` +// | | +// | |__`best_block` +// | | +// | |__`pending_events` // | | -// | |__`pending_events` -// | | -// | |__`pending_background_events` +// | |__`pending_background_events` // -pub struct ChannelManager +pub struct ChannelManager where - M::Target: chain::Watch<::Signer>, + M::Target: chain::Watch<::Signer>, T::Target: BroadcasterInterface, - K::Target: KeysInterface, + ES::Target: EntropySource, + NS::Target: NodeSigner, + SP::Target: SignerProvider, F::Target: FeeEstimator, R::Target: Router, L::Target: Logger, @@ -627,12 +625,6 @@ where best_block: RwLock, secp_ctx: Secp256k1, - /// See `ChannelManager` struct-level documentation for lock order requirements. - #[cfg(any(test, feature = "_test_utils"))] - pub(super) channel_state: Mutex, - #[cfg(not(any(test, feature = "_test_utils")))] - channel_state: Mutex, - /// Storage for PaymentSecrets and any requirements on future inbound payments before we will /// expose them to users via a PaymentClaimable event. HTLCs which do not meet the requirements /// here are failed when we process them as pending-forwardable-HTLCs, and entries are removed @@ -766,9 +758,9 @@ where /// /// See `ChannelManager` struct-level documentation for lock order requirements. #[cfg(not(any(test, feature = "_test_utils")))] - per_peer_state: FairRwLock::Signer>>>>, + per_peer_state: FairRwLock::Signer>>>>, #[cfg(any(test, feature = "_test_utils"))] - pub(super) per_peer_state: FairRwLock::Signer>>>>, + pub(super) per_peer_state: FairRwLock::Signer>>>>, /// See `ChannelManager` struct-level documentation for lock order requirements. pending_events: Mutex>, @@ -784,7 +776,9 @@ where persistence_notifier: Notifier, - keys_manager: K, + entropy_source: ES, + node_signer: NS, + signer_provider: SP, logger: L, } @@ -1163,8 +1157,8 @@ macro_rules! handle_error { { // In testing, ensure there are no deadlocks where the lock is already held upon // entering the macro. - assert!($self.channel_state.try_lock().is_ok()); assert!($self.pending_events.try_lock().is_ok()); + assert!($self.per_peer_state.try_write().is_ok()); } let mut msg_events = Vec::with_capacity(2); @@ -1194,7 +1188,31 @@ macro_rules! 
handle_error { } if !msg_events.is_empty() { - $self.channel_state.lock().unwrap().pending_msg_events.append(&mut msg_events); + let per_peer_state = $self.per_peer_state.read().unwrap(); + if let Some(peer_state_mutex) = per_peer_state.get(&$counterparty_node_id) { + let mut peer_state = peer_state_mutex.lock().unwrap(); + peer_state.pending_msg_events.append(&mut msg_events); + } + #[cfg(debug_assertions)] + { + if let None = per_peer_state.get(&$counterparty_node_id) { + // This shouldn't occour in tests unless an unkown counterparty_node_id + // has been passed to our message handling functions. + let expected_error_str = format!("Can't find a peer matching the passed counterparty node_id {}", $counterparty_node_id); + match err.action { + msgs::ErrorAction::SendErrorMessage { + msg: msgs::ErrorMessage { ref channel_id, ref data } + } + => { + assert_eq!(*data, expected_error_str); + if let Some((err_channel_id, _user_channel_id)) = chan_id { + assert_eq!(*channel_id, err_channel_id); + } + } + _ => panic!("Unexpected event"), + } + } + } } // Return error in case higher-API need one @@ -1394,11 +1412,13 @@ macro_rules! emit_channel_ready_event { } } -impl ChannelManager +impl ChannelManager where - M::Target: chain::Watch<::Signer>, + M::Target: chain::Watch<::Signer>, T::Target: BroadcasterInterface, - K::Target: KeysInterface, + ES::Target: EntropySource, + NS::Target: NodeSigner, + SP::Target: SignerProvider, F::Target: FeeEstimator, R::Target: Router, L::Target: Logger, @@ -1413,10 +1433,10 @@ where /// Users need to notify the new ChannelManager when a new block is connected or /// disconnected using its `block_connected` and `block_disconnected` methods, starting /// from after `params.latest_hash`. - pub fn new(fee_est: F, chain_monitor: M, tx_broadcaster: T, router: R, logger: L, keys_manager: K, config: UserConfig, params: ChainParameters) -> Self { + pub fn new(fee_est: F, chain_monitor: M, tx_broadcaster: T, router: R, logger: L, entropy_source: ES, node_signer: NS, signer_provider: SP, config: UserConfig, params: ChainParameters) -> Self { let mut secp_ctx = Secp256k1::new(); - secp_ctx.seeded_randomize(&keys_manager.get_secure_random_bytes()); - let inbound_pmt_key_material = keys_manager.get_inbound_payment_key_material(); + secp_ctx.seeded_randomize(&entropy_source.get_secure_random_bytes()); + let inbound_pmt_key_material = node_signer.get_inbound_payment_key_material(); let expanded_inbound_key = inbound_payment::ExpandedKey::new(&inbound_pmt_key_material); ChannelManager { default_configuration: config.clone(), @@ -1428,9 +1448,6 @@ where best_block: RwLock::new(params.best_block), - channel_state: Mutex::new(ChannelHolder{ - pending_msg_events: Vec::new(), - }), outbound_scid_aliases: Mutex::new(HashSet::new()), pending_inbound_payments: Mutex::new(HashMap::new()), pending_outbound_payments: OutboundPayments::new(), @@ -1440,14 +1457,14 @@ where id_to_peer: Mutex::new(HashMap::new()), short_to_chan_info: FairRwLock::new(HashMap::new()), - our_network_key: keys_manager.get_node_secret(Recipient::Node).unwrap(), - our_network_pubkey: PublicKey::from_secret_key(&secp_ctx, &keys_manager.get_node_secret(Recipient::Node).unwrap()), + our_network_key: node_signer.get_node_secret(Recipient::Node).unwrap(), + our_network_pubkey: PublicKey::from_secret_key(&secp_ctx, &node_signer.get_node_secret(Recipient::Node).unwrap()), secp_ctx, inbound_payment_key: expanded_inbound_key, - fake_scid_rand_bytes: keys_manager.get_secure_random_bytes(), + fake_scid_rand_bytes: 
entropy_source.get_secure_random_bytes(), - probing_cookie_secret: keys_manager.get_secure_random_bytes(), + probing_cookie_secret: entropy_source.get_secure_random_bytes(), highest_seen_timestamp: AtomicUsize::new(0), @@ -1458,7 +1475,9 @@ where total_consistency_lock: RwLock::new(()), persistence_notifier: Notifier::new(), - keys_manager, + entropy_source, + node_signer, + signer_provider, logger, } @@ -1477,7 +1496,7 @@ where if cfg!(fuzzing) { // fuzzing chacha20 doesn't use the key at all so we always get the same alias outbound_scid_alias += 1; } else { - outbound_scid_alias = fake_scid::Namespace::OutboundAlias.get_fake_scid(height, &self.genesis_hash, &self.fake_scid_rand_bytes, &self.keys_manager); + outbound_scid_alias = fake_scid::Namespace::OutboundAlias.get_fake_scid(height, &self.genesis_hash, &self.fake_scid_rand_bytes, &self.entropy_source); } if outbound_scid_alias != 0 && self.outbound_scid_aliases.lock().unwrap().insert(outbound_scid_alias) { break; @@ -1522,7 +1541,6 @@ where // We want to make sure the lock is actually acquired by PersistenceNotifierGuard. debug_assert!(&self.total_consistency_lock.try_write().is_err()); - let mut channel_state = self.channel_state.lock().unwrap(); let per_peer_state = self.per_peer_state.read().unwrap(); let peer_state_mutex_opt = per_peer_state.get(&their_network_key); @@ -1535,7 +1553,7 @@ where let outbound_scid_alias = self.create_and_insert_outbound_scid_alias(); let their_features = &peer_state.latest_features; let config = if override_config.is_some() { override_config.as_ref().unwrap() } else { &self.default_configuration }; - match Channel::new_outbound(&self.fee_estimator, &self.keys_manager, their_network_key, + match Channel::new_outbound(&self.fee_estimator, &self.entropy_source, &self.signer_provider, their_network_key, their_features, channel_value_satoshis, push_msat, user_channel_id, config, self.best_block.read().unwrap().height(), outbound_scid_alias) { @@ -1560,14 +1578,14 @@ where hash_map::Entry::Vacant(entry) => { entry.insert(channel); } } - channel_state.pending_msg_events.push(events::MessageSendEvent::SendOpenChannel { + peer_state.pending_msg_events.push(events::MessageSendEvent::SendOpenChannel { node_id: their_network_key, msg: res, }); Ok(temporary_channel_id) } - fn list_channels_with_filter::Signer>)) -> bool + Copy>(&self, f: Fn) -> Vec { + fn list_channels_with_filter::Signer>)) -> bool + Copy>(&self, f: Fn) -> Vec { let mut res = Vec::new(); // Allocate our best estimate of the number of channels we have in the `res` // Vec. 
Sadly the `short_to_chan_info` map doesn't cover channels without @@ -1655,7 +1673,7 @@ where } /// Helper function that issues the channel close events - fn issue_channel_close_events(&self, channel: &Channel<::Signer>, closure_reason: ClosureReason) { + fn issue_channel_close_events(&self, channel: &Channel<::Signer>, closure_reason: ClosureReason) { let mut pending_events_lock = self.pending_events.lock().unwrap(); match channel.unbroadcasted_funding() { Some(transaction) => { @@ -1675,8 +1693,6 @@ where let mut failed_htlcs: Vec<(HTLCSource, PaymentHash)>; let result: Result<(), _> = loop { - let mut channel_state_lock = self.channel_state.lock().unwrap(); - let channel_state = &mut *channel_state_lock; let per_peer_state = self.per_peer_state.read().unwrap(); let peer_state_mutex_opt = per_peer_state.get(counterparty_node_id); @@ -1688,7 +1704,7 @@ where let peer_state = &mut *peer_state_lock; match peer_state.channel_by_id.entry(channel_id.clone()) { hash_map::Entry::Occupied(mut chan_entry) => { - let (shutdown_msg, monitor_update, htlcs) = chan_entry.get_mut().get_shutdown(&self.keys_manager, &peer_state.latest_features, target_feerate_sats_per_1000_weight)?; + let (shutdown_msg, monitor_update, htlcs) = chan_entry.get_mut().get_shutdown(&self.signer_provider, &peer_state.latest_features, target_feerate_sats_per_1000_weight)?; failed_htlcs = htlcs; // Update the monitor with the shutdown script if necessary. @@ -1702,7 +1718,7 @@ where } } - channel_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown { + peer_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown { node_id: *counterparty_node_id, msg: shutdown_msg }); @@ -1710,7 +1726,7 @@ where if chan_entry.get().is_shutdown() { let channel = remove_channel!(self, chan_entry); if let Ok(channel_update) = self.get_channel_update_for_broadcast(&channel) { - channel_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate { + peer_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate { msg: channel_update }); } @@ -1799,13 +1815,13 @@ where /// user closes, which will be re-exposed as the `ChannelClosed` reason. 
fn force_close_channel_with_peer(&self, channel_id: &[u8; 32], peer_node_id: &PublicKey, peer_msg: Option<&String>, broadcast: bool) -> Result { + let per_peer_state = self.per_peer_state.read().unwrap(); + let peer_state_mutex_opt = per_peer_state.get(peer_node_id); let mut chan = { - let per_peer_state = self.per_peer_state.read().unwrap(); - if let None = per_peer_state.get(peer_node_id) { + if let None = peer_state_mutex_opt { return Err(APIError::APIMisuseError{ err: format!("Can't find a peer matching the passed counterparty node_id {}", peer_node_id) }); } - let peer_state_mutex = per_peer_state.get(peer_node_id).unwrap(); - let mut peer_state_lock = peer_state_mutex.lock().unwrap(); + let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap(); let peer_state = &mut *peer_state_lock; if let hash_map::Entry::Occupied(chan) = peer_state.channel_by_id.entry(channel_id.clone()) { if let Some(peer_msg) = peer_msg { @@ -1821,8 +1837,8 @@ where log_error!(self.logger, "Force-closing channel {}", log_bytes!(channel_id[..])); self.finish_force_close_channel(chan.force_shutdown(broadcast)); if let Ok(update) = self.get_channel_update_for_broadcast(&chan) { - let mut channel_state = self.channel_state.lock().unwrap(); - channel_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate { + let mut peer_state = peer_state_mutex_opt.unwrap().lock().unwrap(); + peer_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate { msg: update }); } @@ -1834,14 +1850,18 @@ where let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier); match self.force_close_channel_with_peer(channel_id, counterparty_node_id, None, broadcast) { Ok(counterparty_node_id) => { - self.channel_state.lock().unwrap().pending_msg_events.push( - events::MessageSendEvent::HandleError { - node_id: counterparty_node_id, - action: msgs::ErrorAction::SendErrorMessage { - msg: msgs::ErrorMessage { channel_id: *channel_id, data: "Channel force-closed".to_owned() } - }, - } - ); + let per_peer_state = self.per_peer_state.read().unwrap(); + if let Some(peer_state_mutex) = per_peer_state.get(&counterparty_node_id) { + let mut peer_state = peer_state_mutex.lock().unwrap(); + peer_state.pending_msg_events.push( + events::MessageSendEvent::HandleError { + node_id: counterparty_node_id, + action: msgs::ErrorAction::SendErrorMessage { + msg: msgs::ErrorMessage { channel_id: *channel_id, data: "Channel force-closed".to_owned() } + }, + } + ); + } Ok(()) }, Err(e) => Err(e) @@ -2216,7 +2236,7 @@ where /// [`MessageSendEvent::BroadcastChannelUpdate`] event. /// /// May be called with peer_state already locked! - fn get_channel_update_for_broadcast(&self, chan: &Channel<::Signer>) -> Result { + fn get_channel_update_for_broadcast(&self, chan: &Channel<::Signer>) -> Result { if !chan.should_announce() { return Err(LightningError { err: "Cannot broadcast a channel_update for a private channel".to_owned(), @@ -2235,7 +2255,7 @@ where /// and thus MUST NOT be called unless the recipient of the resulting message has already /// provided evidence that they know about the existence of the channel. /// May be called with peer_state already locked! 
- fn get_channel_update_for_unicast(&self, chan: &Channel<::Signer>) -> Result { + fn get_channel_update_for_unicast(&self, chan: &Channel<::Signer>) -> Result { log_trace!(self.logger, "Attempting to generate channel update for channel {}", log_bytes!(chan.channel_id())); let short_channel_id = match chan.get_short_channel_id().or(chan.latest_inbound_scid_alias()) { None => return Err(LightningError{err: "Channel not yet established".to_owned(), action: msgs::ErrorAction::IgnoreError}), @@ -2244,7 +2264,7 @@ where self.get_channel_update_for_onion(short_channel_id, chan) } - fn get_channel_update_for_onion(&self, short_channel_id: u64, chan: &Channel<::Signer>) -> Result { + fn get_channel_update_for_onion(&self, short_channel_id: u64, chan: &Channel<::Signer>) -> Result { log_trace!(self.logger, "Generating channel update for channel {}", log_bytes!(chan.channel_id())); let were_node_one = PublicKey::from_secret_key(&self.secp_ctx, &self.our_network_key).serialize()[..] < chan.get_counterparty_node_id().serialize()[..]; @@ -2273,7 +2293,7 @@ where // Only public for testing, this should otherwise never be called direcly pub(crate) fn send_payment_along_path(&self, path: &Vec, payment_params: &Option, payment_hash: &PaymentHash, payment_secret: &Option, total_value: u64, cur_height: u32, payment_id: PaymentId, keysend_preimage: &Option, session_priv_bytes: [u8; 32]) -> Result<(), APIError> { log_trace!(self.logger, "Attempting to send payment for path with next hop {}", path.first().unwrap().short_channel_id); - let prng_seed = self.keys_manager.get_secure_random_bytes(); + let prng_seed = self.entropy_source.get_secure_random_bytes(); let session_priv = SecretKey::from_slice(&session_priv_bytes[..]).expect("RNG is busted"); let onion_keys = onion_utils::construct_onion_keys(&self.secp_ctx, &path, &session_priv) @@ -2292,8 +2312,6 @@ where Some((cp_id, chan_id)) => (cp_id.clone(), chan_id.clone()), }; - let mut channel_lock = self.channel_state.lock().unwrap(); - let channel_state = &mut *channel_lock; let per_peer_state = self.per_peer_state.read().unwrap(); let peer_state_mutex_opt = per_peer_state.get(&counterparty_node_id); if let None = peer_state_mutex_opt { @@ -2339,7 +2357,7 @@ where } log_debug!(self.logger, "Sending payment along path resulted in a commitment_signed for channel {}", log_bytes!(chan_id)); - channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs { + peer_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs { node_id: path.first().unwrap().pubkey, updates: msgs::CommitmentUpdate { update_add_htlcs: vec![update_add], @@ -2423,7 +2441,7 @@ where pub fn send_payment(&self, route: &Route, payment_hash: PaymentHash, payment_secret: &Option, payment_id: PaymentId) -> Result<(), PaymentSendFailure> { let best_block_height = self.best_block.read().unwrap().height(); self.pending_outbound_payments - .send_payment_with_route(route, payment_hash, payment_secret, payment_id, &self.keys_manager, best_block_height, + .send_payment_with_route(route, payment_hash, payment_secret, payment_id, &self.entropy_source, &self.node_signer, best_block_height, |path, payment_params, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv| self.send_payment_along_path(path, payment_params, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv)) } @@ -2431,7 +2449,7 @@ where #[cfg(test)] fn test_send_payment_internal(&self, route: &Route, payment_hash: PaymentHash, 
payment_secret: &Option, keysend_preimage: Option, payment_id: PaymentId, recv_value_msat: Option, onion_session_privs: Vec<[u8; 32]>) -> Result<(), PaymentSendFailure> { let best_block_height = self.best_block.read().unwrap().height(); - self.pending_outbound_payments.test_send_payment_internal(route, payment_hash, payment_secret, keysend_preimage, payment_id, recv_value_msat, onion_session_privs, &self.keys_manager, best_block_height, + self.pending_outbound_payments.test_send_payment_internal(route, payment_hash, payment_secret, keysend_preimage, payment_id, recv_value_msat, onion_session_privs, &self.node_signer, best_block_height, |path, payment_params, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv| self.send_payment_along_path(path, payment_params, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv)) } @@ -2439,7 +2457,7 @@ where #[cfg(test)] pub(crate) fn test_add_new_pending_payment(&self, payment_hash: PaymentHash, payment_secret: Option, payment_id: PaymentId, route: &Route) -> Result, PaymentSendFailure> { let best_block_height = self.best_block.read().unwrap().height(); - self.pending_outbound_payments.test_add_new_pending_payment(payment_hash, payment_secret, payment_id, route, &self.keys_manager, best_block_height) + self.pending_outbound_payments.test_add_new_pending_payment(payment_hash, payment_secret, payment_id, route, &self.entropy_source, best_block_height) } @@ -2455,7 +2473,7 @@ where /// [`abandon_payment`]: [`ChannelManager::abandon_payment`] pub fn retry_payment(&self, route: &Route, payment_id: PaymentId) -> Result<(), PaymentSendFailure> { let best_block_height = self.best_block.read().unwrap().height(); - self.pending_outbound_payments.retry_payment_with_route(route, payment_id, &self.keys_manager, best_block_height, + self.pending_outbound_payments.retry_payment_with_route(route, payment_id, &self.entropy_source, &self.node_signer, best_block_height, |path, payment_params, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv| self.send_payment_along_path(path, payment_params, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv)) } @@ -2504,7 +2522,7 @@ where /// [`send_payment`]: Self::send_payment pub fn send_spontaneous_payment(&self, route: &Route, payment_preimage: Option, payment_id: PaymentId) -> Result { let best_block_height = self.best_block.read().unwrap().height(); - self.pending_outbound_payments.send_spontaneous_payment(route, payment_preimage, payment_id, &self.keys_manager, best_block_height, + self.pending_outbound_payments.send_spontaneous_payment(route, payment_preimage, payment_id, &self.entropy_source, &self.node_signer, best_block_height, |path, payment_params, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv| self.send_payment_along_path(path, payment_params, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv)) } @@ -2514,7 +2532,7 @@ where /// us to easily discern them from real payments. 
pub fn send_probe(&self, hops: Vec) -> Result<(PaymentHash, PaymentId), PaymentSendFailure> { let best_block_height = self.best_block.read().unwrap().height(); - self.pending_outbound_payments.send_probe(hops, self.probing_cookie_secret, &self.keys_manager, best_block_height, + self.pending_outbound_payments.send_probe(hops, self.probing_cookie_secret, &self.entropy_source, &self.node_signer, best_block_height, |path, payment_params, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv| self.send_payment_along_path(path, payment_params, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv)) } @@ -2528,10 +2546,9 @@ where /// Handles the generation of a funding transaction, optionally (for tests) with a function /// which checks the correctness of the funding transaction given the associated channel. - fn funding_transaction_generated_intern::Signer>, &Transaction) -> Result>( + fn funding_transaction_generated_intern::Signer>, &Transaction) -> Result>( &self, temporary_channel_id: &[u8; 32], counterparty_node_id: &PublicKey, funding_transaction: Transaction, find_funding_output: FundingOutput ) -> Result<(), APIError> { - let mut channel_state = self.channel_state.lock().unwrap(); let per_peer_state = self.per_peer_state.read().unwrap(); let peer_state_mutex_opt = per_peer_state.get(counterparty_node_id); if let None = peer_state_mutex_opt { @@ -2565,11 +2582,10 @@ where } }; - channel_state.pending_msg_events.push(events::MessageSendEvent::SendFundingCreated { + peer_state.pending_msg_events.push(events::MessageSendEvent::SendFundingCreated { node_id: chan.get_counterparty_node_id(), msg, }); - mem::drop(channel_state); match peer_state.channel_by_id.entry(chan.channel_id()) { hash_map::Entry::Occupied(_) => { panic!("Generated duplicate funding txid?"); @@ -2704,8 +2720,6 @@ where let _persistence_guard = PersistenceNotifierGuard::notify_on_drop( &self.total_consistency_lock, &self.persistence_notifier, ); - let mut channel_state_lock = self.channel_state.lock().unwrap(); - let channel_state = &mut *channel_state_lock; let per_peer_state = self.per_peer_state.read().unwrap(); let peer_state_mutex_opt = per_peer_state.get(counterparty_node_id); if let None = peer_state_mutex_opt { @@ -2726,9 +2740,9 @@ where continue; } if let Ok(msg) = self.get_channel_update_for_broadcast(channel) { - channel_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate { msg }); + peer_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate { msg }); } else if let Ok(msg) = self.get_channel_update_for_unicast(channel) { - channel_state.pending_msg_events.push(events::MessageSendEvent::SendChannelUpdate { + peer_state.pending_msg_events.push(events::MessageSendEvent::SendChannelUpdate { node_id: channel.get_counterparty_node_id(), msg, }); @@ -2909,7 +2923,7 @@ where } } if let PendingHTLCRouting::Forward { onion_packet, .. 
} = routing { - let phantom_secret_res = self.keys_manager.get_node_secret(Recipient::PhantomNode); + let phantom_secret_res = self.node_signer.get_node_secret(Recipient::PhantomNode); if phantom_secret_res.is_ok() && fake_scid::is_valid_phantom(&self.fake_scid_rand_bytes, short_chan_id, &self.genesis_hash) { let phantom_shared_secret = SharedSecret::new(&onion_packet.public_key.unwrap(), &phantom_secret_res.unwrap()).secret_bytes(); let next_hop = match onion_utils::decode_next_payment_hop(phantom_shared_secret, &onion_packet.hop_data, onion_packet.hmac, payment_hash) { @@ -3087,7 +3101,7 @@ where let phantom_shared_secret = claimable_htlc.prev_hop.phantom_shared_secret; let mut receiver_node_id = self.our_network_pubkey; if phantom_shared_secret.is_some() { - receiver_node_id = self.keys_manager.get_node_id(Recipient::PhantomNode) + receiver_node_id = self.node_signer.get_node_id(Recipient::PhantomNode) .expect("Failed to get node_id for phantom node recipient"); } @@ -3283,7 +3297,7 @@ where self.process_background_events(); } - fn update_channel_fee(&self, chan_id: &[u8; 32], chan: &mut Channel<::Signer>, new_feerate: u32) -> NotifyOption { + fn update_channel_fee(&self, chan_id: &[u8; 32], chan: &mut Channel<::Signer>, new_feerate: u32) -> NotifyOption { if !chan.is_outbound() { return NotifyOption::SkipPersist; } // If the feerate has decreased by less than half, don't bother if new_feerate <= chan.get_feerate() && new_feerate * 2 > chan.get_feerate() { @@ -3350,13 +3364,11 @@ where let mut handle_errors: Vec<(Result<(), _>, _)> = Vec::new(); let mut timed_out_mpp_htlcs = Vec::new(); { - let mut channel_state_lock = self.channel_state.lock().unwrap(); - let channel_state = &mut *channel_state_lock; - let pending_msg_events = &mut channel_state.pending_msg_events; let per_peer_state = self.per_peer_state.read().unwrap(); for (counterparty_node_id, peer_state_mutex) in per_peer_state.iter() { let mut peer_state_lock = peer_state_mutex.lock().unwrap(); let peer_state = &mut *peer_state_lock; + let pending_msg_events = &mut peer_state.pending_msg_events; peer_state.channel_by_id.retain(|chan_id, chan| { let chan_needs_persist = self.update_channel_fee(chan_id, chan, new_feerate); if chan_needs_persist == NotifyOption::DoPersist { should_persist = NotifyOption::DoPersist; } @@ -3480,7 +3492,7 @@ where /// /// This is for failures on the channel on which the HTLC was *received*, not failures /// forwarding - fn get_htlc_inbound_temp_fail_err_and_data(&self, desired_err_code: u16, chan: &Channel<::Signer>) -> (u16, Vec) { + fn get_htlc_inbound_temp_fail_err_and_data(&self, desired_err_code: u16, chan: &Channel<::Signer>) -> (u16, Vec) { // We can't be sure what SCID was used when relaying inbound towards us, so we have to // guess somewhat. If its a public channel, we figure best to just use the real SCID (as // we're not leaking that we have a channel with the counterparty), otherwise we try to use @@ -3500,7 +3512,7 @@ where /// Gets an HTLC onion failure code and error data for an `UPDATE` error, given the error code /// that we want to return and a channel. 
- fn get_htlc_temp_fail_err_and_data(&self, desired_err_code: u16, scid: u64, chan: &Channel<::Signer>) -> (u16, Vec) { + fn get_htlc_temp_fail_err_and_data(&self, desired_err_code: u16, scid: u64, chan: &Channel<::Signer>) -> (u16, Vec) { debug_assert_eq!(desired_err_code & 0x1000, 0x1000); if let Ok(upd) = self.get_channel_update_for_onion(scid, chan) { let mut enc = VecWriter(Vec::with_capacity(upd.serialized_length() + 6)); @@ -3555,12 +3567,11 @@ where fn fail_htlc_backwards_internal(&self, source: &HTLCSource, payment_hash: &PaymentHash, onion_error: &HTLCFailReason, destination: HTLCDestination) { #[cfg(debug_assertions)] { - // Ensure that the `channel_state` and no peer state channel storage lock is not held - // when calling this function. + // Ensure that no peer state channel storage lock is held when calling this + // function. // This ensures that future code doesn't introduce a lock_order requirement for - // `forward_htlcs` to be locked after the `channel_state` and `per_peer_state` locks, - // which calling this function with the locks aquired would. - assert!(self.channel_state.try_lock().is_ok()); + // `forward_htlcs` to be locked after the `per_peer_state` locks, which calling this + // function with the `per_peer_state` acquired would. assert!(self.per_peer_state.try_write().is_ok()); } @@ -3636,7 +3647,7 @@ where let mut receiver_node_id = self.our_network_pubkey; for htlc in sources.iter() { if htlc.prev_hop.phantom_shared_secret.is_some() { - let phantom_pubkey = self.keys_manager.get_node_id(Recipient::PhantomNode) + let phantom_pubkey = self.node_signer.get_node_id(Recipient::PhantomNode) .expect("Failed to get node_id for phantom node recipient"); receiver_node_id = phantom_pubkey; break; @@ -3676,7 +3687,6 @@ where let mut expected_amt_msat = None; let mut valid_mpp = true; let mut errs = Vec::new(); - let mut channel_state = Some(self.channel_state.lock().unwrap()); let mut per_peer_state = Some(self.per_peer_state.read().unwrap()); for htlc in sources.iter() { let (counterparty_node_id, chan_id) = match self.short_to_chan_info.read().unwrap().get(&htlc.prev_hop.short_channel_id) { @@ -3723,14 +3733,12 @@ where claimable_amt_msat += htlc.value; } if sources.is_empty() || expected_amt_msat.is_none() { - mem::drop(channel_state); mem::drop(per_peer_state); self.claimable_payments.lock().unwrap().pending_claiming_payments.remove(&payment_hash); log_info!(self.logger, "Attempted to claim an incomplete payment which no longer had any available HTLCs!"); return; } if claimable_amt_msat != expected_amt_msat.unwrap() { - mem::drop(channel_state); mem::drop(per_peer_state); self.claimable_payments.lock().unwrap().pending_claiming_payments.remove(&payment_hash); log_info!(self.logger, "Attempted to claim an incomplete payment, expected {} msat, had {} available to claim.", @@ -3739,9 +3747,8 @@ where } if valid_mpp { for htlc in sources.drain(..) { - if channel_state.is_none() { channel_state = Some(self.channel_state.lock().unwrap()); } if per_peer_state.is_none() { per_peer_state = Some(self.per_peer_state.read().unwrap()); } - if let Err((pk, err)) = self.claim_funds_from_hop(channel_state.take().unwrap(), per_peer_state.take().unwrap(), + if let Err((pk, err)) = self.claim_funds_from_hop(per_peer_state.take().unwrap(), htlc.prev_hop, payment_preimage, |_| Some(MonitorUpdateCompletionAction::PaymentClaimed { payment_hash })) { @@ -3753,7 +3760,6 @@ where } } } - mem::drop(channel_state); mem::drop(per_peer_state); if !valid_mpp { for htlc in sources.drain(..)
{ @@ -3775,14 +3781,12 @@ where } fn claim_funds_from_hop) -> Option>(&self, - mut channel_state_lock: MutexGuard, - per_peer_state_lock: RwLockReadGuard::Signer>>>>, + per_peer_state_lock: RwLockReadGuard::Signer>>>>, prev_hop: HTLCPreviousHopData, payment_preimage: PaymentPreimage, completion_action: ComplFunc) -> Result<(), (PublicKey, MsgHandleErrInternal)> { //TODO: Delay the claimed_funds relaying just like we do outbound relay! let chan_id = prev_hop.outpoint.to_channel_id(); - let channel_state = &mut *channel_state_lock; let counterparty_node_id_opt = match self.short_to_chan_info.read().unwrap().get(&prev_hop.short_channel_id) { Some((cp_id, _dup_chan_id)) => Some(cp_id.clone()), @@ -3797,7 +3801,8 @@ where } else { (false, None) }; if found_channel { - if let hash_map::Entry::Occupied(mut chan) = peer_state_opt.as_mut().unwrap().channel_by_id.entry(chan_id) { + let peer_state = &mut *peer_state_opt.as_mut().unwrap(); + if let hash_map::Entry::Occupied(mut chan) = peer_state.channel_by_id.entry(chan_id) { let counterparty_node_id = chan.get().get_counterparty_node_id(); match chan.get_mut().get_update_fulfill_htlc_and_commit(prev_hop.htlc_id, payment_preimage, &self.logger) { Ok(msgs_monitor_option) => { @@ -3809,7 +3814,6 @@ where "Failed to update channel monitor with preimage {:?}: {:?}", payment_preimage, e); let err = handle_monitor_update_res!(self, e, chan, RAACommitmentOrder::CommitmentFirst, false, msgs.is_some()).unwrap_err(); - mem::drop(channel_state_lock); mem::drop(peer_state_opt); mem::drop(per_peer_state_lock); self.handle_monitor_update_completion_actions(completion_action(Some(htlc_value_msat))); @@ -3819,8 +3823,8 @@ where if let Some((msg, commitment_signed)) = msgs { log_debug!(self.logger, "Claiming funds for HTLC with preimage {} resulted in a commitment_signed for channel {}", log_bytes!(payment_preimage.0), log_bytes!(chan.get().channel_id())); - channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs { - node_id: chan.get().get_counterparty_node_id(), + peer_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs { + node_id: counterparty_node_id, updates: msgs::CommitmentUpdate { update_add_htlcs: Vec::new(), update_fulfill_htlcs: vec![msg], @@ -3831,7 +3835,6 @@ where } }); } - mem::drop(channel_state_lock); mem::drop(peer_state_opt); mem::drop(per_peer_state_lock); self.handle_monitor_update_completion_actions(completion_action(Some(htlc_value_msat))); @@ -3857,7 +3860,6 @@ where if drop { chan.remove_entry(); } - mem::drop(channel_state_lock); mem::drop(peer_state_opt); mem::drop(per_peer_state_lock); self.handle_monitor_update_completion_actions(completion_action(None)); @@ -3887,7 +3889,6 @@ where log_error!(self.logger, "Critical error: failed to update channel monitor with preimage {:?}: {:?}", payment_preimage, update_res); } - mem::drop(channel_state_lock); mem::drop(peer_state_opt); mem::drop(per_peer_state_lock); // Note that we do process the completion action here. 
This totally could be a @@ -3904,15 +3905,14 @@ where self.pending_outbound_payments.finalize_claims(sources, &self.pending_events); } - fn claim_funds_internal(&self, channel_state_lock: MutexGuard, source: HTLCSource, payment_preimage: PaymentPreimage, forwarded_htlc_value_msat: Option, from_onchain: bool, next_channel_id: [u8; 32]) { + fn claim_funds_internal(&self, source: HTLCSource, payment_preimage: PaymentPreimage, forwarded_htlc_value_msat: Option, from_onchain: bool, next_channel_id: [u8; 32]) { match source { HTLCSource::OutboundRoute { session_priv, payment_id, path, .. } => { - mem::drop(channel_state_lock); self.pending_outbound_payments.claim_htlc(payment_id, payment_preimage, session_priv, path, from_onchain, &self.pending_events, &self.logger); }, HTLCSource::PreviousHopData(hop_data) => { let prev_outpoint = hop_data.outpoint; - let res = self.claim_funds_from_hop(channel_state_lock, self.per_peer_state.read().unwrap(), hop_data, payment_preimage, + let res = self.claim_funds_from_hop(self.per_peer_state.read().unwrap(), hop_data, payment_preimage, |htlc_claim_value_msat| { if let Some(forwarded_htlc_value) = forwarded_htlc_value_msat { let fee_earned_msat = if let Some(claimed_htlc_value) = htlc_claim_value_msat { @@ -3964,7 +3964,7 @@ where /// Handles a channel reentering a functional state, either due to reconnect or a monitor /// update completion. fn handle_channel_resumption(&self, pending_msg_events: &mut Vec, - channel: &mut Channel<::Signer>, raa: Option, + channel: &mut Channel<::Signer>, raa: Option, commitment_update: Option, order: RAACommitmentOrder, pending_forwards: Vec<(PendingHTLCInfo, u64)>, funding_broadcastable: Option, channel_ready: Option, announcement_sigs: Option) @@ -4029,8 +4029,6 @@ where let htlc_forwards; let (mut pending_failures, finalized_claims, counterparty_node_id) = { - let mut channel_lock = self.channel_state.lock().unwrap(); - let channel_state = &mut *channel_lock; let counterparty_node_id = match counterparty_node_id { Some(cp_id) => cp_id.clone(), None => { @@ -4045,11 +4043,11 @@ where }; let per_peer_state = self.per_peer_state.read().unwrap(); let mut peer_state_lock; + let peer_state_mutex_opt = per_peer_state.get(&counterparty_node_id); + if let None = peer_state_mutex_opt { return } + peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap(); + let peer_state = &mut *peer_state_lock; let mut channel = { - if let None = per_peer_state.get(&counterparty_node_id) { return } - let peer_state_mutex = per_peer_state.get(&counterparty_node_id).unwrap(); - peer_state_lock = peer_state_mutex.lock().unwrap(); - let peer_state = &mut *peer_state_lock; match peer_state.channel_by_id.entry(funding_txo.to_channel_id()){ hash_map::Entry::Occupied(chan) => chan, hash_map::Entry::Vacant(_) => return, @@ -4073,9 +4071,9 @@ where }) } else { None } } else { None }; - htlc_forwards = self.handle_channel_resumption(&mut channel_state.pending_msg_events, channel.get_mut(), updates.raa, updates.commitment_update, updates.order, updates.accepted_htlcs, updates.funding_broadcastable, updates.channel_ready, updates.announcement_sigs); + htlc_forwards = self.handle_channel_resumption(&mut peer_state.pending_msg_events, channel.get_mut(), updates.raa, updates.commitment_update, updates.order, updates.accepted_htlcs, updates.funding_broadcastable, updates.channel_ready, updates.announcement_sigs); if let Some(upd) = channel_update { - channel_state.pending_msg_events.push(upd); + peer_state.pending_msg_events.push(upd); } (updates.failed_htlcs, 
updates.finalized_claimed_htlcs, counterparty_node_id) @@ -4135,8 +4133,6 @@ where fn do_accept_inbound_channel(&self, temporary_channel_id: &[u8; 32], counterparty_node_id: &PublicKey, accept_0conf: bool, user_channel_id: u128) -> Result<(), APIError> { let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier); - let mut channel_state_lock = self.channel_state.lock().unwrap(); - let channel_state = &mut *channel_state_lock; let per_peer_state = self.per_peer_state.read().unwrap(); let peer_state_mutex_opt = per_peer_state.get(counterparty_node_id); if let None = peer_state_mutex_opt { @@ -4158,12 +4154,12 @@ where msg: msgs::ErrorMessage { channel_id: temporary_channel_id.clone(), data: "No zero confirmation channels accepted".to_owned(), } } }; - channel_state.pending_msg_events.push(send_msg_err_event); + peer_state.pending_msg_events.push(send_msg_err_event); let _ = remove_channel!(self, channel); return Err(APIError::APIMisuseError { err: "Please use accept_inbound_channel_from_trusted_peer_0conf to accept channels with zero confirmations.".to_owned() }); } - channel_state.pending_msg_events.push(events::MessageSendEvent::SendAcceptChannel { + peer_state.pending_msg_events.push(events::MessageSendEvent::SendAcceptChannel { node_id: channel.get().get_counterparty_node_id(), msg: channel.get_mut().accept_inbound_channel(user_channel_id), }); @@ -4185,11 +4181,11 @@ where } let mut random_bytes = [0u8; 16]; - random_bytes.copy_from_slice(&self.keys_manager.get_secure_random_bytes()[..16]); + random_bytes.copy_from_slice(&self.entropy_source.get_secure_random_bytes()[..16]); let user_channel_id = u128::from_be_bytes(random_bytes); let outbound_scid_alias = self.create_and_insert_outbound_scid_alias(); - let mut channel = match Channel::new_from_req(&self.fee_estimator, &self.keys_manager, + let mut channel = match Channel::new_from_req(&self.fee_estimator, &self.entropy_source, &self.signer_provider, counterparty_node_id.clone(), &their_features, msg, user_channel_id, &self.default_configuration, self.best_block.read().unwrap().height(), &self.logger, outbound_scid_alias) { @@ -4199,8 +4195,6 @@ where }, Ok(res) => res }; - let mut channel_state_lock = self.channel_state.lock().unwrap(); - let channel_state = &mut *channel_state_lock; let per_peer_state = self.per_peer_state.read().unwrap(); let peer_state_mutex_opt = per_peer_state.get(counterparty_node_id); if let None = peer_state_mutex_opt { @@ -4218,7 +4212,7 @@ where if channel.get_channel_type().requires_zero_conf() { return Err(MsgHandleErrInternal::send_err_msg_no_close("No zero confirmation channels accepted".to_owned(), msg.temporary_channel_id.clone())); } - channel_state.pending_msg_events.push(events::MessageSendEvent::SendAcceptChannel { + peer_state.pending_msg_events.push(events::MessageSendEvent::SendAcceptChannel { node_id: counterparty_node_id.clone(), msg: channel.accept_inbound_channel(user_channel_id), }); @@ -4270,8 +4264,6 @@ where } fn internal_funding_created(&self, counterparty_node_id: &PublicKey, msg: &msgs::FundingCreated) -> Result<(), MsgHandleErrInternal> { - let mut channel_state_lock = self.channel_state.lock().unwrap(); - let channel_state = &mut *channel_state_lock; let per_peer_state = self.per_peer_state.read().unwrap(); let peer_state_mutex_opt = per_peer_state.get(counterparty_node_id); if let None = peer_state_mutex_opt { @@ -4283,7 +4275,7 @@ where let peer_state = &mut *peer_state_lock; match 
peer_state.channel_by_id.entry(msg.temporary_channel_id) { hash_map::Entry::Occupied(mut chan) => { - (try_chan_entry!(self, chan.get_mut().funding_created(msg, best_block, &self.keys_manager, &self.logger), chan), chan.remove()) + (try_chan_entry!(self, chan.get_mut().funding_created(msg, best_block, &self.signer_provider, &self.logger), chan), chan.remove()) }, hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.temporary_channel_id)) } @@ -4334,12 +4326,12 @@ where i_e.insert(chan.get_counterparty_node_id()); } } - channel_state.pending_msg_events.push(events::MessageSendEvent::SendFundingSigned { + peer_state.pending_msg_events.push(events::MessageSendEvent::SendFundingSigned { node_id: counterparty_node_id.clone(), msg: funding_msg, }); if let Some(msg) = channel_ready { - send_channel_ready!(self, channel_state.pending_msg_events, chan, msg); + send_channel_ready!(self, peer_state.pending_msg_events, chan, msg); } e.insert(chan); } @@ -4350,8 +4342,6 @@ where fn internal_funding_signed(&self, counterparty_node_id: &PublicKey, msg: &msgs::FundingSigned) -> Result<(), MsgHandleErrInternal> { let funding_tx = { let best_block = *self.best_block.read().unwrap(); - let mut channel_lock = self.channel_state.lock().unwrap(); - let channel_state = &mut *channel_lock; let per_peer_state = self.per_peer_state.read().unwrap(); let peer_state_mutex_opt = per_peer_state.get(counterparty_node_id); if let None = peer_state_mutex_opt { @@ -4362,7 +4352,7 @@ where let peer_state = &mut *peer_state_lock; match peer_state.channel_by_id.entry(msg.channel_id) { hash_map::Entry::Occupied(mut chan) => { - let (monitor, funding_tx, channel_ready) = match chan.get_mut().funding_signed(&msg, best_block, &self.keys_manager, &self.logger) { + let (monitor, funding_tx, channel_ready) = match chan.get_mut().funding_signed(&msg, best_block, &self.signer_provider, &self.logger) { Ok(update) => update, Err(e) => try_chan_entry!(self, Err(e), chan), }; @@ -4382,7 +4372,7 @@ where }, } if let Some(msg) = channel_ready { - send_channel_ready!(self, channel_state.pending_msg_events, chan.get(), msg); + send_channel_ready!(self, peer_state.pending_msg_events, chan.get(), msg); } funding_tx }, @@ -4395,8 +4385,6 @@ where } fn internal_channel_ready(&self, counterparty_node_id: &PublicKey, msg: &msgs::ChannelReady) -> Result<(), MsgHandleErrInternal> { - let mut channel_state_lock = self.channel_state.lock().unwrap(); - let channel_state = &mut *channel_state_lock; let per_peer_state = self.per_peer_state.read().unwrap(); let peer_state_mutex_opt = per_peer_state.get(counterparty_node_id); if let None = peer_state_mutex_opt { @@ -4410,7 +4398,7 @@ where self.genesis_hash.clone(), &self.best_block.read().unwrap(), &self.logger), chan); if let Some(announcement_sigs) = announcement_sigs_opt { log_trace!(self.logger, "Sending announcement_signatures for channel {}", log_bytes!(chan.get().channel_id())); - channel_state.pending_msg_events.push(events::MessageSendEvent::SendAnnouncementSignatures { + peer_state.pending_msg_events.push(events::MessageSendEvent::SendAnnouncementSignatures { node_id: counterparty_node_id.clone(), msg: announcement_sigs, }); @@ -4422,7 +4410,7 @@ where // announcement_signatures. 
log_trace!(self.logger, "Sending private initial channel_update for our counterparty on channel {}", log_bytes!(chan.get().channel_id())); if let Ok(msg) = self.get_channel_update_for_unicast(chan.get()) { - channel_state.pending_msg_events.push(events::MessageSendEvent::SendChannelUpdate { + peer_state.pending_msg_events.push(events::MessageSendEvent::SendChannelUpdate { node_id: counterparty_node_id.clone(), msg, }); @@ -4440,8 +4428,6 @@ where fn internal_shutdown(&self, counterparty_node_id: &PublicKey, their_features: &InitFeatures, msg: &msgs::Shutdown) -> Result<(), MsgHandleErrInternal> { let mut dropped_htlcs: Vec<(HTLCSource, PaymentHash)>; let result: Result<(), _> = loop { - let mut channel_state_lock = self.channel_state.lock().unwrap(); - let channel_state = &mut *channel_state_lock; let per_peer_state = self.per_peer_state.read().unwrap(); let peer_state_mutex_opt = per_peer_state.get(counterparty_node_id); if let None = peer_state_mutex_opt { @@ -4458,7 +4444,7 @@ where if chan_entry.get().sent_shutdown() { " after we initiated shutdown" } else { "" }); } - let (shutdown, monitor_update, htlcs) = try_chan_entry!(self, chan_entry.get_mut().shutdown(&self.keys_manager, &their_features, &msg), chan_entry); + let (shutdown, monitor_update, htlcs) = try_chan_entry!(self, chan_entry.get_mut().shutdown(&self.signer_provider, &their_features, &msg), chan_entry); dropped_htlcs = htlcs; // Update the monitor with the shutdown script if necessary. @@ -4473,7 +4459,7 @@ where } if let Some(msg) = shutdown { - channel_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown { + peer_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown { node_id: *counterparty_node_id, msg, }); @@ -4495,21 +4481,19 @@ where } fn internal_closing_signed(&self, counterparty_node_id: &PublicKey, msg: &msgs::ClosingSigned) -> Result<(), MsgHandleErrInternal> { + let per_peer_state = self.per_peer_state.read().unwrap(); + let peer_state_mutex_opt = per_peer_state.get(counterparty_node_id); + if let None = peer_state_mutex_opt { + return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), msg.channel_id)) + } let (tx, chan_option) = { - let mut channel_state_lock = self.channel_state.lock().unwrap(); - let channel_state = &mut *channel_state_lock; - let per_peer_state = self.per_peer_state.read().unwrap(); - if let None = per_peer_state.get(counterparty_node_id) { - return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), msg.channel_id)) - } - let peer_state_mutex = per_peer_state.get(counterparty_node_id).unwrap(); - let mut peer_state_lock = peer_state_mutex.lock().unwrap(); + let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap(); let peer_state = &mut *peer_state_lock; match peer_state.channel_by_id.entry(msg.channel_id.clone()) { hash_map::Entry::Occupied(mut chan_entry) => { let (closing_signed, tx) = try_chan_entry!(self, chan_entry.get_mut().closing_signed(&self.fee_estimator, &msg), chan_entry); if let Some(msg) = closing_signed { - channel_state.pending_msg_events.push(events::MessageSendEvent::SendClosingSigned { + peer_state.pending_msg_events.push(events::MessageSendEvent::SendClosingSigned { node_id: counterparty_node_id.clone(), msg, }); @@ -4532,8 +4516,9 @@ where } if let Some(chan) = chan_option { if let Ok(update) = self.get_channel_update_for_broadcast(&chan) { - 
let mut channel_state = self.channel_state.lock().unwrap(); - channel_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate { + let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap(); + let peer_state = &mut *peer_state_lock; + peer_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate { msg: update }); } @@ -4563,7 +4548,7 @@ where match peer_state.channel_by_id.entry(msg.channel_id) { hash_map::Entry::Occupied(mut chan) => { - let create_pending_htlc_status = |chan: &Channel<::Signer>, pending_forward_info: PendingHTLCStatus, error_code: u16| { + let create_pending_htlc_status = |chan: &Channel<::Signer>, pending_forward_info: PendingHTLCStatus, error_code: u16| { // If the update_add is completely bogus, the call will Err and we will close, // but if we've sent a shutdown and they haven't acknowledged it yet, we just // want to reject the new HTLC and fail it backwards instead of forwarding. @@ -4593,7 +4578,6 @@ where } fn internal_update_fulfill_htlc(&self, counterparty_node_id: &PublicKey, msg: &msgs::UpdateFulfillHTLC) -> Result<(), MsgHandleErrInternal> { - let channel_lock = self.channel_state.lock().unwrap(); let (htlc_source, forwarded_htlc_value) = { let per_peer_state = self.per_peer_state.read().unwrap(); let peer_state_mutex_opt = per_peer_state.get(counterparty_node_id); @@ -4609,7 +4593,7 @@ where hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id)) } }; - self.claim_funds_internal(channel_lock, htlc_source, msg.payment_preimage.clone(), Some(forwarded_htlc_value), false, msg.channel_id); + self.claim_funds_internal(htlc_source, msg.payment_preimage.clone(), Some(forwarded_htlc_value), false, msg.channel_id); Ok(()) } @@ -4652,8 +4636,6 @@ where } fn internal_commitment_signed(&self, counterparty_node_id: &PublicKey, msg: &msgs::CommitmentSigned) -> Result<(), MsgHandleErrInternal> { - let mut channel_state_lock = self.channel_state.lock().unwrap(); - let channel_state = &mut *channel_state_lock; let per_peer_state = self.per_peer_state.read().unwrap(); let peer_state_mutex_opt = per_peer_state.get(counterparty_node_id); if let None = peer_state_mutex_opt { @@ -4679,12 +4661,12 @@ where return Err(e); } - channel_state.pending_msg_events.push(events::MessageSendEvent::SendRevokeAndACK { + peer_state.pending_msg_events.push(events::MessageSendEvent::SendRevokeAndACK { node_id: counterparty_node_id.clone(), msg: revoke_and_ack, }); if let Some(msg) = commitment_signed { - channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs { + peer_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs { node_id: counterparty_node_id.clone(), updates: msgs::CommitmentUpdate { update_add_htlcs: Vec::new(), @@ -4797,8 +4779,6 @@ where fn internal_revoke_and_ack(&self, counterparty_node_id: &PublicKey, msg: &msgs::RevokeAndACK) -> Result<(), MsgHandleErrInternal> { let mut htlcs_to_fail = Vec::new(); let res = loop { - let mut channel_state_lock = self.channel_state.lock().unwrap(); - let channel_state = &mut *channel_state_lock; let per_peer_state = self.per_peer_state.read().unwrap(); let peer_state_mutex_opt = per_peer_state.get(counterparty_node_id); if let None = peer_state_mutex_opt { @@ -4831,7 +4811,7 @@ where } else { unreachable!(); } } if let Some(updates) = raa_updates.commitment_update { - 
channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs { + peer_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs { node_id: counterparty_node_id.clone(), updates, }); @@ -4881,8 +4861,6 @@ where } fn internal_announcement_signatures(&self, counterparty_node_id: &PublicKey, msg: &msgs::AnnouncementSignatures) -> Result<(), MsgHandleErrInternal> { - let mut channel_state_lock = self.channel_state.lock().unwrap(); - let channel_state = &mut *channel_state_lock; let per_peer_state = self.per_peer_state.read().unwrap(); let peer_state_mutex_opt = per_peer_state.get(counterparty_node_id); if let None = peer_state_mutex_opt { @@ -4896,7 +4874,7 @@ where return Err(MsgHandleErrInternal::from_no_close(LightningError{err: "Got an announcement_signatures before we were ready for it".to_owned(), action: msgs::ErrorAction::IgnoreError})); } - channel_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelAnnouncement { + peer_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelAnnouncement { msg: try_chan_entry!(self, chan.get_mut().announcement_signatures( self.get_our_node_id(), self.genesis_hash.clone(), self.best_block.read().unwrap().height(), msg), chan), // Note that announcement_signatures fails if the channel cannot be announced, @@ -4953,8 +4931,6 @@ where fn internal_channel_reestablish(&self, counterparty_node_id: &PublicKey, msg: &msgs::ChannelReestablish) -> Result<(), MsgHandleErrInternal> { let htlc_forwards; let need_lnd_workaround = { - let mut channel_state_lock = self.channel_state.lock().unwrap(); - let channel_state = &mut *channel_state_lock; let per_peer_state = self.per_peer_state.read().unwrap(); let peer_state_mutex_opt = per_peer_state.get(counterparty_node_id); @@ -4974,7 +4950,7 @@ where &*self.best_block.read().unwrap()), chan); let mut channel_update = None; if let Some(msg) = responses.shutdown_msg { - channel_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown { + peer_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown { node_id: counterparty_node_id.clone(), msg, }); @@ -4991,10 +4967,10 @@ where } let need_lnd_workaround = chan.get_mut().workaround_lnd_bug_4006.take(); htlc_forwards = self.handle_channel_resumption( - &mut channel_state.pending_msg_events, chan.get_mut(), responses.raa, responses.commitment_update, responses.order, + &mut peer_state.pending_msg_events, chan.get_mut(), responses.raa, responses.commitment_update, responses.order, Vec::new(), None, responses.channel_ready, responses.announcement_sigs); if let Some(upd) = channel_update { - channel_state.pending_msg_events.push(upd); + peer_state.pending_msg_events.push(upd); } need_lnd_workaround }, @@ -5023,7 +4999,7 @@ where MonitorEvent::HTLCEvent(htlc_update) => { if let Some(preimage) = htlc_update.payment_preimage { log_trace!(self.logger, "Claiming HTLC with preimage {} from our monitor", log_bytes!(preimage.0)); - self.claim_funds_internal(self.channel_state.lock().unwrap(), htlc_update.source, preimage, htlc_update.htlc_value_satoshis.map(|v| v * 1000), true, funding_outpoint.to_channel_id()); + self.claim_funds_internal(htlc_update.source, preimage, htlc_update.htlc_value_satoshis.map(|v| v * 1000), true, funding_outpoint.to_channel_id()); } else { log_trace!(self.logger, "Failing HTLC with hash {} from our monitor", log_bytes!(htlc_update.payment_hash.0)); let receiver = HTLCDestination::NextHopChannel { node_id: counterparty_node_id, channel_id: funding_outpoint.to_channel_id() }; 
@@ -5033,8 +5009,6 @@ where
				},
				MonitorEvent::CommitmentTxConfirmed(funding_outpoint) |
				MonitorEvent::UpdateFailed(funding_outpoint) => {
-					let mut channel_lock = self.channel_state.lock().unwrap();
-					let channel_state = &mut *channel_lock;
					let counterparty_node_id_opt = match counterparty_node_id {
						Some(cp_id) => Some(cp_id),
						None => {
@@ -5047,9 +5021,9 @@ where
					if let Some(counterparty_node_id) = counterparty_node_id_opt {
						let per_peer_state = self.per_peer_state.read().unwrap();
						if let Some(peer_state_mutex) = per_peer_state.get(&counterparty_node_id) {
-							let pending_msg_events = &mut channel_state.pending_msg_events;
							let mut peer_state_lock = peer_state_mutex.lock().unwrap();
							let peer_state = &mut *peer_state_lock;
+							let pending_msg_events = &mut peer_state.pending_msg_events;
							if let hash_map::Entry::Occupied(chan_entry) = peer_state.channel_by_id.entry(funding_outpoint.to_channel_id()) {
								let mut chan = remove_channel!(self, chan_entry);
								failed_channels.push(chan.force_shutdown(false));
@@ -5104,14 +5078,12 @@ where
		let mut failed_htlcs = Vec::new();
		let mut handle_errors = Vec::new();
		{
-			let mut channel_state_lock = self.channel_state.lock().unwrap();
-			let channel_state = &mut *channel_state_lock;
-			let pending_msg_events = &mut channel_state.pending_msg_events;
			let per_peer_state = self.per_peer_state.read().unwrap();
			for (_cp_id, peer_state_mutex) in per_peer_state.iter() {
				let mut peer_state_lock = peer_state_mutex.lock().unwrap();
				let peer_state = &mut *peer_state_lock;
+				let pending_msg_events = &mut peer_state.pending_msg_events;
				peer_state.channel_by_id.retain(|channel_id, chan| {
					match chan.maybe_free_holding_cell_htlcs(&self.logger) {
						Ok((commitment_opt, holding_cell_failed_htlcs)) => {
@@ -5170,14 +5142,12 @@ where
		let mut handle_errors: Vec<(PublicKey, Result<(), _>)> = Vec::new();
		let mut has_update = false;
		{
-			let mut channel_state_lock = self.channel_state.lock().unwrap();
-			let channel_state = &mut *channel_state_lock;
-			let pending_msg_events = &mut channel_state.pending_msg_events;
			let per_peer_state = self.per_peer_state.read().unwrap();
			for (_cp_id, peer_state_mutex) in per_peer_state.iter() {
				let mut peer_state_lock = peer_state_mutex.lock().unwrap();
				let peer_state = &mut *peer_state_lock;
+				let pending_msg_events = &mut peer_state.pending_msg_events;
				peer_state.channel_by_id.retain(|channel_id, chan| {
					match chan.maybe_propose_closing_signed(&self.fee_estimator, &self.logger) {
						Ok((msg_opt, tx_opt)) => {
@@ -5252,7 +5222,7 @@ where
			return Err(APIError::APIMisuseError { err: format!("min_value_msat of {} greater than total 21 million bitcoin supply", min_value_msat.unwrap()) });
		}
-		let payment_secret = PaymentSecret(self.keys_manager.get_secure_random_bytes());
+		let payment_secret = PaymentSecret(self.entropy_source.get_secure_random_bytes());
		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
		let mut payment_secrets = self.pending_inbound_payments.lock().unwrap();
@@ -5303,7 +5273,7 @@ where
	/// [`PaymentClaimable::payment_preimage`]: events::Event::PaymentClaimable::payment_preimage
	/// [`create_inbound_payment_for_hash`]: Self::create_inbound_payment_for_hash
	pub fn create_inbound_payment(&self, min_value_msat: Option, invoice_expiry_delta_secs: u32) -> Result<(PaymentHash, PaymentSecret), ()> {
-		inbound_payment::create(&self.inbound_payment_key, min_value_msat, invoice_expiry_delta_secs, &self.keys_manager, self.highest_seen_timestamp.load(Ordering::Acquire) as u64)
+		inbound_payment::create(&self.inbound_payment_key, min_value_msat, invoice_expiry_delta_secs, &self.entropy_source, self.highest_seen_timestamp.load(Ordering::Acquire) as u64)
	}
	/// Legacy version of [`create_inbound_payment`]. Use this method if you wish to share
@@ -5317,7 +5287,7 @@ where
	/// [`create_inbound_payment`]: Self::create_inbound_payment
	#[deprecated]
	pub fn create_inbound_payment_legacy(&self, min_value_msat: Option, invoice_expiry_delta_secs: u32) -> Result<(PaymentHash, PaymentSecret), APIError> {
-		let payment_preimage = PaymentPreimage(self.keys_manager.get_secure_random_bytes());
+		let payment_preimage = PaymentPreimage(self.entropy_source.get_secure_random_bytes());
		let payment_hash = PaymentHash(Sha256::hash(&payment_preimage.0).into_inner());
		let payment_secret = self.set_payment_hash_secret_map(payment_hash, Some(payment_preimage), min_value_msat, invoice_expiry_delta_secs)?;
		Ok((payment_hash, payment_secret))
@@ -5400,7 +5370,7 @@ where
		let best_block_height = self.best_block.read().unwrap().height();
		let short_to_chan_info = self.short_to_chan_info.read().unwrap();
		loop {
-			let scid_candidate = fake_scid::Namespace::Phantom.get_fake_scid(best_block_height, &self.genesis_hash, &self.fake_scid_rand_bytes, &self.keys_manager);
+			let scid_candidate = fake_scid::Namespace::Phantom.get_fake_scid(best_block_height, &self.genesis_hash, &self.fake_scid_rand_bytes, &self.entropy_source);
			// Ensure the generated scid doesn't conflict with a real channel.
			match short_to_chan_info.get(&scid_candidate) {
				Some(_) => continue,
@@ -5430,7 +5400,7 @@ where
		let best_block_height = self.best_block.read().unwrap().height();
		let short_to_chan_info = self.short_to_chan_info.read().unwrap();
		loop {
-			let scid_candidate = fake_scid::Namespace::Intercept.get_fake_scid(best_block_height, &self.genesis_hash, &self.fake_scid_rand_bytes, &self.keys_manager);
+			let scid_candidate = fake_scid::Namespace::Intercept.get_fake_scid(best_block_height, &self.genesis_hash, &self.fake_scid_rand_bytes, &self.entropy_source);
			// Ensure the generated scid doesn't conflict with a real channel.
			if short_to_chan_info.contains_key(&scid_candidate) { continue }
			return scid_candidate
@@ -5516,15 +5486,30 @@ where
	}
}
-impl MessageSendEventsProvider for ChannelManager
+impl MessageSendEventsProvider for ChannelManager
where
-	M::Target: chain::Watch<::Signer>,
+	M::Target: chain::Watch<::Signer>,
	T::Target: BroadcasterInterface,
-	K::Target: KeysInterface,
+	ES::Target: EntropySource,
+	NS::Target: NodeSigner,
+	SP::Target: SignerProvider,
	F::Target: FeeEstimator,
	R::Target: Router,
	L::Target: Logger,
{
+	/// Returns `MessageSendEvent`s strictly ordered per-peer, in the order they were generated.
+	/// The returned array will contain `MessageSendEvent`s for different peers if
+	/// `MessageSendEvent`s to more than one peer exist, but `MessageSendEvent`s to the same peer
+	/// are always placed next to each other.
+	///
+	/// Note that while `MessageSendEvent`s are strictly ordered per-peer, the peer order for
+	/// the chunks of `MessageSendEvent`s for different peers is random. I.e. if the array contains
+	/// `MessageSendEvent`s for both `node_a` and `node_b`, the `MessageSendEvent`s for `node_a`
+	/// will randomly be placed first or last in the returned array.
+	///
+	/// Note that even though `BroadcastChannelAnnouncement` and `BroadcastChannelUpdate`
+	/// `MessageSendEvent`s are intended to be broadcast to all peers, they will be placed among
+	/// the `MessageSendEvent`s to the specific peer they were generated under.
	fn get_and_clear_pending_msg_events(&self) -> Vec {
		let events = RefCell::new(Vec::new());
		PersistenceNotifierGuard::optionally_notify(&self.total_consistency_lock, &self.persistence_notifier, || {
@@ -5544,8 +5529,16 @@ where
			}
			let mut pending_events = Vec::new();
-			let mut channel_state = self.channel_state.lock().unwrap();
-			mem::swap(&mut pending_events, &mut channel_state.pending_msg_events);
+			let per_peer_state = self.per_peer_state.read().unwrap();
+			for (_cp_id, peer_state_mutex) in per_peer_state.iter() {
+				let mut peer_state_lock = peer_state_mutex.lock().unwrap();
+				let peer_state = &mut *peer_state_lock;
+				if peer_state.pending_msg_events.len() > 0 {
+					let mut peer_pending_events = Vec::new();
+					mem::swap(&mut peer_pending_events, &mut peer_state.pending_msg_events);
+					pending_events.append(&mut peer_pending_events);
+				}
+			}
			if !pending_events.is_empty() {
				events.replace(pending_events);
@@ -5557,11 +5550,13 @@ where
	}
}
-impl EventsProvider for ChannelManager
+impl EventsProvider for ChannelManager
where
-	M::Target: chain::Watch<::Signer>,
+	M::Target: chain::Watch<::Signer>,
	T::Target: BroadcasterInterface,
-	K::Target: KeysInterface,
+	ES::Target: EntropySource,
+	NS::Target: NodeSigner,
+	SP::Target: SignerProvider,
	F::Target: FeeEstimator,
	R::Target: Router,
	L::Target: Logger,
@@ -5594,11 +5589,13 @@ where
	}
}
-impl chain::Listen for ChannelManager
+impl chain::Listen for ChannelManager
where
-	M::Target: chain::Watch<::Signer>,
+	M::Target: chain::Watch<::Signer>,
	T::Target: BroadcasterInterface,
-	K::Target: KeysInterface,
+	ES::Target: EntropySource,
+	NS::Target: NodeSigner,
+	SP::Target: SignerProvider,
	F::Target: FeeEstimator,
	R::Target: Router,
	L::Target: Logger,
@@ -5632,11 +5629,13 @@ where
	}
}
-impl chain::Confirm for ChannelManager
+impl chain::Confirm for ChannelManager
where
-	M::Target: chain::Watch<::Signer>,
+	M::Target: chain::Watch<::Signer>,
	T::Target: BroadcasterInterface,
-	K::Target: KeysInterface,
+	ES::Target: EntropySource,
+	NS::Target: NodeSigner,
+	SP::Target: SignerProvider,
	F::Target: FeeEstimator,
	R::Target: Router,
	L::Target: Logger,
@@ -5723,11 +5722,13 @@ where
	}
}
-impl ChannelManager
+impl ChannelManager
where
-	M::Target: chain::Watch<::Signer>,
+	M::Target: chain::Watch<::Signer>,
	T::Target: BroadcasterInterface,
-	K::Target: KeysInterface,
+	ES::Target: EntropySource,
+	NS::Target: NodeSigner,
+	SP::Target: SignerProvider,
	F::Target: FeeEstimator,
	R::Target: Router,
	L::Target: Logger,
@@ -5735,7 +5736,7 @@ where
	/// Calls a function which handles an on-chain event (blocks dis/connected, transactions
	/// un/confirmed, etc) on each channel, handling any resulting errors or messages generated by
	/// the function.
-	fn do_chain_event::Signer>) -> Result<(Option, Vec<(HTLCSource, PaymentHash)>, Option), ClosureReason>>
+	fn do_chain_event::Signer>) -> Result<(Option, Vec<(HTLCSource, PaymentHash)>, Option), ClosureReason>>
			(&self, height_opt: Option, f: FN) {
		// Note that we MUST NOT end up calling methods on self.chain_monitor here - we're called
		// during initialization prior to the chain_monitor being fully configured in some cases.
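The documentation added above for get_and_clear_pending_msg_events promises that events for the same peer are contiguous in the returned vector, while the relative order of the peers themselves is unspecified. A minimal, self-contained sketch of a consumer relying on that contiguity; NodeId, MsgEvent and batch_per_peer are simplified stand-ins for illustration, not the actual LDK types:

// Sketch only: groups a per-peer-ordered event stream into one batch per peer.
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
struct NodeId(u8); // stand-in for a peer's PublicKey

#[derive(Debug)]
struct MsgEvent { peer: NodeId, payload: &'static str } // stand-in for MessageSendEvent

fn batch_per_peer(events: Vec<MsgEvent>) -> Vec<(NodeId, Vec<MsgEvent>)> {
	let mut batches: Vec<(NodeId, Vec<MsgEvent>)> = Vec::new();
	for ev in events {
		// Start a new batch whenever the peer changes (or on the first event).
		let start_new_batch = match batches.last() {
			Some((peer, _)) => *peer != ev.peer,
			None => true,
		};
		if start_new_batch {
			batches.push((ev.peer, Vec::new()));
		}
		// Safe: a batch for this peer was just pushed if none existed.
		batches.last_mut().unwrap().1.push(ev);
	}
	batches
}

fn main() {
	let events = vec![
		MsgEvent { peer: NodeId(1), payload: "update_add_htlc" },
		MsgEvent { peer: NodeId(1), payload: "commitment_signed" },
		MsgEvent { peer: NodeId(2), payload: "channel_reestablish" },
	];
	for (peer, batch) in batch_per_peer(events) {
		println!("{:?}: {} event(s): {:?}", peer, batch.len(), batch);
	}
}

Under the stated contract each peer appears in exactly one batch, so per-peer message delivery can be done in a single pass over the returned vector.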
@@ -5744,13 +5745,11 @@ where
		let mut failed_channels = Vec::new();
		let mut timed_out_htlcs = Vec::new();
		{
-			let mut channel_lock = self.channel_state.lock().unwrap();
-			let channel_state = &mut *channel_lock;
-			let pending_msg_events = &mut channel_state.pending_msg_events;
			let per_peer_state = self.per_peer_state.read().unwrap();
			for (_cp_id, peer_state_mutex) in per_peer_state.iter() {
				let mut peer_state_lock = peer_state_mutex.lock().unwrap();
				let peer_state = &mut *peer_state_lock;
+				let pending_msg_events = &mut peer_state.pending_msg_events;
				peer_state.channel_by_id.retain(|_, channel| {
					let res = f(channel);
					if let Ok((channel_ready_opt, mut timed_out_pending_htlcs, announcement_sigs)) = res {
@@ -5929,12 +5928,14 @@ where
	}
}
-impl
-	ChannelMessageHandler for ChannelManager
+impl
+	ChannelMessageHandler for ChannelManager
where
-	M::Target: chain::Watch<::Signer>,
+	M::Target: chain::Watch<::Signer>,
	T::Target: BroadcasterInterface,
-	K::Target: KeysInterface,
+	ES::Target: EntropySource,
+	NS::Target: NodeSigner,
+	SP::Target: SignerProvider,
	F::Target: FeeEstimator,
	R::Target: Router,
	L::Target: Logger,
@@ -6033,15 +6034,14 @@ where
		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
		let mut failed_channels = Vec::new();
		let mut no_channels_remain = true;
-		let mut channel_state = self.channel_state.lock().unwrap();
		let mut per_peer_state = self.per_peer_state.write().unwrap();
		{
-			let pending_msg_events = &mut channel_state.pending_msg_events;
			log_debug!(self.logger, "Marking channels with {} disconnected and generating channel_updates. We believe we {} make future connections to this peer.",
				log_pubkey!(counterparty_node_id), if no_connection_possible { "cannot" } else { "can" });
			if let Some(peer_state_mutex) = per_peer_state.get(counterparty_node_id) {
				let mut peer_state_lock = peer_state_mutex.lock().unwrap();
				let peer_state = &mut *peer_state_lock;
+				let pending_msg_events = &mut peer_state.pending_msg_events;
				peer_state.channel_by_id.retain(|_, chan| {
					chan.remove_uncommitted_htlcs_and_mark_paused(&self.logger);
					if chan.is_shutdown() {
@@ -6053,32 +6053,31 @@ where
					}
					true
				});
+				pending_msg_events.retain(|msg| {
+					match msg {
+						&events::MessageSendEvent::SendAcceptChannel { .. } => false,
+						&events::MessageSendEvent::SendOpenChannel { .. } => false,
+						&events::MessageSendEvent::SendFundingCreated { .. } => false,
+						&events::MessageSendEvent::SendFundingSigned { .. } => false,
+						&events::MessageSendEvent::SendChannelReady { .. } => false,
+						&events::MessageSendEvent::SendAnnouncementSignatures { .. } => false,
+						&events::MessageSendEvent::UpdateHTLCs { .. } => false,
+						&events::MessageSendEvent::SendRevokeAndACK { .. } => false,
+						&events::MessageSendEvent::SendClosingSigned { .. } => false,
+						&events::MessageSendEvent::SendShutdown { .. } => false,
+						&events::MessageSendEvent::SendChannelReestablish { .. } => false,
+						&events::MessageSendEvent::SendChannelAnnouncement { .. } => false,
+						&events::MessageSendEvent::BroadcastChannelAnnouncement { .. } => true,
+						&events::MessageSendEvent::BroadcastChannelUpdate { .. } => true,
+						&events::MessageSendEvent::SendChannelUpdate { .. } => false,
+						&events::MessageSendEvent::HandleError { .. } => false,
+						&events::MessageSendEvent::SendChannelRangeQuery { .. } => false,
+						&events::MessageSendEvent::SendShortIdsQuery { .. } => false,
+						&events::MessageSendEvent::SendReplyChannelRange { .. } => false,
+						&events::MessageSendEvent::SendGossipTimestampFilter { .. } => false,
+					}
+				});
			}
-			pending_msg_events.retain(|msg| {
-				match msg {
-					&events::MessageSendEvent::SendAcceptChannel { ref node_id, .. } => node_id != counterparty_node_id,
-					&events::MessageSendEvent::SendOpenChannel { ref node_id, .. } => node_id != counterparty_node_id,
-					&events::MessageSendEvent::SendFundingCreated { ref node_id, .. } => node_id != counterparty_node_id,
-					&events::MessageSendEvent::SendFundingSigned { ref node_id, .. } => node_id != counterparty_node_id,
-					&events::MessageSendEvent::SendChannelReady { ref node_id, .. } => node_id != counterparty_node_id,
-					&events::MessageSendEvent::SendAnnouncementSignatures { ref node_id, .. } => node_id != counterparty_node_id,
-					&events::MessageSendEvent::UpdateHTLCs { ref node_id, .. } => node_id != counterparty_node_id,
-					&events::MessageSendEvent::SendRevokeAndACK { ref node_id, .. } => node_id != counterparty_node_id,
-					&events::MessageSendEvent::SendClosingSigned { ref node_id, .. } => node_id != counterparty_node_id,
-					&events::MessageSendEvent::SendShutdown { ref node_id, .. } => node_id != counterparty_node_id,
-					&events::MessageSendEvent::SendChannelReestablish { ref node_id, .. } => node_id != counterparty_node_id,
-					&events::MessageSendEvent::SendChannelAnnouncement { ref node_id, .. } => node_id != counterparty_node_id,
-					&events::MessageSendEvent::BroadcastChannelAnnouncement { .. } => true,
-					&events::MessageSendEvent::BroadcastChannelUpdate { .. } => true,
-					&events::MessageSendEvent::SendChannelUpdate { ref node_id, .. } => node_id != counterparty_node_id,
-					&events::MessageSendEvent::HandleError { ref node_id, .. } => node_id != counterparty_node_id,
-					&events::MessageSendEvent::SendChannelRangeQuery { .. } => false,
-					&events::MessageSendEvent::SendShortIdsQuery { .. } => false,
-					&events::MessageSendEvent::SendReplyChannelRange { .. } => false,
-					&events::MessageSendEvent::SendGossipTimestampFilter { .. } => false,
-				}
-			});
-			mem::drop(channel_state);
		}
		if no_channels_remain {
			per_peer_state.remove(counterparty_node_id);
@@ -6107,6 +6106,7 @@ where
				e.insert(Mutex::new(PeerState {
					channel_by_id: HashMap::new(),
					latest_features: init_msg.features.clone(),
+					pending_msg_events: Vec::new(),
				}));
			},
			hash_map::Entry::Occupied(e) => {
@@ -6115,14 +6115,12 @@ where
			}
		}
-		let mut channel_state_lock = self.channel_state.lock().unwrap();
-		let channel_state = &mut *channel_state_lock;
-		let pending_msg_events = &mut channel_state.pending_msg_events;
		let per_peer_state = self.per_peer_state.read().unwrap();
		for (_cp_id, peer_state_mutex) in per_peer_state.iter() {
			let mut peer_state_lock = peer_state_mutex.lock().unwrap();
			let peer_state = &mut *peer_state_lock;
+			let pending_msg_events = &mut peer_state.pending_msg_events;
			peer_state.channel_by_id.retain(|_, chan| {
				let retain = if chan.get_counterparty_node_id() == *counterparty_node_id {
					if !chan.have_received_message() {
@@ -6175,7 +6173,6 @@ where
		} else {
			{
				// First check if we can advance the channel type and try again.
-				let mut channel_state = self.channel_state.lock().unwrap();
				let per_peer_state = self.per_peer_state.read().unwrap();
				let peer_state_mutex_opt = per_peer_state.get(counterparty_node_id);
				if let None = peer_state_mutex_opt { return; }
@@ -6183,7 +6180,7 @@ where
				let peer_state = &mut *peer_state_lock;
				if let Some(chan) = peer_state.channel_by_id.get_mut(&msg.channel_id) {
					if let Ok(msg) = chan.maybe_handle_error_without_close(self.genesis_hash) {
-						channel_state.pending_msg_events.push(events::MessageSendEvent::SendOpenChannel {
+						peer_state.pending_msg_events.push(events::MessageSendEvent::SendOpenChannel {
							node_id: *counterparty_node_id,
							msg,
						});
@@ -6637,11 +6634,13 @@ impl_writeable_tlv_based!(PendingInboundPayment, {
	(8, min_value_msat, required),
});
-impl Writeable for ChannelManager
+impl Writeable for ChannelManager
where
-	M::Target: chain::Watch<::Signer>,
+	M::Target: chain::Watch<::Signer>,
	T::Target: BroadcasterInterface,
-	K::Target: KeysInterface,
+	ES::Target: EntropySource,
+	NS::Target: NodeSigner,
+	SP::Target: SignerProvider,
	F::Target: FeeEstimator,
	R::Target: Router,
	L::Target: Logger,
@@ -6846,19 +6845,27 @@ where
/// which you've already broadcasted the transaction.
///
/// [`ChainMonitor`]: crate::chain::chainmonitor::ChainMonitor
-pub struct ChannelManagerReadArgs<'a, M: Deref, T: Deref, K: Deref, F: Deref, R: Deref, L: Deref>
+pub struct ChannelManagerReadArgs<'a, M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Deref, F: Deref, R: Deref, L: Deref>
where
-	M::Target: chain::Watch<::Signer>,
+	M::Target: chain::Watch<::Signer>,
	T::Target: BroadcasterInterface,
-	K::Target: KeysInterface,
+	ES::Target: EntropySource,
+	NS::Target: NodeSigner,
+	SP::Target: SignerProvider,
	F::Target: FeeEstimator,
	R::Target: Router,
	L::Target: Logger,
{
+	/// A cryptographically secure source of entropy.
+	pub entropy_source: ES,
+
+	/// A signer that is able to perform node-scoped cryptographic operations.
+	pub node_signer: NS,
+
	/// The keys provider which will give us relevant keys. Some keys will be loaded during
	/// deserialization and KeysInterface::read_chan_signer will be used to read per-Channel
	/// signing data.
-	pub keys_manager: K,
+	pub signer_provider: SP,
	/// The fee_estimator for use in the ChannelManager in the future.
	///
@@ -6899,15 +6906,17 @@ where
	/// this struct.
	///
	/// (C-not exported) because we have no HashMap bindings
-	pub channel_monitors: HashMap::Signer>>,
+	pub channel_monitors: HashMap::Signer>>,
}
-impl<'a, M: Deref, T: Deref, K: Deref, F: Deref, R: Deref, L: Deref>
-	ChannelManagerReadArgs<'a, M, T, K, F, R, L>
+impl<'a, M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Deref, F: Deref, R: Deref, L: Deref>
+	ChannelManagerReadArgs<'a, M, T, ES, NS, SP, F, R, L>
where
-	M::Target: chain::Watch<::Signer>,
+	M::Target: chain::Watch<::Signer>,
	T::Target: BroadcasterInterface,
-	K::Target: KeysInterface,
+	ES::Target: EntropySource,
+	NS::Target: NodeSigner,
+	SP::Target: SignerProvider,
	F::Target: FeeEstimator,
	R::Target: Router,
	L::Target: Logger,
{
	/// Simple utility function to create a ChannelManagerReadArgs which creates the monitor
	/// HashMap for you. This is primarily useful for C bindings where it is not practical to
	/// populate a HashMap directly from C.
-	pub fn new(keys_manager: K, fee_estimator: F, chain_monitor: M, tx_broadcaster: T, router: R, logger: L, default_config: UserConfig,
-			mut channel_monitors: Vec<&'a mut ChannelMonitor<::Signer>>) -> Self {
+	pub fn new(entropy_source: ES, node_signer: NS, signer_provider: SP, fee_estimator: F, chain_monitor: M, tx_broadcaster: T, router: R, logger: L, default_config: UserConfig,
+			mut channel_monitors: Vec<&'a mut ChannelMonitor<::Signer>>) -> Self {
		Self {
-			keys_manager, fee_estimator, chain_monitor, tx_broadcaster, router, logger, default_config,
+			entropy_source, node_signer, signer_provider, fee_estimator, chain_monitor, tx_broadcaster, router, logger, default_config,
			channel_monitors: channel_monitors.drain(..).map(|monitor| { (monitor.get_funding_txo().0, monitor) }).collect()
		}
	}
}
@@ -6926,33 +6935,37 @@ where
// Implement ReadableArgs for an Arc'd ChannelManager to make it a bit easier to work with the
// SimpleArcChannelManager type:
-impl<'a, M: Deref, T: Deref, K: Deref, F: Deref, R: Deref, L: Deref>
-	ReadableArgs> for (BlockHash, Arc>)
+impl<'a, M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Deref, F: Deref, R: Deref, L: Deref>
+	ReadableArgs> for (BlockHash, Arc>)
where
-	M::Target: chain::Watch<::Signer>,
+	M::Target: chain::Watch<::Signer>,
	T::Target: BroadcasterInterface,
-	K::Target: KeysInterface,
+	ES::Target: EntropySource,
+	NS::Target: NodeSigner,
+	SP::Target: SignerProvider,
	F::Target: FeeEstimator,
	R::Target: Router,
	L::Target: Logger,
{
-	fn read(reader: &mut Reader, args: ChannelManagerReadArgs<'a, M, T, K, F, R, L>) -> Result {
-		let (blockhash, chan_manager) = <(BlockHash, ChannelManager)>::read(reader, args)?;
+	fn read(reader: &mut Reader, args: ChannelManagerReadArgs<'a, M, T, ES, NS, SP, F, R, L>) -> Result {
+		let (blockhash, chan_manager) = <(BlockHash, ChannelManager)>::read(reader, args)?;
		Ok((blockhash, Arc::new(chan_manager)))
	}
}
-impl<'a, M: Deref, T: Deref, K: Deref, F: Deref, R: Deref, L: Deref>
-	ReadableArgs> for (BlockHash, ChannelManager)
+impl<'a, M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Deref, F: Deref, R: Deref, L: Deref>
+	ReadableArgs> for (BlockHash, ChannelManager)
where
-	M::Target: chain::Watch<::Signer>,
+	M::Target: chain::Watch<::Signer>,
	T::Target: BroadcasterInterface,
-	K::Target: KeysInterface,
+	ES::Target: EntropySource,
+	NS::Target: NodeSigner,
+	SP::Target: SignerProvider,
	F::Target: FeeEstimator,
	R::Target: Router,
	L::Target: Logger,
{
-	fn read(reader: &mut Reader, mut args: ChannelManagerReadArgs<'a, M, T, K, F, R, L>) -> Result {
+	fn read(reader: &mut Reader, mut args: ChannelManagerReadArgs<'a, M, T, ES, NS, SP, F, R, L>) -> Result {
		let _ver = read_ver_prefix!(reader, SERIALIZATION_VERSION);
		let genesis_hash: BlockHash = Readable::read(reader)?;
@@ -6963,12 +6976,12 @@ where
		let channel_count: u64 = Readable::read(reader)?;
		let mut funding_txo_set = HashSet::with_capacity(cmp::min(channel_count as usize, 128));
-		let mut peer_channels: HashMap::Signer>>> = HashMap::with_capacity(cmp::min(channel_count as usize, 128));
+		let mut peer_channels: HashMap::Signer>>> = HashMap::with_capacity(cmp::min(channel_count as usize, 128));
		let mut id_to_peer = HashMap::with_capacity(cmp::min(channel_count as usize, 128));
		let mut short_to_chan_info = HashMap::with_capacity(cmp::min(channel_count as usize, 128));
		let mut channel_closures = Vec::new();
		for _ in 0..channel_count {
-			let mut channel: Channel<::Signer> = Channel::read(reader, (&args.keys_manager, best_block_height))?;
+			let mut channel: Channel<::Signer> = Channel::read(reader, (&args.entropy_source, &args.signer_provider, best_block_height))?;
			let funding_txo = channel.get_funding_txo().ok_or(DecodeError::InvalidValue)?;
			funding_txo_set.insert(funding_txo.clone());
			if let Some(ref mut monitor) = args.channel_monitors.get_mut(&funding_txo) {
@@ -7094,12 +7107,13 @@ where
		}
		let peer_count: u64 = Readable::read(reader)?;
-		let mut per_peer_state = HashMap::with_capacity(cmp::min(peer_count as usize, MAX_ALLOC_SIZE/mem::size_of::<(PublicKey, Mutex::Signer>>)>()));
+		let mut per_peer_state = HashMap::with_capacity(cmp::min(peer_count as usize, MAX_ALLOC_SIZE/mem::size_of::<(PublicKey, Mutex::Signer>>)>()));
		for _ in 0..peer_count {
			let peer_pubkey = Readable::read(reader)?;
			let peer_state = PeerState {
				channel_by_id: peer_channels.remove(&peer_pubkey).unwrap_or(HashMap::new()),
				latest_features: Readable::read(reader)?,
+				pending_msg_events: Vec::new(),
			};
			per_peer_state.insert(peer_pubkey, Mutex::new(peer_state));
		}
@@ -7166,11 +7180,11 @@ where
			(11, probing_cookie_secret, option),
		});
		if fake_scid_rand_bytes.is_none() {
-			fake_scid_rand_bytes = Some(args.keys_manager.get_secure_random_bytes());
+			fake_scid_rand_bytes = Some(args.entropy_source.get_secure_random_bytes());
		}
		if probing_cookie_secret.is_none() {
-			probing_cookie_secret = Some(args.keys_manager.get_secure_random_bytes());
+			probing_cookie_secret = Some(args.entropy_source.get_secure_random_bytes());
		}
		if pending_outbound_payments.is_none() && pending_outbound_payments_no_retry.is_none() {
@@ -7276,7 +7290,7 @@ where
			});
		}
-		let inbound_pmt_key_material = args.keys_manager.get_inbound_payment_key_material();
+		let inbound_pmt_key_material = args.node_signer.get_inbound_payment_key_material();
		let expanded_inbound_key = inbound_payment::ExpandedKey::new(&inbound_pmt_key_material);
		let mut claimable_htlcs = HashMap::with_capacity(claimable_htlcs_list.len());
@@ -7320,13 +7334,13 @@ where
		}
		let mut secp_ctx = Secp256k1::new();
-		secp_ctx.seeded_randomize(&args.keys_manager.get_secure_random_bytes());
+		secp_ctx.seeded_randomize(&args.entropy_source.get_secure_random_bytes());
		if !channel_closures.is_empty() {
			pending_events_read.append(&mut channel_closures);
		}
-		let our_network_key = match args.keys_manager.get_node_secret(Recipient::Node) {
+		let our_network_key = match args.node_signer.get_node_secret(Recipient::Node) {
			Ok(key) => key,
			Err(()) => return Err(DecodeError::InvalidValue)
		};
@@ -7347,7 +7361,7 @@ where
				let mut outbound_scid_alias;
				loop {
					outbound_scid_alias = fake_scid::Namespace::OutboundAlias
-						.get_fake_scid(best_block_height, &genesis_hash, fake_scid_rand_bytes.as_ref().unwrap(), &args.keys_manager);
+						.get_fake_scid(best_block_height, &genesis_hash, fake_scid_rand_bytes.as_ref().unwrap(), &args.entropy_source);
					if outbound_scid_aliases.insert(outbound_scid_alias) { break; }
				}
				chan.set_outbound_scid_alias(outbound_scid_alias);
@@ -7378,7 +7392,7 @@ where
			let mut receiver_node_id = Some(our_network_pubkey);
			let phantom_shared_secret = claimable_htlcs[0].prev_hop.phantom_shared_secret;
			if phantom_shared_secret.is_some() {
-				let phantom_pubkey = args.keys_manager.get_node_id(Recipient::PhantomNode)
+				let phantom_pubkey = args.node_signer.get_node_id(Recipient::PhantomNode)
					.expect("Failed to get node_id for phantom node recipient");
				receiver_node_id = Some(phantom_pubkey)
			}
@@ -7432,9 +7446,6 @@ where
			best_block: RwLock::new(BestBlock::new(best_block_hash, best_block_height)),
-			channel_state: Mutex::new(ChannelHolder {
-				pending_msg_events: Vec::new(),
-			}),
			inbound_payment_key: expanded_inbound_key,
			pending_inbound_payments: Mutex::new(pending_inbound_payments),
			pending_outbound_payments: OutboundPayments { pending_outbound_payments: Mutex::new(pending_outbound_payments.unwrap()) },
@@ -7462,7 +7473,10 @@ where
			total_consistency_lock: RwLock::new(()),
			persistence_notifier: Notifier::new(),
-			keys_manager: args.keys_manager,
+			entropy_source: args.entropy_source,
+			node_signer: args.node_signer,
+			signer_provider: args.signer_provider,
+
			logger: args.logger,
			default_configuration: args.default_config,
		};
@@ -7503,7 +7517,7 @@ mod tests {
	use crate::util::events::{Event, HTLCDestination, MessageSendEvent, MessageSendEventsProvider, ClosureReason};
	use crate::util::test_utils;
	use crate::util::config::ChannelConfig;
-	use crate::chain::keysinterface::{EntropySource, KeysInterface};
+	use crate::chain::keysinterface::EntropySource;
	#[test]
	fn test_notify_limits() {
@@ -8064,23 +8078,6 @@ mod tests {
		check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure);
	}
-	fn check_unkown_peer_msg_event<'a, 'b: 'a, 'c: 'b>(node: &Node<'a, 'b, 'c>, closing_node_id: PublicKey, closed_channel_id: [u8; 32]){
-		let close_msg_ev = node.node.get_and_clear_pending_msg_events();
-		let expected_error_str = format!("Can't find a peer matching the passed counterparty node_id {}", closing_node_id);
-		match close_msg_ev[0] {
-			MessageSendEvent::HandleError {
-				ref node_id, action: msgs::ErrorAction::SendErrorMessage {
-					msg: msgs::ErrorMessage { ref channel_id, ref data }
-				}
-			} => {
-				assert_eq!(*node_id, closing_node_id);
-				assert_eq!(*data, expected_error_str);
-				assert_eq!(*channel_id, closed_channel_id);
-			}
-			_ => panic!("Unexpected event"),
-		}
-	}
-
	fn check_not_connected_to_peer_error(res_err: Result, expected_public_key: PublicKey) {
		let expected_message = format!("Not connected to node: {}", expected_public_key);
		check_api_misuse_error_message(expected_message, res_err)
@@ -8225,23 +8222,18 @@ mod tests {
		check_not_connected_to_peer_error(nodes[0].node.create_channel(unkown_public_key, 1_000_000, 500_000_000, 42, None), unkown_public_key);

		nodes[1].node.handle_open_channel(&unkown_public_key, channelmanager::provided_init_features(), &open_channel_msg);
-		check_unkown_peer_msg_event(&nodes[1], unkown_public_key, open_channel_msg.temporary_channel_id);

		nodes[0].node.handle_accept_channel(&unkown_public_key, channelmanager::provided_init_features(), &accept_channel_msg);
-		check_unkown_peer_msg_event(&nodes[0], unkown_public_key, open_channel_msg.temporary_channel_id);

		check_unkown_peer_error(nodes[0].node.accept_inbound_channel(&open_channel_msg.temporary_channel_id, &unkown_public_key, 42), unkown_public_key);
+
		nodes[1].node.handle_funding_created(&unkown_public_key, &funding_created_msg);
-		check_unkown_peer_msg_event(&nodes[1], unkown_public_key, open_channel_msg.temporary_channel_id);

		nodes[0].node.handle_funding_signed(&unkown_public_key, &funding_signed_msg);
-		check_unkown_peer_msg_event(&nodes[0], unkown_public_key, channel_id);

		nodes[0].node.handle_channel_ready(&unkown_public_key, &channel_ready_msg);
-		check_unkown_peer_msg_event(&nodes[0], unkown_public_key, channel_id);

		nodes[1].node.handle_announcement_signatures(&unkown_public_key, &announcement_signatures_msg);
-		check_unkown_peer_msg_event(&nodes[1], unkown_public_key, channel_id);

		check_unkown_peer_error(nodes[0].node.close_channel(&channel_id, &unkown_public_key), unkown_public_key);
@@ -8254,34 +8246,24 @@ mod tests {
		check_unkown_peer_error(nodes[0].node.update_channel_config(&unkown_public_key, &[channel_id], &ChannelConfig::default()), unkown_public_key);

		nodes[0].node.handle_shutdown(&unkown_public_key, &channelmanager::provided_init_features(), &shutdown_msg);
-		check_unkown_peer_msg_event(&nodes[0], unkown_public_key, channel_id);

		nodes[1].node.handle_closing_signed(&unkown_public_key, &closing_signed_msg);
-		check_unkown_peer_msg_event(&nodes[1], unkown_public_key, channel_id);

		nodes[0].node.handle_channel_reestablish(&unkown_public_key, &channel_reestablish_msg);
-		check_unkown_peer_msg_event(&nodes[0], unkown_public_key, channel_id);

		nodes[1].node.handle_update_add_htlc(&unkown_public_key, &update_add_htlc_msg);
-		check_unkown_peer_msg_event(&nodes[1], unkown_public_key, channel_id);

		nodes[1].node.handle_commitment_signed(&unkown_public_key, &commitment_signed_msg);
-		check_unkown_peer_msg_event(&nodes[1], unkown_public_key, channel_id);

		nodes[1].node.handle_update_fail_malformed_htlc(&unkown_public_key, &malformed_update_msg);
-		check_unkown_peer_msg_event(&nodes[1], unkown_public_key, channel_id);

		nodes[1].node.handle_update_fail_htlc(&unkown_public_key, &fail_update_msg);
-		check_unkown_peer_msg_event(&nodes[1], unkown_public_key, channel_id);

		nodes[1].node.handle_update_fulfill_htlc(&unkown_public_key, &fulfill_update_msg);
-		check_unkown_peer_msg_event(&nodes[1], unkown_public_key, channel_id);

		nodes[1].node.handle_revoke_and_ack(&unkown_public_key, &revoke_and_ack_msg);
-		check_unkown_peer_msg_event(&nodes[1], unkown_public_key, channel_id);

		nodes[1].node.handle_update_fee(&unkown_public_key, &update_fee_msg);
-		check_unkown_peer_msg_event(&nodes[1], unkown_public_key, channel_id);
	}
}
@@ -8289,7 +8271,7 @@ mod tests {
pub mod bench {
	use crate::chain::Listen;
	use crate::chain::chainmonitor::{ChainMonitor, Persist};
-	use crate::chain::keysinterface::{EntropySource, KeysManager, KeysInterface, InMemorySigner};
+	use crate::chain::keysinterface::{EntropySource, KeysManager, InMemorySigner};
	use crate::ln::channelmanager::{self, BestBlock, ChainParameters, ChannelManager, PaymentHash, PaymentPreimage, PaymentId};
	use crate::ln::functional_test_utils::*;
	use crate::ln::msgs::{ChannelMessageHandler, Init};
@@ -8312,7 +8294,7 @@ pub mod bench {
		&'a ChainMonitor,
-		&'a test_utils::TestBroadcaster, &'a KeysManager,
+		&'a test_utils::TestBroadcaster, &'a KeysManager, &'a KeysManager, &'a KeysManager,
		&'a test_utils::TestFeeEstimator, &'a test_utils::TestRouter<'a>,
		&'a test_utils::TestLogger>,
}
@@ -8341,7 +8323,7 @@ pub mod bench {
		let chain_monitor_a = ChainMonitor::new(None, &tx_broadcaster, &logger_a, &fee_estimator, &persister_a);
		let seed_a = [1u8; 32];
		let keys_manager_a = KeysManager::new(&seed_a, 42, 42);
-		let node_a = ChannelManager::new(&fee_estimator, &chain_monitor_a, &tx_broadcaster, &router, &logger_a, &keys_manager_a, config.clone(), ChainParameters {
+		let node_a = ChannelManager::new(&fee_estimator, &chain_monitor_a, &tx_broadcaster, &router, &logger_a, &keys_manager_a, &keys_manager_a, &keys_manager_a, config.clone(), ChainParameters {
			network,
			best_block: BestBlock::from_genesis(network),
		});
@@ -8351,7 +8333,7 @@ pub mod bench {
		let chain_monitor_b = ChainMonitor::new(None, &tx_broadcaster, &logger_a, &fee_estimator, &persister_b);
		let seed_b = [2u8; 32];
		let keys_manager_b = KeysManager::new(&seed_b, 42, 42);
-		let node_b = ChannelManager::new(&fee_estimator, &chain_monitor_b, &tx_broadcaster, &router, &logger_b, &keys_manager_b, config.clone(), ChainParameters {
+		let node_b = ChannelManager::new(&fee_estimator, &chain_monitor_b, &tx_broadcaster, &router, &logger_b, &keys_manager_b, &keys_manager_b, &keys_manager_b, config.clone(), ChainParameters {
			network,
			best_block: BestBlock::from_genesis(network),
		});
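The bench setup above passes the same KeysManager three times to ChannelManager::new, once for each of the new entropy-source, node-signer and signer-provider parameters. A minimal, self-contained sketch of that generic split follows; the traits and their methods here are simplified stand-ins for illustration, not the real LDK interfaces:

// Sketch only: one KeysManager-like type filling three independent Deref-bound roles.
use std::ops::Deref;

trait EntropySource { fn get_secure_random_bytes(&self) -> [u8; 32]; }
trait NodeSigner { fn node_id(&self) -> u8; }
trait SignerProvider { fn derive_signer(&self) -> u8; }

// One concrete type implementing all three roles, analogous to KeysManager.
struct Keys;
impl EntropySource for Keys { fn get_secure_random_bytes(&self) -> [u8; 32] { [0u8; 32] } }
impl NodeSigner for Keys { fn node_id(&self) -> u8 { 1 } }
impl SignerProvider for Keys { fn derive_signer(&self) -> u8 { 2 } }

struct Manager<ES: Deref, NS: Deref, SP: Deref>
where
	ES::Target: EntropySource,
	NS::Target: NodeSigner,
	SP::Target: SignerProvider,
{
	entropy_source: ES,
	node_signer: NS,
	signer_provider: SP,
}

fn main() {
	let keys = Keys;
	// The same reference is handed in for each role, mirroring the bench code's
	// `ChannelManager::new(..., &keys_manager, &keys_manager, &keys_manager, ...)`.
	let mgr = Manager { entropy_source: &keys, node_signer: &keys, signer_provider: &keys };
	let random = mgr.entropy_source.get_secure_random_bytes();
	let ids = (mgr.node_signer.node_id(), mgr.signer_provider.derive_signer());
	println!("entropy byte: {}, ids: {:?}", random[0], ids);
}

Splitting the single keys interface into three parameters lets callers mix implementations for the different roles, while a single KeysManager-style type can still satisfy all three at once, which is what the bench code relies on.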