From a19d71d0b2768f2aee747b07be509b6591378e7a Mon Sep 17 00:00:00 2001
From: Matt Corallo
Date: Sun, 29 Dec 2019 14:22:43 -0500
Subject: [PATCH] Keep track of the Init Features for every connected/channel'd
 peer

Since we want to keep track of the Init-context features for every peer
we have channels with, we have to keep them for as long as the peer is
connected (since we may open a channel with them at any point).

We go ahead and take this opportunity to create a new per-peer-state
struct which has two levels of mutexes which is appropriate for moving
channel storage to.

Since we can't process messages from a given peer in parallel, the inner
lock is a regular mutex, but the outer lock is RW so that we can process
for different peers at the same time with an outer read lock.
---
 lightning/src/ln/channelmanager.rs | 60 +++++++++++++++++++++++++++++-
 1 file changed, 59 insertions(+), 1 deletion(-)

diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs
index 5eb2bb4a..72193baf 100644
--- a/lightning/src/ln/channelmanager.rs
+++ b/lightning/src/ln/channelmanager.rs
@@ -275,6 +275,12 @@ pub(super) struct ChannelHolder<ChanSigner: ChannelKeys> {
 	pub(super) pending_msg_events: Vec<events::MessageSendEvent>,
 }
 
+/// State we hold per-peer. In the future we should put channels in here, but for now we only hold
+/// the latest Init features we heard from the peer.
+struct PeerState {
+	latest_features: InitFeatures,
+}
+
 #[cfg(not(any(target_pointer_width = "32", target_pointer_width = "64")))]
 const ERR: () = "You need at least 32 bit pointers (well, usize, but we'll assume they're the same) for ChannelManager::latest_block_height";
 
@@ -328,6 +334,14 @@ pub struct ChannelManager<ChanSigner: ChannelKeys> {
 	channel_state: Mutex<ChannelHolder<ChanSigner>>,
 	our_network_key: SecretKey,
 
+	/// The bulk of our storage will eventually be here (channels and message queues and the like).
+	/// If we are connected to a peer we always at least have an entry here, even if no channels
+	/// are currently open with that peer.
+	/// Because adding or removing an entry is rare, we usually take an outer read lock and then
+	/// operate on the inner value freely. Sadly, this prevents parallel operation when opening a
+	/// new channel.
+	per_peer_state: RwLock<HashMap<PublicKey, Mutex<PeerState>>>,
+
 	pending_events: Mutex<Vec<events::Event>>,
 	/// Used when we have to take a BIG lock to make sure everything is self-consistent.
 	/// Essentially just when we're serializing ourselves out.
@@ -610,6 +624,8 @@ impl<ChanSigner: ChannelKeys> ChannelManager<ChanSigner> {
 			}),
 			our_network_key: keys_manager.get_node_secret(),
 
+			per_peer_state: RwLock::new(HashMap::new()),
+
 			pending_events: Mutex::new(Vec::new()),
 			total_consistency_lock: RwLock::new(()),
@@ -2780,6 +2796,7 @@ impl<ChanSigner: ChannelKeys> ChannelMessageHandler for ChannelManager<ChanSigner>
@@ … @@ impl<ChanSigner: ChannelKeys> ChannelMessageHandler for ChannelManager<ChanSigner>
@@ … @@ impl<ChanSigner: ChannelKeys> ChannelMessageHandler for ChannelManager<ChanSigner>
@@ … @@ impl<ChanSigner: ChannelKeys> ChannelMessageHandler for ChannelManager<ChanSigner>
+		{
+			let mut peer_state_lock = self.per_peer_state.write().unwrap();
+			match peer_state_lock.entry(their_node_id.clone()) {
+				hash_map::Entry::Vacant(e) => {
+					e.insert(Mutex::new(PeerState {
+						latest_features: init_msg.features.clone(),
+					}));
+				},
+				hash_map::Entry::Occupied(e) => {
+					e.get().lock().unwrap().latest_features = init_msg.features.clone();
+				},
+			}
+		}
+
 		let mut channel_state_lock = self.channel_state.lock().unwrap();
 		let channel_state = &mut *channel_state_lock;
 		let pending_msg_events = &mut channel_state.pending_msg_events;
@@ -3123,6 +3161,14 @@ impl<ChanSigner: ChannelKeys + Writeable> Writeable for ChannelManager<ChanSigner>
@@ … @@ impl<'a, R : ::std::io::Read, ChanSigner: ChannelKeys + Readable> ReadableArgs
 			claimable_htlcs.insert(payment_hash, previous_hops);
 		}
 
+		let peer_count: u64 = Readable::read(reader)?;
+		let mut per_peer_state = HashMap::with_capacity(cmp::min(peer_count as usize, 128));
+		for _ in 0..peer_count {
+			let peer_pubkey = Readable::read(reader)?;
+			let peer_state = PeerState {
+				latest_features: Readable::read(reader)?,
+			};
+			per_peer_state.insert(peer_pubkey, Mutex::new(peer_state));
+		}
+
 		let channel_manager = ChannelManager {
 			genesis_hash,
 			fee_estimator: args.fee_estimator,
@@ -3275,6 +3331,8 @@ impl<'a, R : ::std::io::Read, ChanSigner: ChannelKeys + Readable> ReadableArgs
 			}),
 			our_network_key: args.keys_manager.get_node_secret(),
+			per_peer_state: RwLock::new(per_peer_state),
+
 			pending_events: Mutex::new(Vec::new()),
 			total_consistency_lock: RwLock::new(()),
 			keys_manager: args.keys_manager,
-- 
2.30.2
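
As a reading aid, here is a minimal, self-contained sketch (not taken from the patch) of the two-level locking pattern the commit message describes: an outer RwLock around a HashMap of per-peer Mutexes, so adding or removing a peer takes the rare outer write lock while ordinary per-peer work only takes the outer read lock plus that one peer's inner Mutex. Manager, PeerId, Features, peer_connected and handle_message_for are simplified stand-ins invented for illustration; the real code uses ChannelManager, PublicKey and InitFeatures.

use std::collections::hash_map::Entry;
use std::collections::HashMap;
use std::sync::{Mutex, RwLock};

// Simplified stand-ins for the real types: the patch keys the map by the
// peer's PublicKey and stores its InitFeatures.
type PeerId = [u8; 33];

#[derive(Clone, Debug)]
struct Features(Vec<u8>);

// Per-peer state behind its own Mutex, mirroring the PeerState the patch adds.
struct PeerState {
	latest_features: Features,
}

struct Manager {
	// Outer RwLock: write-locked only when peers connect/disconnect (rare),
	// read-locked on the per-message hot path.
	per_peer_state: RwLock<HashMap<PeerId, Mutex<PeerState>>>,
}

impl Manager {
	// Adding or updating an entry takes the (rare) outer write lock, as in the
	// patch's peer_connected handler.
	fn peer_connected(&self, peer: PeerId, features: Features) {
		let mut peers = self.per_peer_state.write().unwrap();
		match peers.entry(peer) {
			Entry::Vacant(e) => {
				e.insert(Mutex::new(PeerState { latest_features: features }));
			},
			Entry::Occupied(e) => {
				e.get().lock().unwrap().latest_features = features;
			},
		}
	}

	// Message handling only needs the outer read lock, so different peers can
	// be processed in parallel; the inner Mutex serializes work for one peer.
	fn handle_message_for(&self, peer: &PeerId) {
		let peers = self.per_peer_state.read().unwrap();
		if let Some(peer_state_mutex) = peers.get(peer) {
			let peer_state = peer_state_mutex.lock().unwrap();
			// ... do per-peer work here while other peers remain unblocked ...
			println!("peer features: {:?}", peer_state.latest_features);
		}
	}
}

fn main() {
	let mgr = Manager { per_peer_state: RwLock::new(HashMap::new()) };
	mgr.peer_connected([2u8; 33], Features(vec![0b10]));
	mgr.handle_message_for(&[2u8; 33]);
}

The deserialization hunk in the patch also shows the serialization format for this map: a u64 peer count, followed by each peer's public key and its latest InitFeatures.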