[peer_handler] Take the peers lock before getting messages to send
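
If we fetch the handlers' pending message events before taking the peers
lock, another thread can win the race to the lock, process an inbound
message, and queue (or even flush) its responses first, so the events we
already fetched are delivered after newer ones. Taking the peers lock before
draining the handlers keeps messages in the order they were generated.

While we are here: pass ChannelUpdate messages through to the channel message
handler, handle the new SendReplyChannelRange event, and fix the spelling of
timer_tick_occurred.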
diff --git a/lightning/src/ln/peer_handler.rs b/lightning/src/ln/peer_handler.rs
index db8bb4290219d4f7615117d22839e28110ab90c6..d4eb9eae5e7910f7a0b4de52ab6591794210f8ea 100644
--- a/lightning/src/ln/peer_handler.rs
+++ b/lightning/src/ln/peer_handler.rs
@@ -142,6 +142,8 @@ impl ChannelMessageHandler for ErroringMessageHandler {
        fn handle_channel_reestablish(&self, their_node_id: &PublicKey, msg: &msgs::ChannelReestablish) {
                ErroringMessageHandler::push_error(self, their_node_id, msg.channel_id);
        }
+       // msgs::ChannelUpdate messages do not contain a channel_id field, so we just drop them.
+       fn handle_channel_update(&self, _their_node_id: &PublicKey, _msg: &msgs::ChannelUpdate) {}
        fn peer_disconnected(&self, _their_node_id: &PublicKey, _no_connection_possible: bool) {}
        fn peer_connected(&self, _their_node_id: &PublicKey, _msg: &msgs::Init) {}
        fn handle_error(&self, _their_node_id: &PublicKey, _msg: &msgs::ErrorMessage) {}
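
Note: ErroringMessageHandler answers every other channel message by pushing an
error scoped to that message's channel_id. ChannelUpdate is the odd one out
because it names the channel by its short_channel_id instead, so there is no
channel_id to error against. Abridged sketches of the two types involved (the
real definitions in lightning::ln::msgs carry more fields):

    pub struct ErrorMessage {
        pub channel_id: [u8; 32], // all-zeros addresses "all channels"
        pub data: String,
    }
    pub struct UnsignedChannelUpdate {
        pub short_channel_id: u64, // channel named by SCID, not channel_id
        // fee and HTLC policy fields elided
    }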
@@ -970,6 +972,7 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, L: Deref> PeerManager<D
                                }
                        },
                        wire::Message::ChannelUpdate(msg) => {
+                               self.message_handler.chan_handler.handle_channel_update(&peer.their_node_id.unwrap(), &msg);
                                let should_forward = match self.message_handler.route_handler.handle_channel_update(&msg) {
                                        Ok(v) => v,
                                        Err(e) => { return Err(e.into()); },
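
With this hunk a ChannelUpdate is seen by both handlers: the routing handler
still decides whether to forward it as gossip, while the channel handler gets
a chance to record updates for channels it is a party to. A minimal sketch of
what such a handler might do (UpdateCache is hypothetical, not LDK's
ChannelManager; the secp256k1 import path matches this era of the crate):

    use bitcoin::secp256k1::key::PublicKey;
    use lightning::ln::msgs;
    use std::collections::HashMap;
    use std::sync::Mutex;

    struct UpdateCache {
        latest: Mutex<HashMap<u64, msgs::ChannelUpdate>>,
    }

    impl UpdateCache {
        fn handle_channel_update(&self, _their_node_id: &PublicKey, msg: &msgs::ChannelUpdate) {
            // Key by short_channel_id; a real handler would first check that
            // the SCID belongs to one of our own channels.
            self.latest.lock().unwrap().insert(msg.contents.short_channel_id, msg.clone());
        }
    }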
@@ -1017,9 +1020,9 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, L: Deref> PeerManager<D
                        // buffer by doing things like announcing channels on another node. We should be willing to
                        // drop optional-ish messages when send buffers get full!
 
+                       let mut peers_lock = self.peers.lock().unwrap();
                        let mut events_generated = self.message_handler.chan_handler.get_and_clear_pending_msg_events();
                        events_generated.append(&mut self.message_handler.route_handler.get_and_clear_pending_msg_events());
-                       let mut peers_lock = self.peers.lock().unwrap();
                        let peers = &mut *peers_lock;
                        for event in events_generated.drain(..) {
                                macro_rules! get_peer_for_forwarding {
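
The reordered lines above are the heart of this commit. A standalone sketch
(plain std types, not LDK code) of the invariant they establish: draining
pending events and queuing them must be atomic with respect to other senders,
which means taking the send-buffer lock first:

    use std::sync::Mutex;

    struct Sender {
        pending: Mutex<Vec<String>>,  // events produced by the handlers
        outbound: Mutex<Vec<String>>, // per-peer send buffer; order matters
    }

    impl Sender {
        fn flush_pending(&self) {
            // Lock the send buffer *before* draining pending events. With the
            // order reversed, another thread could queue and send a newer
            // message between our drain and our queue, reordering the stream.
            let mut outbound = self.outbound.lock().unwrap();
            let mut pending = self.pending.lock().unwrap();
            outbound.extend(pending.drain(..));
        }
    }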
@@ -1281,6 +1284,17 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, L: Deref> PeerManager<D
                                                peer.pending_outbound_buffer.push_back(peer.channel_encryptor.encrypt_message(&encode_msg!(msg)));
                                                self.do_attempt_write_data(&mut descriptor, peer);
                                        }
+                                       MessageSendEvent::SendReplyChannelRange { ref node_id, ref msg } => {
+                                               log_trace!(self.logger, "Handling SendReplyChannelRange event in peer_handler for node {} with num_scids={}, first_blocknum={}, number_of_blocks={}, sync_complete={}",
+                                                       log_pubkey!(node_id),
+                                                       msg.short_channel_ids.len(),
+                                                       msg.first_blocknum,
+                                                       msg.number_of_blocks,
+                                                       msg.sync_complete);
+                                               let (mut descriptor, peer) = get_peer_for_forwarding!(node_id, {});
+                                               peer.pending_outbound_buffer.push_back(peer.channel_encryptor.encrypt_message(&encode_msg!(msg)));
+                                               self.do_attempt_write_data(&mut descriptor, peer);
+                                       }
                                }
                        }
 
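
SendReplyChannelRange is how the routing handler answers a peer's
query_channel_range during gossip sync; as the new match arm shows, the
peer_handler only logs, encrypts, and queues the reply. A hedged sketch of a
routing handler emitting the event (GossipResponder is hypothetical, not
LDK's NetGraphMsgHandler):

    use bitcoin::secp256k1::key::PublicKey;
    use lightning::ln::msgs;
    use lightning::util::events::MessageSendEvent;
    use std::sync::Mutex;

    struct GossipResponder {
        pending_events: Mutex<Vec<MessageSendEvent>>,
    }

    impl GossipResponder {
        fn handle_query_channel_range(&self, their_node_id: &PublicKey, msg: msgs::QueryChannelRange) {
            let reply = msgs::ReplyChannelRange {
                chain_hash: msg.chain_hash,
                first_blocknum: msg.first_blocknum,
                number_of_blocks: msg.number_of_blocks,
                sync_complete: true,           // single reply covers the whole range
                short_channel_ids: Vec::new(), // fill with the SCIDs known in range
            };
            self.pending_events.lock().unwrap().push(MessageSendEvent::SendReplyChannelRange {
                node_id: their_node_id.clone(),
                msg: reply,
            });
        }
    }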
@@ -1345,7 +1359,7 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, L: Deref> PeerManager<D
        /// It will send pings to each peer and disconnect those which did not respond to the last round of pings.
 
        /// Will most likely call send_data on all of the registered descriptors, thus, be very careful with reentrancy issues!
-       pub fn timer_tick_occured(&self) {
+       pub fn timer_tick_occurred(&self) {
                let mut peers_lock = self.peers.lock().unwrap();
                {
                        let peers = &mut *peers_lock;
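
Usage note: timer_tick_occurred is driven by a timer the application owns.
Each tick pings every connected peer, and a peer that never answered the
previous tick's ping is disconnected on the next one, which is exactly what
the updated test below exercises. A sketch of a caller (the 30-second
interval is illustrative, not mandated by the API):

    // peer_manager: Arc<PeerManager<...>> built elsewhere.
    std::thread::spawn(move || loop {
        std::thread::sleep(std::time::Duration::from_secs(30));
        peer_manager.timer_tick_occurred();
    });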
@@ -1518,11 +1532,11 @@ mod tests {
                assert_eq!(peers[0].peers.lock().unwrap().peers.len(), 1);
 
                // peers[0] awaiting_pong is set to true, but the Peer is still connected
-               peers[0].timer_tick_occured();
+               peers[0].timer_tick_occurred();
                assert_eq!(peers[0].peers.lock().unwrap().peers.len(), 1);
 
-               // Since timer_tick_occured() is called again when awaiting_pong is true, all Peers are disconnected
-               peers[0].timer_tick_occured();
+               // Since timer_tick_occurred() is called again when awaiting_pong is true, all Peers are disconnected
+               peers[0].timer_tick_occurred();
                assert_eq!(peers[0].peers.lock().unwrap().peers.len(), 0);
        }