fn handle_channel_reestablish(&self, their_node_id: &PublicKey, msg: &msgs::ChannelReestablish) {
ErroringMessageHandler::push_error(self, their_node_id, msg.channel_id);
}
+ // msgs::ChannelUpdate does not contain the channel_id field, so we just drop such messages.
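+ // (The ErrorMessage that push_error builds is keyed by the funding-derived
+ // channel_id, while a ChannelUpdate only carries a short_channel_id, so there
+ // is no channel_id to error against.)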
+ fn handle_channel_update(&self, _their_node_id: &PublicKey, _msg: &msgs::ChannelUpdate) {}
fn peer_disconnected(&self, _their_node_id: &PublicKey, _no_connection_possible: bool) {}
fn peer_connected(&self, _their_node_id: &PublicKey, _msg: &msgs::Init) {}
fn handle_error(&self, _their_node_id: &PublicKey, _msg: &msgs::ErrorMessage) {}
}
},
wire::Message::ChannelUpdate(msg) => {
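+ // Give the channel handler a chance to consume the update first (e.g. to track
+ // the counterparty's latest forwarding parameters) before the gossip handler
+ // validates it and decides whether it should be forwarded.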
+ self.message_handler.chan_handler.handle_channel_update(&peer.their_node_id.unwrap(), &msg);
let should_forward = match self.message_handler.route_handler.handle_channel_update(&msg) {
Ok(v) => v,
Err(e) => { return Err(e.into()); },
};
// TODO: There are some DoS attacks here where you can flood someone's outbound send
// buffer by doing things like announcing channels on another node. We should be willing to
// drop optional-ish messages when send buffers get full!
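+ // Taking the peers lock *before* draining the handlers' pending events keeps
+ // fetch-and-send atomic: if two threads race through process_events, events
+ // fetched outside the lock could otherwise be pushed onto the peers' send
+ // buffers out of order.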
+ let mut peers_lock = self.peers.lock().unwrap();
let mut events_generated = self.message_handler.chan_handler.get_and_clear_pending_msg_events();
events_generated.append(&mut self.message_handler.route_handler.get_and_clear_pending_msg_events());
- let mut peers_lock = self.peers.lock().unwrap();
let peers = &mut *peers_lock;
for event in events_generated.drain(..) {
macro_rules! get_peer_for_forwarding {