X-Git-Url: http://git.bitcoin.ninja/index.cgi?a=blobdiff_plain;f=src%2Fln%2Fpeer_handler.rs;h=765334167364a44cc06f6a33a69e25d3e5a3e1c0;hb=434211434540ab348d5a6937b5d9157eeb6f11bc;hp=797c55191f680a336a71fd58409d1e9c832f93d9;hpb=c43e535bc03404bad16e3d30e5b2fc7215e6ca15;p=rust-lightning

diff --git a/src/ln/peer_handler.rs b/src/ln/peer_handler.rs
index 797c55191..765334167 100644
--- a/src/ln/peer_handler.rs
+++ b/src/ln/peer_handler.rs
@@ -1,4 +1,5 @@
 //! Top level peer message handling and socket handling logic lives here.
+//!
 //! Instead of actually servicing sockets ourselves we require that you implement the
 //! SocketDescriptor interface and use that to receive actions which you should perform on the
 //! socket, and call into PeerManager with bytes read from the socket. The PeerManager will then
@@ -11,13 +12,13 @@ use ln::msgs;
 use util::ser::{Writeable, Writer, Readable};
 use ln::peer_channel_encryptor::{PeerChannelEncryptor,NextNoiseStep};
 use util::byte_utils;
-use util::events::{EventsProvider,Event};
+use util::events::{MessageSendEvent};
 use util::logger::Logger;
 
 use std::collections::{HashMap,hash_map,LinkedList};
 use std::sync::{Arc, Mutex};
 use std::sync::atomic::{AtomicUsize, Ordering};
-use std::{cmp,error,mem,hash,fmt};
+use std::{cmp,error,hash,fmt};
 
 /// Provides references to trait impls which handle different types of messages.
 pub struct MessageHandler {
@@ -32,7 +33,9 @@ pub struct MessageHandler {
 /// Provides an object which can be used to send data to and which uniquely identifies a connection
 /// to a remote host. You will need to be able to generate multiple of these which meet Eq and
 /// implement Hash to meet the PeerManager API.
+///
 /// For efficiency, Clone should be relatively cheap for this type.
+///
 /// You probably want to just extend an int and put a file descriptor in a struct and implement
 /// send_data. Note that if you are using a higher-level net library that may close() itself, be
 /// careful to ensure you don't have races whereby you might register a new connection with an fd
@@ -42,9 +45,11 @@ pub trait SocketDescriptor : cmp::Eq + hash::Hash + Clone {
	/// Returns the amount of data which was sent, possibly 0 if the socket has since disconnected.
	/// Note that in the disconnected case, a disconnect_event must still fire and further write
	/// attempts may occur until that time.
+	///
	/// If the returned size is smaller than data.len() - write_offset, a write_available event must
	/// trigger the next time more data can be written. Additionally, until the a send_data event
	/// completes fully, no further read_events should trigger on the same peer!
+	///
	/// If a read_event on this descriptor had previously returned true (indicating that read
	/// events should be paused to prevent DoS in the send buffer), resume_read may be set
	/// indicating that read events on this descriptor should resume. A resume_read of false does
@@ -122,7 +127,6 @@ impl PeerHolder {
 pub struct PeerManager<Descriptor: SocketDescriptor> {
 	message_handler: MessageHandler,
 	peers: Mutex<PeerHolder<Descriptor>>,
-	pending_events: Mutex<Vec<Event>>,
 	our_node_secret: SecretKey,
 	initial_syncs_sent: AtomicUsize,
 	logger: Arc<Logger>,
@@ -159,7 +163,6 @@ impl PeerManager {
 		PeerManager {
 			message_handler: message_handler,
 			peers: Mutex::new(PeerHolder { peers: HashMap::new(), node_id_to_descriptor: HashMap::new() }),
-			pending_events: Mutex::new(Vec::new()),
 			our_node_secret: our_node_secret,
 			initial_syncs_sent: AtomicUsize::new(0),
 			logger,
@@ -167,6 +170,7 @@ impl PeerManager {
 	}
 
 	/// Get the list of node ids for peers which have completed the initial handshake.
+	///
 	/// For outbound connections, this will be the same as the their_node_id parameter passed in to
 	/// new_outbound_connection, however entries will only appear once the initial handshake has
 	/// completed and we are sure the remote peer has the private key for the given node_id.
@@ -183,7 +187,9 @@ impl PeerManager {
 	/// Indicates a new outbound connection has been established to a node with the given node_id.
 	/// Note that if an Err is returned here you MUST NOT call disconnect_event for the new
 	/// descriptor but must disconnect the connection immediately.
+	///
 	/// Returns some bytes to send to the remote node.
+	///
 	/// Panics if descriptor is duplicative with some other descriptor which has not yet has a
 	/// disconnect_event.
 	pub fn new_outbound_connection(&self, their_node_id: PublicKey, descriptor: Descriptor) -> Result<Vec<u8>, PeerHandleError> {
@@ -213,10 +219,12 @@ impl PeerManager {
 	}
 
 	/// Indicates a new inbound connection has been established.
+	///
 	/// May refuse the connection by returning an Err, but will never write bytes to the remote end
 	/// (outbound connector always speaks first). Note that if an Err is returned here you MUST NOT
 	/// call disconnect_event for the new descriptor but must disconnect the connection
 	/// immediately.
+	///
 	/// Panics if descriptor is duplicative with some other descriptor which has not yet has a
 	/// disconnect_event.
 	pub fn new_inbound_connection(&self, descriptor: Descriptor) -> Result<(), PeerHandleError> {
@@ -266,12 +274,14 @@ impl PeerManager {
 	}
 
 	/// Indicates that there is room to write data to the given socket descriptor.
+	///
 	/// May return an Err to indicate that the connection should be closed.
+	///
 	/// Will most likely call send_data on the descriptor passed in (or the descriptor handed into
-	/// new_*_connection) before returning. Thus, be very careful with reentrancy issues! The
+	/// new_*\_connection) before returning. Thus, be very careful with reentrancy issues! The
 	/// invariants around calling write_event in case a write did not fully complete must still
 	/// hold - be ready to call write_event again if a write call generated here isn't sufficient!
-	/// Panics if the descriptor was not previously registered in a new_*_connection event.
+	/// Panics if the descriptor was not previously registered in a new_\*_connection event.
 	pub fn write_event(&self, descriptor: &mut Descriptor) -> Result<(), PeerHandleError> {
 		let mut peers = self.peers.lock().unwrap();
 		match peers.peers.get_mut(descriptor) {
@@ -285,16 +295,20 @@ impl PeerManager {
 	}
 
 	/// Indicates that data was read from the given socket descriptor.
+	///
 	/// May return an Err to indicate that the connection should be closed.
+	///
 	/// Will very likely call send_data on the descriptor passed in (or a descriptor handed into
 	/// new_*_connection) before returning. Thus, be very careful with reentrancy issues! The
 	/// invariants around calling write_event in case a write did not fully complete must still
 	/// hold. Note that this function will often call send_data on many peers before returning, not
 	/// just this peer!
+	///
 	/// If Ok(true) is returned, further read_events should not be triggered until a write_event on
 	/// this file descriptor has resume_read set (preventing DoS issues in the send buffer). Note
 	/// that this must be true even if a send_data call with resume_read=true was made during the
 	/// course of this function!
+	///
 	/// Panics if the descriptor was not previously registered in a new_*_connection event.
 	pub fn read_event(&self, peer_descriptor: &mut Descriptor, data: Vec<u8>) -> Result<bool, PeerHandleError> {
 		match self.do_read_event(peer_descriptor, data) {
@@ -545,8 +559,7 @@ impl PeerManager {
					// Channel control:
					32 => {
						let msg = try_potential_decodeerror!(msgs::OpenChannel::read(&mut reader));
-						let resp = try_potential_handleerror!(self.message_handler.chan_handler.handle_open_channel(&peer.their_node_id.unwrap(), &msg));
-						encode_and_send_msg!(resp, 33);
+						try_potential_handleerror!(self.message_handler.chan_handler.handle_open_channel(&peer.their_node_id.unwrap(), &msg));
					},
					33 => {
						let msg = try_potential_decodeerror!(msgs::AcceptChannel::read(&mut reader));
@@ -555,8 +568,7 @@ impl PeerManager {
 
					34 => {
						let msg = try_potential_decodeerror!(msgs::FundingCreated::read(&mut reader));
-						let resp = try_potential_handleerror!(self.message_handler.chan_handler.handle_funding_created(&peer.their_node_id.unwrap(), &msg));
-						encode_and_send_msg!(resp, 35);
+						try_potential_handleerror!(self.message_handler.chan_handler.handle_funding_created(&peer.their_node_id.unwrap(), &msg));
					},
					35 => {
						let msg = try_potential_decodeerror!(msgs::FundingSigned::read(&mut reader));
@@ -564,29 +576,16 @@ impl PeerManager {
					},
					36 => {
						let msg = try_potential_decodeerror!(msgs::FundingLocked::read(&mut reader));
-						let resp_option = try_potential_handleerror!(self.message_handler.chan_handler.handle_funding_locked(&peer.their_node_id.unwrap(), &msg));
-						match resp_option {
-							Some(resp) => encode_and_send_msg!(resp, 259),
-							None => {},
-						}
+						try_potential_handleerror!(self.message_handler.chan_handler.handle_funding_locked(&peer.their_node_id.unwrap(), &msg));
					},
 
					38 => {
						let msg = try_potential_decodeerror!(msgs::Shutdown::read(&mut reader));
-						let resp_options = try_potential_handleerror!(self.message_handler.chan_handler.handle_shutdown(&peer.their_node_id.unwrap(), &msg));
-						if let Some(resp) = resp_options.0 {
-							encode_and_send_msg!(resp, 38);
-						}
-						if let Some(resp) = resp_options.1 {
-							encode_and_send_msg!(resp, 39);
-						}
+						try_potential_handleerror!(self.message_handler.chan_handler.handle_shutdown(&peer.their_node_id.unwrap(), &msg));
					},
					39 => {
						let msg = try_potential_decodeerror!(msgs::ClosingSigned::read(&mut reader));
-						let resp_option = try_potential_handleerror!(self.message_handler.chan_handler.handle_closing_signed(&peer.their_node_id.unwrap(), &msg));
-						if let Some(resp) = resp_option {
-							encode_and_send_msg!(resp, 39);
-						}
+						try_potential_handleerror!(self.message_handler.chan_handler.handle_closing_signed(&peer.their_node_id.unwrap(), &msg));
					},
 
					128 => {
@@ -599,10 +598,7 @@ impl PeerManager {
					},
					131 => {
						let msg = try_potential_decodeerror!(msgs::UpdateFailHTLC::read(&mut reader));
-						let chan_update = try_potential_handleerror!(self.message_handler.chan_handler.handle_update_fail_htlc(&peer.their_node_id.unwrap(), &msg));
-						if let Some(update) = chan_update {
-							self.message_handler.route_handler.handle_htlc_fail_channel_update(&update);
-						}
+						try_potential_handleerror!(self.message_handler.chan_handler.handle_update_fail_htlc(&peer.their_node_id.unwrap(), &msg));
					},
					135 => {
						let msg = try_potential_decodeerror!(msgs::UpdateFailMalformedHTLC::read(&mut reader));
@@ -611,30 +607,11 @@
 
					132 => {
						let msg = try_potential_decodeerror!(msgs::CommitmentSigned::read(&mut reader));
-						let resps = try_potential_handleerror!(self.message_handler.chan_handler.handle_commitment_signed(&peer.their_node_id.unwrap(), &msg));
-						encode_and_send_msg!(resps.0, 133);
-						if let Some(resp) = resps.1 {
-							encode_and_send_msg!(resp, 132);
-						}
+						try_potential_handleerror!(self.message_handler.chan_handler.handle_commitment_signed(&peer.their_node_id.unwrap(), &msg));
					},
					133 => {
						let msg = try_potential_decodeerror!(msgs::RevokeAndACK::read(&mut reader));
-						let resp_option = try_potential_handleerror!(self.message_handler.chan_handler.handle_revoke_and_ack(&peer.their_node_id.unwrap(), &msg));
-						match resp_option {
-							Some(resps) => {
-								for resp in resps.update_add_htlcs {
-									encode_and_send_msg!(resp, 128);
-								}
-								for resp in resps.update_fulfill_htlcs {
-									encode_and_send_msg!(resp, 130);
-								}
-								for resp in resps.update_fail_htlcs {
-									encode_and_send_msg!(resp, 131);
-								}
-								encode_and_send_msg!(resps.commitment_signed, 132);
-							},
-							None => {},
-						}
+						try_potential_handleerror!(self.message_handler.chan_handler.handle_revoke_and_ack(&peer.their_node_id.unwrap(), &msg));
					},
					134 => {
						let msg = try_potential_decodeerror!(msgs::UpdateFee::read(&mut reader));
@@ -642,27 +619,44 @@
					},
					136 => {
						let msg = try_potential_decodeerror!(msgs::ChannelReestablish::read(&mut reader));
-						let (funding_locked, revoke_and_ack, commitment_update) = try_potential_handleerror!(self.message_handler.chan_handler.handle_channel_reestablish(&peer.their_node_id.unwrap(), &msg));
+						let (funding_locked, revoke_and_ack, commitment_update, order) = try_potential_handleerror!(self.message_handler.chan_handler.handle_channel_reestablish(&peer.their_node_id.unwrap(), &msg));
						if let Some(lock_msg) = funding_locked {
							encode_and_send_msg!(lock_msg, 36);
						}
-						if let Some(revoke_msg) = revoke_and_ack {
-							encode_and_send_msg!(revoke_msg, 133);
-						}
-						match commitment_update {
-							Some(resps) => {
-								for resp in resps.update_add_htlcs {
-									encode_and_send_msg!(resp, 128);
-								}
-								for resp in resps.update_fulfill_htlcs {
-									encode_and_send_msg!(resp, 130);
-								}
-								for resp in resps.update_fail_htlcs {
-									encode_and_send_msg!(resp, 131);
-								}
-								encode_and_send_msg!(resps.commitment_signed, 132);
+						macro_rules! handle_raa { () => {
+							if let Some(revoke_msg) = revoke_and_ack {
+								encode_and_send_msg!(revoke_msg, 133);
+							}
+						} }
+						macro_rules! handle_cu { () => {
+							match commitment_update {
+								Some(resps) => {
+									for resp in resps.update_add_htlcs {
+										encode_and_send_msg!(resp, 128);
+									}
+									for resp in resps.update_fulfill_htlcs {
+										encode_and_send_msg!(resp, 130);
+									}
+									for resp in resps.update_fail_htlcs {
+										encode_and_send_msg!(resp, 131);
+									}
+									if let Some(resp) = resps.update_fee {
+										encode_and_send_msg!(resp, 134);
+									}
+									encode_and_send_msg!(resps.commitment_signed, 132);
+								},
+								None => {},
+							}
+						} }
+						match order {
+							msgs::RAACommitmentOrder::RevokeAndACKFirst => {
+								handle_raa!();
+								handle_cu!();
+							},
+							msgs::RAACommitmentOrder::CommitmentFirst => {
+								handle_cu!();
+								handle_raa!();
							},
-							None => {},
						}
					},
 
@@ -724,13 +718,12 @@ impl PeerManager {
	/// Checks for any events generated by our handlers and processes them. May be needed after eg
	/// calls to ChannelManager::process_pending_htlc_forward.
	pub fn process_events(&self) {
-		let mut upstream_events = Vec::new();
		{
			// TODO: There are some DoS attacks here where you can flood someone's outbound send
			// buffer by doing things like announcing channels on another node. We should be willing to
			// drop optional-ish messages when send buffers get full!
 
-			let mut events_generated = self.message_handler.chan_handler.get_and_clear_pending_events();
+			let mut events_generated = self.message_handler.chan_handler.get_and_clear_pending_msg_events();
			let mut peers = self.peers.lock().unwrap();
			for event in events_generated.drain(..) {
				macro_rules! get_peer_for_forwarding {
@@ -757,14 +750,17 @@ impl PeerManager {
					}
				}
				match event {
-					Event::FundingGenerationReady {..} => { /* Hand upstream */ },
-					Event::FundingBroadcastSafe {..} => { /* Hand upstream */ },
-					Event::PaymentReceived {..} => { /* Hand upstream */ },
-					Event::PaymentSent {..} => { /* Hand upstream */ },
-					Event::PaymentFailed {..} => { /* Hand upstream */ },
-					Event::PendingHTLCsForwardable {..} => { /* Hand upstream */ },
-
-					Event::SendOpenChannel { ref node_id, ref msg } => {
+					MessageSendEvent::SendAcceptChannel { ref node_id, ref msg } => {
+						log_trace!(self, "Handling SendAcceptChannel event in peer_handler for node {} for channel {}",
+								log_pubkey!(node_id),
+								log_bytes!(msg.temporary_channel_id));
+						let (mut descriptor, peer) = get_peer_for_forwarding!(node_id, {
+								//TODO: Drop the pending channel? (or just let it timeout, but that sucks)
+							});
+						peer.pending_outbound_buffer.push_back(peer.channel_encryptor.encrypt_message(&encode_msg!(msg, 33)));
+						Self::do_attempt_write_data(&mut descriptor, peer);
+					},
+					MessageSendEvent::SendOpenChannel { ref node_id, ref msg } => {
						log_trace!(self, "Handling SendOpenChannel event in peer_handler for node {} for channel {}",
								log_pubkey!(node_id),
								log_bytes!(msg.temporary_channel_id));
@@ -773,9 +769,8 @@ impl PeerManager {
							});
						peer.pending_outbound_buffer.push_back(peer.channel_encryptor.encrypt_message(&encode_msg!(msg, 32)));
						Self::do_attempt_write_data(&mut descriptor, peer);
-						continue;
					},
-					Event::SendFundingCreated { ref node_id, ref msg } => {
+					MessageSendEvent::SendFundingCreated { ref node_id, ref msg } => {
						log_trace!(self, "Handling SendFundingCreated event in peer_handler for node {} for channel {} (which becomes {})",
								log_pubkey!(node_id),
								log_bytes!(msg.temporary_channel_id),
@@ -786,25 +781,40 @@ impl PeerManager {
							});
						peer.pending_outbound_buffer.push_back(peer.channel_encryptor.encrypt_message(&encode_msg!(msg, 34)));
						Self::do_attempt_write_data(&mut descriptor, peer);
-						continue;
					},
-					Event::SendFundingLocked { ref node_id, ref msg, ref announcement_sigs } => {
-						log_trace!(self, "Handling SendFundingLocked event in peer_handler for node {}{} for channel {}",
+					MessageSendEvent::SendFundingSigned { ref node_id, ref msg } => {
+						log_trace!(self, "Handling SendFundingSigned event in peer_handler for node {} for channel {}",
+								log_pubkey!(node_id),
+								log_bytes!(msg.channel_id));
+						let (mut descriptor, peer) = get_peer_for_forwarding!(node_id, {
+								//TODO: generate a DiscardFunding event indicating to the wallet that
+								//they should just throw away this funding transaction
+							});
+						peer.pending_outbound_buffer.push_back(peer.channel_encryptor.encrypt_message(&encode_msg!(msg, 35)));
+						Self::do_attempt_write_data(&mut descriptor, peer);
+					},
+					MessageSendEvent::SendFundingLocked { ref node_id, ref msg } => {
+						log_trace!(self, "Handling SendFundingLocked event in peer_handler for node {} for channel {}",
								log_pubkey!(node_id),
-								if announcement_sigs.is_some() { " with announcement sigs" } else { "" },
								log_bytes!(msg.channel_id));
						let (mut descriptor, peer) = get_peer_for_forwarding!(node_id, {
								//TODO: Do whatever we're gonna do for handling dropped messages
							});
						peer.pending_outbound_buffer.push_back(peer.channel_encryptor.encrypt_message(&encode_msg!(msg, 36)));
-						match announcement_sigs {
-							&Some(ref announce_msg) => peer.pending_outbound_buffer.push_back(peer.channel_encryptor.encrypt_message(&encode_msg!(announce_msg, 259))),
-							&None => {},
-						}
						Self::do_attempt_write_data(&mut descriptor, peer);
-						continue;
					},
-					Event::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref commitment_signed } } => {
+					MessageSendEvent::SendAnnouncementSignatures { ref node_id, ref msg } => {
+						log_trace!(self, "Handling SendAnnouncementSignatures event in peer_handler for node {} for channel {})",
+								log_pubkey!(node_id),
+								log_bytes!(msg.channel_id));
+						let (mut descriptor, peer) = get_peer_for_forwarding!(node_id, {
+								//TODO: generate a DiscardFunding event indicating to the wallet that
+								//they should just throw away this funding transaction
+							});
+						peer.pending_outbound_buffer.push_back(peer.channel_encryptor.encrypt_message(&encode_msg!(msg, 259)));
+						Self::do_attempt_write_data(&mut descriptor, peer);
+					},
+					MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, ref commitment_signed } } => {
						log_trace!(self, "Handling UpdateHTLCs event in peer_handler for node {} with {} adds, {} fulfills, {} fails for channel {}",
								log_pubkey!(node_id),
								update_add_htlcs.len(),
@@ -826,11 +836,33 @@ impl PeerManager {
						for msg in update_fail_malformed_htlcs {
							peer.pending_outbound_buffer.push_back(peer.channel_encryptor.encrypt_message(&encode_msg!(msg, 135)));
						}
+						if let &Some(ref msg) = update_fee {
+							peer.pending_outbound_buffer.push_back(peer.channel_encryptor.encrypt_message(&encode_msg!(msg, 134)));
+						}
						peer.pending_outbound_buffer.push_back(peer.channel_encryptor.encrypt_message(&encode_msg!(commitment_signed, 132)));
						Self::do_attempt_write_data(&mut descriptor, peer);
-						continue;
					},
-					Event::SendShutdown { ref node_id, ref msg } => {
+					MessageSendEvent::SendRevokeAndACK { ref node_id, ref msg } => {
+						log_trace!(self, "Handling SendRevokeAndACK event in peer_handler for node {} for channel {}",
+								log_pubkey!(node_id),
+								log_bytes!(msg.channel_id));
+						let (mut descriptor, peer) = get_peer_for_forwarding!(node_id, {
+								//TODO: Do whatever we're gonna do for handling dropped messages
+							});
+						peer.pending_outbound_buffer.push_back(peer.channel_encryptor.encrypt_message(&encode_msg!(msg, 133)));
+						Self::do_attempt_write_data(&mut descriptor, peer);
+					},
+					MessageSendEvent::SendClosingSigned { ref node_id, ref msg } => {
+						log_trace!(self, "Handling SendClosingSigned event in peer_handler for node {} for channel {}",
+								log_pubkey!(node_id),
+								log_bytes!(msg.channel_id));
+						let (mut descriptor, peer) = get_peer_for_forwarding!(node_id, {
+								//TODO: Do whatever we're gonna do for handling dropped messages
+							});
+						peer.pending_outbound_buffer.push_back(peer.channel_encryptor.encrypt_message(&encode_msg!(msg, 39)));
+						Self::do_attempt_write_data(&mut descriptor, peer);
+					},
+					MessageSendEvent::SendShutdown { ref node_id, ref msg } => {
						log_trace!(self, "Handling Shutdown event in peer_handler for node {} for channel {}",
								log_pubkey!(node_id),
								log_bytes!(msg.channel_id));
@@ -839,9 +871,8 @@ impl PeerManager {
							});
						peer.pending_outbound_buffer.push_back(peer.channel_encryptor.encrypt_message(&encode_msg!(msg, 38)));
						Self::do_attempt_write_data(&mut descriptor, peer);
-						continue;
					},
-					Event::BroadcastChannelAnnouncement { ref msg, ref update_msg } => {
+					MessageSendEvent::BroadcastChannelAnnouncement { ref msg, ref update_msg } => {
						log_trace!(self, "Handling BroadcastChannelAnnouncement event in peer_handler for short channel id {}", msg.contents.short_channel_id);
						if self.message_handler.route_handler.handle_channel_announcement(msg).is_ok() && self.message_handler.route_handler.handle_channel_update(update_msg).is_ok() {
							let encoded_msg = encode_msg!(msg, 256);
@@ -864,9 +895,8 @@ impl PeerManager {
								Self::do_attempt_write_data(&mut (*descriptor).clone(), peer);
							}
						}
-						continue;
					},
-					Event::BroadcastChannelUpdate { ref msg } => {
+					MessageSendEvent::BroadcastChannelUpdate { ref msg } => {
						log_trace!(self, "Handling BroadcastChannelUpdate event in peer_handler for short channel id {}", msg.contents.short_channel_id);
						if self.message_handler.route_handler.handle_channel_update(msg).is_ok() {
							let encoded_msg = encode_msg!(msg, 258);
@@ -879,9 +909,11 @@ impl PeerManager {
								Self::do_attempt_write_data(&mut (*descriptor).clone(), peer);
							}
						}
-						continue;
					},
-					Event::HandleError { ref node_id, ref action } => {
+					MessageSendEvent::PaymentFailureNetworkUpdate { ref update } => {
+						self.message_handler.route_handler.handle_htlc_fail_channel_update(update);
+					},
+					MessageSendEvent::HandleError { ref node_id, ref action } => {
						if let Some(ref action) = *action {
							match *action {
								msgs::ErrorAction::DisconnectPeer { ref msg } => {
@@ -903,9 +935,7 @@ impl PeerManager {
										self.message_handler.chan_handler.peer_disconnected(&node_id, false);
									}
								},
-								msgs::ErrorAction::IgnoreError => {
-									continue;
-								},
+								msgs::ErrorAction::IgnoreError => {},
								msgs::ErrorAction::SendErrorMessage { ref msg } => {
									log_trace!(self, "Handling SendErrorMessage HandleError event in peer_handler for node {} with message {}",
											log_pubkey!(node_id),
@@ -920,23 +950,17 @@ impl PeerManager {
						} else {
							log_error!(self, "Got no-action HandleError Event in peer_handler for node {}, no such events should ever be generated!", log_pubkey!(node_id));
						}
-						continue;
					}
				}
-
-				upstream_events.push(event);
			}
		}
-
-		let mut pending_events = self.pending_events.lock().unwrap();
-		for event in upstream_events.drain(..) {
-			pending_events.push(event);
-		}
	}
 
	/// Indicates that the given socket descriptor's connection is now closed.
+	///
	/// This must be called even if a PeerHandleError was given for a read_event or write_event,
-	/// but must NOT be called if a PeerHandleError was provided out of a new_*_connection event!
+	/// but must NOT be called if a PeerHandleError was provided out of a new_\*\_connection event!
+	///
	/// Panics if the descriptor was not previously registered in a successful new_*_connection event.
	pub fn disconnect_event(&self, descriptor: &Descriptor) {
		self.disconnect_event_internal(descriptor, false);
@@ -960,15 +984,6 @@ impl PeerManager {
	}
 }
 
-impl<Descriptor: SocketDescriptor> EventsProvider for PeerManager<Descriptor> {
-	fn get_and_clear_pending_events(&self) -> Vec<Event> {
-		let mut pending_events = self.pending_events.lock().unwrap();
-		let mut ret = Vec::new();
-		mem::swap(&mut ret, &mut *pending_events);
-		ret
-	}
-}
-
 #[cfg(test)]
 mod tests {
	use ln::peer_handler::{PeerManager, MessageHandler, SocketDescriptor};
@@ -1040,7 +1055,7 @@ mod tests {
		let their_id = PublicKey::from_secret_key(&secp_ctx, &peers[1].our_node_secret);
 
		let chan_handler = test_utils::TestChannelMessageHandler::new();
-		chan_handler.pending_events.lock().unwrap().push(events::Event::HandleError {
+		chan_handler.pending_events.lock().unwrap().push(events::MessageSendEvent::HandleError {
			node_id: their_id,
			action: Some(msgs::ErrorAction::DisconnectPeer { msg: None }),
		});
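The doc comments touched by this patch spell out the contract a socket layer has to uphold around read_event, write_event, process_events and disconnect_event. The sketch below is not part of the patch; it is a minimal, hypothetical driver-side glue layer written against the signatures visible in this revision (read_event taking a Vec<u8> and returning Result<bool, PeerHandleError>, disconnect_event taking a &Descriptor). Function names, the `lightning` crate path, and the surrounding socket framework are assumptions for illustration only.

```rust
// Hypothetical glue between a socket framework and this revision's PeerManager.
// Nothing here is prescribed by the patch; it only follows the documented contract.
use lightning::ln::peer_handler::{PeerManager, SocketDescriptor};

/// Called by the (assumed) socket layer whenever bytes arrive on `descriptor`.
/// Returns true if the caller should stop delivering read events until a later
/// send_data call sets resume_read, per the DoS note in the read_event docs.
fn on_socket_readable<D: SocketDescriptor>(
	peer_manager: &PeerManager<D>,
	descriptor: &mut D,
	data: Vec<u8>,
) -> bool {
	match peer_manager.read_event(descriptor, data) {
		Ok(pause_read) => {
			// Message handlers may have queued responses for this and other
			// peers (now surfaced as MessageSendEvents); flush them.
			peer_manager.process_events();
			pause_read
		},
		Err(_) => {
			// Close the underlying socket ourselves, then notify the manager.
			// The docs require disconnect_event even after a read_event or
			// write_event error, but never after a failed new_*_connection.
			peer_manager.disconnect_event(descriptor);
			false
		},
	}
}

/// Called by the socket layer once the socket has room to write again,
/// honoring the write_event invariants described above.
fn on_socket_writable<D: SocketDescriptor>(peer_manager: &PeerManager<D>, descriptor: &mut D) {
	if peer_manager.write_event(descriptor).is_err() {
		peer_manager.disconnect_event(descriptor);
	}
}
```

A real integration additionally has to feed resume_read from its send_data implementation back into this pause logic and respect the reentrancy warnings in the doc comments; the actual socket I/O is left to whatever networking framework wraps these callbacks.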