use util::events::{EventsProvider,Event};
use util::logger::Logger;
-use std::collections::{HashMap,LinkedList};
+use std::collections::{HashMap,hash_map,LinkedList};
use std::sync::{Arc, Mutex};
use std::sync::atomic::{AtomicUsize, Ordering};
use std::{cmp,error,mem,hash,fmt};
/// Only add to this set when noise completes:
node_id_to_descriptor: HashMap<PublicKey, Descriptor>,
}
+/// Split-borrow view of a PeerHolder: independent mutable references to the
+/// `peers` map and the `node_id_to_descriptor` map, so that both can be
+/// mutated simultaneously while a single outer lock guard is held.
+struct MutPeerHolder<'a, Descriptor: SocketDescriptor + 'a> {
+ peers: &'a mut HashMap<Descriptor, Peer>,
+ node_id_to_descriptor: &'a mut HashMap<PublicKey, Descriptor>,
+}
+impl<Descriptor: SocketDescriptor> PeerHolder<Descriptor> {
+ /// Splits `self` into disjoint mutable borrows of its two maps, letting a
+ /// caller mutate one map while holding a `&mut` into the other without
+ /// re-borrowing the whole holder.
+ fn borrow_parts(&mut self) -> MutPeerHolder<Descriptor> {
+ MutPeerHolder {
+ peers: &mut self.peers,
+ node_id_to_descriptor: &mut self.node_id_to_descriptor,
+ }
+ }
+}
pub struct PeerManager<Descriptor: SocketDescriptor> {
message_handler: MessageHandler,
logger: Arc<Logger>,
}
-
macro_rules! encode_msg {
($msg: expr, $msg_code: expr) => {
{
/// completed and we are sure the remote peer has the private key for the given node_id.
pub fn get_peer_node_ids(&self) -> Vec<PublicKey> {
let peers = self.peers.lock().unwrap();
- peers.peers.values().filter_map(|p| p.their_node_id).collect()
+ peers.peers.values().filter_map(|p| {
+ // Only report peers whose handshake has fully completed: the noise
+ // encryption must be ready AND we must already have received their
+ // init message (their_global_features set). Peers mid-handshake are
+ // filtered out rather than exposed to the caller.
+ if !p.channel_encryptor.is_ready_for_encryption() || p.their_global_features.is_none() {
+ return None;
+ }
+ p.their_node_id
+ }).collect()
}
/// Indicates a new outbound connection has been established to a node with the given node_id.
fn do_read_event(&self, peer_descriptor: &mut Descriptor, data: Vec<u8>) -> Result<bool, PeerHandleError> {
let pause_read = {
- let mut peers = self.peers.lock().unwrap();
- let (should_insert_node_id, pause_read) = match peers.peers.get_mut(peer_descriptor) {
+ let mut peers_lock = self.peers.lock().unwrap();
+ let peers = peers_lock.borrow_parts();
+ let pause_read = match peers.peers.get_mut(peer_descriptor) {
None => panic!("Descriptor for read_event is not already known to PeerManager"),
Some(peer) => {
assert!(peer.pending_read_buffer.len() > 0);
assert!(peer.pending_read_buffer.len() > peer.pending_read_buffer_pos);
- let mut insert_node_id = None;
let mut read_pos = 0;
while read_pos < data.len() {
{
($thing: expr) => {
match $thing {
Ok(x) => x,
- Err(_e) => {
- //TODO: Handle e?
- return Err(PeerHandleError{ no_connection_possible: false });
+ Err(e) => {
+ match e {
+ msgs::DecodeError::UnknownRealmByte => return Err(PeerHandleError{ no_connection_possible: false }),
+ msgs::DecodeError::UnknownRequiredFeature => {
+ log_debug!(self, "Got a channel/node announcement with an unknown required feature flag, you may want to update!");
+ continue;
+ },
+ msgs::DecodeError::BadPublicKey => return Err(PeerHandleError{ no_connection_possible: false }),
+ msgs::DecodeError::BadSignature => return Err(PeerHandleError{ no_connection_possible: false }),
+ msgs::DecodeError::BadText => return Err(PeerHandleError{ no_connection_possible: false }),
+ msgs::DecodeError::ShortRead => return Err(PeerHandleError{ no_connection_possible: false }),
+ msgs::DecodeError::ExtraAddressesPerType => {
+ log_debug!(self, "Error decoding message, ignoring due to lnd spec incompatibility. See https://github.com/lightningnetwork/lnd/issues/1407");
+ continue;
+ },
+ msgs::DecodeError::BadLengthDescriptor => return Err(PeerHandleError{ no_connection_possible: false }),
+ }
}
};
}
}
- macro_rules! try_ignore_potential_decodeerror {
- ($thing: expr) => {
- match $thing {
- Ok(x) => x,
- Err(_e) => {
- log_debug!(self, "Error decoding message, ignoring due to lnd spec incompatibility. See https://github.com/lightningnetwork/lnd/issues/1407");
- continue;
- }
+ macro_rules! insert_node_id {
+ () => {
+ // Registers this peer's node_id -> descriptor mapping once the noise
+ // handshake has established their_node_id. An Occupied entry means a
+ // connection for this node_id is already registered: their_node_id is
+ // unset first (so no peer_disconnected event is generated for the
+ // duplicate) and the new connection is rejected with a non-fatal error.
+ match peers.node_id_to_descriptor.entry(peer.their_node_id.unwrap()) {
+ hash_map::Entry::Occupied(_) => {
+ peer.their_node_id = None; // Unset so that we don't generate a peer_disconnected event
+ return Err(PeerHandleError{ no_connection_possible: false })
+ },
+ hash_map::Entry::Vacant(entry) => entry.insert(peer_descriptor.clone()),
};
}
}
peer.pending_read_buffer = [0; 18].to_vec(); // Message length header is 18 bytes
peer.pending_read_is_header = true;
- insert_node_id = Some(peer.their_node_id.unwrap());
+ insert_node_id!();
let mut local_features = msgs::LocalFeatures::new();
if self.initial_syncs_sent.load(Ordering::Acquire) < INITIAL_SYNCS_TO_SEND {
self.initial_syncs_sent.fetch_add(1, Ordering::AcqRel);
peer.pending_read_buffer = [0; 18].to_vec(); // Message length header is 18 bytes
peer.pending_read_is_header = true;
peer.their_node_id = Some(their_node_id);
- insert_node_id = Some(peer.their_node_id.unwrap());
+ insert_node_id!();
},
NextNoiseStep::NoiseComplete => {
if peer.pending_read_is_header {
if msg.local_features.requires_unknown_bits() {
return Err(PeerHandleError{ no_connection_possible: true });
}
+ if peer.their_global_features.is_some() {
+ return Err(PeerHandleError{ no_connection_possible: false });
+ }
peer.their_global_features = Some(msg.global_features);
peer.their_local_features = Some(msg.local_features);
}
},
17 => {
- // Error msg
+ let msg = try_potential_decodeerror!(msgs::ErrorMessage::decode(&msg_data[2..]));
+ let mut data_is_printable = true;
+ for b in msg.data.bytes() {
+ if b < 32 || b > 126 {
+ data_is_printable = false;
+ break;
+ }
+ }
+
+ if data_is_printable {
+ log_debug!(self, "Got Err message from {}: {}", log_pubkey!(peer.their_node_id.unwrap()), msg.data);
+ } else {
+ log_debug!(self, "Got Err message from {} with non-ASCII error message", log_pubkey!(peer.their_node_id.unwrap()));
+ }
+ self.message_handler.chan_handler.handle_error(&peer.their_node_id.unwrap(), &msg);
+ if msg.channel_id == [0; 32] {
+ return Err(PeerHandleError{ no_connection_possible: true });
+ }
},
18 => {
}
},
257 => {
- let msg = try_ignore_potential_decodeerror!(msgs::NodeAnnouncement::decode(&msg_data[2..]));
- try_potential_handleerror!(self.message_handler.route_handler.handle_node_announcement(&msg));
+ let msg = try_potential_decodeerror!(msgs::NodeAnnouncement::decode(&msg_data[2..]));
+ let should_forward = try_potential_handleerror!(self.message_handler.route_handler.handle_node_announcement(&msg));
+
+ if should_forward {
+ // TODO: forward msg along to all our other peers!
+ }
},
258 => {
let msg = try_potential_decodeerror!(msgs::ChannelUpdate::decode(&msg_data[2..]));
- try_potential_handleerror!(self.message_handler.route_handler.handle_channel_update(&msg));
+ let should_forward = try_potential_handleerror!(self.message_handler.route_handler.handle_channel_update(&msg));
+
+ if should_forward {
+ // TODO: forward msg along to all our other peers!
+ }
},
_ => {
if (msg_type & 1) == 0 {
Self::do_attempt_write_data(peer_descriptor, peer);
- (insert_node_id /* should_insert_node_id */, peer.pending_outbound_buffer.len() > 10) // pause_read
+ peer.pending_outbound_buffer.len() > 10 // pause_read
}
};
- match should_insert_node_id {
- Some(node_id) => { peers.node_id_to_descriptor.insert(node_id, peer_descriptor.clone()); },
- None => {}
- };
-
pause_read
};
Self::do_attempt_write_data(&mut descriptor, peer);
continue;
},
- Event::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref commitment_signed } } => {
+ Event::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref commitment_signed } } => {
log_trace!(self, "Handling UpdateHTLCs event in peer_handler for node {} with {} adds, {} fulfills, {} fails for channel {}",
log_pubkey!(node_id),
update_add_htlcs.len(),
for msg in update_fail_htlcs {
peer.pending_outbound_buffer.push_back(peer.channel_encryptor.encrypt_message(&encode_msg!(msg, 131)));
}
+ for msg in update_fail_malformed_htlcs {
+ peer.pending_outbound_buffer.push_back(peer.channel_encryptor.encrypt_message(&encode_msg!(msg, 135)));
+ }
peer.pending_outbound_buffer.push_back(peer.channel_encryptor.encrypt_message(&encode_msg!(commitment_signed, 132)));
Self::do_attempt_write_data(&mut descriptor, peer);
continue;