X-Git-Url: http://git.bitcoin.ninja/index.cgi?a=blobdiff_plain;f=lightning%2Fsrc%2Fln%2Fmsgs.rs;h=eec1a053802fd7476ce1b43f0518b4ef339c1dfb;hb=58a4f6c4ad3c262f7ed7009f6c99d36936e958b4;hp=004bd3d99d318e2458f41e9c31307614afb93b5f;hpb=5b28744755a4a5ef0303c9cbf927502495194055;p=rust-lightning

diff --git a/lightning/src/ln/msgs.rs b/lightning/src/ln/msgs.rs
index 004bd3d9..eec1a053 100644
--- a/lightning/src/ln/msgs.rs
+++ b/lightning/src/ln/msgs.rs
@@ -32,14 +32,15 @@ use bitcoin::hash_types::{Txid, BlockHash};
 
 use ln::features::{ChannelFeatures, InitFeatures, NodeFeatures};
 
-use std::{cmp, fmt};
-use std::fmt::Debug;
+use prelude::*;
+use core::{cmp, fmt};
+use core::fmt::Debug;
 use std::io::Read;
 
 use util::events::MessageSendEventsProvider;
 use util::ser::{Readable, Writeable, Writer, FixedLengthReader, HighZeroBytesDroppedVarInt};
 
-use ln::channelmanager::{PaymentPreimage, PaymentHash, PaymentSecret};
+use ln::{PaymentPreimage, PaymentHash, PaymentSecret};
 
 /// 21 million * 10^8 * 1000
 pub(crate) const MAX_VALUE_MSAT: u64 = 21_000_000_0000_0000_000;
@@ -63,6 +64,8 @@ pub enum DecodeError {
 	/// Error from std::io
 	Io(/// (C-not exported) as ErrorKind doesn't have a reasonable mapping
 		::std::io::ErrorKind),
+	/// The message included zlib-compressed values, which we don't support.
+	UnsupportedCompression,
 }
 
 /// An init message to be sent or received from a peer
@@ -390,6 +393,17 @@ pub enum NetAddress {
 	},
 }
 impl NetAddress {
+	/// Gets the ID of this address type. Addresses in node_announcement messages should be sorted
+	/// by this.
+	pub(crate) fn get_id(&self) -> u8 {
+		match self {
+			&NetAddress::IPv4 {..} => { 1 },
+			&NetAddress::IPv6 {..} => { 2 },
+			&NetAddress::OnionV2 {..} => { 3 },
+			&NetAddress::OnionV3 {..} => { 4 },
+		}
+	}
+
 	/// Strict byte-length of address descriptor, 1-byte type not recorded
 	fn len(&self) -> u16 {
 		match self {
@@ -469,6 +483,17 @@ impl Readable for Result<NetAddress, u8> {
 	}
 }
 
+impl Readable for NetAddress {
+	fn read<R: Read>(reader: &mut R) -> Result<Self, DecodeError> {
+		match Readable::read(reader) {
+			Ok(Ok(res)) => Ok(res),
+			Ok(Err(_)) => Err(DecodeError::UnknownVersion),
+			Err(e) => Err(e),
+		}
+	}
+}
+
+
 /// The unsigned part of a node_announcement
 #[derive(Clone, Debug, PartialEq)]
 pub struct UnsignedNodeAnnouncement {
@@ -744,7 +769,7 @@ pub enum OptionalField<T> {
 ///
 /// Messages MAY be called in parallel when they originate from different their_node_ids, however
 /// they MUST NOT be called in parallel when the two calls have the same their_node_id.
-pub trait ChannelMessageHandler : MessageSendEventsProvider + Send + Sync {
+pub trait ChannelMessageHandler : MessageSendEventsProvider {
 	//Channel init:
 	/// Handle an incoming open_channel message from the given peer.
 	fn handle_open_channel(&self, their_node_id: &PublicKey, their_features: InitFeatures, msg: &OpenChannel);
@@ -796,6 +821,9 @@ pub trait ChannelMessageHandler : MessageSendEventsProvider + Send + Sync {
 	/// Handle an incoming channel_reestablish message from the given peer.
 	fn handle_channel_reestablish(&self, their_node_id: &PublicKey, msg: &ChannelReestablish);
 
+	/// Handle an incoming channel update from the given peer.
+	fn handle_channel_update(&self, their_node_id: &PublicKey, msg: &ChannelUpdate);
+
 	// Error:
 	/// Handle an incoming error message from the given peer.
 	fn handle_error(&self, their_node_id: &PublicKey, msg: &ErrorMessage);
@@ -808,7 +836,7 @@ pub trait ChannelMessageHandler : MessageSendEventsProvider + Send + Sync {
 /// For `gossip_queries` messages there are potential DoS vectors when handling
 /// inbound queries. Implementors using an on-disk network graph should be aware of
 /// repeated disk I/O for queries accessing different parts of the network graph.
-pub trait RoutingMessageHandler : Send + Sync + MessageSendEventsProvider {
+pub trait RoutingMessageHandler : MessageSendEventsProvider {
 	/// Handle an incoming node_announcement message, returning true if it should be forwarded on,
 	/// false or returning an Err otherwise.
 	fn handle_node_announcement(&self, msg: &NodeAnnouncement) -> Result<bool, LightningError>;
@@ -851,7 +879,8 @@ pub trait RoutingMessageHandler : Send + Sync + MessageSendEventsProvider {
 }
 
 mod fuzzy_internal_msgs {
-	use ln::channelmanager::PaymentSecret;
+	use prelude::*;
+	use ln::PaymentSecret;
 
 	// These types aren't intended to be pub, but are exposed for direct fuzzing (as we deserialize
 	// them from untrusted input):
@@ -939,6 +968,7 @@ impl fmt::Display for DecodeError {
 			DecodeError::ShortRead => f.write_str("Packet extended beyond the provided bytes"),
 			DecodeError::BadLengthDescriptor => f.write_str("A length descriptor in the packet didn't describe the later data correctly"),
 			DecodeError::Io(ref e) => e.fmt(f),
+			DecodeError::UnsupportedCompression => f.write_str("We don't support receiving messages with zlib-compressed fields"),
 		}
 	}
 }
@@ -1268,20 +1298,17 @@ impl Writeable for OnionHopData {
 					(2, HighZeroBytesDroppedVarInt(self.amt_to_forward)),
 					(4, HighZeroBytesDroppedVarInt(self.outgoing_cltv_value)),
 					(6, short_channel_id)
-				});
+				}, { });
 			},
-			OnionHopDataFormat::FinalNode { payment_data: Some(ref final_data) } => {
-				if final_data.total_msat > MAX_VALUE_MSAT { panic!("We should never be sending infinite/overflow onion payments"); }
-				encode_varint_length_prefixed_tlv!(w, {
-					(2, HighZeroBytesDroppedVarInt(self.amt_to_forward)),
-					(4, HighZeroBytesDroppedVarInt(self.outgoing_cltv_value)),
-					(8, final_data)
-				});
-			},
-			OnionHopDataFormat::FinalNode { payment_data: None } => {
+			OnionHopDataFormat::FinalNode { ref payment_data } => {
+				if let Some(final_data) = payment_data {
+					if final_data.total_msat > MAX_VALUE_MSAT { panic!("We should never be sending infinite/overflow onion payments"); }
+				}
 				encode_varint_length_prefixed_tlv!(w, {
 					(2, HighZeroBytesDroppedVarInt(self.amt_to_forward)),
 					(4, HighZeroBytesDroppedVarInt(self.outgoing_cltv_value))
+				}, {
+					(8, payment_data)
 				});
 			},
 		}
@@ -1393,7 +1420,7 @@ impl Readable for Pong {
 
 impl Writeable for UnsignedChannelAnnouncement {
 	fn write<W: Writer>(&self, w: &mut W) -> Result<(), ::std::io::Error> {
-		w.size_hint(2 + 2*32 + 4*33 + self.features.byte_count() + self.excess_data.len());
+		w.size_hint(2 + 32 + 8 + 4*33 + self.features.byte_count() + self.excess_data.len());
 		self.features.write(w)?;
 		self.chain_hash.write(w)?;
 		self.short_channel_id.write(w)?;
@@ -1427,7 +1454,7 @@ impl Readable for UnsignedChannelAnnouncement {
 
 impl_writeable_len_match!(ChannelAnnouncement, {
 	{ ChannelAnnouncement { contents: UnsignedChannelAnnouncement {ref features, ref excess_data, ..}, .. },
-		2 + 2*32 + 4*33 + features.byte_count() + excess_data.len() + 4*64 }
+		2 + 32 + 8 + 4*33 + features.byte_count() + excess_data.len() + 4*64 }
 }, {
 	node_signature_1,
 	node_signature_2,
@@ -1488,8 +1515,8 @@ impl Readable for UnsignedChannelUpdate {
 }
 
 impl_writeable_len_match!(ChannelUpdate, {
-	{ ChannelUpdate { contents: UnsignedChannelUpdate {ref excess_data, ..}, .. },
-		64 + excess_data.len() + 64 }
+	{ ChannelUpdate { contents: UnsignedChannelUpdate {ref excess_data, ref htlc_maximum_msat, ..}, .. },
+		64 + 64 + excess_data.len() + if let OptionalField::Present(_) = htlc_maximum_msat { 8 } else { 0 } }
}, {
 	signature,
 	contents
@@ -1525,7 +1552,7 @@ impl Readable for ErrorMessage {
 
 impl Writeable for UnsignedNodeAnnouncement {
 	fn write<W: Writer>(&self, w: &mut W) -> Result<(), ::std::io::Error> {
-		w.size_hint(64 + 76 + self.features.byte_count() + self.addresses.len()*38 + self.excess_address_data.len() + self.excess_data.len());
+		w.size_hint(76 + self.features.byte_count() + self.addresses.len()*38 + self.excess_address_data.len() + self.excess_data.len());
 		self.features.write(w)?;
 		self.timestamp.write(w)?;
 		self.node_id.write(w)?;
@@ -1608,7 +1635,7 @@ impl Readable for UnsignedNodeAnnouncement {
 	}
 }
 
-impl_writeable_len_match!(NodeAnnouncement, {
+impl_writeable_len_match!(NodeAnnouncement, <=, {
 	{ NodeAnnouncement { contents: UnsignedNodeAnnouncement { ref features, ref addresses, ref excess_address_data, ref excess_data, ..}, .. },
 		64 + 76 + features.byte_count() + addresses.len()*(NetAddress::MAX_LEN as usize + 1) + excess_address_data.len() + excess_data.len() }
 }, {
@@ -1620,17 +1647,18 @@ impl Readable for QueryShortChannelIds {
 	fn read<R: Read>(r: &mut R) -> Result<Self, DecodeError> {
 		let chain_hash: BlockHash = Readable::read(r)?;
 
-		// We expect the encoding_len to always includes the 1-byte
-		// encoding_type and that short_channel_ids are 8-bytes each
 		let encoding_len: u16 = Readable::read(r)?;
-		if encoding_len == 0 || (encoding_len - 1) % 8 != 0 {
-			return Err(DecodeError::InvalidValue);
-		}
+		let encoding_type: u8 = Readable::read(r)?;
 
 		// Must be encoding_type=0 uncompressed serialization. We do not
 		// support encoding_type=1 zlib serialization.
-		let encoding_type: u8 = Readable::read(r)?;
 		if encoding_type != EncodingType::Uncompressed as u8 {
+			return Err(DecodeError::UnsupportedCompression);
+		}
+
+		// We expect the encoding_len to always includes the 1-byte
+		// encoding_type and that short_channel_ids are 8-bytes each
+		if encoding_len == 0 || (encoding_len - 1) % 8 != 0 {
 			return Err(DecodeError::InvalidValue);
 		}
 
@@ -1689,6 +1717,19 @@ impl Writeable for ReplyShortChannelIdsEnd {
 	}
 }
 
+impl QueryChannelRange {
+	/**
+	 * Calculates the overflow safe ending block height for the query.
+	 * Overflow returns `0xffffffff`, otherwise returns `first_blocknum + number_of_blocks`
+	 */
+	pub fn end_blocknum(&self) -> u32 {
+		match self.first_blocknum.checked_add(self.number_of_blocks) {
+			Some(block) => block,
+			None => u32::max_value(),
+		}
+	}
+}
+
 impl Readable for QueryChannelRange {
 	fn read<R: Read>(r: &mut R) -> Result<Self, DecodeError> {
 		let chain_hash: BlockHash = Readable::read(r)?;
@@ -1719,17 +1760,18 @@ impl Readable for ReplyChannelRange {
 		let number_of_blocks: u32 = Readable::read(r)?;
 		let sync_complete: bool = Readable::read(r)?;
 
-		// We expect the encoding_len to always includes the 1-byte
-		// encoding_type and that short_channel_ids are 8-bytes each
 		let encoding_len: u16 = Readable::read(r)?;
-		if encoding_len == 0 || (encoding_len - 1) % 8 != 0 {
-			return Err(DecodeError::InvalidValue);
-		}
+		let encoding_type: u8 = Readable::read(r)?;
 
 		// Must be encoding_type=0 uncompressed serialization. We do not
 		// support encoding_type=1 zlib serialization.
-		let encoding_type: u8 = Readable::read(r)?;
 		if encoding_type != EncodingType::Uncompressed as u8 {
+			return Err(DecodeError::UnsupportedCompression);
+		}
+
+		// We expect the encoding_len to always includes the 1-byte
+		// encoding_type and that short_channel_ids are 8-bytes each
+		if encoding_len == 0 || (encoding_len - 1) % 8 != 0 {
 			return Err(DecodeError::InvalidValue);
 		}
 
@@ -1797,9 +1839,9 @@ impl Writeable for GossipTimestampFilter {
 #[cfg(test)]
 mod tests {
 	use hex;
+	use ln::{PaymentPreimage, PaymentHash, PaymentSecret};
 	use ln::msgs;
 	use ln::msgs::{ChannelFeatures, FinalOnionHopData, InitFeatures, NodeFeatures, OptionalField, OnionErrorPacket, OnionHopDataFormat};
-	use ln::channelmanager::{PaymentPreimage, PaymentHash, PaymentSecret};
 	use util::ser::{Writeable, Readable};
 
 	use bitcoin::hashes::hex::FromHex;
@@ -1812,6 +1854,7 @@ mod tests {
 	use bitcoin::secp256k1::key::{PublicKey,SecretKey};
 	use bitcoin::secp256k1::{Secp256k1, Message};
 
+	use prelude::*;
 	use std::io::Cursor;
 
 	#[test]
@@ -2530,6 +2573,24 @@ mod tests {
 		assert_eq!(msg.outgoing_cltv_value, 0xffffffff);
 	}
 
+	#[test]
+	fn query_channel_range_end_blocknum() {
+		let tests: Vec<(u32, u32, u32)> = vec![
+			(10000, 1500, 11500),
+			(0, 0xffffffff, 0xffffffff),
+			(1, 0xffffffff, 0xffffffff),
+		];
+
+		for (first_blocknum, number_of_blocks, expected) in tests.into_iter() {
+			let sut = msgs::QueryChannelRange {
+				chain_hash: BlockHash::from_hex("06226e46111a0b59caaf126043eb5bbf28c34f3a5e332a1fc7b2b73cf188910f").unwrap(),
+				first_blocknum,
+				number_of_blocks,
+			};
+			assert_eq!(sut.end_blocknum(), expected);
+		}
+	}
+
 	#[test]
 	fn encoding_query_channel_range() {
 		let mut query_channel_range = msgs::QueryChannelRange {
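
A note on the overflow handling introduced above: the new QueryChannelRange::end_blocknum() clamps the query's ending height instead of wrapping on u32 overflow. Below is a minimal standalone sketch of that arithmetic; the free function is hypothetical (not part of the patch) and only mirrors the first_blocknum/number_of_blocks fields.

    // Saturating end-height calculation, mirroring QueryChannelRange::end_blocknum().
    fn end_blocknum(first_blocknum: u32, number_of_blocks: u32) -> u32 {
        // checked_add returns None on u32 overflow; clamp to the maximum value instead.
        first_blocknum.checked_add(number_of_blocks).unwrap_or(u32::max_value())
    }

    fn main() {
        assert_eq!(end_blocknum(10_000, 1_500), 11_500);
        // A query spanning past the end of the u32 range saturates rather than wrapping.
        assert_eq!(end_blocknum(1, 0xffff_ffff), 0xffff_ffff);
    }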