Do not log_debug when we receive duplicate gossip messages
diff --git a/lightning/src/ln/msgs.rs b/lightning/src/ln/msgs.rs
index 03a8866025ef009e1736b54270123c5c4ddfbbbc..c45e2277fb8db5a307a2b0c13fb546d97d62ec74 100644
--- a/lightning/src/ln/msgs.rs
+++ b/lightning/src/ln/msgs.rs
@@ -32,20 +32,22 @@ use bitcoin::hash_types::{Txid, BlockHash};
 
 use ln::features::{ChannelFeatures, InitFeatures, NodeFeatures};
 
-use std::{cmp, fmt};
-use std::fmt::Debug;
+use prelude::*;
+use core::{cmp, fmt};
+use core::fmt::Debug;
 use std::io::Read;
 
 use util::events::MessageSendEventsProvider;
+use util::logger;
 use util::ser::{Readable, Writeable, Writer, FixedLengthReader, HighZeroBytesDroppedVarInt};
 
-use ln::channelmanager::{PaymentPreimage, PaymentHash, PaymentSecret};
+use ln::{PaymentPreimage, PaymentHash, PaymentSecret};
 
 /// 21 million * 10^8 * 1000
 pub(crate) const MAX_VALUE_MSAT: u64 = 21_000_000_0000_0000_000;
 
 /// An error in decoding a message or struct.
-#[derive(Clone, Debug)]
+#[derive(Clone, Debug, PartialEq)]
 pub enum DecodeError {
        /// A version byte specified something we don't know how to handle.
        /// Includes unknown realm byte in an OnionHopData packet
@@ -63,6 +65,8 @@ pub enum DecodeError {
        /// Error from std::io
        Io(/// (C-not exported) as ErrorKind doesn't have a reasonable mapping
         ::std::io::ErrorKind),
+       /// The message included zlib-compressed values, which we don't support.
+       UnsupportedCompression,
 }
 
 /// An init message to be sent or received from a peer
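
The new variant lets callers tell "the peer used zlib, which we simply don't support" apart from a genuinely malformed message; the reworked query readers further down return it before validating encoding_len. A minimal sketch of matching on it, in the style of this file (the helper name and handling are illustrative, not what peer_handler does):

    // Hypothetical helper: classify a failed gossip-query read.
    fn classify_query_read<R: Read>(r: &mut R) {
        match <QueryShortChannelIds as Readable>::read(r) {
            Err(DecodeError::UnsupportedCompression) => { /* zlib-encoded ids: harmless, skip the message */ },
            Err(_) => { /* genuinely malformed */ },
            Ok(_query) => { /* handle the query */ },
        }
    }
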
@@ -390,6 +394,17 @@ pub enum NetAddress {
        },
 }
 impl NetAddress {
+       /// Gets the ID of this address type. Addresses in node_announcement messages should be sorted
+       /// by this.
+       pub(crate) fn get_id(&self) -> u8 {
+               match self {
+                       &NetAddress::IPv4 {..} => { 1 },
+                       &NetAddress::IPv6 {..} => { 2 },
+                       &NetAddress::OnionV2 {..} => { 3 },
+                       &NetAddress::OnionV3 {..} => { 4 },
+               }
+       }
+
        /// Strict byte-length of address descriptor, 1-byte type not recorded
        fn len(&self) -> u16 {
                match self {
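
get_id() mirrors the BOLT 7 address descriptor types (1 = IPv4 through 4 = Tor v3), so announcement builders inside the crate can sort addresses into the required ascending order before serializing them. A minimal sketch, with a hypothetical helper name:

    // Sort node_announcement addresses into ascending type order (BOLT 7).
    fn sort_announcement_addresses(addresses: &mut Vec<NetAddress>) {
        addresses.sort_unstable_by_key(|addr| addr.get_id());
    }
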
@@ -469,6 +484,17 @@ impl Readable for Result<NetAddress, u8> {
        }
 }
 
+impl Readable for NetAddress {
+       fn read<R: Read>(reader: &mut R) -> Result<NetAddress, DecodeError> {
+               match Readable::read(reader) {
+                       Ok(Ok(res)) => Ok(res),
+                       Ok(Err(_)) => Err(DecodeError::UnknownVersion),
+                       Err(e) => Err(e),
+               }
+       }
+}
+
+
 /// The unsigned part of a node_announcement
 #[derive(Clone, Debug, PartialEq)]
 pub struct UnsignedNodeAnnouncement {
@@ -663,7 +689,11 @@ pub enum ErrorAction {
                msg: Option<ErrorMessage>
        },
        /// The peer did something harmless that we weren't able to process, just log and ignore
+       // New code should *not* use this. New code must use IgnoreAndLog, below!
        IgnoreError,
+       /// The peer did something harmless that we weren't able to meaningfully process.
+       /// If the error is logged, log it at the given level.
+       IgnoreAndLog(logger::Level),
        /// The peer did something incorrect. Tell them.
        SendErrorMessage {
                /// The message to send.
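
IgnoreAndLog is the point of the commit title: instead of funnelling every harmless failure through IgnoreError, a handler can now say at which level it deserves to be logged, so duplicate gossip no longer has to surface at debug. A minimal sketch of building such an error, assuming LightningError's err (a String here) and action fields from this file; the message text and Trace level are illustrative:

    // Hypothetical constructor for a "we already have this gossip" error.
    fn duplicate_gossip_error() -> LightningError {
        LightningError {
            err: "Already have this gossip message".to_owned(),
            action: ErrorAction::IgnoreAndLog(logger::Level::Trace),
        }
    }
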
@@ -744,7 +774,7 @@ pub enum OptionalField<T> {
 ///
 /// Messages MAY be called in parallel when they originate from different their_node_ids, however
 /// they MUST NOT be called in parallel when the two calls have the same their_node_id.
-pub trait ChannelMessageHandler : MessageSendEventsProvider + Send + Sync {
+pub trait ChannelMessageHandler : MessageSendEventsProvider {
        //Channel init:
        /// Handle an incoming open_channel message from the given peer.
        fn handle_open_channel(&self, their_node_id: &PublicKey, their_features: InitFeatures, msg: &OpenChannel);
@@ -811,7 +841,7 @@ pub trait ChannelMessageHandler : MessageSendEventsProvider + Send + Sync {
 /// For `gossip_queries` messages there are potential DoS vectors when handling
 /// inbound queries. Implementors using an on-disk network graph should be aware of
 /// repeated disk I/O for queries accessing different parts of the network graph.
-pub trait RoutingMessageHandler : Send + Sync + MessageSendEventsProvider {
+pub trait RoutingMessageHandler : MessageSendEventsProvider {
        /// Handle an incoming node_announcement message, returning true if it should be forwarded on,
        /// false or returning an Err otherwise.
        fn handle_node_announcement(&self, msg: &NodeAnnouncement) -> Result<bool, LightningError>;
@@ -854,7 +884,8 @@ pub trait RoutingMessageHandler : Send + Sync + MessageSendEventsProvider {
 }
 
 mod fuzzy_internal_msgs {
-       use ln::channelmanager::PaymentSecret;
+       use prelude::*;
+       use ln::PaymentSecret;
 
        // These types aren't intended to be pub, but are exposed for direct fuzzing (as we deserialize
        // them from untrusted input):
@@ -942,6 +973,7 @@ impl fmt::Display for DecodeError {
                        DecodeError::ShortRead => f.write_str("Packet extended beyond the provided bytes"),
                        DecodeError::BadLengthDescriptor => f.write_str("A length descriptor in the packet didn't describe the later data correctly"),
                        DecodeError::Io(ref e) => e.fmt(f),
+                       DecodeError::UnsupportedCompression => f.write_str("We don't support receiving messages with zlib-compressed fields"),
                }
        }
 }
@@ -1271,20 +1303,17 @@ impl Writeable for OnionHopData {
                                        (2, HighZeroBytesDroppedVarInt(self.amt_to_forward)),
                                        (4, HighZeroBytesDroppedVarInt(self.outgoing_cltv_value)),
                                        (6, short_channel_id)
-                               });
-                       },
-                       OnionHopDataFormat::FinalNode { payment_data: Some(ref final_data) } => {
-                               if final_data.total_msat > MAX_VALUE_MSAT { panic!("We should never be sending infinite/overflow onion payments"); }
-                               encode_varint_length_prefixed_tlv!(w, {
-                                       (2, HighZeroBytesDroppedVarInt(self.amt_to_forward)),
-                                       (4, HighZeroBytesDroppedVarInt(self.outgoing_cltv_value)),
-                                       (8, final_data)
-                               });
+                               }, { });
                        },
-                       OnionHopDataFormat::FinalNode { payment_data: None } => {
+                       OnionHopDataFormat::FinalNode { ref payment_data } => {
+                               if let Some(final_data) = payment_data {
+                                       if final_data.total_msat > MAX_VALUE_MSAT { panic!("We should never be sending infinite/overflow onion payments"); }
+                               }
                                encode_varint_length_prefixed_tlv!(w, {
                                        (2, HighZeroBytesDroppedVarInt(self.amt_to_forward)),
                                        (4, HighZeroBytesDroppedVarInt(self.outgoing_cltv_value))
+                               }, {
+                                       (8, payment_data)
                                });
                        },
                }
@@ -1396,7 +1425,7 @@ impl Readable for Pong {
 
 impl Writeable for UnsignedChannelAnnouncement {
        fn write<W: Writer>(&self, w: &mut W) -> Result<(), ::std::io::Error> {
-               w.size_hint(2 + 2*32 + 4*33 + self.features.byte_count() + self.excess_data.len());
+               w.size_hint(2 + 32 + 8 + 4*33 + self.features.byte_count() + self.excess_data.len());
                self.features.write(w)?;
                self.chain_hash.write(w)?;
                self.short_channel_id.write(w)?;
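
For reference, the corrected hint just sums channel_announcement's fixed-size fields: 2 (features length prefix) + 32 (chain_hash) + 8 (short_channel_id) + 4*33 (the four public keys) = 174 bytes, plus the variable features bytes and excess_data; the old 2 + 2*32 did not match the fields actually written.
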
@@ -1430,7 +1459,7 @@ impl Readable for UnsignedChannelAnnouncement {
 
 impl_writeable_len_match!(ChannelAnnouncement, {
                { ChannelAnnouncement { contents: UnsignedChannelAnnouncement {ref features, ref excess_data, ..}, .. },
-                       2 + 2*32 + 4*33 + features.byte_count() + excess_data.len() + 4*64 }
+                       2 + 32 + 8 + 4*33 + features.byte_count() + excess_data.len() + 4*64 }
        }, {
        node_signature_1,
        node_signature_2,
@@ -1491,8 +1520,8 @@ impl Readable for UnsignedChannelUpdate {
 }
 
 impl_writeable_len_match!(ChannelUpdate, {
-               { ChannelUpdate { contents: UnsignedChannelUpdate {ref excess_data, ..}, .. },
-                       64 + excess_data.len() + 64 }
+               { ChannelUpdate { contents: UnsignedChannelUpdate {ref excess_data, ref htlc_maximum_msat, ..}, .. },
+                       64 + 64 + excess_data.len() + if let OptionalField::Present(_) = htlc_maximum_msat { 8 } else { 0 } }
        }, {
        signature,
        contents
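
Likewise, the new arm budgets 64 bytes for the signature plus 64 for channel_update's fixed contents (32-byte chain_hash, 8-byte short_channel_id, 4-byte timestamp, two flag bytes, 2-byte cltv_expiry_delta, 8-byte htlc_minimum_msat and two 4-byte fee fields), adding 8 only when the optional htlc_maximum_msat is present.
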
@@ -1528,7 +1557,7 @@ impl Readable for ErrorMessage {
 
 impl Writeable for UnsignedNodeAnnouncement {
        fn write<W: Writer>(&self, w: &mut W) -> Result<(), ::std::io::Error> {
-               w.size_hint(64 + 76 + self.features.byte_count() + self.addresses.len()*38 + self.excess_address_data.len() + self.excess_data.len());
+               w.size_hint(76 + self.features.byte_count() + self.addresses.len()*38 + self.excess_address_data.len() + self.excess_data.len());
                self.features.write(w)?;
                self.timestamp.write(w)?;
                self.node_id.write(w)?;
@@ -1611,7 +1640,7 @@ impl Readable for UnsignedNodeAnnouncement {
        }
 }
 
-impl_writeable_len_match!(NodeAnnouncement, {
+impl_writeable_len_match!(NodeAnnouncement, <=, {
                { NodeAnnouncement { contents: UnsignedNodeAnnouncement { ref features, ref addresses, ref excess_address_data, ref excess_data, ..}, .. },
                        64 + 76 + features.byte_count() + addresses.len()*(NetAddress::MAX_LEN as usize + 1) + excess_address_data.len() + excess_data.len() }
        }, {
@@ -1623,17 +1652,18 @@ impl Readable for QueryShortChannelIds {
        fn read<R: Read>(r: &mut R) -> Result<Self, DecodeError> {
                let chain_hash: BlockHash = Readable::read(r)?;
 
-               // We expect the encoding_len to always includes the 1-byte
-               // encoding_type and that short_channel_ids are 8-bytes each
                let encoding_len: u16 = Readable::read(r)?;
-               if encoding_len == 0 || (encoding_len - 1) % 8 != 0 {
-                       return Err(DecodeError::InvalidValue);
-               }
+               let encoding_type: u8 = Readable::read(r)?;
 
                // Must be encoding_type=0 uncompressed serialization. We do not
                // support encoding_type=1 zlib serialization.
-               let encoding_type: u8 = Readable::read(r)?;
                if encoding_type != EncodingType::Uncompressed as u8 {
+                       return Err(DecodeError::UnsupportedCompression);
+               }
+
+               // We expect the encoding_len to always include the 1-byte
+               // encoding_type and that short_channel_ids are 8 bytes each
+               if encoding_len == 0 || (encoding_len - 1) % 8 != 0 {
                        return Err(DecodeError::InvalidValue);
                }
 
@@ -1692,6 +1722,19 @@ impl Writeable for ReplyShortChannelIdsEnd {
        }
 }
 
+impl QueryChannelRange {
+       /// Calculates the overflow-safe ending block height for the query.
+       /// Overflow returns `0xffffffff`, otherwise returns `first_blocknum + number_of_blocks`.
+       pub fn end_blocknum(&self) -> u32 {
+               match self.first_blocknum.checked_add(self.number_of_blocks) {
+                       Some(block) => block,
+                       None => u32::max_value(),
+               }
+       }
+}
+
 impl Readable for QueryChannelRange {
        fn read<R: Read>(r: &mut R) -> Result<Self, DecodeError> {
                let chain_hash: BlockHash = Readable::read(r)?;
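
Because end_blocknum() saturates at u32::max_value(), callers can treat the query as the half-open block range [first_blocknum, end_blocknum()) without doing their own overflow checks. A minimal sketch, with a hypothetical helper name:

    // True if a channel confirmed at `height` falls inside the queried range.
    fn query_covers_height(query: &QueryChannelRange, height: u32) -> bool {
        height >= query.first_blocknum && height < query.end_blocknum()
    }
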
@@ -1722,17 +1765,18 @@ impl Readable for ReplyChannelRange {
                let number_of_blocks: u32 = Readable::read(r)?;
                let sync_complete: bool = Readable::read(r)?;
 
-               // We expect the encoding_len to always includes the 1-byte
-               // encoding_type and that short_channel_ids are 8-bytes each
                let encoding_len: u16 = Readable::read(r)?;
-               if encoding_len == 0 || (encoding_len - 1) % 8 != 0 {
-                       return Err(DecodeError::InvalidValue);
-               }
+               let encoding_type: u8 = Readable::read(r)?;
 
                // Must be encoding_type=0 uncompressed serialization. We do not
                // support encoding_type=1 zlib serialization.
-               let encoding_type: u8 = Readable::read(r)?;
                if encoding_type != EncodingType::Uncompressed as u8 {
+                       return Err(DecodeError::UnsupportedCompression);
+               }
+
+               // We expect the encoding_len to always include the 1-byte
+               // encoding_type and that short_channel_ids are 8 bytes each
+               if encoding_len == 0 || (encoding_len - 1) % 8 != 0 {
                        return Err(DecodeError::InvalidValue);
                }
 
@@ -1800,9 +1844,9 @@ impl Writeable for GossipTimestampFilter {
 #[cfg(test)]
 mod tests {
        use hex;
+       use ln::{PaymentPreimage, PaymentHash, PaymentSecret};
        use ln::msgs;
        use ln::msgs::{ChannelFeatures, FinalOnionHopData, InitFeatures, NodeFeatures, OptionalField, OnionErrorPacket, OnionHopDataFormat};
-       use ln::channelmanager::{PaymentPreimage, PaymentHash, PaymentSecret};
        use util::ser::{Writeable, Readable};
 
        use bitcoin::hashes::hex::FromHex;
@@ -1815,6 +1859,7 @@ mod tests {
        use bitcoin::secp256k1::key::{PublicKey,SecretKey};
        use bitcoin::secp256k1::{Secp256k1, Message};
 
+       use prelude::*;
        use std::io::Cursor;
 
        #[test]
@@ -2533,6 +2578,24 @@ mod tests {
                assert_eq!(msg.outgoing_cltv_value, 0xffffffff);
        }
 
+       #[test]
+       fn query_channel_range_end_blocknum() {
+               let tests: Vec<(u32, u32, u32)> = vec![
+                       (10000, 1500, 11500),
+                       (0, 0xffffffff, 0xffffffff),
+                       (1, 0xffffffff, 0xffffffff),
+               ];
+
+               for (first_blocknum, number_of_blocks, expected) in tests.into_iter() {
+                       let sut = msgs::QueryChannelRange {
+                               chain_hash: BlockHash::from_hex("06226e46111a0b59caaf126043eb5bbf28c34f3a5e332a1fc7b2b73cf188910f").unwrap(),
+                               first_blocknum,
+                               number_of_blocks,
+                       };
+                       assert_eq!(sut.end_blocknum(), expected);
+               }
+       }
+
        #[test]
        fn encoding_query_channel_range() {
                let mut query_channel_range = msgs::QueryChannelRange {