use ln::features::{ChannelFeatures, InitFeatures, NodeFeatures};
-use std::{cmp, fmt};
+use prelude::*;
+use core::{cmp, fmt};
+use core::fmt::Debug;
use std::io::Read;
-use util::events;
+use util::events::MessageSendEventsProvider;
use util::ser::{Readable, Writeable, Writer, FixedLengthReader, HighZeroBytesDroppedVarInt};
-use ln::channelmanager::{PaymentPreimage, PaymentHash, PaymentSecret};
+use ln::{PaymentPreimage, PaymentHash, PaymentSecret};
/// 21 million * 10^8 * 1000
pub(crate) const MAX_VALUE_MSAT: u64 = 21_000_000_0000_0000_000;
/// An error in decoding a message or struct.
-#[derive(Debug)]
+#[derive(Clone, Debug)]
pub enum DecodeError {
/// A version byte specified something we don't know how to handle.
/// Includes unknown realm byte in an OnionHopData packet
/// A length descriptor in the packet didn't describe the later data correctly
BadLengthDescriptor,
/// Error from std::io
- Io(::std::io::Error),
+ Io(/// (C-not exported) as ErrorKind doesn't have a reasonable mapping
+ ::std::io::ErrorKind),
+ /// The message included zlib-compressed values, which we don't support.
+ UnsupportedCompression,
}
/// An init message to be sent or received from a peer
+#[derive(Clone, Debug, PartialEq)]
pub struct Init {
- #[cfg(not(feature = "fuzztarget"))]
- pub(crate) features: InitFeatures,
- #[cfg(feature = "fuzztarget")]
+ /// The relevant features which the sender supports
pub features: InitFeatures,
}
/// An error message to be sent or received from a peer
-#[derive(Clone)]
+#[derive(Clone, Debug, PartialEq)]
pub struct ErrorMessage {
/// The channel ID involved in the error
pub channel_id: [u8; 32],
}
/// A ping message to be sent or received from a peer
+#[derive(Clone, Debug, PartialEq)]
pub struct Ping {
/// The desired response length
pub ponglen: u16,
}
/// A pong message to be sent or received from a peer
+#[derive(Clone, Debug, PartialEq)]
pub struct Pong {
/// The pong packet size.
/// This field is not sent on the wire. byteslen zeros are sent.
}
/// An open_channel message to be sent or received from a peer
-#[derive(Clone)]
+#[derive(Clone, Debug, PartialEq)]
pub struct OpenChannel {
/// The genesis hash of the blockchain where the channel is to be opened
pub chain_hash: BlockHash,
}
/// An accept_channel message to be sent or received from a peer
-#[derive(Clone)]
+#[derive(Clone, Debug, PartialEq)]
pub struct AcceptChannel {
/// A temporary channel ID, until the funding outpoint is announced
pub temporary_channel_id: [u8; 32],
}
/// A funding_created message to be sent or received from a peer
-#[derive(Clone)]
+#[derive(Clone, Debug, PartialEq)]
pub struct FundingCreated {
/// A temporary channel ID, until the funding is established
pub temporary_channel_id: [u8; 32],
}
/// A funding_signed message to be sent or received from a peer
-#[derive(Clone)]
+#[derive(Clone, Debug, PartialEq)]
pub struct FundingSigned {
/// The channel ID
pub channel_id: [u8; 32],
}
/// A funding_locked message to be sent or received from a peer
-#[derive(Clone, PartialEq)]
+#[derive(Clone, Debug, PartialEq)]
pub struct FundingLocked {
/// The channel ID
pub channel_id: [u8; 32],
}
/// A shutdown message to be sent or received from a peer
-#[derive(Clone, PartialEq)]
+#[derive(Clone, Debug, PartialEq)]
pub struct Shutdown {
/// The channel ID
pub channel_id: [u8; 32],
}
/// A closing_signed message to be sent or received from a peer
-#[derive(Clone, PartialEq)]
+#[derive(Clone, Debug, PartialEq)]
pub struct ClosingSigned {
/// The channel ID
pub channel_id: [u8; 32],
}
/// An update_add_htlc message to be sent or received from a peer
-#[derive(Clone, PartialEq)]
+#[derive(Clone, Debug, PartialEq)]
pub struct UpdateAddHTLC {
/// The channel ID
pub channel_id: [u8; 32],
}
/// An update_fulfill_htlc message to be sent or received from a peer
-#[derive(Clone, PartialEq)]
+#[derive(Clone, Debug, PartialEq)]
pub struct UpdateFulfillHTLC {
/// The channel ID
pub channel_id: [u8; 32],
}
/// An update_fail_htlc message to be sent or received from a peer
-#[derive(Clone, PartialEq)]
+#[derive(Clone, Debug, PartialEq)]
pub struct UpdateFailHTLC {
/// The channel ID
pub channel_id: [u8; 32],
}
/// An update_fail_malformed_htlc message to be sent or received from a peer
-#[derive(Clone, PartialEq)]
+#[derive(Clone, Debug, PartialEq)]
pub struct UpdateFailMalformedHTLC {
/// The channel ID
pub channel_id: [u8; 32],
}
/// A commitment_signed message to be sent or received from a peer
-#[derive(Clone, PartialEq)]
+#[derive(Clone, Debug, PartialEq)]
pub struct CommitmentSigned {
/// The channel ID
pub channel_id: [u8; 32],
}
/// A revoke_and_ack message to be sent or received from a peer
-#[derive(Clone, PartialEq)]
+#[derive(Clone, Debug, PartialEq)]
pub struct RevokeAndACK {
/// The channel ID
pub channel_id: [u8; 32],
}
/// An update_fee message to be sent or received from a peer
-#[derive(PartialEq, Clone)]
+#[derive(Clone, Debug, PartialEq)]
pub struct UpdateFee {
/// The channel ID
pub channel_id: [u8; 32],
pub feerate_per_kw: u32,
}
-#[derive(PartialEq, Clone)]
+#[derive(Clone, Debug, PartialEq)]
/// Proof that the sender knows the per-commitment secret of the previous commitment transaction.
/// This is used to convince the recipient that the channel is at a certain commitment
/// number even if they lost that data due to a local failure. Of course, the peer may lie
}
/// A channel_reestablish message to be sent or received from a peer
-#[derive(PartialEq, Clone)]
+#[derive(Clone, Debug, PartialEq)]
pub struct ChannelReestablish {
/// The channel ID
pub channel_id: [u8; 32],
}
/// An announcement_signatures message to be sent or received from a peer
-#[derive(PartialEq, Clone, Debug)]
+#[derive(Clone, Debug, PartialEq)]
pub struct AnnouncementSignatures {
/// The channel ID
pub channel_id: [u8; 32],
}
/// An address which can be used to connect to a remote peer
-#[derive(Clone, PartialEq, Debug)]
+#[derive(Clone, Debug, PartialEq)]
pub enum NetAddress {
/// An IPv4 address/port on which the peer is listening.
IPv4 {
},
}
impl NetAddress {
- fn get_id(&self) -> u8 {
+ /// Gets the ID of this address type. Addresses in node_announcement messages should be sorted
+ /// by this.
+ pub(crate) fn get_id(&self) -> u8 {
match self {
&NetAddress::IPv4 {..} => { 1 },
&NetAddress::IPv6 {..} => { 2 },
}
}
+impl Readable for NetAddress {
+ fn read<R: Read>(reader: &mut R) -> Result<NetAddress, DecodeError> {
+ match Readable::read(reader) {
+ Ok(Ok(res)) => Ok(res),
+ // NOTE(review): the inner read presumably yields Ok(Err(_)) for an address
+ // descriptor byte we don't recognize; surface that as UnknownVersion --
+ // confirm against the Readable impl for Result<NetAddress, u8>.
+ Ok(Err(_)) => Err(DecodeError::UnknownVersion),
+ Err(e) => Err(e),
+ }
+ }
+}
+
+
/// The unsigned part of a node_announcement
-#[derive(PartialEq, Clone, Debug)]
+#[derive(Clone, Debug, PartialEq)]
pub struct UnsignedNodeAnnouncement {
/// The advertised features
pub features: NodeFeatures,
pub(crate) excess_address_data: Vec<u8>,
pub(crate) excess_data: Vec<u8>,
}
-#[derive(PartialEq, Clone, Debug)]
+#[derive(Clone, Debug, PartialEq)]
/// A node_announcement message to be sent or received from a peer
pub struct NodeAnnouncement {
/// The signature by the node key
}
/// The unsigned part of a channel_announcement
-#[derive(PartialEq, Clone, Debug)]
+#[derive(Clone, Debug, PartialEq)]
pub struct UnsignedChannelAnnouncement {
/// The advertised channel features
pub features: ChannelFeatures,
pub(crate) excess_data: Vec<u8>,
}
/// A channel_announcement message to be sent or received from a peer
-#[derive(PartialEq, Clone, Debug)]
+#[derive(Clone, Debug, PartialEq)]
pub struct ChannelAnnouncement {
/// Authentication of the announcement by the first public node
pub node_signature_1: Signature,
}
/// The unsigned part of a channel_update
-#[derive(PartialEq, Clone, Debug)]
+#[derive(Clone, Debug, PartialEq)]
pub struct UnsignedChannelUpdate {
/// The genesis hash of the blockchain where the channel is to be opened
pub chain_hash: BlockHash,
pub timestamp: u32,
/// Channel flags
pub flags: u8,
- /// The number of blocks to subtract from incoming HTLC cltv_expiry values
+ /// The number of blocks such that if:
+ /// `incoming_htlc.cltv_expiry < outgoing_htlc.cltv_expiry + cltv_expiry_delta`
+ /// then we need to fail the HTLC backwards. When forwarding an HTLC, cltv_expiry_delta determines
+ /// the outgoing HTLC's minimum cltv_expiry value -- so, if an incoming HTLC comes in with a
+ /// cltv_expiry of 100000, and the node we're forwarding to has a cltv_expiry_delta value of 10,
+ /// then we'll check that the outgoing HTLC's cltv_expiry value is at least 100010 before
+ /// forwarding. Note that the HTLC sender is the one who originally sets this value when
+ /// constructing the route.
pub cltv_expiry_delta: u16,
/// The minimum HTLC size incoming to sender, in milli-satoshi
pub htlc_minimum_msat: u64,
pub(crate) excess_data: Vec<u8>,
}
/// A channel_update message to be sent or received from a peer
-#[derive(PartialEq, Clone, Debug)]
+#[derive(Clone, Debug, PartialEq)]
pub struct ChannelUpdate {
/// A signature of the channel update
pub signature: Signature,
pub contents: UnsignedChannelUpdate,
}
+/// A query_channel_range message is used to query a peer for channel
+/// UTXOs in a range of blocks. The recipient of a query makes a best
+/// effort to reply to the query using one or more reply_channel_range
+/// messages.
+#[derive(Clone, Debug, PartialEq)]
+pub struct QueryChannelRange {
+ /// The genesis hash of the blockchain being queried
+ pub chain_hash: BlockHash,
+ /// The height of the first block for the channel UTXOs being queried
+ pub first_blocknum: u32,
+ /// The number of blocks to include in the query results
+ pub number_of_blocks: u32,
+}
+
+/// A reply_channel_range message is a reply to a query_channel_range
+/// message. Multiple reply_channel_range messages can be sent in reply
+/// to a single query_channel_range message. The query recipient makes a
+/// best effort to respond based on their local network view which may
+/// not be a perfect view of the network. The short_channel_ids in the
+/// reply are encoded. We only support encoding_type=0 uncompressed
+/// serialization and do not support encoding_type=1 zlib serialization.
+#[derive(Clone, Debug, PartialEq)]
+pub struct ReplyChannelRange {
+ /// The genesis hash of the blockchain being queried
+ pub chain_hash: BlockHash,
+ /// The height of the first block in the range of the reply
+ pub first_blocknum: u32,
+ /// The number of blocks included in the range of the reply
+ pub number_of_blocks: u32,
+ /// True when this is the final reply for a query
+ pub sync_complete: bool,
+ /// The short_channel_ids in the channel range
+ pub short_channel_ids: Vec<u64>,
+}
+
+/// A query_short_channel_ids message is used to query a peer for
+/// routing gossip messages related to one or more short_channel_ids.
+/// The query recipient will reply with the latest, if available,
+/// channel_announcement, channel_update and node_announcement messages
+/// it maintains for the requested short_channel_ids followed by a
+/// reply_short_channel_ids_end message. The short_channel_ids sent in
+/// this query are encoded. We only support encoding_type=0 uncompressed
+/// serialization and do not support encoding_type=1 zlib serialization.
+#[derive(Clone, Debug, PartialEq)]
+pub struct QueryShortChannelIds {
+ /// The genesis hash of the blockchain being queried
+ pub chain_hash: BlockHash,
+ /// The short_channel_ids that are being queried
+ pub short_channel_ids: Vec<u64>,
+}
+
+/// A reply_short_channel_ids_end message is sent as a reply to a
+/// query_short_channel_ids message. The query recipient makes a best
+/// effort to respond based on their local network view which may not be
+/// a perfect view of the network.
+#[derive(Clone, Debug, PartialEq)]
+pub struct ReplyShortChannelIdsEnd {
+ /// The genesis hash of the blockchain that was queried
+ pub chain_hash: BlockHash,
+ /// Indicates if the query recipient maintains up-to-date channel
+ /// information for the chain_hash
+ pub full_information: bool,
+}
+
+/// A gossip_timestamp_filter message is used by a node to request
+/// gossip relay for messages in the requested time range when the
+/// gossip_queries feature has been negotiated.
+#[derive(Clone, Debug, PartialEq)]
+pub struct GossipTimestampFilter {
+ /// The genesis hash of the blockchain for channel and node information
+ pub chain_hash: BlockHash,
+ /// The starting unix timestamp
+ pub first_timestamp: u32,
+ /// The range of information in seconds
+ pub timestamp_range: u32,
+}
+
+/// Encoding type for data compression of collections in gossip queries.
+/// We do not support encoding_type=1 zlib serialization defined in BOLT #7.
+enum EncodingType {
+	/// Raw, fixed-width short_channel_ids with no compression — the only
+	/// encoding we read or write.
+	Uncompressed = 0x00,
+}
+
/// Used to put an error message in a LightningError
-#[derive(Clone)]
+#[derive(Clone, Debug)]
pub enum ErrorAction {
/// The peer took some action which made us think they were useless. Disconnect them.
DisconnectPeer {
}
/// An Err type for failure to process messages.
+#[derive(Clone, Debug)]
pub struct LightningError {
/// A human-readable message describing the error
pub err: String,
/// Struct used to return values from revoke_and_ack messages, containing a bunch of commitment
/// transaction updates if they were pending.
-#[derive(PartialEq, Clone)]
+#[derive(Clone, Debug, PartialEq)]
pub struct CommitmentUpdate {
/// update_add_htlc messages which should be sent
pub update_add_htlcs: Vec<UpdateAddHTLC>,
/// The information we received from a peer along the route of a payment we originated. This is
/// returned by ChannelMessageHandler::handle_update_fail_htlc to be passed into
/// RoutingMessageHandler::handle_htlc_fail_channel_update to update our network map.
-#[derive(Clone)]
+#[derive(Clone, Debug, PartialEq)]
pub enum HTLCFailChannelUpdate {
/// We received an error which included a full ChannelUpdate message.
ChannelUpdateMessage {
/// OptionalField simply gets Present if there are enough bytes to read into it), we have a
/// separate enum type for them.
/// (C-not exported) due to a free generic in T
-#[derive(Clone, PartialEq, Debug)]
+#[derive(Clone, Debug, PartialEq)]
pub enum OptionalField<T> {
/// Optional field is included in message
Present(T),
///
/// Messages MAY be called in parallel when they originate from different their_node_ids, however
/// they MUST NOT be called in parallel when the two calls have the same their_node_id.
-pub trait ChannelMessageHandler : events::MessageSendEventsProvider + Send + Sync {
+pub trait ChannelMessageHandler : MessageSendEventsProvider {
//Channel init:
/// Handle an incoming open_channel message from the given peer.
fn handle_open_channel(&self, their_node_id: &PublicKey, their_features: InitFeatures, msg: &OpenChannel);
// Channel close:
/// Handle an incoming shutdown message from the given peer.
- fn handle_shutdown(&self, their_node_id: &PublicKey, msg: &Shutdown);
+ fn handle_shutdown(&self, their_node_id: &PublicKey, their_features: &InitFeatures, msg: &Shutdown);
/// Handle an incoming closing_signed message from the given peer.
fn handle_closing_signed(&self, their_node_id: &PublicKey, msg: &ClosingSigned);
/// Handle an incoming channel_reestablish message from the given peer.
fn handle_channel_reestablish(&self, their_node_id: &PublicKey, msg: &ChannelReestablish);
+ /// Handle an incoming channel update from the given peer.
+ fn handle_channel_update(&self, their_node_id: &PublicKey, msg: &ChannelUpdate);
+
// Error:
/// Handle an incoming error message from the given peer.
fn handle_error(&self, their_node_id: &PublicKey, msg: &ErrorMessage);
}
/// A trait to describe an object which can receive routing messages.
-pub trait RoutingMessageHandler : Send + Sync {
+///
+/// # Implementor DoS Warnings
+///
+/// For `gossip_queries` messages there are potential DoS vectors when handling
+/// inbound queries. Implementors using an on-disk network graph should be aware of
+/// repeated disk I/O for queries accessing different parts of the network graph.
+pub trait RoutingMessageHandler : MessageSendEventsProvider {
/// Handle an incoming node_announcement message, returning true if it should be forwarded on,
/// false or returning an Err otherwise.
fn handle_node_announcement(&self, msg: &NodeAnnouncement) -> Result<bool, LightningError>;
/// immediately higher (as defined by <PublicKey as Ord>::cmp) than starting_point.
/// If None is provided for starting_point, we start at the first node.
fn get_next_node_announcements(&self, starting_point: Option<&PublicKey>, batch_amount: u8) -> Vec<NodeAnnouncement>;
- /// Returns whether a full sync should be requested from a peer.
- fn should_request_full_sync(&self, node_id: &PublicKey) -> bool;
+ /// Called when a connection is established with a peer. This can be used to
+ /// perform routing table synchronization using a strategy defined by the
+ /// implementor.
+ fn sync_routing_table(&self, their_node_id: &PublicKey, init: &Init);
+ /// Handles the reply of a query we initiated to learn about channels
+ /// for a given range of blocks. We can expect to receive one or more
+ /// replies to a single query.
+ fn handle_reply_channel_range(&self, their_node_id: &PublicKey, msg: ReplyChannelRange) -> Result<(), LightningError>;
+ /// Handles the reply of a query we initiated asking for routing gossip
+ /// messages for a list of channels. We should receive this message when
+ /// a node has completed its best effort to send us the pertaining routing
+ /// gossip messages.
+ fn handle_reply_short_channel_ids_end(&self, their_node_id: &PublicKey, msg: ReplyShortChannelIdsEnd) -> Result<(), LightningError>;
+ /// Handles when a peer asks us to send a list of short_channel_ids
+ /// for the requested range of blocks.
+ fn handle_query_channel_range(&self, their_node_id: &PublicKey, msg: QueryChannelRange) -> Result<(), LightningError>;
+ /// Handles when a peer asks us to send routing gossip messages for a
+ /// list of short_channel_ids.
+ fn handle_query_short_channel_ids(&self, their_node_id: &PublicKey, msg: QueryShortChannelIds) -> Result<(), LightningError>;
}
mod fuzzy_internal_msgs {
- use ln::channelmanager::PaymentSecret;
+ use prelude::*;
+ use ln::PaymentSecret;
// These types aren't intended to be pub, but are exposed for direct fuzzing (as we deserialize
// them from untrusted input):
}
}
-#[derive(Clone, PartialEq)]
+impl fmt::Debug for OnionPacket {
+ // Manual Debug impl: only the version byte and hmac are printed; the onion
+ // hop data payload is omitted (presumably because the large fixed-size
+ // array has no usable Debug impl and would flood the output).
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ f.write_fmt(format_args!("OnionPacket version {} with hmac {:?}", self.version, &self.hmac[..]))
+ }
+}
+
+#[derive(Clone, Debug, PartialEq)]
pub(crate) struct OnionErrorPacket {
// This really should be a constant size slice, but the spec lets these things be up to 128KB?
// (TODO) We limit it in decode to much lower...
DecodeError::ShortRead => f.write_str("Packet extended beyond the provided bytes"),
DecodeError::BadLengthDescriptor => f.write_str("A length descriptor in the packet didn't describe the later data correctly"),
DecodeError::Io(ref e) => e.fmt(f),
+ DecodeError::UnsupportedCompression => f.write_str("We don't support receiving messages with zlib-compressed fields"),
}
}
}
-impl fmt::Debug for LightningError {
- fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
- f.write_str(self.err.as_str())
- }
-}
-
impl From<::std::io::Error> for DecodeError {
 fn from(e: ::std::io::Error) -> Self {
 if e.kind() == ::std::io::ErrorKind::UnexpectedEof {
 DecodeError::ShortRead
 } else {
- DecodeError::Io(e)
+ // Keep only the ErrorKind: io::Error itself is not Clone, and DecodeError
+ // now derives Clone (see the derive on the enum above).
+ DecodeError::Io(e.kind())
 }
 }
}
(2, HighZeroBytesDroppedVarInt(self.amt_to_forward)),
(4, HighZeroBytesDroppedVarInt(self.outgoing_cltv_value)),
(6, short_channel_id)
- });
- },
- OnionHopDataFormat::FinalNode { payment_data: Some(ref final_data) } => {
- if final_data.total_msat > MAX_VALUE_MSAT { panic!("We should never be sending infinite/overflow onion payments"); }
- encode_varint_length_prefixed_tlv!(w, {
- (2, HighZeroBytesDroppedVarInt(self.amt_to_forward)),
- (4, HighZeroBytesDroppedVarInt(self.outgoing_cltv_value)),
- (8, final_data)
- });
+ }, { });
},
- OnionHopDataFormat::FinalNode { payment_data: None } => {
+ OnionHopDataFormat::FinalNode { ref payment_data } => {
+ if let Some(final_data) = payment_data {
+ if final_data.total_msat > MAX_VALUE_MSAT { panic!("We should never be sending infinite/overflow onion payments"); }
+ }
encode_varint_length_prefixed_tlv!(w, {
(2, HighZeroBytesDroppedVarInt(self.amt_to_forward)),
(4, HighZeroBytesDroppedVarInt(self.outgoing_cltv_value))
+ }, {
+ (8, payment_data)
});
},
}
impl Writeable for UnsignedChannelAnnouncement {
fn write<W: Writer>(&self, w: &mut W) -> Result<(), ::std::io::Error> {
- w.size_hint(2 + 2*32 + 4*33 + self.features.byte_count() + self.excess_data.len());
+ w.size_hint(2 + 32 + 8 + 4*33 + self.features.byte_count() + self.excess_data.len());
self.features.write(w)?;
self.chain_hash.write(w)?;
self.short_channel_id.write(w)?;
impl_writeable_len_match!(ChannelAnnouncement, {
{ ChannelAnnouncement { contents: UnsignedChannelAnnouncement {ref features, ref excess_data, ..}, .. },
- 2 + 2*32 + 4*33 + features.byte_count() + excess_data.len() + 4*64 }
+ 2 + 32 + 8 + 4*33 + features.byte_count() + excess_data.len() + 4*64 }
}, {
node_signature_1,
node_signature_2,
}
impl_writeable_len_match!(ChannelUpdate, {
- { ChannelUpdate { contents: UnsignedChannelUpdate {ref excess_data, ..}, .. },
- 64 + excess_data.len() + 64 }
+ { ChannelUpdate { contents: UnsignedChannelUpdate {ref excess_data, ref htlc_maximum_msat, ..}, .. },
+ 64 + 64 + excess_data.len() + if let OptionalField::Present(_) = htlc_maximum_msat { 8 } else { 0 } }
}, {
signature,
contents
impl Writeable for UnsignedNodeAnnouncement {
fn write<W: Writer>(&self, w: &mut W) -> Result<(), ::std::io::Error> {
- w.size_hint(64 + 76 + self.features.byte_count() + self.addresses.len()*38 + self.excess_address_data.len() + self.excess_data.len());
+ w.size_hint(76 + self.features.byte_count() + self.addresses.len()*38 + self.excess_address_data.len() + self.excess_data.len());
self.features.write(w)?;
self.timestamp.write(w)?;
self.node_id.write(w)?;
w.write_all(&self.rgb)?;
self.alias.write(w)?;
- let mut addrs_to_encode = self.addresses.clone();
- addrs_to_encode.sort_by(|a, b| { a.get_id().cmp(&b.get_id()) });
let mut addr_len = 0;
- for addr in &addrs_to_encode {
+ for addr in self.addresses.iter() {
addr_len += 1 + addr.len();
}
(addr_len + self.excess_address_data.len() as u16).write(w)?;
- for addr in addrs_to_encode {
+ for addr in self.addresses.iter() {
addr.write(w)?;
}
w.write_all(&self.excess_address_data[..])?;
let addr_len: u16 = Readable::read(r)?;
let mut addresses: Vec<NetAddress> = Vec::new();
- let mut highest_addr_type = 0;
let mut addr_readpos = 0;
let mut excess = false;
let mut excess_byte = 0;
if addr_len <= addr_readpos { break; }
match Readable::read(r) {
Ok(Ok(addr)) => {
- if addr.get_id() < highest_addr_type {
- // Addresses must be sorted in increasing order
- return Err(DecodeError::InvalidValue);
- }
- highest_addr_type = addr.get_id();
if addr_len < addr_readpos + 1 + addr.len() {
return Err(DecodeError::BadLengthDescriptor);
}
}
}
-impl_writeable_len_match!(NodeAnnouncement, {
+impl_writeable_len_match!(NodeAnnouncement, <=, {
{ NodeAnnouncement { contents: UnsignedNodeAnnouncement { ref features, ref addresses, ref excess_address_data, ref excess_data, ..}, .. },
64 + 76 + features.byte_count() + addresses.len()*(NetAddress::MAX_LEN as usize + 1) + excess_address_data.len() + excess_data.len() }
}, {
contents
});
+impl Readable for QueryShortChannelIds {
+ fn read<R: Read>(r: &mut R) -> Result<Self, DecodeError> {
+ let chain_hash: BlockHash = Readable::read(r)?;
+
+ let encoding_len: u16 = Readable::read(r)?;
+ let encoding_type: u8 = Readable::read(r)?;
+
+ // Must be encoding_type=0 uncompressed serialization. We do not
+ // support encoding_type=1 zlib serialization.
+ if encoding_type != EncodingType::Uncompressed as u8 {
+ return Err(DecodeError::UnsupportedCompression);
+ }
+
+ // We expect the encoding_len to always include the 1-byte
+ // encoding_type and that short_channel_ids are 8-bytes each
+ if encoding_len == 0 || (encoding_len - 1) % 8 != 0 {
+ return Err(DecodeError::InvalidValue);
+ }
+
+ // Read short_channel_ids (8-bytes each), for the u16 encoding_len
+ // less the 1-byte encoding_type.
+ // encoding_len is a u16, so this preallocates at most 8191 entries
+ // (~64KiB) even on attacker-supplied input.
+ let short_channel_id_count: u16 = (encoding_len - 1)/8;
+ let mut short_channel_ids = Vec::with_capacity(short_channel_id_count as usize);
+ for _ in 0..short_channel_id_count {
+ short_channel_ids.push(Readable::read(r)?);
+ }
+
+ Ok(QueryShortChannelIds {
+ chain_hash,
+ short_channel_ids,
+ })
+ }
+}
+
+impl Writeable for QueryShortChannelIds {
+ fn write<W: Writer>(&self, w: &mut W) -> Result<(), ::std::io::Error> {
+ // Calculated from 1-byte encoding_type plus 8-bytes per short_channel_id.
+ // NOTE(review): the `as u16` cast truncates if more than 8191 ids are
+ // included; presumably callers split queries to stay within the u16 wire
+ // limit -- confirm at the call sites.
+ let encoding_len: u16 = 1 + self.short_channel_ids.len() as u16 * 8;
+
+ w.size_hint(32 + 2 + encoding_len as usize);
+ self.chain_hash.write(w)?;
+ encoding_len.write(w)?;
+
+ // We only support type=0 uncompressed serialization
+ (EncodingType::Uncompressed as u8).write(w)?;
+
+ for scid in self.short_channel_ids.iter() {
+ scid.write(w)?;
+ }
+
+ Ok(())
+ }
+}
+
+impl Readable for ReplyShortChannelIdsEnd {
+ // Fixed-size message: 32-byte chain hash followed by a single boolean byte.
+ fn read<R: Read>(r: &mut R) -> Result<Self, DecodeError> {
+ let chain_hash: BlockHash = Readable::read(r)?;
+ let full_information: bool = Readable::read(r)?;
+ Ok(ReplyShortChannelIdsEnd {
+ chain_hash,
+ full_information,
+ })
+ }
+}
+
+impl Writeable for ReplyShortChannelIdsEnd {
+ fn write<W: Writer>(&self, w: &mut W) -> Result<(), ::std::io::Error> {
+ // 32-byte chain hash + 1-byte full_information flag.
+ w.size_hint(32 + 1);
+ self.chain_hash.write(w)?;
+ self.full_information.write(w)?;
+ Ok(())
+ }
+}
+
+impl QueryChannelRange {
+	/// Calculates the overflow safe ending block height for the query.
+	///
+	/// Overflow returns `0xffffffff`, otherwise returns `first_blocknum + number_of_blocks`.
+ pub fn end_blocknum(&self) -> u32 {
+ match self.first_blocknum.checked_add(self.number_of_blocks) {
+ Some(block) => block,
+ None => u32::max_value(),
+ }
+ }
+}
+
+impl Readable for QueryChannelRange {
+ // Fixed-size message: 32-byte chain hash, then first_blocknum and
+ // number_of_blocks as u32s.
+ fn read<R: Read>(r: &mut R) -> Result<Self, DecodeError> {
+ let chain_hash: BlockHash = Readable::read(r)?;
+ let first_blocknum: u32 = Readable::read(r)?;
+ let number_of_blocks: u32 = Readable::read(r)?;
+ Ok(QueryChannelRange {
+ chain_hash,
+ first_blocknum,
+ number_of_blocks
+ })
+ }
+}
+
+impl Writeable for QueryChannelRange {
+ fn write<W: Writer>(&self, w: &mut W) -> Result<(), ::std::io::Error> {
+ // 32-byte chain hash + two 4-byte block range fields.
+ w.size_hint(32 + 4 + 4);
+ self.chain_hash.write(w)?;
+ self.first_blocknum.write(w)?;
+ self.number_of_blocks.write(w)?;
+ Ok(())
+ }
+}
+
+impl Readable for ReplyChannelRange {
+ fn read<R: Read>(r: &mut R) -> Result<Self, DecodeError> {
+ let chain_hash: BlockHash = Readable::read(r)?;
+ let first_blocknum: u32 = Readable::read(r)?;
+ let number_of_blocks: u32 = Readable::read(r)?;
+ let sync_complete: bool = Readable::read(r)?;
+
+ let encoding_len: u16 = Readable::read(r)?;
+ let encoding_type: u8 = Readable::read(r)?;
+
+ // Must be encoding_type=0 uncompressed serialization. We do not
+ // support encoding_type=1 zlib serialization.
+ if encoding_type != EncodingType::Uncompressed as u8 {
+ return Err(DecodeError::UnsupportedCompression);
+ }
+
+ // We expect the encoding_len to always include the 1-byte
+ // encoding_type and that short_channel_ids are 8-bytes each
+ if encoding_len == 0 || (encoding_len - 1) % 8 != 0 {
+ return Err(DecodeError::InvalidValue);
+ }
+
+ // Read short_channel_ids (8-bytes each), for the u16 encoding_len
+ // less the 1-byte encoding_type.
+ // encoding_len is a u16, so this preallocates at most 8191 entries
+ // (~64KiB) even on attacker-supplied input.
+ let short_channel_id_count: u16 = (encoding_len - 1)/8;
+ let mut short_channel_ids = Vec::with_capacity(short_channel_id_count as usize);
+ for _ in 0..short_channel_id_count {
+ short_channel_ids.push(Readable::read(r)?);
+ }
+
+ Ok(ReplyChannelRange {
+ chain_hash,
+ first_blocknum,
+ number_of_blocks,
+ sync_complete,
+ short_channel_ids
+ })
+ }
+}
+
+impl Writeable for ReplyChannelRange {
+ fn write<W: Writer>(&self, w: &mut W) -> Result<(), ::std::io::Error> {
+ // 1-byte encoding_type plus 8-bytes per short_channel_id.
+ // NOTE(review): the `as u16` cast truncates for more than 8191 ids;
+ // presumably replies are chunked below that limit -- confirm at call sites.
+ let encoding_len: u16 = 1 + self.short_channel_ids.len() as u16 * 8;
+ w.size_hint(32 + 4 + 4 + 1 + 2 + encoding_len as usize);
+ self.chain_hash.write(w)?;
+ self.first_blocknum.write(w)?;
+ self.number_of_blocks.write(w)?;
+ self.sync_complete.write(w)?;
+
+ encoding_len.write(w)?;
+ (EncodingType::Uncompressed as u8).write(w)?;
+ for scid in self.short_channel_ids.iter() {
+ scid.write(w)?;
+ }
+
+ Ok(())
+ }
+}
+
+impl Readable for GossipTimestampFilter {
+ // Fixed-size message: 32-byte chain hash, then first_timestamp and
+ // timestamp_range as u32s.
+ fn read<R: Read>(r: &mut R) -> Result<Self, DecodeError> {
+ let chain_hash: BlockHash = Readable::read(r)?;
+ let first_timestamp: u32 = Readable::read(r)?;
+ let timestamp_range: u32 = Readable::read(r)?;
+ Ok(GossipTimestampFilter {
+ chain_hash,
+ first_timestamp,
+ timestamp_range,
+ })
+ }
+}
+
+impl Writeable for GossipTimestampFilter {
+ fn write<W: Writer>(&self, w: &mut W) -> Result<(), ::std::io::Error> {
+ // 32-byte chain hash + two 4-byte timestamp fields.
+ w.size_hint(32 + 4 + 4);
+ self.chain_hash.write(w)?;
+ self.first_timestamp.write(w)?;
+ self.timestamp_range.write(w)?;
+ Ok(())
+ }
+}
+
+
#[cfg(test)]
mod tests {
use hex;
+ use ln::{PaymentPreimage, PaymentHash, PaymentSecret};
use ln::msgs;
use ln::msgs::{ChannelFeatures, FinalOnionHopData, InitFeatures, NodeFeatures, OptionalField, OnionErrorPacket, OnionHopDataFormat};
- use ln::channelmanager::{PaymentPreimage, PaymentHash, PaymentSecret};
use util::ser::{Writeable, Readable};
use bitcoin::hashes::hex::FromHex;
use bitcoin::secp256k1::key::{PublicKey,SecretKey};
use bitcoin::secp256k1::{Secp256k1, Message};
+ use prelude::*;
use std::io::Cursor;
#[test]
assert_eq!(msg.amt_to_forward, 0x0badf00d01020304);
assert_eq!(msg.outgoing_cltv_value, 0xffffffff);
}
+
+ #[test]
+ fn query_channel_range_end_blocknum() {
+ let tests: Vec<(u32, u32, u32)> = vec![
+ (10000, 1500, 11500),
+ (0, 0xffffffff, 0xffffffff),
+ (1, 0xffffffff, 0xffffffff),
+ ];
+
+ for (first_blocknum, number_of_blocks, expected) in tests.into_iter() {
+ let sut = msgs::QueryChannelRange {
+ chain_hash: BlockHash::from_hex("06226e46111a0b59caaf126043eb5bbf28c34f3a5e332a1fc7b2b73cf188910f").unwrap(),
+ first_blocknum,
+ number_of_blocks,
+ };
+ assert_eq!(sut.end_blocknum(), expected);
+ }
+ }
+
+ #[test]
+ fn encoding_query_channel_range() {
+ let mut query_channel_range = msgs::QueryChannelRange {
+ chain_hash: BlockHash::from_hex("06226e46111a0b59caaf126043eb5bbf28c34f3a5e332a1fc7b2b73cf188910f").unwrap(),
+ first_blocknum: 100000,
+ number_of_blocks: 1500,
+ };
+ let encoded_value = query_channel_range.encode();
+ let target_value = hex::decode("0f9188f13cb7b2c71f2a335e3a4fc328bf5beb436012afca590b1a11466e2206000186a0000005dc").unwrap();
+ assert_eq!(encoded_value, target_value);
+
+ query_channel_range = Readable::read(&mut Cursor::new(&target_value[..])).unwrap();
+ assert_eq!(query_channel_range.first_blocknum, 100000);
+ assert_eq!(query_channel_range.number_of_blocks, 1500);
+ }
+
+ #[test]
+ fn encoding_reply_channel_range() {
+ do_encoding_reply_channel_range(0);
+ do_encoding_reply_channel_range(1);
+ }
+
+ // Exercises reply_channel_range decoding for encoding_type=0 (uncompressed,
+ // roundtrips) and encoding_type=1 (zlib, must be rejected).
+ fn do_encoding_reply_channel_range(encoding_type: u8) {
+ let mut target_value = hex::decode("0f9188f13cb7b2c71f2a335e3a4fc328bf5beb436012afca590b1a11466e2206000b8a06000005dc01").unwrap();
+ let expected_chain_hash = BlockHash::from_hex("06226e46111a0b59caaf126043eb5bbf28c34f3a5e332a1fc7b2b73cf188910f").unwrap();
+ let mut reply_channel_range = msgs::ReplyChannelRange {
+ chain_hash: expected_chain_hash,
+ first_blocknum: 756230,
+ number_of_blocks: 1500,
+ sync_complete: true,
+ short_channel_ids: vec![0x000000000000008e, 0x0000000000003c69, 0x000000000045a6c4],
+ };
+
+ if encoding_type == 0 {
+ target_value.append(&mut hex::decode("001900000000000000008e0000000000003c69000000000045a6c4").unwrap());
+ let encoded_value = reply_channel_range.encode();
+ assert_eq!(encoded_value, target_value);
+
+ reply_channel_range = Readable::read(&mut Cursor::new(&target_value[..])).unwrap();
+ assert_eq!(reply_channel_range.chain_hash, expected_chain_hash);
+ assert_eq!(reply_channel_range.first_blocknum, 756230);
+ assert_eq!(reply_channel_range.number_of_blocks, 1500);
+ assert_eq!(reply_channel_range.sync_complete, true);
+ assert_eq!(reply_channel_range.short_channel_ids[0], 0x000000000000008e);
+ assert_eq!(reply_channel_range.short_channel_ids[1], 0x0000000000003c69);
+ assert_eq!(reply_channel_range.short_channel_ids[2], 0x000000000045a6c4);
+ } else {
+ // A zlib-compressed (encoding_type=1) short_channel_ids blob; decode must fail.
+ target_value.append(&mut hex::decode("001601789c636000833e08659309a65878be010010a9023a").unwrap());
+ let result: Result<msgs::ReplyChannelRange, msgs::DecodeError> = Readable::read(&mut Cursor::new(&target_value[..]));
+ assert!(result.is_err(), "Expected decode failure with unsupported zlib encoding");
+ }
+ }
+
+ #[test]
+ fn encoding_query_short_channel_ids() {
+ do_encoding_query_short_channel_ids(0);
+ do_encoding_query_short_channel_ids(1);
+ }
+
+ // Exercises query_short_channel_ids decoding for encoding_type=0 (uncompressed,
+ // roundtrips) and encoding_type=1 (zlib, must be rejected).
+ fn do_encoding_query_short_channel_ids(encoding_type: u8) {
+ let mut target_value = hex::decode("0f9188f13cb7b2c71f2a335e3a4fc328bf5beb436012afca590b1a11466e2206").unwrap();
+ let expected_chain_hash = BlockHash::from_hex("06226e46111a0b59caaf126043eb5bbf28c34f3a5e332a1fc7b2b73cf188910f").unwrap();
+ let mut query_short_channel_ids = msgs::QueryShortChannelIds {
+ chain_hash: expected_chain_hash,
+ short_channel_ids: vec![0x0000000000008e, 0x0000000000003c69, 0x000000000045a6c4],
+ };
+
+ if encoding_type == 0 {
+ target_value.append(&mut hex::decode("001900000000000000008e0000000000003c69000000000045a6c4").unwrap());
+ let encoded_value = query_short_channel_ids.encode();
+ assert_eq!(encoded_value, target_value);
+
+ query_short_channel_ids = Readable::read(&mut Cursor::new(&target_value[..])).unwrap();
+ assert_eq!(query_short_channel_ids.chain_hash, expected_chain_hash);
+ assert_eq!(query_short_channel_ids.short_channel_ids[0], 0x000000000000008e);
+ assert_eq!(query_short_channel_ids.short_channel_ids[1], 0x0000000000003c69);
+ assert_eq!(query_short_channel_ids.short_channel_ids[2], 0x000000000045a6c4);
+ } else {
+ // A zlib-compressed (encoding_type=1) short_channel_ids blob; decode must fail.
+ target_value.append(&mut hex::decode("001601789c636000833e08659309a65878be010010a9023a").unwrap());
+ let result: Result<msgs::QueryShortChannelIds, msgs::DecodeError> = Readable::read(&mut Cursor::new(&target_value[..]));
+ assert!(result.is_err(), "Expected decode failure with unsupported zlib encoding");
+ }
+ }
+
+ #[test]
+ fn encoding_reply_short_channel_ids_end() {
+ let expected_chain_hash = BlockHash::from_hex("06226e46111a0b59caaf126043eb5bbf28c34f3a5e332a1fc7b2b73cf188910f").unwrap();
+ let mut reply_short_channel_ids_end = msgs::ReplyShortChannelIdsEnd {
+ chain_hash: expected_chain_hash,
+ full_information: true,
+ };
+ let encoded_value = reply_short_channel_ids_end.encode();
+ let target_value = hex::decode("0f9188f13cb7b2c71f2a335e3a4fc328bf5beb436012afca590b1a11466e220601").unwrap();
+ assert_eq!(encoded_value, target_value);
+
+ reply_short_channel_ids_end = Readable::read(&mut Cursor::new(&target_value[..])).unwrap();
+ assert_eq!(reply_short_channel_ids_end.chain_hash, expected_chain_hash);
+ assert_eq!(reply_short_channel_ids_end.full_information, true);
+ }
+
+ #[test]
+ fn encoding_gossip_timestamp_filter(){
+ let expected_chain_hash = BlockHash::from_hex("06226e46111a0b59caaf126043eb5bbf28c34f3a5e332a1fc7b2b73cf188910f").unwrap();
+ let mut gossip_timestamp_filter = msgs::GossipTimestampFilter {
+ chain_hash: expected_chain_hash,
+ first_timestamp: 1590000000,
+ timestamp_range: 0xffff_ffff,
+ };
+ let encoded_value = gossip_timestamp_filter.encode();
+ let target_value = hex::decode("0f9188f13cb7b2c71f2a335e3a4fc328bf5beb436012afca590b1a11466e22065ec57980ffffffff").unwrap();
+ assert_eq!(encoded_value, target_value);
+
+ gossip_timestamp_filter = Readable::read(&mut Cursor::new(&target_value[..])).unwrap();
+ assert_eq!(gossip_timestamp_filter.chain_hash, expected_chain_hash);
+ assert_eq!(gossip_timestamp_filter.first_timestamp, 1590000000);
+ assert_eq!(gossip_timestamp_filter.timestamp_range, 0xffff_ffff);
+ }
}