let mut channel_txn = Vec::new();
macro_rules! make_channel {
($source: expr, $dest: expr, $chan_id: expr) => { {
- $source.peer_connected(&$dest.get_our_node_id(), &Init { features: InitFeatures::known() });
- $dest.peer_connected(&$source.get_our_node_id(), &Init { features: InitFeatures::known() });
+ $source.peer_connected(&$dest.get_our_node_id(), &Init { features: InitFeatures::known(), remote_network_address: None });
+ $dest.peer_connected(&$source.get_our_node_id(), &Init { features: InitFeatures::known(), remote_network_address: None });
$source.create_channel($dest.get_our_node_id(), 100_000, 42, 0, None).unwrap();
let open_channel = {
},
0x0e => {
if chan_a_disconnected {
- nodes[0].peer_connected(&nodes[1].get_our_node_id(), &Init { features: InitFeatures::known() });
- nodes[1].peer_connected(&nodes[0].get_our_node_id(), &Init { features: InitFeatures::known() });
+ nodes[0].peer_connected(&nodes[1].get_our_node_id(), &Init { features: InitFeatures::known(), remote_network_address: None });
+ nodes[1].peer_connected(&nodes[0].get_our_node_id(), &Init { features: InitFeatures::known(), remote_network_address: None });
chan_a_disconnected = false;
}
},
0x0f => {
if chan_b_disconnected {
- nodes[1].peer_connected(&nodes[2].get_our_node_id(), &Init { features: InitFeatures::known() });
- nodes[2].peer_connected(&nodes[1].get_our_node_id(), &Init { features: InitFeatures::known() });
+ nodes[1].peer_connected(&nodes[2].get_our_node_id(), &Init { features: InitFeatures::known(), remote_network_address: None });
+ nodes[2].peer_connected(&nodes[1].get_our_node_id(), &Init { features: InitFeatures::known(), remote_network_address: None });
chan_b_disconnected = false;
}
},
// Next, make sure peers are all connected to each other
if chan_a_disconnected {
- nodes[0].peer_connected(&nodes[1].get_our_node_id(), &Init { features: InitFeatures::known() });
- nodes[1].peer_connected(&nodes[0].get_our_node_id(), &Init { features: InitFeatures::known() });
+ nodes[0].peer_connected(&nodes[1].get_our_node_id(), &Init { features: InitFeatures::known(), remote_network_address: None });
+ nodes[1].peer_connected(&nodes[0].get_our_node_id(), &Init { features: InitFeatures::known(), remote_network_address: None });
chan_a_disconnected = false;
}
if chan_b_disconnected {
- nodes[1].peer_connected(&nodes[2].get_our_node_id(), &Init { features: InitFeatures::known() });
- nodes[2].peer_connected(&nodes[1].get_our_node_id(), &Init { features: InitFeatures::known() });
+ nodes[1].peer_connected(&nodes[2].get_our_node_id(), &Init { features: InitFeatures::known(), remote_network_address: None });
+ nodes[2].peer_connected(&nodes[1].get_our_node_id(), &Init { features: InitFeatures::known(), remote_network_address: None });
chan_b_disconnected = false;
}
}
}
if new_id == 0 { return; }
- loss_detector.handler.new_outbound_connection(get_pubkey!(), Peer{id: (new_id - 1) as u8, peers_connected: &peers}).unwrap();
+ loss_detector.handler.new_outbound_connection(get_pubkey!(), Peer{id: (new_id - 1) as u8, peers_connected: &peers}, None).unwrap();
peers.borrow_mut()[new_id - 1] = true;
},
1 => {
}
}
if new_id == 0 { return; }
- loss_detector.handler.new_inbound_connection(Peer{id: (new_id - 1) as u8, peers_connected: &peers}).unwrap();
+ loss_detector.handler.new_inbound_connection(Peer{id: (new_id - 1) as u8, peers_connected: &peers}, None).unwrap();
peers.borrow_mut()[new_id - 1] = true;
},
2 => {
for i in 0..num_nodes {
for j in (i+1)..num_nodes {
- nodes[i].node.peer_connected(&nodes[j].node.get_our_node_id(), &Init { features: InitFeatures::known() });
- nodes[j].node.peer_connected(&nodes[i].node.get_our_node_id(), &Init { features: InitFeatures::known() });
+ nodes[i].node.peer_connected(&nodes[j].node.get_our_node_id(), &Init { features: InitFeatures::known(), remote_network_address: None });
+ nodes[j].node.peer_connected(&nodes[i].node.get_our_node_id(), &Init { features: InitFeatures::known(), remote_network_address: None });
}
}
use lightning::ln::peer_handler;
use lightning::ln::peer_handler::SocketDescriptor as LnSocketTrait;
use lightning::ln::peer_handler::CustomMessageHandler;
-use lightning::ln::msgs::{ChannelMessageHandler, RoutingMessageHandler};
+use lightning::ln::msgs::{ChannelMessageHandler, RoutingMessageHandler, NetAddress};
use lightning::util::logger::Logger;
use std::task;
+use std::net::IpAddr;
use std::net::SocketAddr;
use std::net::TcpStream as StdTcpStream;
use std::sync::{Arc, Mutex};
RMH: RoutingMessageHandler + 'static + Send + Sync,
L: Logger + 'static + ?Sized + Send + Sync,
UMH: CustomMessageHandler + 'static + Send + Sync {
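+ // Capture the remote socket address before handing the stream off; it is passed to
+ // the PeerManager below, which may report it back to the peer in our init message.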
+ let ip_addr = stream.peer_addr().unwrap();
let (reader, write_receiver, read_receiver, us) = Connection::new(stream);
#[cfg(debug_assertions)]
let last_us = Arc::clone(&us);
- let handle_opt = if let Ok(_) = peer_manager.new_inbound_connection(SocketDescriptor::new(us.clone())) {
+ let handle_opt = if let Ok(_) = peer_manager.new_inbound_connection(SocketDescriptor::new(us.clone()), match ip_addr.ip() {
+ IpAddr::V4(ip) => Some(NetAddress::IPv4 {
+ addr: ip.octets(),
+ port: ip_addr.port(),
+ }),
+ IpAddr::V6(ip) => Some(NetAddress::IPv6 {
+ addr: ip.octets(),
+ port: ip_addr.port(),
+ }),
+ }) {
Some(tokio::spawn(Connection::schedule_read(peer_manager, us, reader, read_receiver, write_receiver)))
} else {
// Note that we will skip socket_disconnected here, in accordance with the PeerManager
RMH: RoutingMessageHandler + 'static + Send + Sync,
L: Logger + 'static + ?Sized + Send + Sync,
UMH: CustomMessageHandler + 'static + Send + Sync {
+ let ip_addr = stream.peer_addr().unwrap();
let (reader, mut write_receiver, read_receiver, us) = Connection::new(stream);
#[cfg(debug_assertions)]
let last_us = Arc::clone(&us);
-
- let handle_opt = if let Ok(initial_send) = peer_manager.new_outbound_connection(their_node_id, SocketDescriptor::new(us.clone())) {
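+ // As on the inbound path, translate the peer's SocketAddr into the wire-format
+ // NetAddress the PeerManager expects; a TCP socket only yields IPv4 or IPv6.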
+ let handle_opt = if let Ok(initial_send) = peer_manager.new_outbound_connection(their_node_id, SocketDescriptor::new(us.clone()), match ip_addr.ip() {
+ IpAddr::V4(ip) => Some(NetAddress::IPv4 {
+ addr: ip.octets(),
+ port: ip_addr.port(),
+ }),
+ IpAddr::V6(ip) => Some(NetAddress::IPv6 {
+ addr: ip.octets(),
+ port: ip_addr.port(),
+ }),
+ }) {
Some(tokio::spawn(async move {
// We should essentially always have enough room in a TCP socket buffer to send the
// initial 10s of bytes. However, tokio running in single-threaded mode will always
use bitcoin::blockdata::constants::genesis_block;
use bitcoin::hash_types::BlockHash;
use bitcoin::network::constants::Network;
-use chain::channelmonitor::ChannelMonitor;
+use chain::channelmonitor::{ANTI_REORG_DELAY, ChannelMonitor};
use chain::transaction::OutPoint;
use chain::{ChannelMonitorUpdateErr, Listen, Watch};
use ln::channelmanager::{ChannelManager, ChannelManagerReadArgs, RAACommitmentOrder, PaymentSendFailure};
nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
- nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() });
+ nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty(), remote_network_address: None });
let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]);
assert_eq!(reestablish_1.len(), 1);
- nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() });
+ nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty(), remote_network_address: None });
let reestablish_2 = get_chan_reestablish_msgs!(nodes[1], nodes[0]);
assert_eq!(reestablish_2.len(), 1);
assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
- nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() });
+ nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty(), remote_network_address: None });
let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]);
assert_eq!(reestablish_1.len(), 1);
- nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() });
+ nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty(), remote_network_address: None });
let reestablish_2 = get_chan_reestablish_msgs!(nodes[1], nodes[0]);
assert_eq!(reestablish_2.len(), 1);
commitment_signed_dance!(nodes[1], nodes[2], updates.commitment_signed, false);
chanmon_cfgs[1].persister.set_update_ret(Err(ChannelMonitorUpdateErr::TemporaryFailure));
- nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() });
- nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() });
+ nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty(), remote_network_address: None });
+ nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty(), remote_network_address: None });
let as_reestablish = get_event_msg!(nodes[0], MessageSendEvent::SendChannelReestablish, nodes[1].node.get_our_node_id());
let bs_reestablish = get_event_msg!(nodes[1], MessageSendEvent::SendChannelReestablish, nodes[0].node.get_our_node_id());
nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
- nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() });
- nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() });
+ nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty(), remote_network_address: None });
+ nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty(), remote_network_address: None });
assert!(as_reestablish == get_event_msg!(nodes[0], MessageSendEvent::SendChannelReestablish, nodes[1].node.get_our_node_id()));
assert!(bs_reestablish == get_event_msg!(nodes[1], MessageSendEvent::SendChannelReestablish, nodes[0].node.get_our_node_id()));
assert!(nodes[1].node.claim_funds(payment_preimage_1));
check_added_monitors!(nodes[1], 1);
- nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() });
- nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() });
+ nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty(), remote_network_address: None });
+ nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty(), remote_network_address: None });
let as_reconnect = get_event_msg!(nodes[0], MessageSendEvent::SendChannelReestablish, nodes[1].node.get_our_node_id());
let bs_reconnect = get_event_msg!(nodes[1], MessageSendEvent::SendChannelReestablish, nodes[0].node.get_our_node_id());
nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
- nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() });
- nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() });
+ nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty(), remote_network_address: None });
+ nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty(), remote_network_address: None });
let as_reconnect = get_event_msg!(nodes[0], MessageSendEvent::SendChannelReestablish, nodes[1].node.get_our_node_id());
let bs_reconnect = get_event_msg!(nodes[1], MessageSendEvent::SendChannelReestablish, nodes[0].node.get_our_node_id());
nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
- nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: InitFeatures::known() });
+ nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: InitFeatures::known(), remote_network_address: None });
let as_connect_msg = get_event_msg!(nodes[0], MessageSendEvent::SendChannelReestablish, nodes[1].node.get_our_node_id());
- nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: InitFeatures::known() });
+ nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: InitFeatures::known(), remote_network_address: None });
let bs_connect_msg = get_event_msg!(nodes[1], MessageSendEvent::SendChannelReestablish, nodes[0].node.get_our_node_id());
nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &as_connect_msg);
claim_payment(&nodes[1], &[&nodes[0]], payment_preimage);
}
+#[test]
+fn test_fail_htlc_on_broadcast_after_claim() {
+ // In an earlier version of 7e78fa660cec8a73286c94c1073ee588140e7a01 we'd also fail the inbound
+ // channel backwards if we received an HTLC failure after an HTLC fulfillment. Here we test a
+ // specific case of that by having the HTLC failure come from the ChannelMonitor after a dust
+ // HTLC was not included in a confirmed commitment transaction.
+ //
+ // We first forward a payment, then claim it with an update_fulfill_htlc message, closing the
+ // channel immediately before commitment occurs. After the commitment transaction reaches
+ // ANTI_REORG_DELAY confirmations, we will try to fail the HTLC which was already fulfilled.
+ let chanmon_cfgs = create_chanmon_cfgs(3);
+ let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
+ let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
+ let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
+
+ create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
+ let chan_id_2 = create_announced_chan_between_nodes(&nodes, 1, 2, InitFeatures::known(), InitFeatures::known()).2;
+
+ let payment_preimage = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 2000).0;
+
+ let bs_txn = get_local_commitment_txn!(nodes[2], chan_id_2);
+ assert_eq!(bs_txn.len(), 1);
+
+ nodes[2].node.claim_funds(payment_preimage);
+ check_added_monitors!(nodes[2], 1);
+ let cs_updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
+ nodes[1].node.handle_update_fulfill_htlc(&nodes[2].node.get_our_node_id(), &cs_updates.update_fulfill_htlcs[0]);
+ let bs_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
+ check_added_monitors!(nodes[1], 1);
+ expect_payment_forwarded!(nodes[1], Some(1000), false);
+
+ mine_transaction(&nodes[1], &bs_txn[0]);
+ check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
+ check_closed_broadcast!(nodes[1], true);
+ connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
+ check_added_monitors!(nodes[1], 1);
+ expect_pending_htlcs_forwardable!(nodes[1]);
+
+ nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_updates.update_fulfill_htlcs[0]);
+ expect_payment_sent_without_paths!(nodes[0], payment_preimage);
+ commitment_signed_dance!(nodes[0], nodes[1], bs_updates.commitment_signed, true, true);
+ expect_payment_path_successful!(nodes[0]);
+}
+
fn do_update_fee_resend_test(deliver_update: bool, parallel_updates: bool) {
// In early versions we did not handle resending of update_fee on reconnect correctly. The
// chanmon_consistency fuzz target, of course, immediately found it, but we test a few cases
nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
- nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: InitFeatures::known() });
+ nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: InitFeatures::known(), remote_network_address: None });
let as_connect_msg = get_event_msg!(nodes[0], MessageSendEvent::SendChannelReestablish, nodes[1].node.get_our_node_id());
- nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: InitFeatures::known() });
+ nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: InitFeatures::known(), remote_network_address: None });
let bs_connect_msg = get_event_msg!(nodes[1], MessageSendEvent::SendChannelReestablish, nodes[0].node.get_our_node_id());
nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &as_connect_msg);
nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
// Now reconnect the two
- nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() });
+ nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty(), remote_network_address: None });
let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]);
assert_eq!(reestablish_1.len(), 1);
- nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() });
+ nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty(), remote_network_address: None });
let reestablish_2 = get_chan_reestablish_msgs!(nodes[1], nodes[0]);
assert_eq!(reestablish_2.len(), 1);
/// should be sent back to the counterparty node.
///
/// [`msgs::AcceptChannel`]: crate::ln::msgs::AcceptChannel
- pub fn accept_inbound_channel(&mut self) -> msgs::AcceptChannel {
+ pub fn accept_inbound_channel(&mut self, user_id: u64) -> msgs::AcceptChannel {
if self.is_outbound() {
panic!("Tried to send accept_channel for an outbound channel?");
}
panic!("The inbound channel has already been accepted");
}
+ self.user_id = user_id;
self.inbound_awaiting_accept = false;
self.generate_accept_channel_message()
let mut node_b_chan = Channel::<EnforcingSigner>::new_from_req(&&feeest, &&keys_provider, node_b_node_id, &InitFeatures::known(), &open_channel_msg, 7, &config, 0, &&logger, 42).unwrap();
// Node B --> Node A: accept channel, explicitly setting B's dust limit.
- let mut accept_channel_msg = node_b_chan.accept_inbound_channel();
+ let mut accept_channel_msg = node_b_chan.accept_inbound_channel(0);
accept_channel_msg.dust_limit_satoshis = 546;
node_a_chan.accept_channel(&accept_channel_msg, &config.peer_channel_config_limits, &InitFeatures::known()).unwrap();
node_a_chan.holder_dust_limit_satoshis = 1560;
let mut node_b_chan = Channel::<EnforcingSigner>::new_from_req(&&feeest, &&keys_provider, node_b_node_id, &InitFeatures::known(), &open_channel_msg, 7, &config, 0, &&logger, 42).unwrap();
// Node B --> Node A: accept channel
- let accept_channel_msg = node_b_chan.accept_inbound_channel();
+ let accept_channel_msg = node_b_chan.accept_inbound_channel(0);
node_a_chan.accept_channel(&accept_channel_msg, &config.peer_channel_config_limits, &InitFeatures::known()).unwrap();
// Node A --> Node B: funding created
///
/// The `temporary_channel_id` parameter indicates which inbound channel should be accepted.
///
- /// [`Event::OpenChannelRequest`]: crate::util::events::Event::OpenChannelRequest
- pub fn accept_inbound_channel(&self, temporary_channel_id: &[u8; 32]) -> Result<(), APIError> {
+ /// For inbound channels, the `user_channel_id` parameter will be provided back in
+ /// [`Event::ChannelClosed::user_channel_id`] to allow tracking of which events correspond
+ /// with which `accept_inbound_channel` call.
+ ///
+ /// [`Event::OpenChannelRequest`]: events::Event::OpenChannelRequest
+ /// [`Event::ChannelClosed::user_channel_id`]: events::Event::ChannelClosed::user_channel_id
+ pub fn accept_inbound_channel(&self, temporary_channel_id: &[u8; 32], user_channel_id: u64) -> Result<(), APIError> {
let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
let mut channel_state_lock = self.channel_state.lock().unwrap();
}
channel_state.pending_msg_events.push(events::MessageSendEvent::SendAcceptChannel {
node_id: channel.get().get_counterparty_node_id(),
- msg: channel.get_mut().accept_inbound_channel(),
+ msg: channel.get_mut().accept_inbound_channel(user_channel_id),
});
}
hash_map::Entry::Vacant(_) => {
if !self.default_configuration.manually_accept_inbound_channels {
channel_state.pending_msg_events.push(events::MessageSendEvent::SendAcceptChannel {
node_id: counterparty_node_id.clone(),
- msg: channel.accept_inbound_channel(),
+ msg: channel.accept_inbound_channel(0),
});
} else {
let mut pending_events = self.pending_events.lock().unwrap();
let payer_pubkey = nodes[0].node.get_our_node_id();
let payee_pubkey = nodes[1].node.get_our_node_id();
- nodes[0].node.peer_connected(&payee_pubkey, &msgs::Init { features: InitFeatures::known() });
- nodes[1].node.peer_connected(&payer_pubkey, &msgs::Init { features: InitFeatures::known() });
+ nodes[0].node.peer_connected(&payee_pubkey, &msgs::Init { features: InitFeatures::known(), remote_network_address: None });
+ nodes[1].node.peer_connected(&payer_pubkey, &msgs::Init { features: InitFeatures::known(), remote_network_address: None });
let _chan = create_chan_between_nodes(&nodes[0], &nodes[1], InitFeatures::known(), InitFeatures::known());
let route_params = RouteParameters {
let payer_pubkey = nodes[0].node.get_our_node_id();
let payee_pubkey = nodes[1].node.get_our_node_id();
- nodes[0].node.peer_connected(&payee_pubkey, &msgs::Init { features: InitFeatures::known() });
- nodes[1].node.peer_connected(&payer_pubkey, &msgs::Init { features: InitFeatures::known() });
+ nodes[0].node.peer_connected(&payee_pubkey, &msgs::Init { features: InitFeatures::known(), remote_network_address: None });
+ nodes[1].node.peer_connected(&payer_pubkey, &msgs::Init { features: InitFeatures::known(), remote_network_address: None });
let _chan = create_chan_between_nodes(&nodes[0], &nodes[1], InitFeatures::known(), InitFeatures::known());
let route_params = RouteParameters {
});
let node_b_holder = NodeHolder { node: &node_b };
- node_a.peer_connected(&node_b.get_our_node_id(), &Init { features: InitFeatures::known() });
- node_b.peer_connected(&node_a.get_our_node_id(), &Init { features: InitFeatures::known() });
+ node_a.peer_connected(&node_b.get_our_node_id(), &Init { features: InitFeatures::known(), remote_network_address: None });
+ node_b.peer_connected(&node_a.get_our_node_id(), &Init { features: InitFeatures::known(), remote_network_address: None });
node_a.create_channel(node_b.get_our_node_id(), 8_000_000, 100_000_000, 42, None).unwrap();
node_b.handle_open_channel(&node_a.get_our_node_id(), InitFeatures::known(), &get_event_msg!(node_a_holder, MessageSendEvent::SendOpenChannel, node_b.get_our_node_id()));
node_a.handle_accept_channel(&node_b.get_our_node_id(), InitFeatures::known(), &get_event_msg!(node_b_holder, MessageSendEvent::SendAcceptChannel, node_a.get_our_node_id()));
for i in 0..node_count {
for j in (i+1)..node_count {
- nodes[i].node.peer_connected(&nodes[j].node.get_our_node_id(), &msgs::Init { features: cfgs[j].features.clone() });
- nodes[j].node.peer_connected(&nodes[i].node.get_our_node_id(), &msgs::Init { features: cfgs[i].features.clone() });
+ nodes[i].node.peer_connected(&nodes[j].node.get_our_node_id(), &msgs::Init { features: cfgs[j].features.clone(), remote_network_address: None });
+ nodes[j].node.peer_connected(&nodes[i].node.get_our_node_id(), &msgs::Init { features: cfgs[i].features.clone(), remote_network_address: None });
}
}
/// pending_htlc_adds includes both the holding cell and in-flight update_add_htlcs, whereas
/// for claims/fails they are separated out.
pub fn reconnect_nodes<'a, 'b, 'c>(node_a: &Node<'a, 'b, 'c>, node_b: &Node<'a, 'b, 'c>, send_funding_locked: (bool, bool), pending_htlc_adds: (i64, i64), pending_htlc_claims: (usize, usize), pending_htlc_fails: (usize, usize), pending_cell_htlc_claims: (usize, usize), pending_cell_htlc_fails: (usize, usize), pending_raa: (bool, bool)) {
- node_a.node.peer_connected(&node_b.node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() });
+ node_a.node.peer_connected(&node_b.node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty(), remote_network_address: None });
let reestablish_1 = get_chan_reestablish_msgs!(node_a, node_b);
- node_b.node.peer_connected(&node_a.node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() });
+ node_b.node.peer_connected(&node_a.node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty(), remote_network_address: None });
let reestablish_2 = get_chan_reestablish_msgs!(node_b, node_a);
if send_funding_locked.0 {
let events_2 = nodes[1].node.get_and_clear_pending_msg_events();
assert!(events_2.is_empty());
- nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() });
+ nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty(), remote_network_address: None });
let as_reestablish = get_event_msg!(nodes[0], MessageSendEvent::SendChannelReestablish, nodes[1].node.get_our_node_id());
- nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() });
+ nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty(), remote_network_address: None });
let bs_reestablish = get_event_msg!(nodes[1], MessageSendEvent::SendChannelReestablish, nodes[0].node.get_our_node_id());
// nodes[0] hasn't yet received a funding_locked, so it only sends that on reconnect.
nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
- nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() });
+ nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty(), remote_network_address: None });
let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]);
assert_eq!(reestablish_1.len(), 1);
- nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() });
+ nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty(), remote_network_address: None });
let reestablish_2 = get_chan_reestablish_msgs!(nodes[1], nodes[0]);
assert_eq!(reestablish_2.len(), 1);
assert_eq!(nodes[0].node.list_channels().len(), 1);
check_added_monitors!(nodes[0], 1);
- nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() });
+ nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty(), remote_network_address: None });
let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]);
- nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() });
+ nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty(), remote_network_address: None });
let reestablish_2 = get_chan_reestablish_msgs!(nodes[1], nodes[0]);
nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &reestablish_1[0]);
assert_eq!(nodes[0].node.list_channels().len(), 1);
check_added_monitors!(nodes[0], 1);
- nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() });
+ nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty(), remote_network_address: None });
let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]);
- nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() });
+ nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty(), remote_network_address: None });
let reestablish_2 = get_chan_reestablish_msgs!(nodes[1], nodes[0]);
nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &reestablish_1[0]);
//... and we can even still claim the payment!
claim_payment(&nodes[2], &[&nodes[0], &nodes[1]], our_payment_preimage);
- nodes[3].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() });
+ nodes[3].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty(), remote_network_address: None });
let reestablish = get_event_msg!(nodes[3], MessageSendEvent::SendChannelReestablish, nodes[0].node.get_our_node_id());
- nodes[0].node.peer_connected(&nodes[3].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() });
+ nodes[0].node.peer_connected(&nodes[3].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty(), remote_network_address: None });
nodes[0].node.handle_channel_reestablish(&nodes[3].node.get_our_node_id(), &reestablish);
let msg_events = nodes[0].node.get_and_clear_pending_msg_events();
assert_eq!(msg_events.len(), 1);
//Disconnect and Reconnect
nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
- nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() });
+ nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty(), remote_network_address: None });
let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]);
assert_eq!(reestablish_1.len(), 1);
- nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() });
+ nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty(), remote_network_address: None });
let reestablish_2 = get_chan_reestablish_msgs!(nodes[1], nodes[0]);
assert_eq!(reestablish_2.len(), 1);
nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &reestablish_2[0]);
check_added_monitors!(nodes[0], 1);
- nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() });
- nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() });
+ nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty(), remote_network_address: None });
+ nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty(), remote_network_address: None });
let reestablish_0 = get_chan_reestablish_msgs!(nodes[1], nodes[0]);
}
}
// Reconnect peers
- nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() });
+ nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty(), remote_network_address: None });
let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]);
assert_eq!(reestablish_1.len(), 3);
- nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() });
+ nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty(), remote_network_address: None });
let reestablish_2 = get_chan_reestablish_msgs!(nodes[1], nodes[0]);
assert_eq!(reestablish_2.len(), 3);
let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, Some(manually_accept_conf.clone())]);
let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
- nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 42, Some(manually_accept_conf)).unwrap();
+ let temp_channel_id = nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 42, Some(manually_accept_conf)).unwrap();
let res = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), InitFeatures::known(), &res);
let events = nodes[1].node.get_and_clear_pending_events();
match events[0] {
Event::OpenChannelRequest { temporary_channel_id, .. } => {
- nodes[1].node.accept_inbound_channel(&temporary_channel_id).unwrap();
+ nodes[1].node.accept_inbound_channel(&temporary_channel_id, 23).unwrap();
}
_ => panic!("Unexpected event"),
}
}
_ => panic!("Unexpected event"),
}
+
+ nodes[1].node.force_close_channel(&temp_channel_id).unwrap();
+
+ let close_msg_ev = nodes[1].node.get_and_clear_pending_msg_events();
+ assert_eq!(close_msg_ev.len(), 1);
+
+ let events = nodes[1].node.get_and_clear_pending_events();
+ match events[0] {
+ Event::ChannelClosed { user_channel_id, .. } => {
+ assert_eq!(user_channel_id, 23);
+ }
+ _ => panic!("Unexpected event"),
+ }
}
#[test]
let events = nodes[1].node.get_and_clear_pending_events();
match events[0] {
Event::OpenChannelRequest { temporary_channel_id, .. } => {
- nodes[1].node.accept_inbound_channel(&temporary_channel_id).unwrap();
- let api_res = nodes[1].node.accept_inbound_channel(&temporary_channel_id);
+ nodes[1].node.accept_inbound_channel(&temporary_channel_id, 0).unwrap();
+ let api_res = nodes[1].node.accept_inbound_channel(&temporary_channel_id, 0);
match api_res {
Err(APIError::APIMisuseError { err }) => {
assert_eq!(err, "The channel isn't currently awaiting to be accepted.");
let node = create_network(1, &node_cfg, &node_chanmgr)[0].node;
let unknown_channel_id = [0; 32];
- let api_res = node.accept_inbound_channel(&unknown_channel_id);
+ let api_res = node.accept_inbound_channel(&unknown_channel_id, 0);
match api_res {
Err(APIError::ChannelUnavailable { err }) => {
assert_eq!(err, "Can't accept a channel that doesn't exist");
let payer_pubkey = nodes[0].node.get_our_node_id();
let payee_pubkey = nodes[1].node.get_our_node_id();
- nodes[0].node.peer_connected(&payee_pubkey, &msgs::Init { features: InitFeatures::known() });
- nodes[1].node.peer_connected(&payer_pubkey, &msgs::Init { features: InitFeatures::known() });
+ nodes[0].node.peer_connected(&payee_pubkey, &msgs::Init { features: InitFeatures::known(), remote_network_address: None });
+ nodes[1].node.peer_connected(&payer_pubkey, &msgs::Init { features: InitFeatures::known(), remote_network_address: None });
let _chan = create_chan_between_nodes(&nodes[0], &nodes[1], InitFeatures::known(), InitFeatures::known());
let route_params = RouteParameters {
pub struct Init {
/// The relevant features which the sender supports
pub features: InitFeatures,
+ /// The recipient's network address. This adds the option to report a remote IP address
+ /// back to a connecting peer using the init message. A node can decide to use that information
+ /// to discover a potential update to its public IPv4 address (NAT) and use
+ /// that for a node_announcement update message containing the new address.
+ pub remote_network_address: Option<NetAddress>,
}
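A minimal sketch of how a node might consume the new field for the NAT-rediscovery case described above (the `reported_address` helper is illustrative, not part of this change):

use lightning::ln::msgs::{Init, NetAddress};

// Returns the IPv4 address a peer claims to see us at, if it reported one. A node
// could compare this against its currently-announced address and, on a mismatch,
// rebuild and re-broadcast its node_announcement (not shown here).
fn reported_address(init: &Init) -> Option<String> {
	match &init.remote_network_address {
		Some(NetAddress::IPv4 { addr, port }) =>
			Some(format!("{}.{}.{}.{}:{}", addr[0], addr[1], addr[2], addr[3], port)),
		_ => None,
	}
}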
/// An error message to be sent or received from a peer
// global_features gets the bottom 13 bits of our features, and local_features gets all of
// our relevant feature bits. This keeps us compatible with old nodes.
self.features.write_up_to_13(w)?;
- self.features.write(w)
+ self.features.write(w)?;
+ encode_tlv_stream!(w, {
+ (3, self.remote_network_address, option)
+ });
+ Ok(())
}
}
fn read<R: Read>(r: &mut R) -> Result<Self, DecodeError> {
let global_features: InitFeatures = Readable::read(r)?;
let features: InitFeatures = Readable::read(r)?;
+ let mut remote_network_address: Option<NetAddress> = None;
+ decode_tlv_stream!(r, {
+ (3, remote_network_address, option)
+ });
Ok(Init {
features: features.or(global_features),
+ remote_network_address,
})
}
}
fn encoding_init() {
assert_eq!(msgs::Init {
features: InitFeatures::from_le_bytes(vec![0xFF, 0xFF, 0xFF]),
+ remote_network_address: None,
}.encode(), hex::decode("00023fff0003ffffff").unwrap());
assert_eq!(msgs::Init {
features: InitFeatures::from_le_bytes(vec![0xFF]),
+ remote_network_address: None,
}.encode(), hex::decode("0001ff0001ff").unwrap());
assert_eq!(msgs::Init {
features: InitFeatures::from_le_bytes(vec![]),
+ remote_network_address: None,
}.encode(), hex::decode("00000000").unwrap());
+
+ let init_msg = msgs::Init {
+ features: InitFeatures::from_le_bytes(vec![]),
+ remote_network_address: Some(msgs::NetAddress::IPv4 {
+ addr: [127, 0, 0, 1],
+ port: 1000,
+ }),
+ };
+ let encoded_value = init_msg.encode();
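+ // The expected encoding is 0000 (empty global features), 0000 (empty features), then
+ // a TLV record: type 03, length 07, value 01 (IPv4 address descriptor), 7f000001
+ // (127.0.0.1), 03e8 (port 1000).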
+ let target_value = hex::decode("000000000307017f00000103e8").unwrap();
+ assert_eq!(encoded_value, target_value);
+ assert_eq!(msgs::Init::read(&mut Cursor::new(&target_value)).unwrap(), init_msg);
}
#[test]
assert_eq!(as_broadcasted_txn[0], as_commitment_tx);
nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
- nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: InitFeatures::known()});
+ nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: InitFeatures::known(), remote_network_address: None });
assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
// Now nodes[1] should send a channel reestablish, which nodes[0] will respond to with an
// error, as the channel has hit the chain.
- nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: InitFeatures::known()});
+ nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: InitFeatures::known(), remote_network_address: None });
let bs_reestablish = get_event_msg!(nodes[1], MessageSendEvent::SendChannelReestablish, nodes[0].node.get_our_node_id());
nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &bs_reestablish);
let as_err = nodes[0].node.get_and_clear_pending_msg_events();
use ln::features::InitFeatures;
use ln::msgs;
-use ln::msgs::{ChannelMessageHandler, LightningError, RoutingMessageHandler};
+use ln::msgs::{ChannelMessageHandler, LightningError, NetAddress, RoutingMessageHandler};
use ln::channelmanager::{SimpleArcChannelManager, SimpleRefChannelManager};
use util::ser::{VecWriter, Writeable, Writer};
use ln::peer_channel_encryptor::{PeerChannelEncryptor,NextNoiseStep};
channel_encryptor: PeerChannelEncryptor,
their_node_id: Option<PublicKey>,
their_features: Option<InitFeatures>,
+ their_net_address: Option<NetAddress>,
pending_outbound_buffer: LinkedList<Vec<u8>>,
pending_outbound_buffer_first_msg_offset: usize,
}
}
+/// A function used to filter out local or private addresses, which should not be reported
+/// back to a peer in an init message.
+/// <https://www.iana.org/assignments/ipv4-address-space/ipv4-address-space.xhtml>
+/// <https://www.iana.org/assignments/ipv6-address-space/ipv6-address-space.xhtml>
+fn filter_addresses(ip_address: Option<NetAddress>) -> Option<NetAddress> {
+ match ip_address {
+ // For IPv4 range 10.0.0.0 - 10.255.255.255 (10/8)
+ Some(NetAddress::IPv4{addr: [10, _, _, _], port: _}) => None,
+ // For IPv4 range 0.0.0.0 - 0.255.255.255 (0/8)
+ Some(NetAddress::IPv4{addr: [0, _, _, _], port: _}) => None,
+ // For IPv4 range 100.64.0.0 - 100.127.255.255 (100.64/10)
+ Some(NetAddress::IPv4{addr: [100, 64..=127, _, _], port: _}) => None,
+ // For IPv4 range 127.0.0.0 - 127.255.255.255 (127/8)
+ Some(NetAddress::IPv4{addr: [127, _, _, _], port: _}) => None,
+ // For IPv4 range 169.254.0.0 - 169.254.255.255 (169.254/16)
+ Some(NetAddress::IPv4{addr: [169, 254, _, _], port: _}) => None,
+ // For IPv4 range 172.16.0.0 - 172.31.255.255 (172.16/12)
+ Some(NetAddress::IPv4{addr: [172, 16..=31, _, _], port: _}) => None,
+ // For IPv4 range 192.168.0.0 - 192.168.255.255 (192.168/16)
+ Some(NetAddress::IPv4{addr: [192, 168, _, _], port: _}) => None,
+ // For IPv4 range 192.88.99.0 - 192.88.99.255 (192.88.99/24)
+ Some(NetAddress::IPv4{addr: [192, 88, 99, _], port: _}) => None,
+ // For IPv6 range 2000:0000:0000:0000:0000:0000:0000:0000 - 3fff:ffff:ffff:ffff:ffff:ffff:ffff:ffff (2000::/3, matched by a first byte of 0x20-0x3F)
+ Some(NetAddress::IPv6{addr: [0x20..=0x3F, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _], port: _}) => ip_address,
+ // All other IPv6 addresses (loopback, link-local, unique-local, multicast, etc.)
+ Some(NetAddress::IPv6{addr: _, port: _}) => None,
+ Some(..) => ip_address,
+ None => None,
+ }
+}
+
impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, L: Deref, CMH: Deref> PeerManager<Descriptor, CM, RM, L, CMH> where
CM::Target: ChannelMessageHandler,
RM::Target: RoutingMessageHandler,
SecretKey::from_slice(&Sha256::from_engine(ephemeral_hash).into_inner()).expect("You broke SHA-256!")
}
- /// Indicates a new outbound connection has been established to a node with the given node_id.
+ /// Indicates a new outbound connection has been established to a node with the given node_id
+ /// and an optional remote network address.
+ ///
+ /// The remote network address adds the option to report a remote IP address back to a connecting
+ /// peer using the init message.
+ /// The user should pass the remote network address of the host they are connected to.
+ ///
/// Note that if an Err is returned here you MUST NOT call socket_disconnected for the new
/// descriptor but must disconnect the connection immediately.
///
/// [`socket_disconnected()`].
///
/// [`socket_disconnected()`]: PeerManager::socket_disconnected
- pub fn new_outbound_connection(&self, their_node_id: PublicKey, descriptor: Descriptor) -> Result<Vec<u8>, PeerHandleError> {
+ pub fn new_outbound_connection(&self, their_node_id: PublicKey, descriptor: Descriptor, remote_network_address: Option<NetAddress>) -> Result<Vec<u8>, PeerHandleError> {
let mut peer_encryptor = PeerChannelEncryptor::new_outbound(their_node_id.clone(), self.get_ephemeral_key());
let res = peer_encryptor.get_act_one().to_vec();
let pending_read_buffer = [0; 50].to_vec(); // Noise act two is 50 bytes
channel_encryptor: peer_encryptor,
their_node_id: None,
their_features: None,
+ their_net_address: remote_network_address,
pending_outbound_buffer: LinkedList::new(),
pending_outbound_buffer_first_msg_offset: 0,
Ok(res)
}
- /// Indicates a new inbound connection has been established.
+ /// Indicates a new inbound connection has been established, optionally with the remote
+ /// network address of the connecting peer.
+ ///
+ /// The remote network address adds the option to report a remote IP address back to a connecting
+ /// peer using the init message.
+ /// The user should pass the remote network address of the host they are connected to.
///
/// May refuse the connection by returning an Err, but will never write bytes to the remote end
/// (outbound connector always speaks first). Note that if an Err is returned here you MUST NOT
/// [`socket_disconnected()`].
///
/// [`socket_disconnected()`]: PeerManager::socket_disconnected
- pub fn new_inbound_connection(&self, descriptor: Descriptor) -> Result<(), PeerHandleError> {
+ pub fn new_inbound_connection(&self, descriptor: Descriptor, remote_network_address: Option<NetAddress>) -> Result<(), PeerHandleError> {
let peer_encryptor = PeerChannelEncryptor::new_inbound(&self.our_node_secret);
let pending_read_buffer = [0; 50].to_vec(); // Noise act one is 50 bytes
channel_encryptor: peer_encryptor,
their_node_id: None,
their_features: None,
+ their_net_address: remote_network_address,
pending_outbound_buffer: LinkedList::new(),
pending_outbound_buffer_first_msg_offset: 0,
peer.their_node_id = Some(their_node_id);
insert_node_id!();
let features = InitFeatures::known();
- let resp = msgs::Init { features };
+ let resp = msgs::Init { features, remote_network_address: filter_addresses(peer.their_net_address.clone()) };
self.enqueue_message(peer, &resp);
peer.awaiting_pong_timer_tick_intervals = 0;
},
peer.their_node_id = Some(their_node_id);
insert_node_id!();
let features = InitFeatures::known();
- let resp = msgs::Init { features };
+ let resp = msgs::Init { features, remote_network_address: filter_addresses(peer.their_net_address.clone()) };
self.enqueue_message(peer, &resp);
peer.awaiting_pong_timer_tick_intervals = 0;
},
#[cfg(test)]
mod tests {
- use ln::peer_handler::{PeerManager, MessageHandler, SocketDescriptor, IgnoringMessageHandler};
+ use ln::peer_handler::{PeerManager, MessageHandler, SocketDescriptor, IgnoringMessageHandler, filter_addresses};
use ln::msgs;
+ use ln::msgs::NetAddress;
use util::events;
use util::test_utils;
let a_id = PublicKey::from_secret_key(&secp_ctx, &peer_a.our_node_secret);
let mut fd_a = FileDescriptor { fd: 1, outbound_data: Arc::new(Mutex::new(Vec::new())) };
let mut fd_b = FileDescriptor { fd: 1, outbound_data: Arc::new(Mutex::new(Vec::new())) };
- let initial_data = peer_b.new_outbound_connection(a_id, fd_b.clone()).unwrap();
- peer_a.new_inbound_connection(fd_a.clone()).unwrap();
+ let initial_data = peer_b.new_outbound_connection(a_id, fd_b.clone(), None).unwrap();
+ peer_a.new_inbound_connection(fd_a.clone(), None).unwrap();
assert_eq!(peer_a.read_event(&mut fd_a, &initial_data).unwrap(), false);
peer_a.process_events();
assert_eq!(peer_b.read_event(&mut fd_b, &fd_a.outbound_data.lock().unwrap().split_off(0)).unwrap(), false);
let a_id = PublicKey::from_secret_key(&secp_ctx, &peers[0].our_node_secret);
let mut fd_a = FileDescriptor { fd: 1, outbound_data: Arc::new(Mutex::new(Vec::new())) };
let mut fd_b = FileDescriptor { fd: 1, outbound_data: Arc::new(Mutex::new(Vec::new())) };
- let initial_data = peers[1].new_outbound_connection(a_id, fd_b.clone()).unwrap();
- peers[0].new_inbound_connection(fd_a.clone()).unwrap();
+ let initial_data = peers[1].new_outbound_connection(a_id, fd_b.clone(), None).unwrap();
+ peers[0].new_inbound_connection(fd_a.clone(), None).unwrap();
// If we get a single timer tick before completion, that's fine
assert_eq!(peers[0].peers.lock().unwrap().peers.len(), 1);
assert!(peers[0].read_event(&mut fd_a, &fd_b.outbound_data.lock().unwrap().split_off(0)).is_err());
}
+
+ #[test]
+ fn test_filter_addresses() {
+ // Tests the filter_addresses function.
+
+ // For (10/8)
+ let ip_address = NetAddress::IPv4{addr: [10, 0, 0, 0], port: 1000};
+ assert_eq!(filter_addresses(Some(ip_address.clone())), None);
+ let ip_address = NetAddress::IPv4{addr: [10, 0, 255, 201], port: 1000};
+ assert_eq!(filter_addresses(Some(ip_address.clone())), None);
+ let ip_address = NetAddress::IPv4{addr: [10, 255, 255, 255], port: 1000};
+ assert_eq!(filter_addresses(Some(ip_address.clone())), None);
+
+ // For (0/8)
+ let ip_address = NetAddress::IPv4{addr: [0, 0, 0, 0], port: 1000};
+ assert_eq!(filter_addresses(Some(ip_address.clone())), None);
+ let ip_address = NetAddress::IPv4{addr: [0, 0, 255, 187], port: 1000};
+ assert_eq!(filter_addresses(Some(ip_address.clone())), None);
+ let ip_address = NetAddress::IPv4{addr: [0, 255, 255, 255], port: 1000};
+ assert_eq!(filter_addresses(Some(ip_address.clone())), None);
+
+ // For (100.64/10)
+ let ip_address = NetAddress::IPv4{addr: [100, 64, 0, 0], port: 1000};
+ assert_eq!(filter_addresses(Some(ip_address.clone())), None);
+ let ip_address = NetAddress::IPv4{addr: [100, 78, 255, 0], port: 1000};
+ assert_eq!(filter_addresses(Some(ip_address.clone())), None);
+ let ip_address = NetAddress::IPv4{addr: [100, 127, 255, 255], port: 1000};
+ assert_eq!(filter_addresses(Some(ip_address.clone())), None);
+
+ // For (127/8)
+ let ip_address = NetAddress::IPv4{addr: [127, 0, 0, 0], port: 1000};
+ assert_eq!(filter_addresses(Some(ip_address.clone())), None);
+ let ip_address = NetAddress::IPv4{addr: [127, 65, 73, 0], port: 1000};
+ assert_eq!(filter_addresses(Some(ip_address.clone())), None);
+ let ip_address = NetAddress::IPv4{addr: [127, 255, 255, 255], port: 1000};
+ assert_eq!(filter_addresses(Some(ip_address.clone())), None);
+
+ // For (169.254/16)
+ let ip_address = NetAddress::IPv4{addr: [169, 254, 0, 0], port: 1000};
+ assert_eq!(filter_addresses(Some(ip_address.clone())), None);
+ let ip_address = NetAddress::IPv4{addr: [169, 254, 221, 101], port: 1000};
+ assert_eq!(filter_addresses(Some(ip_address.clone())), None);
+ let ip_address = NetAddress::IPv4{addr: [169, 254, 255, 255], port: 1000};
+ assert_eq!(filter_addresses(Some(ip_address.clone())), None);
+
+ // For (172.16/12)
+ let ip_address = NetAddress::IPv4{addr: [172, 16, 0, 0], port: 1000};
+ assert_eq!(filter_addresses(Some(ip_address.clone())), None);
+ let ip_address = NetAddress::IPv4{addr: [172, 27, 101, 23], port: 1000};
+ assert_eq!(filter_addresses(Some(ip_address.clone())), None);
+ let ip_address = NetAddress::IPv4{addr: [172, 31, 255, 255], port: 1000};
+ assert_eq!(filter_addresses(Some(ip_address.clone())), None);
+
+ // For (192.168/16)
+ let ip_address = NetAddress::IPv4{addr: [192, 168, 0, 0], port: 1000};
+ assert_eq!(filter_addresses(Some(ip_address.clone())), None);
+ let ip_address = NetAddress::IPv4{addr: [192, 168, 205, 159], port: 1000};
+ assert_eq!(filter_addresses(Some(ip_address.clone())), None);
+ let ip_address = NetAddress::IPv4{addr: [192, 168, 255, 255], port: 1000};
+ assert_eq!(filter_addresses(Some(ip_address.clone())), None);
+
+ // For (192.88.99/24)
+ let ip_address = NetAddress::IPv4{addr: [192, 88, 99, 0], port: 1000};
+ assert_eq!(filter_addresses(Some(ip_address.clone())), None);
+ let ip_address = NetAddress::IPv4{addr: [192, 88, 99, 140], port: 1000};
+ assert_eq!(filter_addresses(Some(ip_address.clone())), None);
+ let ip_address = NetAddress::IPv4{addr: [192, 88, 99, 255], port: 1000};
+ assert_eq!(filter_addresses(Some(ip_address.clone())), None);
+
+ // For other IPv4 addresses
+ let ip_address = NetAddress::IPv4{addr: [188, 255, 99, 0], port: 1000};
+ assert_eq!(filter_addresses(Some(ip_address.clone())), Some(ip_address.clone()));
+ let ip_address = NetAddress::IPv4{addr: [123, 8, 129, 14], port: 1000};
+ assert_eq!(filter_addresses(Some(ip_address.clone())), Some(ip_address.clone()));
+ let ip_address = NetAddress::IPv4{addr: [2, 88, 9, 255], port: 1000};
+ assert_eq!(filter_addresses(Some(ip_address.clone())), Some(ip_address.clone()));
+
+ // For (2000::/3)
+ let ip_address = NetAddress::IPv6{addr: [32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], port: 1000};
+ assert_eq!(filter_addresses(Some(ip_address.clone())), Some(ip_address.clone()));
+ let ip_address = NetAddress::IPv6{addr: [45, 34, 209, 190, 0, 123, 55, 34, 0, 0, 3, 27, 201, 0, 0, 0], port: 1000};
+ assert_eq!(filter_addresses(Some(ip_address.clone())), Some(ip_address.clone()));
+ let ip_address = NetAddress::IPv6{addr: [63, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255], port: 1000};
+ assert_eq!(filter_addresses(Some(ip_address.clone())), Some(ip_address.clone()));
+
+ // For other IPv6 addresses
+ let ip_address = NetAddress::IPv6{addr: [24, 240, 12, 32, 0, 0, 0, 0, 20, 97, 0, 32, 121, 254, 0, 0], port: 1000};
+ assert_eq!(filter_addresses(Some(ip_address.clone())), None);
+ let ip_address = NetAddress::IPv6{addr: [68, 23, 56, 63, 0, 0, 2, 7, 75, 109, 0, 39, 0, 0, 0, 0], port: 1000};
+ assert_eq!(filter_addresses(Some(ip_address.clone())), None);
+ let ip_address = NetAddress::IPv6{addr: [101, 38, 140, 230, 100, 0, 30, 98, 0, 26, 0, 0, 57, 96, 0, 0], port: 1000};
+ assert_eq!(filter_addresses(Some(ip_address.clone())), None);
+
+ // For (None)
+ assert_eq!(filter_addresses(None), None);
+ }
}
check_added_monitors!(nodes[1], 2);
nodes[1].node = &nodes_1_deserialized;
- nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: InitFeatures::known() });
- nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() });
+ nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: InitFeatures::known(), remote_network_address: None });
+ nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty(), remote_network_address: None });
let as_reestablish = get_event_msg!(nodes[0], MessageSendEvent::SendChannelReestablish, nodes[1].node.get_our_node_id());
let bs_reestablish = get_event_msg!(nodes[1], MessageSendEvent::SendChannelReestablish, nodes[0].node.get_our_node_id());
nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &as_reestablish);
get_event_msg!(nodes[0], MessageSendEvent::SendChannelUpdate, nodes[1].node.get_our_node_id());
get_event_msg!(nodes[1], MessageSendEvent::SendChannelUpdate, nodes[0].node.get_our_node_id());
- nodes[1].node.peer_connected(&nodes[2].node.get_our_node_id(), &msgs::Init { features: InitFeatures::known() });
- nodes[2].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() });
+ nodes[1].node.peer_connected(&nodes[2].node.get_our_node_id(), &msgs::Init { features: InitFeatures::known(), remote_network_address: None });
+ nodes[2].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty(), remote_network_address: None });
let bs_reestablish = get_event_msg!(nodes[1], MessageSendEvent::SendChannelReestablish, nodes[2].node.get_our_node_id());
let cs_reestablish = get_event_msg!(nodes[2], MessageSendEvent::SendChannelReestablish, nodes[1].node.get_our_node_id());
nodes[2].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &bs_reestablish);
nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
- nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() });
+ nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty(), remote_network_address: None });
let node_0_reestablish = get_event_msg!(nodes[0], MessageSendEvent::SendChannelReestablish, nodes[1].node.get_our_node_id());
- nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() });
+ nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty(), remote_network_address: None });
let node_1_reestablish = get_event_msg!(nodes[1], MessageSendEvent::SendChannelReestablish, nodes[0].node.get_our_node_id());
nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &node_0_reestablish);
nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
- nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() });
+ nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty(), remote_network_address: None });
let node_1_2nd_reestablish = get_event_msg!(nodes[1], MessageSendEvent::SendChannelReestablish, nodes[0].node.get_our_node_id());
- nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() });
+ nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty(), remote_network_address: None });
if recv_count == 0 {
// If all closing_signeds weren't delivered we can just resume where we left off...
let node_0_2nd_reestablish = get_event_msg!(nodes[0], MessageSendEvent::SendChannelReestablish, nodes[1].node.get_our_node_id());
let mut reader = io::Cursor::new(buffer);
let decoded_msg = read(&mut reader, &IgnoringMessageHandler{}).unwrap();
match decoded_msg {
- Message::Init(msgs::Init { features }) => {
+ Message::Init(msgs::Init { features, .. }) => {
assert!(features.supports_variable_length_onion());
assert!(features.supports_upfront_shutdown_script());
assert!(features.supports_gossip_queries());
return ();
}
- // Send a gossip_timestamp_filter to enable gossip message receipt. Note that we have to
- // use a "all timestamps" filter as sending the current timestamp would result in missing
- // gossip messages that are simply sent late. We could calculate the intended filter time
- // by looking at the current time and subtracting two weeks (before which we'll reject
- // messages), but there's not a lot of reason to bother - our peers should be discarding
- // the same messages.
+ // The lightning network's gossip sync system is completely broken in numerous ways.
+ //
+ // Given no broadly-available set-reconciliation protocol, the only reasonable approach is
+ // to do a full sync from the first few peers we connect to, and then receive gossip
+ // updates from all our peers normally.
+ //
+ // Originally, we could simply tell a peer to dump us the entire gossip table on startup,
+ // wasting lots of bandwidth but ensuring we have the full network graph. After the initial
+ // dump peers would always send gossip and we'd stay up-to-date with whatever our peer has
+ // seen.
+ //
+ // In order to reduce the bandwidth waste, "gossip queries" were introduced, allowing you
+ // to ask for the SCIDs of all channels in your peer's routing graph, and then only request
+ // channel data which you are missing. Except there was no way at all to identify which
+ // `channel_update`s you were missing, so you still had to request everything, just in a
+ // very complicated way with some queries instead of just getting the dump.
+ //
+ // Later, an option was added to fetch the latest timestamps of the `channel_update`s to
+ // make efficient sync possible; however, it has yet to be implemented in lnd, which makes
+ // relying on it useless.
+ //
+ // After gossip queries were introduced, support for receiving a full gossip table dump on
+ // connection was removed from several nodes, making it impossible to get a full sync
+ // without using the "gossip queries" messages.
+ //
+ // Once you opt into "gossip queries", the only way to receive any gossip updates that a
+ // peer receives after you connect is to send a `gossip_timestamp_filter` message. This
+ // message, as the name implies, tells the peer to not forward any gossip messages with a
+ // timestamp older than a given value (not the time the peer received the filter, but the
+ // timestamp in the update message, which is often hours behind when the peer received the
+ // message).
+ //
+ // Obnoxiously, `gossip_timestamp_filter` isn't *just* a filter, but it's also a request for
+ // your peer to send you the full routing graph (subject to the filter). Thus, in order to
+ // tell a peer to send you any updates as it sees them, you have to also ask for the full
+ // routing graph to be synced. If you set a timestamp filter near the current time, peers
+ // will simply not forward to you any new updates they see which were generated some time
+ // ago (which is not uncommon). If you instead set a timestamp filter near 0 (or two weeks
+ // ago), you will always get the full routing graph from all your peers.
+ //
+ // Most lightning nodes today opt to simply turn off receiving gossip data which was only
+ // propagated some time after it was generated, and, worse, often disable gossiping with
+ // several peers after their first connection. The second behavior can cause gossip to not
+ // propagate fully if there are cuts in the gossiping subgraph.
+ //
+ // In an attempt to strike a middle ground between always fetching the full graph from all of
+ // our peers and never receiving gossip from peers at all, we send all of our peers a
+ // `gossip_timestamp_filter`, with the filter time set to either two weeks ago or an hour ago.
+ //
+ // For no-std builds, we bury our head in the sand and do a full sync on each connection.
+ let should_request_full_sync = self.should_request_full_sync(&their_node_id);
+ #[allow(unused_mut, unused_assignments)]
+ let mut gossip_start_time = 0;
+ #[cfg(feature = "std")]
+ {
+ gossip_start_time = SystemTime::now().duration_since(UNIX_EPOCH).expect("Time must be > 1970").as_secs();
+ if should_request_full_sync {
+ gossip_start_time -= 60 * 60 * 24 * 7 * 2; // 2 weeks ago
+ } else {
+ gossip_start_time -= 60 * 60; // an hour ago
+ }
+ }
+
let mut pending_events = self.pending_events.lock().unwrap();
pending_events.push(MessageSendEvent::SendGossipTimestampFilter {
node_id: their_node_id.clone(),
msg: GossipTimestampFilter {
chain_hash: self.network_graph.genesis_hash,
- first_timestamp: 0,
+ first_timestamp: gossip_start_time as u32, // 2106 issue!
timestamp_range: u32::max_value(),
},
});
-
- // Check if we need to perform a full synchronization with this peer
- if !self.should_request_full_sync(&their_node_id) {
- return ();
- }
-
- let first_blocknum = 0;
- let number_of_blocks = 0xffffffff;
- log_debug!(self.logger, "Sending query_channel_range peer={}, first_blocknum={}, number_of_blocks={}", log_pubkey!(their_node_id), first_blocknum, number_of_blocks);
- pending_events.push(MessageSendEvent::SendChannelRangeQuery {
- node_id: their_node_id.clone(),
- msg: QueryChannelRange {
- chain_hash: self.network_graph.genesis_hash,
- first_blocknum,
- number_of_blocks,
- },
- });
}
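// Editor's note: a minimal sketch, not LDK code, of the BOLT 7 filter semantics the comment
// block above describes. The receiving peer relays a gossip message only if the timestamp
// carried *inside* the message falls within the advertised window:
fn update_passes_filter(update_timestamp: u32, first_timestamp: u32, timestamp_range: u32) -> bool {
	update_timestamp >= first_timestamp &&
		(update_timestamp as u64) < (first_timestamp as u64) + (timestamp_range as u64)
}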
- /// Statelessly processes a reply to a channel range query by immediately
- /// sending an SCID query with SCIDs in the reply. To keep this handler
- /// stateless, it does not validate the sequencing of replies for multi-
- /// reply ranges. It does not validate whether the reply(ies) cover the
- /// queried range. It also does not filter SCIDs to only those in the
- /// original query range. We also do not validate that the chain_hash
- /// matches the chain_hash of the NetworkGraph. Any chan_ann message that
- /// does not match our chain_hash will be rejected when the announcement is
- /// processed.
- fn handle_reply_channel_range(&self, their_node_id: &PublicKey, msg: ReplyChannelRange) -> Result<(), LightningError> {
- log_debug!(self.logger, "Handling reply_channel_range peer={}, first_blocknum={}, number_of_blocks={}, sync_complete={}, scids={}", log_pubkey!(their_node_id), msg.first_blocknum, msg.number_of_blocks, msg.sync_complete, msg.short_channel_ids.len(),);
-
- log_debug!(self.logger, "Sending query_short_channel_ids peer={}, batch_size={}", log_pubkey!(their_node_id), msg.short_channel_ids.len());
- let mut pending_events = self.pending_events.lock().unwrap();
- pending_events.push(MessageSendEvent::SendShortIdsQuery {
- node_id: their_node_id.clone(),
- msg: QueryShortChannelIds {
- chain_hash: msg.chain_hash,
- short_channel_ids: msg.short_channel_ids,
- }
- });
-
+ fn handle_reply_channel_range(&self, _their_node_id: &PublicKey, _msg: ReplyChannelRange) -> Result<(), LightningError> {
+ // We don't make queries, so should never receive replies. If, in the future, the set
+ // reconciliation extensions to gossip queries become broadly supported, we should revert
+ // this code to its state pre-0.0.106.
Ok(())
}
- /// When an SCID query is initiated the remote peer will begin streaming
- /// gossip messages. In the event of a failure, we may have received
- /// some channel information. Before trying with another peer, the
- /// caller should update its set of SCIDs that need to be queried.
- fn handle_reply_short_channel_ids_end(&self, their_node_id: &PublicKey, msg: ReplyShortChannelIdsEnd) -> Result<(), LightningError> {
- log_debug!(self.logger, "Handling reply_short_channel_ids_end peer={}, full_information={}", log_pubkey!(their_node_id), msg.full_information);
-
- // If the remote node does not have up-to-date information for the
- // chain_hash they will set full_information=false. We can fail
- // the result and try again with a different peer.
- if !msg.full_information {
- return Err(LightningError {
- err: String::from("Received reply_short_channel_ids_end with no information"),
- action: ErrorAction::IgnoreError
- });
- }
-
+ fn handle_reply_short_channel_ids_end(&self, _their_node_id: &PublicKey, _msg: ReplyShortChannelIdsEnd) -> Result<(), LightningError> {
+ // We don't make queries, so should never receive replies. If, in the future, the set
+ // reconciliation extensions to gossip queries become broadly supported, we should revert
+ // this code to its state pre-0.0.106.
Ok(())
}
}
}
}
- macro_rules! maybe_update_channel_info {
- ( $target: expr, $src_node: expr) => {
+ macro_rules! check_update_latest {
+ ($target: expr) => {
if let Some(existing_chan_info) = $target.as_ref() {
// The timestamp field is somewhat of a misnomer - the BOLTs use it to
// order updates to ensure you always have the latest one, only
} else {
chan_was_enabled = false;
}
+ }
+ }
+ macro_rules! get_new_channel_info {
+ () => { {
let last_update_message = if msg.excess_data.len() <= MAX_EXCESS_BYTES_FOR_RELAY
{ full_msg.cloned() } else { None };
},
last_update_message
};
- $target = Some(updated_channel_update_info);
- }
+ Some(updated_channel_update_info)
+ } }
}
let msg_hash = hash_to_message!(&Sha256dHash::hash(&msg.encode()[..])[..]);
if msg.flags & 1 == 1 {
dest_node_id = channel.node_one.clone();
+ check_update_latest!(channel.two_to_one);
if let Some((sig, ctx)) = sig_info {
secp_verify_sig!(ctx, &msg_hash, &sig, &PublicKey::from_slice(channel.node_two.as_slice()).map_err(|_| LightningError{
err: "Couldn't parse source node pubkey".to_owned(),
action: ErrorAction::IgnoreAndLog(Level::Debug)
})?, "channel_update");
}
- maybe_update_channel_info!(channel.two_to_one, channel.node_two);
+ channel.two_to_one = get_new_channel_info!();
} else {
dest_node_id = channel.node_two.clone();
+ check_update_latest!(channel.one_to_two);
if let Some((sig, ctx)) = sig_info {
secp_verify_sig!(ctx, &msg_hash, &sig, &PublicKey::from_slice(channel.node_one.as_slice()).map_err(|_| LightningError{
err: "Couldn't parse destination node pubkey".to_owned(),
action: ErrorAction::IgnoreAndLog(Level::Debug)
})?, "channel_update");
}
- maybe_update_channel_info!(channel.one_to_two, channel.node_one);
+ channel.one_to_two = get_new_channel_info!();
}
}
}
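// Editor's note: a small standalone illustration of the direction bit handled above. Per
// BOLT 7, bit 0 of the `channel_update` flags encodes the direction: when set, the update
// was signed by node_two and describes the node_two -> node_one direction, which is why the
// hunks above verify the signature against the *other* endpoint's public key.
fn update_signed_by_node_two(flags: u8) -> bool {
	flags & 1 == 1
}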
use routing::network_graph::{NetGraphMsgHandler, NetworkGraph, NetworkUpdate, MAX_EXCESS_BYTES_FOR_RELAY};
use ln::msgs::{Init, OptionalField, RoutingMessageHandler, UnsignedNodeAnnouncement, NodeAnnouncement,
UnsignedChannelAnnouncement, ChannelAnnouncement, UnsignedChannelUpdate, ChannelUpdate,
- ReplyChannelRange, ReplyShortChannelIdsEnd, QueryChannelRange, QueryShortChannelIds, MAX_VALUE_MSAT};
+ ReplyChannelRange, QueryChannelRange, QueryShortChannelIds, MAX_VALUE_MSAT};
use util::test_utils;
use util::logger::Logger;
use util::ser::{Readable, Writeable};
}
#[test]
+ #[cfg(feature = "std")]
fn calling_sync_routing_table() {
+ use std::time::{SystemTime, UNIX_EPOCH};
+
let network_graph = create_network_graph();
let (secp_ctx, net_graph_msg_handler) = create_net_graph_msg_handler(&network_graph);
let node_privkey_1 = &SecretKey::from_slice(&[42; 32]).unwrap();
let node_id_1 = PublicKey::from_secret_key(&secp_ctx, node_privkey_1);
let chain_hash = genesis_block(Network::Testnet).header.block_hash();
- let first_blocknum = 0;
- let number_of_blocks = 0xffff_ffff;
// It should ignore if gossip_queries feature is not enabled
{
- let init_msg = Init { features: InitFeatures::known().clear_gossip_queries() };
+ let init_msg = Init { features: InitFeatures::known().clear_gossip_queries(), remote_network_address: None };
net_graph_msg_handler.peer_connected(&node_id_1, &init_msg);
let events = net_graph_msg_handler.get_and_clear_pending_msg_events();
assert_eq!(events.len(), 0);
}
- // It should send a query_channel_message with the correct information
+ // It should send a gossip_timestamp_filter with the correct information
{
- let init_msg = Init { features: InitFeatures::known() };
+ let init_msg = Init { features: InitFeatures::known(), remote_network_address: None };
net_graph_msg_handler.peer_connected(&node_id_1, &init_msg);
let events = net_graph_msg_handler.get_and_clear_pending_msg_events();
- assert_eq!(events.len(), 2);
+ assert_eq!(events.len(), 1);
match &events[0] {
MessageSendEvent::SendGossipTimestampFilter{ node_id, msg } => {
assert_eq!(node_id, &node_id_1);
assert_eq!(msg.chain_hash, chain_hash);
- assert_eq!(msg.first_timestamp, 0);
+ let expected_timestamp = SystemTime::now().duration_since(UNIX_EPOCH).expect("Time must be > 1970").as_secs();
+ assert!((msg.first_timestamp as u64) >= expected_timestamp - 60*60*24*7*2);
+ assert!((msg.first_timestamp as u64) < expected_timestamp - 60*60*24*7*2 + 10);
assert_eq!(msg.timestamp_range, u32::max_value());
},
_ => panic!("Expected MessageSendEvent::SendChannelRangeQuery")
};
- match &events[1] {
- MessageSendEvent::SendChannelRangeQuery{ node_id, msg } => {
- assert_eq!(node_id, &node_id_1);
- assert_eq!(msg.chain_hash, chain_hash);
- assert_eq!(msg.first_blocknum, first_blocknum);
- assert_eq!(msg.number_of_blocks, number_of_blocks);
- },
- _ => panic!("Expected MessageSendEvent::SendChannelRangeQuery")
- };
- }
-
- // It should not enqueue a query when should_request_full_sync return false.
- // The initial implementation allows syncing with the first 5 peers after
- // which should_request_full_sync will return false
- {
- let network_graph = create_network_graph();
- let (secp_ctx, net_graph_msg_handler) = create_net_graph_msg_handler(&network_graph);
- let init_msg = Init { features: InitFeatures::known() };
- for n in 1..7 {
- let node_privkey = &SecretKey::from_slice(&[n; 32]).unwrap();
- let node_id = PublicKey::from_secret_key(&secp_ctx, node_privkey);
- net_graph_msg_handler.peer_connected(&node_id, &init_msg);
- let events = net_graph_msg_handler.get_and_clear_pending_msg_events();
- if n <= 5 {
- assert_eq!(events.len(), 2);
- } else {
- // Even after the we stop sending the explicit query, we should still send a
- // gossip_timestamp_filter on each new connection.
- assert_eq!(events.len(), 1);
- }
-
- }
- }
- }
-
- #[test]
- fn handling_reply_channel_range() {
- let network_graph = create_network_graph();
- let (secp_ctx, net_graph_msg_handler) = create_net_graph_msg_handler(&network_graph);
- let node_privkey_1 = &SecretKey::from_slice(&[42; 32]).unwrap();
- let node_id_1 = PublicKey::from_secret_key(&secp_ctx, node_privkey_1);
-
- let chain_hash = genesis_block(Network::Testnet).header.block_hash();
-
- // Test receipt of a single reply that should enqueue an SCID query
- // matching the SCIDs in the reply
- {
- let result = net_graph_msg_handler.handle_reply_channel_range(&node_id_1, ReplyChannelRange {
- chain_hash,
- sync_complete: true,
- first_blocknum: 0,
- number_of_blocks: 2000,
- short_channel_ids: vec![
- 0x0003e0_000000_0000, // 992x0x0
- 0x0003e8_000000_0000, // 1000x0x0
- 0x0003e9_000000_0000, // 1001x0x0
- 0x0003f0_000000_0000, // 1008x0x0
- 0x00044c_000000_0000, // 1100x0x0
- 0x0006e0_000000_0000, // 1760x0x0
- ],
- });
- assert!(result.is_ok());
-
- // We expect to emit a query_short_channel_ids message with the received scids
- let events = net_graph_msg_handler.get_and_clear_pending_msg_events();
- assert_eq!(events.len(), 1);
- match &events[0] {
- MessageSendEvent::SendShortIdsQuery { node_id, msg } => {
- assert_eq!(node_id, &node_id_1);
- assert_eq!(msg.chain_hash, chain_hash);
- assert_eq!(msg.short_channel_ids, vec![
- 0x0003e0_000000_0000, // 992x0x0
- 0x0003e8_000000_0000, // 1000x0x0
- 0x0003e9_000000_0000, // 1001x0x0
- 0x0003f0_000000_0000, // 1008x0x0
- 0x00044c_000000_0000, // 1100x0x0
- 0x0006e0_000000_0000, // 1760x0x0
- ]);
- },
- _ => panic!("expected MessageSendEvent::SendShortIdsQuery"),
- }
- }
- }
-
- #[test]
- fn handling_reply_short_channel_ids() {
- let network_graph = create_network_graph();
- let (secp_ctx, net_graph_msg_handler) = create_net_graph_msg_handler(&network_graph);
- let node_privkey = &SecretKey::from_slice(&[41; 32]).unwrap();
- let node_id = PublicKey::from_secret_key(&secp_ctx, node_privkey);
-
- let chain_hash = genesis_block(Network::Testnet).header.block_hash();
-
- // Test receipt of a successful reply
- {
- let result = net_graph_msg_handler.handle_reply_short_channel_ids_end(&node_id, ReplyShortChannelIdsEnd {
- chain_hash,
- full_information: true,
- });
- assert!(result.is_ok());
- }
-
- // Test receipt of a reply that indicates the peer does not maintain up-to-date information
- // for the chain_hash requested in the query.
- {
- let result = net_graph_msg_handler.handle_reply_short_channel_ids_end(&node_id, ReplyShortChannelIdsEnd {
- chain_hash,
- full_information: false,
- });
- assert!(result.is_err());
- assert_eq!(result.err().unwrap().err, "Received reply_short_channel_ids_end with no information");
}
}
impl cmp::Ord for RouteGraphNode {
fn cmp(&self, other: &RouteGraphNode) -> cmp::Ordering {
let other_score = cmp::max(other.lowest_fee_to_peer_through_node, other.path_htlc_minimum_msat)
- .checked_add(other.path_penalty_msat)
- .unwrap_or_else(|| u64::max_value());
+ .saturating_add(other.path_penalty_msat);
let self_score = cmp::max(self.lowest_fee_to_peer_through_node, self.path_htlc_minimum_msat)
- .checked_add(self.path_penalty_msat)
- .unwrap_or_else(|| u64::max_value());
+ .saturating_add(self.path_penalty_msat);
other_score.cmp(&self_score).then_with(|| other.node_id.cmp(&self.node_id))
}
}
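// Editor's note: the `saturating_add` swaps in this and the following hunks are
// behavior-preserving; a quick sanity check of the equivalence with the replaced
// `checked_add(..).unwrap_or(u64::max_value())` pattern:
#[test]
fn saturating_add_matches_checked_add_fallback() {
	for &(a, b) in [(1u64, 2u64), (u64::max_value(), 1), (u64::max_value() - 1, 5)].iter() {
		assert_eq!(a.saturating_add(b), a.checked_add(b).unwrap_or(u64::max_value()));
	}
}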
self.hops.last().unwrap().0.fee_msat
}
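// Editor's note on the helper added below: paths are assembled from the payee backwards,
// so the first hop's entry already carries the penalty accumulated across the whole path;
// an empty path is treated as maximally penalized.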
+ fn get_path_penalty_msat(&self) -> u64 {
+ self.hops.first().map(|h| h.0.path_penalty_msat).unwrap_or(u64::max_value())
+ }
+
fn get_total_fee_paid_msat(&self) -> u64 {
if self.hops.len() < 1 {
return 0;
pub(crate) fn get_route<L: Deref, S: Score>(
our_node_pubkey: &PublicKey, payment_params: &PaymentParameters, network_graph: &ReadOnlyNetworkGraph,
first_hops: Option<&[&ChannelDetails]>, final_value_msat: u64, final_cltv_expiry_delta: u32,
- logger: L, scorer: &S, _random_seed_bytes: &[u8; 32]
+ logger: L, scorer: &S, random_seed_bytes: &[u8; 32]
) -> Result<Route, LightningError>
where L::Target: Logger {
let payee_node_id = NodeId::from_pubkey(&payment_params.payee_pubkey);
// - when we want to stop looking for new paths.
let mut already_collected_value_msat = 0;
+ for (_, channels) in first_hop_targets.iter_mut() {
+ // Sort the first_hops channels to the same node(s) into the priority order in which
+ // we'd most like to use them.
+ //
+ // First, if channels are below `recommended_value_msat`, sort them in descending order,
+ // preferring larger channels to avoid splitting the payment into more MPP parts than is
+ // required.
+ //
+ // Second, because simply always sorting in descending order would always use our largest
+ // available outbound capacity, needlessly fragmenting our available channel capacities,
+ // sort channels above `recommended_value_msat` in ascending order, preferring channels
+ // which have enough, but not too much, capacity for the payment.
+ channels.sort_unstable_by(|chan_a, chan_b| {
+ if chan_b.outbound_capacity_msat < recommended_value_msat || chan_a.outbound_capacity_msat < recommended_value_msat {
+ // Sort in descending order
+ chan_b.outbound_capacity_msat.cmp(&chan_a.outbound_capacity_msat)
+ } else {
+ // Sort in ascending order
+ chan_a.outbound_capacity_msat.cmp(&chan_b.outbound_capacity_msat)
+ }
+ });
+ }
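// Editor's illustration (hypothetical numbers): with recommended_value_msat = 300_000 and
// first-hop capacities [50_000, 1_000_000, 400_000, 100_000], the comparator above yields
// [400_000, 1_000_000, 100_000, 50_000]: sufficient channels first in ascending order
// (smallest adequate channel preferred), then insufficient channels in descending order.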
+
log_trace!(logger, "Building path from {} (payee) to {} (us/payer) for value {} msat.", payment_params.payee_pubkey, our_node_pubkey, final_value_msat);
macro_rules! add_entry {
.entry(short_channel_id)
.or_insert_with(|| $candidate.effective_capacity().as_msat());
- // It is tricky to substract $next_hops_fee_msat from available liquidity here.
+ // It is tricky to subtract $next_hops_fee_msat from available liquidity here.
// It may be misleading because we might later choose to reduce the value transferred
// over these channels, and the channel which was insufficient might become sufficient.
// Worst case: we drop a good channel here because it can't cover the high following
.checked_sub(2*MEDIAN_HOP_CLTV_EXPIRY_DELTA)
.unwrap_or(payment_params.max_total_cltv_expiry_delta - final_cltv_expiry_delta);
let hop_total_cltv_delta = ($next_hops_cltv_delta as u32)
- .checked_add($candidate.cltv_expiry_delta())
- .unwrap_or(u32::max_value());
+ .saturating_add($candidate.cltv_expiry_delta());
let doesnt_exceed_cltv_delta_limit = hop_total_cltv_delta <= max_total_cltv_expiry_delta;
let value_contribution_msat = cmp::min(available_value_contribution_msat, $next_hops_value_contribution);
}
}
- let path_penalty_msat = $next_hops_path_penalty_msat.checked_add(
- scorer.channel_penalty_msat(short_channel_id, amount_to_transfer_over_msat, *available_liquidity_msat,
- &$src_node_id, &$dest_node_id)).unwrap_or_else(|| u64::max_value());
+ let path_penalty_msat = $next_hops_path_penalty_msat.saturating_add(
+ scorer.channel_penalty_msat(short_channel_id, amount_to_transfer_over_msat,
+ *available_liquidity_msat, &$src_node_id, &$dest_node_id));
let new_graph_node = RouteGraphNode {
node_id: $src_node_id,
lowest_fee_to_peer_through_node: total_fee_msat,
// the fees included in $next_hops_path_htlc_minimum_msat, but also
// can't use something that may decrease on future hops.
let old_cost = cmp::max(old_entry.total_fee_msat, old_entry.path_htlc_minimum_msat)
- .checked_add(old_entry.path_penalty_msat)
- .unwrap_or_else(|| u64::max_value());
+ .saturating_add(old_entry.path_penalty_msat);
let new_cost = cmp::max(total_fee_msat, path_htlc_minimum_msat)
- .checked_add(path_penalty_msat)
- .unwrap_or_else(|| u64::max_value());
+ .saturating_add(path_penalty_msat);
if !old_entry.was_processed && new_cost < old_cost {
targets.push(new_graph_node);
.unwrap_or_else(|| CandidateRouteHop::PrivateHop { hint: hop });
let capacity_msat = candidate.effective_capacity().as_msat();
aggregate_next_hops_path_penalty_msat = aggregate_next_hops_path_penalty_msat
- .checked_add(scorer.channel_penalty_msat(hop.short_channel_id, final_value_msat, capacity_msat, &source, &target))
- .unwrap_or_else(|| u64::max_value());
+ .saturating_add(scorer.channel_penalty_msat(hop.short_channel_id, final_value_msat, capacity_msat, &source, &target));
aggregate_next_hops_cltv_delta = aggregate_next_hops_cltv_delta
- .checked_add(hop.cltv_expiry_delta as u32)
- .unwrap_or_else(|| u32::max_value());
+ .saturating_add(hop.cltv_expiry_delta as u32);
if !add_entry!(candidate, source, target, aggregate_next_hops_fee_msat, path_value_msat, aggregate_next_hops_path_htlc_minimum_msat, aggregate_next_hops_path_penalty_msat, aggregate_next_hops_cltv_delta) {
// If this hop was not used then there is no use checking the preceding hops
}
// Sort by total fees and take the best paths.
- payment_paths.sort_by_key(|path| path.get_total_fee_paid_msat());
+ payment_paths.sort_unstable_by_key(|path| path.get_total_fee_paid_msat());
if payment_paths.len() > 50 {
payment_paths.truncate(50);
}
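// Editor's note: the sort_unstable_* swaps in this and the following hunks are safe
// drop-in replacements here: unstable sorts avoid merge sort's allocation, and these
// comparators either break ties explicitly or tolerate any ordering of equal keys.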
// Draw multiple sufficient routes by randomly combining the selected paths.
let mut drawn_routes = Vec::new();
- for i in 0..payment_paths.len() {
+ let mut prng = ChaCha20::new(random_seed_bytes, &[0u8; 12]);
+ let mut random_index_bytes = [0u8; ::core::mem::size_of::<usize>()];
+
+ let num_permutations = payment_paths.len();
+ for _ in 0..num_permutations {
let mut cur_route = Vec::<PaymentPath>::new();
let mut aggregate_route_value_msat = 0;
// Step (6).
- // TODO: real random shuffle
- // Currently just starts with i_th and goes up to i-1_th in a looped way.
- let cur_payment_paths = [&payment_paths[i..], &payment_paths[..i]].concat();
+ // Do a Fisher-Yates shuffle to create a random permutation of the payment paths
+ for cur_index in (1..payment_paths.len()).rev() {
+ prng.process_in_place(&mut random_index_bytes);
+ let random_index = usize::from_be_bytes(random_index_bytes).wrapping_rem(cur_index+1);
+ payment_paths.swap(cur_index, random_index);
+ }
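// Editor's note: reducing a full-width random usize with wrapping_rem introduces a slight
// modulo bias, but with at most 50 paths (truncated above) the bias is on the order of
// 50 / 2^64, and the shuffle needs no cryptographic uniformity.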
// Step (7).
- for payment_path in cur_payment_paths {
+ for payment_path in &payment_paths {
cur_route.push(payment_path.clone());
aggregate_route_value_msat += payment_path.get_value_msat();
if aggregate_route_value_msat > final_value_msat {
// also makes routing more reliable.
let mut overpaid_value_msat = aggregate_route_value_msat - final_value_msat;
- // First, drop some expensive low-value paths entirely if possible.
- // Sort by value so that we drop many really-low values first, since
- // fewer paths is better: the payment is less likely to fail.
- // TODO: this could also be optimized by also sorting by feerate_per_sat_routed,
- // so that the sender pays less fees overall. And also htlc_minimum_msat.
- cur_route.sort_by_key(|path| path.get_value_msat());
+ // First, we drop some expensive low-value paths entirely if possible, since fewer
+ // paths is better: the payment is less likely to fail. In order to do so, we sort
+ // by value and fall back to total fees paid, i.e., in case of equal values we
+ // prefer lower cost paths.
+ cur_route.sort_unstable_by(|a, b| {
+ a.get_value_msat().cmp(&b.get_value_msat())
+ // Reverse ordering for fees, so we drop higher-fee paths first
+ .then_with(|| b.get_total_fee_paid_msat().saturating_add(b.get_path_penalty_msat())
+ .cmp(&a.get_total_fee_paid_msat().saturating_add(a.get_path_penalty_msat())))
+ });
+
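// Editor's illustration (hypothetical values): given paths with (value, fee + penalty) of
// (10_000, 50), (10_000, 20) and (5_000, 30), the comparator above orders them
// [(5_000, 30), (10_000, 50), (10_000, 20)], so the retain below encounters, and can drop,
// the low-value paths and the costlier of two equal-value paths first.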
// We should make sure that at least 1 path is left.
let mut paths_left = cur_route.len();
cur_route.retain(|path| {
assert!(cur_route.len() > 0);
// Step (8).
- // Now, substract the overpaid value from the most-expensive path.
+ // Now, subtract the overpaid value from the most-expensive path.
// TODO: this could also be optimized by also sorting by feerate_per_sat_routed,
// so that the sender pays less fees overall. And also htlc_minimum_msat.
- cur_route.sort_by_key(|path| { path.hops.iter().map(|hop| hop.0.candidate.fees().proportional_millionths as u64).sum::<u64>() });
+ cur_route.sort_unstable_by_key(|path| { path.hops.iter().map(|hop| hop.0.candidate.fees().proportional_millionths as u64).sum::<u64>() });
let expensive_payment_path = cur_route.first_mut().unwrap();
- // We already dropped all the small channels above, meaning all the
- // remaining channels are larger than remaining overpaid_value_msat.
+
+ // We already dropped all the small value paths above, meaning all the
+ // remaining paths are larger than remaining overpaid_value_msat.
// Thus, this can't be negative.
let expensive_path_new_value_msat = expensive_payment_path.get_value_msat() - overpaid_value_msat;
expensive_payment_path.update_value_and_recompute_fees(expensive_path_new_value_msat);
// Step (9).
// Select the best route by lowest total fee.
- drawn_routes.sort_by_key(|paths| paths.iter().map(|path| path.get_total_fee_paid_msat()).sum::<u64>());
+ drawn_routes.sort_unstable_by_key(|paths| paths.iter().map(|path| path.get_total_fee_paid_msat()).sum::<u64>());
let mut selected_paths = Vec::<Vec<Result<RouteHop, LightningError>>>::new();
for payment_path in drawn_routes.first().unwrap() {
let mut path = payment_path.hops.iter().map(|(payment_hop, node_features)| {
for path in route.paths.iter_mut() {
let mut shadow_ctlv_expiry_delta_offset: u32 = 0;
- // Choose the last publicly known node as the starting point for the random walk
- if let Some(starting_hop) = path.iter().rev().find(|h| network_nodes.contains_key(&NodeId::from_pubkey(&h.pubkey))) {
- let mut cur_node_id = NodeId::from_pubkey(&starting_hop.pubkey);
+ // Remember the last three nodes of the random walk and avoid looping back on them.
+ // Init with the last three nodes from the actual path, if possible.
+ let mut nodes_to_avoid: [NodeId; 3] = [NodeId::from_pubkey(&path.last().unwrap().pubkey),
+ NodeId::from_pubkey(&path.get(path.len().saturating_sub(2)).unwrap().pubkey),
+ NodeId::from_pubkey(&path.get(path.len().saturating_sub(3)).unwrap().pubkey)];
+
+ // Choose the last publicly known node as the starting point for the random walk.
+ let mut cur_hop: Option<NodeId> = None;
+ let mut path_nonce = [0u8; 12];
+ if let Some(starting_hop) = path.iter().rev()
+ .find(|h| network_nodes.contains_key(&NodeId::from_pubkey(&h.pubkey))) {
+ cur_hop = Some(NodeId::from_pubkey(&starting_hop.pubkey));
+ path_nonce.copy_from_slice(&cur_hop.unwrap().as_slice()[..12]);
+ }
+
+ // Init PRNG with the path-dependent nonce, which is static for private paths.
+ let mut prng = ChaCha20::new(random_seed_bytes, &path_nonce);
+ let mut random_path_bytes = [0u8; ::core::mem::size_of::<usize>()];
- // Init PRNG with path nonce
- let mut path_nonce = [0u8; 12];
- path_nonce.copy_from_slice(&cur_node_id.as_slice()[..12]);
- let mut prng = ChaCha20::new(random_seed_bytes, &path_nonce);
- let mut random_path_bytes = [0u8; ::core::mem::size_of::<usize>()];
+ // Pick a random path length in [1 .. 3]
+ prng.process_in_place(&mut random_path_bytes);
+ let random_walk_length = usize::from_be_bytes(random_path_bytes).wrapping_rem(3).wrapping_add(1);
- // Pick a random path length in [1 .. 3]
- prng.process_in_place(&mut random_path_bytes);
- let random_walk_length = usize::from_be_bytes(random_path_bytes).wrapping_rem(3).wrapping_add(1);
+ for random_hop in 0..random_walk_length {
+ // If we don't find a suitable offset in the public network graph, we default to
+ // MEDIAN_HOP_CLTV_EXPIRY_DELTA.
+ let mut random_hop_offset = MEDIAN_HOP_CLTV_EXPIRY_DELTA;
- for _random_hop in 0..random_walk_length {
+ if let Some(cur_node_id) = cur_hop {
if let Some(cur_node) = network_nodes.get(&cur_node_id) {
- // Randomly choose the next hop
+ // Randomly choose the next unvisited hop.
prng.process_in_place(&mut random_path_bytes);
- if let Some(random_channel) = usize::from_be_bytes(random_path_bytes).checked_rem(cur_node.channels.len())
+ if let Some(random_channel) = usize::from_be_bytes(random_path_bytes)
+ .checked_rem(cur_node.channels.len())
.and_then(|index| cur_node.channels.get(index))
.and_then(|id| network_channels.get(id)) {
random_channel.as_directed_from(&cur_node_id).map(|(dir_info, next_id)| {
- dir_info.direction().map(|channel_update_info|
- shadow_ctlv_expiry_delta_offset = shadow_ctlv_expiry_delta_offset
- .checked_add(channel_update_info.cltv_expiry_delta.into())
- .unwrap_or(shadow_ctlv_expiry_delta_offset));
- cur_node_id = *next_id;
+ if !nodes_to_avoid.iter().any(|x| x == next_id) {
+ nodes_to_avoid[random_hop] = *next_id;
+ dir_info.direction().map(|channel_update_info| {
+ random_hop_offset = channel_update_info.cltv_expiry_delta.into();
+ cur_hop = Some(*next_id);
+ });
+ }
});
}
}
}
- } else {
- // If the entire path is private, choose a random offset from multiples of
- // MEDIAN_HOP_CLTV_EXPIRY_DELTA
- let mut prng = ChaCha20::new(random_seed_bytes, &[0u8; 8]);
- let mut random_bytes = [0u8; 4];
- prng.process_in_place(&mut random_bytes);
- let random_walk_length = u32::from_be_bytes(random_bytes).wrapping_rem(3).wrapping_add(1);
- shadow_ctlv_expiry_delta_offset = random_walk_length * MEDIAN_HOP_CLTV_EXPIRY_DELTA;
+
+ shadow_ctlv_expiry_delta_offset = shadow_ctlv_expiry_delta_offset
+ .checked_add(random_hop_offset)
+ .unwrap_or(shadow_ctlv_expiry_delta_offset);
}
// Limit the total offset to reduce the worst-case locked liquidity time value
assert_eq!(route.paths[1][0].short_channel_id, 2);
assert_eq!(route.paths[1][0].fee_msat, 50_000);
}
+
+ {
+ // If we have a bunch of outbound channels to the same node, where most are not
+ // sufficient to pay the full payment, but one is, we should default to just using the
+ // one single channel that has sufficient balance, avoiding MPP.
+ //
+ // If we have several options above the 3x payment value threshold, we should pick the
+ // smallest of them, avoiding further fragmenting our available outbound balance to
+ // this node.
+ let route = get_route(&our_id, &payment_params, &network_graph.read_only(), Some(&[
+ &get_channel_details(Some(2), nodes[0], InitFeatures::known(), 50_000),
+ &get_channel_details(Some(3), nodes[0], InitFeatures::known(), 50_000),
+ &get_channel_details(Some(5), nodes[0], InitFeatures::known(), 50_000),
+ &get_channel_details(Some(6), nodes[0], InitFeatures::known(), 300_000),
+ &get_channel_details(Some(7), nodes[0], InitFeatures::known(), 50_000),
+ &get_channel_details(Some(8), nodes[0], InitFeatures::known(), 50_000),
+ &get_channel_details(Some(9), nodes[0], InitFeatures::known(), 50_000),
+ &get_channel_details(Some(4), nodes[0], InitFeatures::known(), 1_000_000),
+ ]), 100_000, 42, Arc::clone(&logger), &scorer, &random_seed_bytes).unwrap();
+ assert_eq!(route.paths.len(), 1);
+ assert_eq!(route.paths[0].len(), 1);
+
+ assert_eq!(route.paths[0][0].pubkey, nodes[0]);
+ assert_eq!(route.paths[0][0].short_channel_id, 6);
+ assert_eq!(route.paths[0][0].fee_msat, 100_000);
+ }
}
#[test]
/// The channel_id of the channel which has been closed. Note that on-chain transactions
/// resolving the channel are likely still awaiting confirmation.
channel_id: [u8; 32],
- /// The `user_channel_id` value passed in to [`ChannelManager::create_channel`], or 0 for
- /// an inbound channel. This will always be zero for objects serialized with LDK versions
- /// prior to 0.0.102.
+ /// The `user_channel_id` value passed in to [`ChannelManager::create_channel`] for outbound
+ /// channels, or to [`ChannelManager::accept_inbound_channel`] for inbound channels if the
+ /// [`UserConfig::manually_accept_inbound_channels`] config flag is set to true. Otherwise,
+ /// `user_channel_id` will be 0 for an inbound channel.
+ /// This will always be zero for objects serialized with LDK versions prior to 0.0.102.
///
/// [`ChannelManager::create_channel`]: crate::ln::channelmanager::ChannelManager::create_channel
+ /// [`ChannelManager::accept_inbound_channel`]: crate::ln::channelmanager::ChannelManager::accept_inbound_channel
+ /// [`UserConfig::manually_accept_inbound_channels`]: crate::util::config::UserConfig::manually_accept_inbound_channels
user_channel_id: u64,
/// The reason the channel was closed.
reason: ClosureReason
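// Editor's note: a minimal consumer-side sketch (not from the source) of this event;
// `my_channels` is a hypothetical map an application might keep, keyed by the
// `user_channel_id` values it passed to create_channel/accept_inbound_channel.
if let Event::ChannelClosed { channel_id: _, user_channel_id, reason: _ } = event {
	// A zero id can also mean "inbound channel without manual accept" or an object
	// serialized before 0.0.102, so treat it as unknown rather than as a real key.
	if user_channel_id != 0 {
		my_channels.remove(&user_channel_id);
	}
}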