+++ /dev/null
-# https://help.github.com/github/administering-a-repository/configuration-options-for-dependency-updates
-
-version: 2
-updates:
- - package-ecosystem: "cargo"
- directory: "/"
- schedule:
- interval: "daily"
- - package-ecosystem: "cargo"
- directory: "/lightning-background-processor"
- schedule:
- interval: "daily"
- - package-ecosystem: "cargo"
- directory: "/lightning-block-sync"
- schedule:
- interval: "daily"
- - package-ecosystem: "cargo"
- directory: "/lightning-invoice"
- schedule:
- interval: "daily"
- - package-ecosystem: "cargo"
- directory: "/lightning-net-tokio"
- schedule:
- interval: "daily"
- - package-ecosystem: "cargo"
- directory: "/lightning-persister"
- schedule:
- interval: "daily"
- - package-ecosystem: "cargo"
- directory: "/lightning"
- schedule:
- interval: "daily"
- - package-ecosystem: "github-actions"
- directory: "/"
- schedule:
- interval: "daily"
}
}
+/// (C-not exported) as the bindings concretize everything and have constructors for us
+impl<P: Deref<Target = P2PGossipSync<G, A, L>>, G: Deref<Target = NetworkGraph<L>>, A: Deref, L: Deref>
+ GossipSync<P, &RapidGossipSync<G, L>, G, A, L>
+where
+ A::Target: chain::Access,
+ L::Target: Logger,
+{
+ /// Initializes a new [`GossipSync::P2P`] variant.
+ pub fn p2p(gossip_sync: P) -> Self {
+ GossipSync::P2P(gossip_sync)
+ }
+}
+
+/// (C-not exported) as the bindings concretize everything and have constructors for us
+impl<'a, R: Deref<Target = RapidGossipSync<G, L>>, G: Deref<Target = NetworkGraph<L>>, L: Deref>
+ GossipSync<
+ &P2PGossipSync<G, &'a (dyn chain::Access + Send + Sync), L>,
+ R,
+ G,
+ &'a (dyn chain::Access + Send + Sync),
+ L,
+ >
+where
+ L::Target: Logger,
+{
+ /// Initializes a new [`GossipSync::Rapid`] variant.
+ pub fn rapid(gossip_sync: R) -> Self {
+ GossipSync::Rapid(gossip_sync)
+ }
+}
+
+/// (C-not exported) as the bindings concretize everything and have constructors for us
+impl<'a, L: Deref>
+ GossipSync<
+ &P2PGossipSync<&'a NetworkGraph<L>, &'a (dyn chain::Access + Send + Sync), L>,
+ &RapidGossipSync<&'a NetworkGraph<L>, L>,
+ &'a NetworkGraph<L>,
+ &'a (dyn chain::Access + Send + Sync),
+ L,
+ >
+where
+ L::Target: Logger,
+{
+ /// Initializes a new [`GossipSync::None`] variant.
+ pub fn none() -> Self {
+ GossipSync::None
+ }
+}
+
/// Decorates an [`EventHandler`] with common functionality provided by standard [`EventHandler`]s.
struct DecoratingEventHandler<
'a,
use lightning::ln::msgs::{ChannelMessageHandler, RoutingMessageHandler, NetAddress};
use lightning::util::logger::Logger;
+use std::ops::Deref;
use std::task;
use std::net::SocketAddr;
use std::net::TcpStream as StdTcpStream;
id: u64,
}
impl Connection {
- async fn poll_event_process<CMH, RMH, L, UMH>(peer_manager: Arc<peer_handler::PeerManager<SocketDescriptor, Arc<CMH>, Arc<RMH>, Arc<L>, Arc<UMH>>>, mut event_receiver: mpsc::Receiver<()>) where
- CMH: ChannelMessageHandler + 'static + Send + Sync,
- RMH: RoutingMessageHandler + 'static + Send + Sync,
- L: Logger + 'static + ?Sized + Send + Sync,
- UMH: CustomMessageHandler + 'static + Send + Sync {
+ async fn poll_event_process<CMH, RMH, L, UMH>(peer_manager: Arc<peer_handler::PeerManager<SocketDescriptor, CMH, RMH, L, UMH>>, mut event_receiver: mpsc::Receiver<()>) where
+ CMH: Deref + 'static + Send + Sync,
+ RMH: Deref + 'static + Send + Sync,
+ L: Deref + 'static + Send + Sync,
+ UMH: Deref + 'static + Send + Sync,
+ CMH::Target: ChannelMessageHandler + Send + Sync,
+ RMH::Target: RoutingMessageHandler + Send + Sync,
+ L::Target: Logger + Send + Sync,
+ UMH::Target: CustomMessageHandler + Send + Sync,
+ {
loop {
if event_receiver.recv().await.is_none() {
return;
}
}
- async fn schedule_read<CMH, RMH, L, UMH>(peer_manager: Arc<peer_handler::PeerManager<SocketDescriptor, Arc<CMH>, Arc<RMH>, Arc<L>, Arc<UMH>>>, us: Arc<Mutex<Self>>, mut reader: io::ReadHalf<TcpStream>, mut read_wake_receiver: mpsc::Receiver<()>, mut write_avail_receiver: mpsc::Receiver<()>) where
- CMH: ChannelMessageHandler + 'static + Send + Sync,
- RMH: RoutingMessageHandler + 'static + Send + Sync,
- L: Logger + 'static + ?Sized + Send + Sync,
- UMH: CustomMessageHandler + 'static + Send + Sync {
+ async fn schedule_read<CMH, RMH, L, UMH>(peer_manager: Arc<peer_handler::PeerManager<SocketDescriptor, CMH, RMH, L, UMH>>, us: Arc<Mutex<Self>>, mut reader: io::ReadHalf<TcpStream>, mut read_wake_receiver: mpsc::Receiver<()>, mut write_avail_receiver: mpsc::Receiver<()>) where
+ CMH: Deref + 'static + Send + Sync,
+ RMH: Deref + 'static + Send + Sync,
+ L: Deref + 'static + Send + Sync,
+ UMH: Deref + 'static + Send + Sync,
+ CMH::Target: ChannelMessageHandler + 'static + Send + Sync,
+ RMH::Target: RoutingMessageHandler + 'static + Send + Sync,
+ L::Target: Logger + 'static + Send + Sync,
+ UMH::Target: CustomMessageHandler + 'static + Send + Sync,
+ {
// Create a waker to wake up poll_event_process, above
let (event_waker, event_receiver) = mpsc::channel(1);
tokio::spawn(Self::poll_event_process(Arc::clone(&peer_manager), event_receiver));
/// The returned future will complete when the peer is disconnected and associated handling
/// futures are freed, though, because all processing futures are spawned with tokio::spawn, you do
/// not need to poll the provided future in order to make progress.
-pub fn setup_inbound<CMH, RMH, L, UMH>(peer_manager: Arc<peer_handler::PeerManager<SocketDescriptor, Arc<CMH>, Arc<RMH>, Arc<L>, Arc<UMH>>>, stream: StdTcpStream) -> impl std::future::Future<Output=()> where
- CMH: ChannelMessageHandler + 'static + Send + Sync,
- RMH: RoutingMessageHandler + 'static + Send + Sync,
- L: Logger + 'static + ?Sized + Send + Sync,
- UMH: CustomMessageHandler + 'static + Send + Sync {
+pub fn setup_inbound<CMH, RMH, L, UMH>(peer_manager: Arc<peer_handler::PeerManager<SocketDescriptor, CMH, RMH, L, UMH>>, stream: StdTcpStream) -> impl std::future::Future<Output=()> where
+ CMH: Deref + 'static + Send + Sync,
+ RMH: Deref + 'static + Send + Sync,
+ L: Deref + 'static + Send + Sync,
+ UMH: Deref + 'static + Send + Sync,
+ CMH::Target: ChannelMessageHandler + Send + Sync,
+ RMH::Target: RoutingMessageHandler + Send + Sync,
+ L::Target: Logger + Send + Sync,
+ UMH::Target: CustomMessageHandler + Send + Sync,
+{
let remote_addr = get_addr_from_stream(&stream);
let (reader, write_receiver, read_receiver, us) = Connection::new(stream);
#[cfg(debug_assertions)]
/// The returned future will complete when the peer is disconnected and associated handling
/// futures are freed, though, because all processing futures are spawned with tokio::spawn, you do
/// not need to poll the provided future in order to make progress.
-pub fn setup_outbound<CMH, RMH, L, UMH>(peer_manager: Arc<peer_handler::PeerManager<SocketDescriptor, Arc<CMH>, Arc<RMH>, Arc<L>, Arc<UMH>>>, their_node_id: PublicKey, stream: StdTcpStream) -> impl std::future::Future<Output=()> where
- CMH: ChannelMessageHandler + 'static + Send + Sync,
- RMH: RoutingMessageHandler + 'static + Send + Sync,
- L: Logger + 'static + ?Sized + Send + Sync,
- UMH: CustomMessageHandler + 'static + Send + Sync {
+pub fn setup_outbound<CMH, RMH, L, UMH>(peer_manager: Arc<peer_handler::PeerManager<SocketDescriptor, CMH, RMH, L, UMH>>, their_node_id: PublicKey, stream: StdTcpStream) -> impl std::future::Future<Output=()> where
+ CMH: Deref + 'static + Send + Sync,
+ RMH: Deref + 'static + Send + Sync,
+ L: Deref + 'static + Send + Sync,
+ UMH: Deref + 'static + Send + Sync,
+ CMH::Target: ChannelMessageHandler + Send + Sync,
+ RMH::Target: RoutingMessageHandler + Send + Sync,
+ L::Target: Logger + Send + Sync,
+ UMH::Target: CustomMessageHandler + Send + Sync,
+{
let remote_addr = get_addr_from_stream(&stream);
let (reader, mut write_receiver, read_receiver, us) = Connection::new(stream);
#[cfg(debug_assertions)]
/// disconnected and associated handling futures are freed, though, because all processing in said
/// futures are spawned with tokio::spawn, you do not need to poll the second future in order to
/// make progress.
-pub async fn connect_outbound<CMH, RMH, L, UMH>(peer_manager: Arc<peer_handler::PeerManager<SocketDescriptor, Arc<CMH>, Arc<RMH>, Arc<L>, Arc<UMH>>>, their_node_id: PublicKey, addr: SocketAddr) -> Option<impl std::future::Future<Output=()>> where
- CMH: ChannelMessageHandler + 'static + Send + Sync,
- RMH: RoutingMessageHandler + 'static + Send + Sync,
- L: Logger + 'static + ?Sized + Send + Sync,
- UMH: CustomMessageHandler + 'static + Send + Sync {
+pub async fn connect_outbound<CMH, RMH, L, UMH>(peer_manager: Arc<peer_handler::PeerManager<SocketDescriptor, CMH, RMH, L, UMH>>, their_node_id: PublicKey, addr: SocketAddr) -> Option<impl std::future::Future<Output=()>> where
+ CMH: Deref + 'static + Send + Sync,
+ RMH: Deref + 'static + Send + Sync,
+ L: Deref + 'static + Send + Sync,
+ UMH: Deref + 'static + Send + Sync,
+ CMH::Target: ChannelMessageHandler + Send + Sync,
+ RMH::Target: RoutingMessageHandler + Send + Sync,
+ L::Target: Logger + Send + Sync,
+ UMH::Target: CustomMessageHandler + Send + Sync,
+{
if let Ok(Ok(stream)) = time::timeout(Duration::from_secs(10), async { TcpStream::connect(&addr).await.map(|s| s.into_std().unwrap()) }).await {
Some(setup_outbound(peer_manager, their_node_id, stream))
} else { None }
pub const MIN_RELAY_FEE_SAT_PER_1000_WEIGHT: u64 = 4000;
/// Minimum feerate that takes a sane approach to bitcoind weight-to-vbytes rounding.
/// See the following Core Lightning commit for an explanation:
-/// https://github.com/ElementsProject/lightning/commit/2e687b9b352c9092b5e8bd4a688916ac50b44af0
+/// <https://github.com/ElementsProject/lightning/commit/2e687b9b352c9092b5e8bd4a688916ac50b44af0>
pub const FEERATE_FLOOR_SATS_PER_KW: u32 = 253;
/// Wraps a `Deref` to a `FeeEstimator` so that any fee estimations provided by it
return;
}
mem::drop(channel_state_lock);
- let retry = if let Some(payment_params_data) = payment_params {
+ let mut retry = if let Some(payment_params_data) = payment_params {
let path_last_hop = path.last().expect("Outbound payments must have had a valid path");
Some(RouteParameters {
payment_params: payment_params_data.clone(),
// TODO: If we decided to blame ourselves (or one of our channels) in
// process_onion_failure we should close that channel as it implies our
// next-hop is needlessly blaming us!
+ if let Some(scid) = short_channel_id {
+ retry.as_mut().map(|r| r.payment_params.previously_failed_channels.push(scid));
+ }
events::Event::PaymentPathFailed {
payment_id: Some(payment_id),
payment_hash: payment_hash.clone(),
// ChannelDetails.
// TODO: For non-temporary failures, we really should be closing the
// channel here as we apparently can't relay through them anyway.
+ let scid = path.first().unwrap().short_channel_id;
+ retry.as_mut().map(|r| r.payment_params.previously_failed_channels.push(scid));
events::Event::PaymentPathFailed {
payment_id: Some(payment_id),
payment_hash: payment_hash.clone(),
network_update: None,
all_paths_failed,
path: path.clone(),
- short_channel_id: Some(path.first().unwrap().short_channel_id),
+ short_channel_id: Some(scid),
retry,
#[cfg(test)]
error_code: Some(*failure_code),
let mut events = node.node.get_and_clear_pending_events();
assert_eq!(events.len(), 1);
let expected_payment_id = match events.pop().unwrap() {
- Event::PaymentPathFailed { payment_hash, rejected_by_dest, path, retry, payment_id, network_update,
+ Event::PaymentPathFailed { payment_hash, rejected_by_dest, path, retry, payment_id, network_update, short_channel_id,
#[cfg(test)]
error_code,
#[cfg(test)]
assert!(retry.is_some(), "expected retry.is_some()");
assert_eq!(retry.as_ref().unwrap().final_value_msat, path.last().unwrap().fee_msat, "Retry amount should match last hop in path");
assert_eq!(retry.as_ref().unwrap().payment_params.payee_pubkey, path.last().unwrap().pubkey, "Retry payee node_id should match last hop in path");
+ if let Some(scid) = short_channel_id {
+ assert!(retry.as_ref().unwrap().payment_params.previously_failed_channels.contains(&scid));
+ }
#[cfg(test)]
{
fail_payment(&nodes[1], &vec!(&nodes[3], &nodes[2], &nodes[1])[..], payment_hash_2);
claim_payment(&nodes[1], &vec!(&nodes[2], &nodes[3], &nodes[1])[..], payment_preimage_1);
- // Add a duplicate new channel from 2 to 4
- let chan_5 = create_announced_chan_between_nodes(&nodes, 1, 3, InitFeatures::known(), InitFeatures::known());
-
- // Send some payments across both channels
- let payment_preimage_3 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[3])[..], 3000000).0;
- let payment_preimage_4 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[3])[..], 3000000).0;
- let payment_preimage_5 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[3])[..], 3000000).0;
-
-
- route_over_limit(&nodes[0], &vec!(&nodes[1], &nodes[3])[..], 3000000);
- let events = nodes[0].node.get_and_clear_pending_msg_events();
- assert_eq!(events.len(), 0);
- nodes[0].logger.assert_log_regex("lightning::ln::channelmanager".to_string(), regex::Regex::new(r"Cannot send value that would put us over the max HTLC value in flight our peer will accept \(\d+\)").unwrap(), 1);
-
- //TODO: Test that routes work again here as we've been notified that the channel is full
-
- claim_payment(&nodes[0], &vec!(&nodes[1], &nodes[3])[..], payment_preimage_3);
- claim_payment(&nodes[0], &vec!(&nodes[1], &nodes[3])[..], payment_preimage_4);
- claim_payment(&nodes[0], &vec!(&nodes[1], &nodes[3])[..], payment_preimage_5);
-
// Close down the channels...
close_channel(&nodes[0], &nodes[1], &chan_1.2, chan_1.3, true);
check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure);
close_channel(&nodes[1], &nodes[3], &chan_4.2, chan_4.3, false);
check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure);
check_closed_event!(nodes[3], 1, ClosureReason::CooperativeClosure);
- close_channel(&nodes[1], &nodes[3], &chan_5.2, chan_5.3, false);
- check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure);
- check_closed_event!(nodes[3], 1, ClosureReason::CooperativeClosure);
}
#[test]
// attempt to send amt_msat > their_max_htlc_value_in_flight_msat
{
- let (mut route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[2], recv_value_0);
+ let payment_params = PaymentParameters::from_node_id(nodes[2].node.get_our_node_id())
+ .with_features(InvoiceFeatures::known()).with_max_channel_saturation_power_of_half(0);
+ let (mut route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[2], payment_params, recv_value_0, TEST_FINAL_CLTV);
route.paths[0].last_mut().unwrap().fee_msat += 1;
assert!(route.paths[0].iter().rev().skip(1).all(|h| h.fee_msat == feemsat));
+
unwrap_send_err!(nodes[0].node.send_payment(&route, our_payment_hash, &Some(our_payment_secret)), true, APIError::ChannelUnavailable { ref err },
assert!(regex::Regex::new(r"Cannot send value that would put us over the max HTLC value in flight our peer will accept \(\d+\)").unwrap().is_match(err)));
assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
if stat01.value_to_self_msat < stat01.channel_reserve_msat + commit_tx_fee_all_htlcs + ensure_htlc_amounts_above_dust_buffer + amt_msat {
break;
}
- send_payment(&nodes[0], &vec![&nodes[1], &nodes[2]][..], recv_value_0);
+
+ let payment_params = PaymentParameters::from_node_id(nodes[2].node.get_our_node_id())
+ .with_features(InvoiceFeatures::known()).with_max_channel_saturation_power_of_half(0);
+ let route = get_route!(nodes[0], payment_params, recv_value_0, TEST_FINAL_CLTV).unwrap();
+ let (payment_preimage, ..) = send_along_route(&nodes[0], route, &[&nodes[1], &nodes[2]], recv_value_0);
+ claim_payment(&nodes[0], &[&nodes[1], &nodes[2]], payment_preimage);
let (stat01_, stat11_, stat12_, stat22_) = (
get_channel_value_stat!(nodes[0], chan_1.2),
let payment_params = PaymentParameters::from_node_id(nodes[2].node.get_our_node_id());
- let (route, _, _, _) = get_route_and_payment_hash!(&nodes[0], nodes[2], &payment_params, 9_999_000, 42);
+ let (route, _, _, _) = get_route_and_payment_hash!(&nodes[0], nodes[2], &payment_params, 9_998_000, 42);
let (payment_hash, payment_id) = nodes[0].node.send_probe(route.paths[0].clone()).unwrap();
/// Returns the [`EffectiveCapacity`] of the channel in the direction.
#[inline]
pub(super) fn effective_capacity(&self) -> EffectiveCapacity { self.inner.effective_capacity() }
-
- /// Returns the maximum HTLC amount allowed over the channel in the direction.
- #[inline]
- pub(super) fn htlc_maximum_msat(&self) -> u64 { self.inner.htlc_maximum_msat() }
}
impl<'a> fmt::Debug for DirectedChannelInfoWithUpdate<'a> {
&*self.channels
}
+ /// Returns information on a channel with the given id.
+ pub fn channel(&self, short_channel_id: u64) -> Option<&ChannelInfo> {
+ self.channels.get(&short_channel_id)
+ }
+
/// Returns all known nodes' public keys along with announced node info.
///
/// (C-not exported) because we have no mapping for `BTreeMap`s
&*self.nodes
}
+ /// Returns information on a node with the given id.
+ pub fn node(&self, node_id: &NodeId) -> Option<&NodeInfo> {
+ self.nodes.get(node_id)
+ }
+
/// Get network addresses by node id.
/// Returns None if the requested node is completely unknown,
/// or if node announcement for the node was never received.
/// The maximum number of paths that may be used by (MPP) payments.
/// Defaults to [`DEFAULT_MAX_PATH_COUNT`].
pub max_path_count: u8,
+
+ /// Selects the maximum share of a channel's total capacity which will be sent over a channel,
+ /// as a power of 1/2. A higher value prefers to send the payment using more MPP parts whereas
+ /// a lower value prefers to send larger MPP parts, potentially saturating channels and
+ /// increasing failure probability for those paths.
+ ///
+ /// Note that this restriction will be relaxed during pathfinding after paths which meet this
+ /// restriction have been found. While paths which meet this criteria will be searched for, it
+ /// is ultimately up to the scorer to select them over other paths.
+ ///
+ /// A value of 0 will allow payments up to and including a channel's total announced usable
+ /// capacity, a value of one will only use up to half its capacity, two 1/4, etc.
+ ///
+ /// Default value: 2
+ pub max_channel_saturation_power_of_half: u8,
+
+ /// A list of SCIDs which this payment was previously attempted over and which caused the
+ /// payment to fail. Future attempts for the same payment shouldn't be relayed through any of
+ /// these SCIDs.
+ pub previously_failed_channels: Vec<u64>,
}
impl_writeable_tlv_based!(PaymentParameters, {
(2, features, option),
(3, max_path_count, (default_value, DEFAULT_MAX_PATH_COUNT)),
(4, route_hints, vec_type),
+ (5, max_channel_saturation_power_of_half, (default_value, 2)),
(6, expiry_time, option),
+ (7, previously_failed_channels, vec_type),
});
impl PaymentParameters {
expiry_time: None,
max_total_cltv_expiry_delta: DEFAULT_MAX_TOTAL_CLTV_EXPIRY_DELTA,
max_path_count: DEFAULT_MAX_PATH_COUNT,
+ max_channel_saturation_power_of_half: 2,
+ previously_failed_channels: Vec::new(),
}
}
pub fn with_max_path_count(self, max_path_count: u8) -> Self {
Self { max_path_count, ..self }
}
+
+ /// Includes a limit for the maximum number of payment paths that may be used.
+ ///
+ /// (C-not exported) since bindings don't support move semantics
+ pub fn with_max_channel_saturation_power_of_half(self, max_channel_saturation_power_of_half: u8) -> Self {
+ Self { max_channel_saturation_power_of_half, ..self }
+ }
}
/// A list of hops along a payment path terminating with a channel to the recipient.
}
}
- fn htlc_maximum_msat(&self) -> u64 {
- match self {
- CandidateRouteHop::FirstHop { details } => details.next_outbound_htlc_limit_msat,
- CandidateRouteHop::PublicHop { info, .. } => info.htlc_maximum_msat(),
- CandidateRouteHop::PrivateHop { hint } => {
- hint.htlc_maximum_msat.unwrap_or(u64::max_value())
- },
- }
- }
-
fn fees(&self) -> RoutingFees {
match self {
CandidateRouteHop::FirstHop { .. } => RoutingFees {
}
}
+#[inline]
+fn max_htlc_from_capacity(capacity: EffectiveCapacity, max_channel_saturation_power_of_half: u8) -> u64 {
+ let saturation_shift: u32 = max_channel_saturation_power_of_half as u32;
+ match capacity {
+ EffectiveCapacity::ExactLiquidity { liquidity_msat } => liquidity_msat,
+ EffectiveCapacity::Infinite => u64::max_value(),
+ EffectiveCapacity::Unknown => EffectiveCapacity::Unknown.as_msat(),
+ EffectiveCapacity::MaximumHTLC { amount_msat } =>
+ amount_msat.checked_shr(saturation_shift).unwrap_or(0),
+ EffectiveCapacity::Total { capacity_msat, htlc_maximum_msat: None } =>
+ capacity_msat.checked_shr(saturation_shift).unwrap_or(0),
+ EffectiveCapacity::Total { capacity_msat, htlc_maximum_msat: Some(htlc_max) } =>
+ cmp::min(capacity_msat.checked_shr(saturation_shift).unwrap_or(0), htlc_max),
+ }
+}
+
+fn iter_equal<I1: Iterator, I2: Iterator>(mut iter_a: I1, mut iter_b: I2)
+-> bool where I1::Item: PartialEq<I2::Item> {
+ loop {
+ let a = iter_a.next();
+ let b = iter_b.next();
+ if a.is_none() && b.is_none() { return true; }
+ if a.is_none() || b.is_none() { return false; }
+ if a.unwrap().ne(&b.unwrap()) { return false; }
+ }
+}
+
/// It's useful to keep track of the hops associated with the fees required to use them,
/// so that we can choose cheaper paths (as per Dijkstra's algorithm).
/// Fee values should be updated only in the context of the whole path, see update_value_and_recompute_fees.
// to the fees being paid not lining up with the actual limits.
//
// Note that this function is not aware of the available_liquidity limit, and thus does not
- // support increasing the value being transferred.
+ // support increasing the value being transferred beyond what was selected during the initial
+ // routing passes.
fn update_value_and_recompute_fees(&mut self, value_msat: u64) {
- assert!(value_msat <= self.hops.last().unwrap().0.fee_msat);
-
let mut total_fee_paid_msat = 0 as u64;
for i in (0..self.hops.len()).rev() {
let last_hop = i == self.hops.len() - 1;
pub(crate) fn get_route<L: Deref, S: Score>(
our_node_pubkey: &PublicKey, payment_params: &PaymentParameters, network_graph: &ReadOnlyNetworkGraph,
first_hops: Option<&[&ChannelDetails]>, final_value_msat: u64, final_cltv_expiry_delta: u32,
- logger: L, scorer: &S, random_seed_bytes: &[u8; 32]
+ logger: L, scorer: &S, _random_seed_bytes: &[u8; 32]
) -> Result<Route, LightningError>
where L::Target: Logger {
let payee_node_id = NodeId::from_pubkey(&payment_params.payee_pubkey);
// 4. See if we managed to collect paths which aggregately are able to transfer target value
// (not recommended value).
// 5. If yes, proceed. If not, fail routing.
- // 6. Randomly combine paths into routes having enough to fulfill the payment. (TODO: knapsack)
- // 7. Of all the found paths, select only those with the lowest total fee.
- // 8. The last path in every selected route is likely to be more than we need.
- // Reduce its value-to-transfer and recompute fees.
- // 9. Choose the best route by the lowest total fee.
+ // 6. Select the paths which have the lowest cost (fee plus scorer penalty) per amount
+ // transferred up to the transfer target value.
+ // 7. Reduce the value of the last path until we are sending only the target value.
+ // 8. If our maximum channel saturation limit caused us to pick two identical paths, combine
+ // them so that we're not sending two HTLCs along the same path.
// As for the actual search algorithm,
// we do a payee-to-payer pseudo-Dijkstra's sorting by each node's distance from the payee
final_value_msat
};
+ // When we start collecting routes we enforce the max_channel_saturation_power_of_half
+ // requirement strictly. After we've collected enough (or if we fail to find new routes) we
+ // drop the requirement by setting this to 0.
+ let mut channel_saturation_pow_half = payment_params.max_channel_saturation_power_of_half;
+
// Keep track of how much liquidity has been used in selected channels. Used to determine
// if the channel can be used by additional MPP paths or to inform path finding decisions. It is
// aware of direction *only* to ensure that the correct htlc_maximum_msat value is used. Hence,
// - for first and last hops early in get_route
if $src_node_id != $dest_node_id {
let short_channel_id = $candidate.short_channel_id();
- let htlc_maximum_msat = $candidate.htlc_maximum_msat();
+ let effective_capacity = $candidate.effective_capacity();
+ let htlc_maximum_msat = max_htlc_from_capacity(effective_capacity, channel_saturation_pow_half);
// It is tricky to subtract $next_hops_fee_msat from available liquidity here.
// It may be misleading because we might later choose to reduce the value transferred
let contributes_sufficient_value = available_value_contribution_msat >= minimal_value_contribution_msat;
// Do not consider candidate hops that would exceed the maximum path length.
let path_length_to_node = $next_hops_path_length + 1;
- let doesnt_exceed_max_path_length = path_length_to_node <= MAX_PATH_LENGTH_ESTIMATE;
+ let exceeds_max_path_length = path_length_to_node > MAX_PATH_LENGTH_ESTIMATE;
// Do not consider candidates that exceed the maximum total cltv expiry limit.
// In order to already account for some of the privacy enhancing random CLTV
.unwrap_or(payment_params.max_total_cltv_expiry_delta - final_cltv_expiry_delta);
let hop_total_cltv_delta = ($next_hops_cltv_delta as u32)
.saturating_add($candidate.cltv_expiry_delta());
- let doesnt_exceed_cltv_delta_limit = hop_total_cltv_delta <= max_total_cltv_expiry_delta;
+ let exceeds_cltv_delta_limit = hop_total_cltv_delta > max_total_cltv_expiry_delta;
let value_contribution_msat = cmp::min(available_value_contribution_msat, $next_hops_value_contribution);
// Includes paying fees for the use of the following channels.
(amount_to_transfer_over_msat < $next_hops_path_htlc_minimum_msat &&
recommended_value_msat > $next_hops_path_htlc_minimum_msat));
+ let payment_failed_on_this_channel =
+ payment_params.previously_failed_channels.contains(&short_channel_id);
+
// If HTLC minimum is larger than the amount we're going to transfer, we shouldn't
// bother considering this channel. If retrying with recommended_value_msat may
// allow us to hit the HTLC minimum limit, set htlc_minimum_limit so that we go
// around again with a higher amount.
- if contributes_sufficient_value && doesnt_exceed_max_path_length &&
- doesnt_exceed_cltv_delta_limit && may_overpay_to_meet_path_minimum_msat {
+ if !contributes_sufficient_value || exceeds_max_path_length ||
+ exceeds_cltv_delta_limit || payment_failed_on_this_channel {
+ // Path isn't useful, ignore it and move on.
+ } else if may_overpay_to_meet_path_minimum_msat {
hit_minimum_limit = true;
- } else if contributes_sufficient_value && doesnt_exceed_max_path_length &&
- doesnt_exceed_cltv_delta_limit && over_path_minimum_msat {
+ } else if over_path_minimum_msat {
// Note that low contribution here (limited by available_liquidity_msat)
// might violate htlc_minimum_msat on the hops which are next along the
// payment path (upstream to the payee). To avoid that, we recompute
let channel_usage = ChannelUsage {
amount_msat: amount_to_transfer_over_msat,
inflight_htlc_msat: used_liquidity_msat,
- effective_capacity: $candidate.effective_capacity(),
+ effective_capacity,
};
let channel_penalty_msat = scorer.channel_penalty_msat(
short_channel_id, &$src_node_id, &$dest_node_id, channel_usage
// Both these cases (and other cases except reaching recommended_value_msat) mean that
// paths_collection will be stopped because found_new_path==false.
// This is not necessarily a routing failure.
- 'path_construction: while let Some(RouteGraphNode { node_id, lowest_fee_to_node, total_cltv_delta, value_contribution_msat, path_htlc_minimum_msat, path_penalty_msat, path_length_to_node, .. }) = targets.pop() {
+ 'path_construction: while let Some(RouteGraphNode { node_id, lowest_fee_to_node, total_cltv_delta, mut value_contribution_msat, path_htlc_minimum_msat, path_penalty_msat, path_length_to_node, .. }) = targets.pop() {
// Since we're going payee-to-payer, hitting our node as a target means we should stop
// traversing the graph and arrange the path out of what we found.
// on some channels we already passed (assuming dest->source direction). Here, we
// recompute the fees again, so that if that's the case, we match the currently
// underpaid htlc_minimum_msat with fees.
- payment_path.update_value_and_recompute_fees(cmp::min(value_contribution_msat, final_value_msat));
+ debug_assert_eq!(payment_path.get_value_msat(), value_contribution_msat);
+ value_contribution_msat = cmp::min(value_contribution_msat, final_value_msat);
+ payment_path.update_value_and_recompute_fees(value_contribution_msat);
// Since a path allows to transfer as much value as
// the smallest channel it has ("bottleneck"), we should recompute
.entry((hop.candidate.short_channel_id(), *prev_hop < hop.node_id))
.and_modify(|used_liquidity_msat| *used_liquidity_msat += spent_on_hop_msat)
.or_insert(spent_on_hop_msat);
- if *used_liquidity_msat == hop.candidate.htlc_maximum_msat() {
+ let hop_capacity = hop.candidate.effective_capacity();
+ let hop_max_msat = max_htlc_from_capacity(hop_capacity, channel_saturation_pow_half);
+ if *used_liquidity_msat == hop_max_msat {
// If this path used all of this channel's available liquidity, we know
// this path will not be selected again in the next loop iteration.
prevented_redundant_path_selection = true;
}
- debug_assert!(*used_liquidity_msat <= hop.candidate.htlc_maximum_msat());
+ debug_assert!(*used_liquidity_msat <= hop_max_msat);
}
if !prevented_redundant_path_selection {
// If we weren't capped by hitting a liquidity limit on a channel in the path,
}
if !allow_mpp {
+ if !found_new_path && channel_saturation_pow_half != 0 {
+ channel_saturation_pow_half = 0;
+ continue 'paths_collection;
+ }
// If we don't support MPP, no use trying to gather more value ever.
break 'paths_collection;
}
// iteration.
// In the latter case, making another path finding attempt won't help,
// because we deterministically terminated the search due to low liquidity.
- if already_collected_value_msat >= recommended_value_msat || !found_new_path {
+ if !found_new_path && channel_saturation_pow_half != 0 {
+ channel_saturation_pow_half = 0;
+ } else if already_collected_value_msat >= recommended_value_msat || !found_new_path {
log_trace!(logger, "Have now collected {} msat (seeking {} msat) in paths. Last path loop {} a new path.",
already_collected_value_msat, recommended_value_msat, if found_new_path { "found" } else { "did not find" });
break 'paths_collection;
return Err(LightningError{err: "Failed to find a sufficient route to the given destination".to_owned(), action: ErrorAction::IgnoreError});
}
- // Sort by total fees and take the best paths.
- payment_paths.sort_unstable_by_key(|path| path.get_total_fee_paid_msat());
- if payment_paths.len() > 50 {
- payment_paths.truncate(50);
- }
-
- // Draw multiple sufficient routes by randomly combining the selected paths.
- let mut drawn_routes = Vec::new();
- let mut prng = ChaCha20::new(random_seed_bytes, &[0u8; 12]);
- let mut random_index_bytes = [0u8; ::core::mem::size_of::<usize>()];
-
- let num_permutations = payment_paths.len();
- for _ in 0..num_permutations {
- let mut cur_route = Vec::<PaymentPath>::new();
- let mut aggregate_route_value_msat = 0;
-
- // Step (6).
- // Do a Fisher-Yates shuffle to create a random permutation of the payment paths
- for cur_index in (1..payment_paths.len()).rev() {
- prng.process_in_place(&mut random_index_bytes);
- let random_index = usize::from_be_bytes(random_index_bytes).wrapping_rem(cur_index+1);
- payment_paths.swap(cur_index, random_index);
+ // Step (6).
+ let mut selected_route = payment_paths;
+
+ debug_assert_eq!(selected_route.iter().map(|p| p.get_value_msat()).sum::<u64>(), already_collected_value_msat);
+ let mut overpaid_value_msat = already_collected_value_msat - final_value_msat;
+
+ // First, sort by the cost-per-value of the path, dropping the paths that cost the most for
+ // the value they contribute towards the payment amount.
+ // We sort in descending order as we will remove from the front in `retain`, next.
+ selected_route.sort_unstable_by(|a, b|
+ (((b.get_cost_msat() as u128) << 64) / (b.get_value_msat() as u128))
+ .cmp(&(((a.get_cost_msat() as u128) << 64) / (a.get_value_msat() as u128)))
+ );
+
+ // We should make sure that at least 1 path left.
+ let mut paths_left = selected_route.len();
+ selected_route.retain(|path| {
+ if paths_left == 1 {
+ return true
}
+ let path_value_msat = path.get_value_msat();
+ if path_value_msat <= overpaid_value_msat {
+ overpaid_value_msat -= path_value_msat;
+ paths_left -= 1;
+ return false;
+ }
+ true
+ });
+ debug_assert!(selected_route.len() > 0);
+ if overpaid_value_msat != 0 {
// Step (7).
- for payment_path in &payment_paths {
- cur_route.push(payment_path.clone());
- aggregate_route_value_msat += payment_path.get_value_msat();
- if aggregate_route_value_msat > final_value_msat {
- // Last path likely overpaid. Substract it from the most expensive
- // (in terms of proportional fee) path in this route and recompute fees.
- // This might be not the most economically efficient way, but fewer paths
- // also makes routing more reliable.
- let mut overpaid_value_msat = aggregate_route_value_msat - final_value_msat;
-
- // First, we drop some expensive low-value paths entirely if possible, since fewer
- // paths is better: the payment is less likely to fail. In order to do so, we sort
- // by value and fall back to total fees paid, i.e., in case of equal values we
- // prefer lower cost paths.
- cur_route.sort_unstable_by(|a, b| {
- a.get_value_msat().cmp(&b.get_value_msat())
- // Reverse ordering for cost, so we drop higher-cost paths first
- .then_with(|| b.get_cost_msat().cmp(&a.get_cost_msat()))
- });
-
- // We should make sure that at least 1 path left.
- let mut paths_left = cur_route.len();
- cur_route.retain(|path| {
- if paths_left == 1 {
- return true
- }
- let mut keep = true;
- let path_value_msat = path.get_value_msat();
- if path_value_msat <= overpaid_value_msat {
- keep = false;
- overpaid_value_msat -= path_value_msat;
- paths_left -= 1;
- }
- keep
- });
+ // Now, subtract the remaining overpaid value from the most-expensive path.
+ // TODO: this could also be optimized by also sorting by feerate_per_sat_routed,
+ // so that the sender pays less fees overall. And also htlc_minimum_msat.
+ selected_route.sort_unstable_by(|a, b| {
+ let a_f = a.hops.iter().map(|hop| hop.0.candidate.fees().proportional_millionths as u64).sum::<u64>();
+ let b_f = b.hops.iter().map(|hop| hop.0.candidate.fees().proportional_millionths as u64).sum::<u64>();
+ a_f.cmp(&b_f).then_with(|| b.get_cost_msat().cmp(&a.get_cost_msat()))
+ });
+ let expensive_payment_path = selected_route.first_mut().unwrap();
- if overpaid_value_msat == 0 {
- break;
- }
+ // We already dropped all the paths with value below `overpaid_value_msat` above, thus this
+ // can't go negative.
+ let expensive_path_new_value_msat = expensive_payment_path.get_value_msat() - overpaid_value_msat;
+ expensive_payment_path.update_value_and_recompute_fees(expensive_path_new_value_msat);
+ }
- assert!(cur_route.len() > 0);
-
- // Step (8).
- // Now, subtract the overpaid value from the most-expensive path.
- // TODO: this could also be optimized by also sorting by feerate_per_sat_routed,
- // so that the sender pays less fees overall. And also htlc_minimum_msat.
- cur_route.sort_unstable_by_key(|path| { path.hops.iter().map(|hop| hop.0.candidate.fees().proportional_millionths as u64).sum::<u64>() });
- let expensive_payment_path = cur_route.first_mut().unwrap();
-
- // We already dropped all the small value paths above, meaning all the
- // remaining paths are larger than remaining overpaid_value_msat.
- // Thus, this can't be negative.
- let expensive_path_new_value_msat = expensive_payment_path.get_value_msat() - overpaid_value_msat;
- expensive_payment_path.update_value_and_recompute_fees(expensive_path_new_value_msat);
- break;
- }
+ // Step (8).
+ // Sort by the path itself and combine redundant paths.
+ // Note that we sort by SCIDs alone as it's simpler but when combining we have to ensure we
+ // compare both SCIDs and NodeIds as individual nodes may use random aliases causing collisions
+ // across nodes.
+ selected_route.sort_unstable_by_key(|path| {
+ let mut key = [0u64; MAX_PATH_LENGTH_ESTIMATE as usize];
+ debug_assert!(path.hops.len() <= key.len());
+ for (scid, key) in path.hops.iter().map(|h| h.0.candidate.short_channel_id()).zip(key.iter_mut()) {
+ *key = scid;
+ }
+ key
+ });
+ for idx in 0..(selected_route.len() - 1) {
+ if idx + 1 >= selected_route.len() { break; }
+ if iter_equal(selected_route[idx ].hops.iter().map(|h| (h.0.candidate.short_channel_id(), h.0.node_id)),
+ selected_route[idx + 1].hops.iter().map(|h| (h.0.candidate.short_channel_id(), h.0.node_id))) {
+ let new_value = selected_route[idx].get_value_msat() + selected_route[idx + 1].get_value_msat();
+ selected_route[idx].update_value_and_recompute_fees(new_value);
+ selected_route.remove(idx + 1);
}
- drawn_routes.push(cur_route);
}
- // Step (9).
- // Select the best route by lowest total cost.
- drawn_routes.sort_unstable_by_key(|paths| paths.iter().map(|path| path.get_cost_msat()).sum::<u64>());
let mut selected_paths = Vec::<Vec<Result<RouteHop, LightningError>>>::new();
- for payment_path in drawn_routes.first().unwrap() {
+ for payment_path in selected_route {
let mut path = payment_path.hops.iter().map(|(payment_hop, node_features)| {
Ok(RouteHop {
pubkey: PublicKey::from_slice(payment_hop.node_id.as_slice()).map_err(|_| LightningError{err: format!("Public key {:?} is invalid", &payment_hop.node_id), action: ErrorAction::IgnoreAndLog(Level::Trace)})?,
use prelude::*;
use sync::{self, Arc};
+ use core::convert::TryInto;
+
fn get_channel_details(short_channel_id: Option<u64>, node_id: PublicKey,
features: InitFeatures, outbound_capacity_msat: u64) -> channelmanager::ChannelDetails {
channelmanager::ChannelDetails {
cltv_expiry_delta: 42,
htlc_minimum_msat: None,
htlc_maximum_msat: None,
- }])]);
+ }])]).with_max_channel_saturation_power_of_half(0);
// Keep only two paths from us to nodes[2], both with a 99sat HTLC maximum, with one with
// no fee and one with a 1msat fee. Previously, trying to route 100 sats to nodes[2] here
// Get a route for 100 sats and check that we found the MPP route no problem and didn't
// overpay at all.
- let route = get_route(&our_id, &payment_params, &network_graph.read_only(), None, 100_000, 42, Arc::clone(&logger), &scorer, &random_seed_bytes).unwrap();
+ let mut route = get_route(&our_id, &payment_params, &network_graph.read_only(), None, 100_000, 42, Arc::clone(&logger), &scorer, &random_seed_bytes).unwrap();
assert_eq!(route.paths.len(), 2);
- // Paths are somewhat randomly ordered, but:
- // * the first is channel 2 (1 msat fee) -> channel 4 -> channel 42
- // * the second is channel 1 (0 fee, but 99 sat maximum) -> channel 3 -> channel 42
- assert_eq!(route.paths[0][0].short_channel_id, 2);
- assert_eq!(route.paths[0][0].fee_msat, 1);
- assert_eq!(route.paths[0][2].fee_msat, 1_000);
- assert_eq!(route.paths[1][0].short_channel_id, 1);
- assert_eq!(route.paths[1][0].fee_msat, 0);
- assert_eq!(route.paths[1][2].fee_msat, 99_000);
+ route.paths.sort_by_key(|path| path[0].short_channel_id);
+ // Paths are manually ordered by SCID, so:
+ // * the first is channel 1 (0 fee, but 99 sat maximum) -> channel 3 -> channel 42
+ // * the second is channel 2 (1 msat fee) -> channel 4 -> channel 42
+ assert_eq!(route.paths[0][0].short_channel_id, 1);
+ assert_eq!(route.paths[0][0].fee_msat, 0);
+ assert_eq!(route.paths[0][2].fee_msat, 99_000);
+ assert_eq!(route.paths[1][0].short_channel_id, 2);
+ assert_eq!(route.paths[1][0].fee_msat, 1);
+ assert_eq!(route.paths[1][2].fee_msat, 1_000);
assert_eq!(route.get_total_fees(), 1);
assert_eq!(route.get_total_amount(), 100_000);
}
let scorer = test_utils::TestScorer::with_penalty(0);
let keys_manager = test_utils::TestKeysInterface::new(&[0u8; 32], Network::Testnet);
let random_seed_bytes = keys_manager.get_secure_random_bytes();
- let payment_params = PaymentParameters::from_node_id(nodes[2]).with_features(InvoiceFeatures::known());
+ let payment_params = PaymentParameters::from_node_id(nodes[2]).with_features(InvoiceFeatures::known())
+ .with_max_channel_saturation_power_of_half(0);
// We need a route consisting of 3 paths:
// From our node to node2 via node0, node7, node1 (three paths one hop each).
assert_eq!(route.paths[0].len(), 1);
assert_eq!(route.paths[1].len(), 1);
+ assert!((route.paths[0][0].short_channel_id == 3 && route.paths[1][0].short_channel_id == 2) ||
+ (route.paths[0][0].short_channel_id == 2 && route.paths[1][0].short_channel_id == 3));
+
assert_eq!(route.paths[0][0].pubkey, nodes[0]);
- assert_eq!(route.paths[0][0].short_channel_id, 3);
assert_eq!(route.paths[0][0].fee_msat, 50_000);
assert_eq!(route.paths[1][0].pubkey, nodes[0]);
- assert_eq!(route.paths[1][0].short_channel_id, 2);
assert_eq!(route.paths[1][0].fee_msat, 50_000);
}
}
}
+ #[test]
+ fn avoids_recently_failed_paths() {
+ // Ensure that the router always avoids all of the `previously_failed_channels` channels by
+ // randomly inserting channels into it until we can't find a route anymore.
+ let (secp_ctx, network, _, _, logger) = build_graph();
+ let (_, our_id, _, nodes) = get_nodes(&secp_ctx);
+ let network_graph = network.read_only();
+
+ let scorer = test_utils::TestScorer::with_penalty(0);
+ let mut payment_params = PaymentParameters::from_node_id(nodes[6]).with_route_hints(last_hops(&nodes))
+ .with_max_path_count(1);
+ let keys_manager = test_utils::TestKeysInterface::new(&[0u8; 32], Network::Testnet);
+ let random_seed_bytes = keys_manager.get_secure_random_bytes();
+
+ // We should be able to find a route initially, and then after we fail a few random
+ // channels eventually we won't be able to any longer.
+ assert!(get_route(&our_id, &payment_params, &network_graph, None, 100, 0, Arc::clone(&logger), &scorer, &random_seed_bytes).is_ok());
+ loop {
+ if let Ok(route) = get_route(&our_id, &payment_params, &network_graph, None, 100, 0, Arc::clone(&logger), &scorer, &random_seed_bytes) {
+ for chan in route.paths[0].iter() {
+ assert!(!payment_params.previously_failed_channels.contains(&chan.short_channel_id));
+ }
+ let victim = (u64::from_ne_bytes(random_seed_bytes[0..8].try_into().unwrap()) as usize)
+ % route.paths[0].len();
+ payment_params.previously_failed_channels.push(route.paths[0][victim].short_channel_id);
+ } else { break; }
+ }
+ }
+
#[test]
fn limits_path_length() {
let (secp_ctx, network, _, _, logger) = build_line_graph();
}
}
+ #[test]
+ fn avoids_saturating_channels() {
+ let (secp_ctx, network_graph, gossip_sync, _, logger) = build_graph();
+ let (_, our_id, privkeys, nodes) = get_nodes(&secp_ctx);
+
+ let scorer = ProbabilisticScorer::new(Default::default(), &*network_graph, Arc::clone(&logger));
+
+ // Set the fee on channel 13 to 100% to match channel 4 giving us two equivalent paths (us
+ // -> node 7 -> node2 and us -> node 1 -> node 2) which we should balance over.
+ update_channel(&gossip_sync, &secp_ctx, &privkeys[1], UnsignedChannelUpdate {
+ chain_hash: genesis_block(Network::Testnet).header.block_hash(),
+ short_channel_id: 4,
+ timestamp: 2,
+ flags: 0,
+ cltv_expiry_delta: (4 << 4) | 1,
+ htlc_minimum_msat: 0,
+ htlc_maximum_msat: OptionalField::Present(250_000_000),
+ fee_base_msat: 0,
+ fee_proportional_millionths: 0,
+ excess_data: Vec::new()
+ });
+ update_channel(&gossip_sync, &secp_ctx, &privkeys[7], UnsignedChannelUpdate {
+ chain_hash: genesis_block(Network::Testnet).header.block_hash(),
+ short_channel_id: 13,
+ timestamp: 2,
+ flags: 0,
+ cltv_expiry_delta: (13 << 4) | 1,
+ htlc_minimum_msat: 0,
+ htlc_maximum_msat: OptionalField::Present(250_000_000),
+ fee_base_msat: 0,
+ fee_proportional_millionths: 0,
+ excess_data: Vec::new()
+ });
+
+ let payment_params = PaymentParameters::from_node_id(nodes[2]).with_features(InvoiceFeatures::known());
+ let keys_manager = test_utils::TestKeysInterface::new(&[0u8; 32], Network::Testnet);
+ let random_seed_bytes = keys_manager.get_secure_random_bytes();
+ // 100,000 sats is less than the available liquidity on each channel, set above.
+ let route = get_route(&our_id, &payment_params, &network_graph.read_only(), None, 100_000_000, 42, Arc::clone(&logger), &scorer, &random_seed_bytes).unwrap();
+ assert_eq!(route.paths.len(), 2);
+ assert!((route.paths[0][1].short_channel_id == 4 && route.paths[1][1].short_channel_id == 13) ||
+ (route.paths[1][1].short_channel_id == 4 && route.paths[0][1].short_channel_id == 13));
+ }
+
#[cfg(not(feature = "no-std"))]
pub(super) fn random_init_seed() -> u64 {
// Because the default HashMap in std pulls OS randomness, we can use it as a (bad) RNG.
///
/// Default value: 250 msat
pub anti_probing_penalty_msat: u64,
+
+ /// This penalty is applied when the amount we're attempting to send over a channel exceeds our
+ /// current estimate of the channel's available liquidity.
+ ///
+ /// Note that in this case all other penalties, including the
+ /// [`liquidity_penalty_multiplier_msat`] and [`amount_penalty_multiplier_msat`]-based
+ /// penalties, as well as the [`base_penalty_msat`] and the [`anti_probing_penalty_msat`], if
+ /// applicable, are still included in the overall penalty.
+ ///
+ /// If you wish to avoid creating paths with such channels entirely, setting this to a value of
+ /// `u64::max_value()` will guarantee that.
+ ///
+ /// Default value: 1_0000_0000_000 msat (1 Bitcoin)
+ ///
+ /// [`liquidity_penalty_multiplier_msat`]: Self::liquidity_penalty_multiplier_msat
+ /// [`amount_penalty_multiplier_msat`]: Self::amount_penalty_multiplier_msat
+ /// [`base_penalty_msat`]: Self::base_penalty_msat
+ /// [`anti_probing_penalty_msat`]: Self::anti_probing_penalty_msat
+ pub considered_impossible_penalty_msat: u64,
}
/// Accounting for channel liquidity balance uncertainty.
amount_penalty_multiplier_msat: 0,
manual_node_penalties: HashMap::new(),
anti_probing_penalty_msat: 0,
+ considered_impossible_penalty_msat: 0,
}
}
amount_penalty_multiplier_msat: 256,
manual_node_penalties: HashMap::new(),
anti_probing_penalty_msat: 250,
+ considered_impossible_penalty_msat: 1_0000_0000_000,
}
}
}
if amount_msat <= min_liquidity_msat {
0
} else if amount_msat >= max_liquidity_msat {
- if amount_msat > max_liquidity_msat {
- u64::max_value()
- } else if max_liquidity_msat != self.capacity_msat {
- // Avoid using the failed channel on retry.
- u64::max_value()
- } else {
- // Equivalent to hitting the else clause below with the amount equal to the
- // effective capacity and without any certainty on the liquidity upper bound.
- let negative_log10_times_2048 = NEGATIVE_LOG10_UPPER_BOUND * 2048;
- self.combined_penalty_msat(amount_msat, negative_log10_times_2048, params)
- }
+ // Equivalent to hitting the else clause below with the amount equal to the effective
+ // capacity and without any certainty on the liquidity upper bound, plus the
+ // impossibility penalty.
+ let negative_log10_times_2048 = NEGATIVE_LOG10_UPPER_BOUND * 2048;
+ self.combined_penalty_msat(amount_msat, negative_log10_times_2048, params)
+ .saturating_add(params.considered_impossible_penalty_msat)
} else {
let numerator = (max_liquidity_msat - amount_msat).saturating_add(1);
let denominator = (max_liquidity_msat - min_liquidity_msat).saturating_add(1);
assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 0);
let usage = ChannelUsage { amount_msat: 102_400, ..usage };
assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 47);
- let usage = ChannelUsage { amount_msat: 1_024_000, ..usage };
+ let usage = ChannelUsage { amount_msat: 1_023_999, ..usage };
assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 2_000);
let usage = ChannelUsage {
let network_graph = network_graph(&logger);
let params = ProbabilisticScoringParameters {
liquidity_penalty_multiplier_msat: 1_000,
+ considered_impossible_penalty_msat: u64::max_value(),
..ProbabilisticScoringParameters::zero_penalty()
};
let scorer = ProbabilisticScorer::new(params, &network_graph, &logger)
let network_graph = network_graph(&logger);
let params = ProbabilisticScoringParameters {
liquidity_penalty_multiplier_msat: 1_000,
+ considered_impossible_penalty_msat: u64::max_value(),
..ProbabilisticScoringParameters::zero_penalty()
};
let mut scorer = ProbabilisticScorer::new(params, &network_graph, &logger);
let params = ProbabilisticScoringParameters {
liquidity_penalty_multiplier_msat: 1_000,
liquidity_offset_half_life: Duration::from_secs(10),
+ considered_impossible_penalty_msat: u64::max_value(),
..ProbabilisticScoringParameters::zero_penalty()
};
let mut scorer = ProbabilisticScorer::new(params, &network_graph, &logger);
let usage = ChannelUsage {
amount_msat: 0,
inflight_htlc_msat: 0,
- effective_capacity: EffectiveCapacity::Total { capacity_msat: 1_024, htlc_maximum_msat: Some(1_000) },
+ effective_capacity: EffectiveCapacity::Total { capacity_msat: 1_024, htlc_maximum_msat: Some(1_024) },
};
assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 0);
- let usage = ChannelUsage { amount_msat: 1_024, ..usage };
+ let usage = ChannelUsage { amount_msat: 1_023, ..usage };
assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 2_000);
scorer.payment_path_failed(&payment_path_for_amount(768).iter().collect::<Vec<_>>(), 42);
let usage = ChannelUsage { amount_msat: 1_023, ..usage };
assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 2_000);
let usage = ChannelUsage { amount_msat: 1_024, ..usage };
- assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 2_000);
+ assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), u64::max_value());
// Fully decay liquidity upper bound.
SinceEpoch::advance(Duration::from_secs(10));
let usage = ChannelUsage { amount_msat: 0, ..usage };
assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 0);
let usage = ChannelUsage { amount_msat: 1_024, ..usage };
- assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 2_000);
+ assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), u64::max_value());
SinceEpoch::advance(Duration::from_secs(10));
let usage = ChannelUsage { amount_msat: 0, ..usage };
assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 0);
let usage = ChannelUsage { amount_msat: 1_024, ..usage };
- assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 2_000);
+ assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), u64::max_value());
}
#[test]
let params = ProbabilisticScoringParameters {
liquidity_penalty_multiplier_msat: 1_000,
liquidity_offset_half_life: Duration::from_secs(10),
+ considered_impossible_penalty_msat: u64::max_value(),
..ProbabilisticScoringParameters::zero_penalty()
};
let mut scorer = ProbabilisticScorer::new(params.clone(), &network_graph, &logger);
let params = ProbabilisticScoringParameters {
liquidity_penalty_multiplier_msat: 1_000,
liquidity_offset_half_life: Duration::from_secs(10),
+ considered_impossible_penalty_msat: u64::max_value(),
..ProbabilisticScoringParameters::zero_penalty()
};
let mut scorer = ProbabilisticScorer::new(params.clone(), &network_graph, &logger);
fn accounts_for_inflight_htlc_usage() {
let logger = TestLogger::new();
let network_graph = network_graph(&logger);
- let params = ProbabilisticScoringParameters::default();
+ let params = ProbabilisticScoringParameters {
+ considered_impossible_penalty_msat: u64::max_value(),
+ ..ProbabilisticScoringParameters::zero_penalty()
+ };
let scorer = ProbabilisticScorer::new(params, &network_graph, &logger);
let source = source_node_id();
let target = target_node_id();