/// suggestion.
const STALE_CHANNEL_UPDATE_AGE_LIMIT_SECS: u64 = 60 * 60 * 24 * 14;
+/// We stop tracking the removal of permanently failed nodes and channels one week after removal.
+const REMOVED_ENTRIES_TRACKING_AGE_LIMIT_SECS: u64 = 60 * 60 * 24 * 7;
+
/// The maximum number of extra bytes which we do not understand in a gossip message before we will
/// refuse to relay the message.
const MAX_EXCESS_BYTES_FOR_RELAY: usize = 1024;
// Lock order: channels -> nodes
channels: RwLock<BTreeMap<u64, ChannelInfo>>,
nodes: RwLock<BTreeMap<NodeId, NodeInfo>>,
+ // Lock order: removed_channels -> removed_nodes
+ //
+ // NOTE: In the following `removed_*` maps, we use seconds since UNIX epoch to track time instead
+ // of `std::time::Instant`s for a few reasons:
+ // * We want it to be possible to do tracking in no-std environments where we can compare
+ // a provided current UNIX timestamp with the time at which we started tracking.
+ // * In the future, if we decide to persist these maps, they will already be serializable.
+ // * Although we lose out on the platform's monotonic clock, the system clock in a std
+ // environment should be practical over the time period we are considering (on the order of a
+ // week).
+ //
+ /// Keeps track of short channel IDs for channels we have explicitly removed due to permanent
+ /// failure so that we don't resync them from gossip. Each SCID is mapped to the time (in seconds)
+ /// it was removed so that once some time passes, we can potentially resync it from gossip again.
+ removed_channels: Mutex<HashMap<u64, Option<u64>>>,
+ /// Keeps track of `NodeId`s we have explicitly removed due to permanent failure so that we don't
+ /// resync them from gossip. Each `NodeId` is mapped to the time (in seconds) it was removed so
+ /// that once some time passes, we can potentially resync it from gossip again.
+ removed_nodes: Mutex<HashMap<NodeId, Option<u64>>>,
}
/// A read-only view of [`NetworkGraph`].
is_permanent: bool,
},
/// An error indicating that a node failed to route a payment, which should be applied via
- /// [`NetworkGraph::node_failed`].
+ /// [`NetworkGraph::node_failed_permanent`] if permanent.
NodeFailure {
/// The node id of the failed node.
node_id: PublicKey,
self.channel_failed(short_channel_id, is_permanent);
},
NetworkUpdate::NodeFailure { ref node_id, is_permanent } => {
- let action = if is_permanent { "Removing" } else { "Disabling" };
- log_debug!(self.logger, "{} node graph entry for {} due to a payment failure.", action, node_id);
- self.node_failed(node_id, is_permanent);
+ if is_permanent {
+ log_debug!(self.logger,
+ "Removed node graph entry for {} due to a payment failure.", log_pubkey!(node_id));
+ self.node_failed_permanent(node_id);
+ };
},
}
}
channels: RwLock::new(channels),
nodes: RwLock::new(nodes),
last_rapid_gossip_sync_timestamp: Mutex::new(last_rapid_gossip_sync_timestamp),
+ removed_nodes: Mutex::new(HashMap::new()),
+ removed_channels: Mutex::new(HashMap::new()),
})
}
}
channels: RwLock::new(BTreeMap::new()),
nodes: RwLock::new(BTreeMap::new()),
last_rapid_gossip_sync_timestamp: Mutex::new(None),
+ removed_channels: Mutex::new(HashMap::new()),
+ removed_nodes: Mutex::new(HashMap::new()),
}
}
return Err(LightningError{err: "Channel announcement node had a channel with itself".to_owned(), action: ErrorAction::IgnoreError});
}
+ let node_one = NodeId::from_pubkey(&msg.node_id_1);
+ let node_two = NodeId::from_pubkey(&msg.node_id_2);
+
{
let channels = self.channels.read().unwrap();
// We use the Node IDs rather than the bitcoin_keys to check for "equivalence"
// as we didn't (necessarily) store the bitcoin keys, and we only really care
// if the peers on the channel changed anyway.
- if NodeId::from_pubkey(&msg.node_id_1) == chan.node_one && NodeId::from_pubkey(&msg.node_id_2) == chan.node_two {
+ if node_one == chan.node_one && node_two == chan.node_two {
return Err(LightningError {
err: "Already have chain-validated channel".to_owned(),
action: ErrorAction::IgnoreDuplicateGossip
}
}
+ {
+ let removed_channels = self.removed_channels.lock().unwrap();
+ let removed_nodes = self.removed_nodes.lock().unwrap();
+ if removed_channels.contains_key(&msg.short_channel_id) ||
+ removed_nodes.contains_key(&node_one) ||
+ removed_nodes.contains_key(&node_two) {
+ return Err(LightningError{
+ err: format!("Channel with SCID {} or one of its nodes was removed from our network graph recently", &msg.short_channel_id),
+ action: ErrorAction::IgnoreAndLog(Level::Gossip)});
+ }
+ }
+
let utxo_value = match &chain_access {
&None => {
// Tentatively accept, potentially exposing us to DoS attacks
let chan_info = ChannelInfo {
features: msg.features.clone(),
- node_one: NodeId::from_pubkey(&msg.node_id_1),
+ node_one,
one_to_two: None,
- node_two: NodeId::from_pubkey(&msg.node_id_2),
+ node_two,
two_to_one: None,
capacity_sats: utxo_value,
announcement_message: if msg.excess_data.len() <= MAX_EXCESS_BYTES_FOR_RELAY
/// May cause the removal of nodes too, if this was their last channel.
/// If not permanent, makes channels unavailable for routing.
pub fn channel_failed(&self, short_channel_id: u64, is_permanent: bool) {
+ #[cfg(feature = "std")]
+ let current_time_unix = Some(SystemTime::now().duration_since(UNIX_EPOCH).expect("Time must be > 1970").as_secs());
+ #[cfg(not(feature = "std"))]
+ let current_time_unix = None;
+
let mut channels = self.channels.write().unwrap();
if is_permanent {
if let Some(chan) = channels.remove(&short_channel_id) {
let mut nodes = self.nodes.write().unwrap();
+ self.removed_channels.lock().unwrap().insert(short_channel_id, current_time_unix);
Self::remove_channel_in_nodes(&mut nodes, &chan, short_channel_id);
}
} else {
}
}
- /// Marks a node in the graph as failed.
- pub fn node_failed(&self, _node_id: &PublicKey, is_permanent: bool) {
- if is_permanent {
- // TODO: Wholly remove the node
- } else {
- // TODO: downgrade the node
+ /// Marks a node in the graph as permanently failed, effectively removing it and its channels
+ /// from local storage.
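+ ///
+ /// A minimal usage sketch (illustrative only; `genesis_hash`, `logger` and
+ /// `failed_node_pubkey` are assumed to be in scope):
+ ///
+ /// ```ignore
+ /// let network_graph = NetworkGraph::new(genesis_hash, &logger);
+ /// network_graph.node_failed_permanent(&failed_node_pubkey);
+ /// ```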
+ pub fn node_failed_permanent(&self, node_id: &PublicKey) {
+ #[cfg(feature = "std")]
+ let current_time_unix = Some(SystemTime::now().duration_since(UNIX_EPOCH).expect("Time must be > 1970").as_secs());
+ #[cfg(not(feature = "std"))]
+ let current_time_unix = None;
+
+ let node_id = NodeId::from_pubkey(node_id);
+ let mut channels = self.channels.write().unwrap();
+ let mut nodes = self.nodes.write().unwrap();
+ let mut removed_channels = self.removed_channels.lock().unwrap();
+ let mut removed_nodes = self.removed_nodes.lock().unwrap();
+
+ if let Some(node) = nodes.remove(&node_id) {
+ for scid in node.channels.iter() {
+ if let Some(chan_info) = channels.remove(scid) {
+ let other_node_id = if node_id == chan_info.node_one { chan_info.node_two } else { chan_info.node_one };
+ if let BtreeEntry::Occupied(mut other_node_entry) = nodes.entry(other_node_id) {
+ other_node_entry.get_mut().channels.retain(|chan_id| {
+ *scid != *chan_id
+ });
+ if other_node_entry.get().channels.is_empty() {
+ other_node_entry.remove_entry();
+ }
+ }
+ removed_channels.insert(*scid, current_time_unix);
+ }
+ }
+ removed_nodes.insert(node_id, current_time_unix);
}
}
/// Note that for users of the `lightning-background-processor` crate this method may be
/// automatically called regularly for you.
///
+ /// This method will also cause us to stop tracking removed nodes and channels if they have been
+ /// in the tracking maps for at least a week so that these can be resynced from gossip in the
+ /// future.
+ ///
/// This method is only available with the `std` feature. See
- /// [`NetworkGraph::remove_stale_channels_with_time`] for `no-std` use.
- pub fn remove_stale_channels(&self) {
+ /// [`NetworkGraph::remove_stale_channels_and_tracking_with_time`] for `no-std` use.
+ pub fn remove_stale_channels_and_tracking(&self) {
let time = SystemTime::now().duration_since(UNIX_EPOCH).expect("Time must be > 1970").as_secs();
- self.remove_stale_channels_with_time(time);
+ self.remove_stale_channels_and_tracking_with_time(time);
}
/// Removes information about channels that we haven't heard any updates about in some time.
/// updates every two weeks, the non-normative section of BOLT 7 currently suggests that
/// pruning occur for updates which are at least two weeks old, which we implement here.
///
+ /// This method will also cause us to stop tracking removed nodes and channels if they have been
+ /// in the tracking maps for at least a week so that these can be resynced from gossip in the
+ /// future.
+ ///
/// This function takes the current unix time as an argument. For users with the `std` feature
- /// enabled, [`NetworkGraph::remove_stale_channels`] may be preferable.
- pub fn remove_stale_channels_with_time(&self, current_time_unix: u64) {
+ /// enabled, [`NetworkGraph::remove_stale_channels_and_tracking`] may be preferable.
+ pub fn remove_stale_channels_and_tracking_with_time(&self, current_time_unix: u64) {
let mut channels = self.channels.write().unwrap();
// Time out if we haven't received an update in at least 14 days.
if current_time_unix > u32::max_value() as u64 { return; } // Remove by 2106
Self::remove_channel_in_nodes(&mut nodes, &info, scid);
}
}
+
+ let should_keep_tracking = |time: &mut Option<u64>| {
+ if let Some(time) = time {
+ current_time_unix.saturating_sub(*time) < REMOVED_ENTRIES_TRACKING_AGE_LIMIT_SECS
+ } else {
+ // NOTE: In the case of no-std, we won't have access to the current UNIX time at the time of removal,
+ // so we'll just set the removal time here to the current UNIX time on the very next invocation
+ // of this function.
+ #[cfg(feature = "no-std")]
+ {
+ let mut tracked_time = Some(current_time_unix);
+ core::mem::swap(time, &mut tracked_time);
+ return true;
+ }
+ #[allow(unreachable_code)]
+ false
+ }};
+
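+ // E.g. (illustrative), with the one-week limit above: an entry stamped at UNIX time `t`
+ // stays tracked while `current_time_unix - t < 60 * 60 * 24 * 7` and is dropped afterwards.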
+ self.removed_channels.lock().unwrap().retain(|_, time| should_keep_tracking(time));
+ self.removed_nodes.lock().unwrap().retain(|_, time| should_keep_tracking(time));
}
/// For an already known (from announcement) channel, update info about one of the directions
use util::events::{Event, EventHandler, MessageSendEvent, MessageSendEventsProvider};
use util::scid_utils::scid_from_parts;
+ use super::REMOVED_ENTRIES_TRACKING_AGE_LIMIT_SECS;
use super::STALE_CHANNEL_UPDATE_AGE_LIMIT_SECS;
use bitcoin::hashes::sha256d::Hash as Sha256dHash;
Err(e) => assert_eq!(e.err, "Already have chain-validated channel")
};
+ #[cfg(feature = "std")]
+ {
+ use std::time::{SystemTime, UNIX_EPOCH};
+
+ let tracking_time = SystemTime::now().duration_since(UNIX_EPOCH).expect("Time must be > 1970").as_secs();
+ // Mark a node as permanently failed so it's tracked as removed.
+ gossip_sync.network_graph().node_failed_permanent(&PublicKey::from_secret_key(&secp_ctx, node_1_privkey));
+
+ // Return error and ignore valid channel announcement if one of the nodes has been tracked as removed.
+ let valid_announcement = get_signed_channel_announcement(|unsigned_announcement| {
+ unsigned_announcement.short_channel_id += 3;
+ }, node_1_privkey, node_2_privkey, &secp_ctx);
+ match gossip_sync.handle_channel_announcement(&valid_announcement) {
+ Ok(_) => panic!(),
+ Err(e) => assert_eq!(e.err, "Channel with SCID 3 or one of its nodes was removed from our network graph recently")
+ }
+
+ gossip_sync.network_graph().remove_stale_channels_and_tracking_with_time(tracking_time + REMOVED_ENTRIES_TRACKING_AGE_LIMIT_SECS);
+
+ // The above channel announcement should be handled as per normal now.
+ match gossip_sync.handle_channel_announcement(&valid_announcement) {
+ Ok(res) => assert!(res),
+ _ => panic!()
+ }
+ }
+
// Don't relay valid channels with excess data
let valid_announcement = get_signed_channel_announcement(|unsigned_announcement| {
- unsigned_announcement.short_channel_id += 3;
+ unsigned_announcement.short_channel_id += 4;
unsigned_announcement.excess_data.resize(MAX_EXCESS_BYTES_FOR_RELAY + 1, 0);
}, node_1_privkey, node_2_privkey, &secp_ctx);
match gossip_sync.handle_channel_announcement(&valid_announcement) {
let node_1_privkey = &SecretKey::from_slice(&[42; 32]).unwrap();
let node_2_privkey = &SecretKey::from_slice(&[41; 32]).unwrap();
+ let node_2_id = PublicKey::from_secret_key(&secp_ctx, node_2_privkey);
{
// There are no nodes in the table at the beginning.
assert_eq!(network_graph.read_only().channels().len(), 0);
// Nodes are also deleted because there are no associated channels anymore
assert_eq!(network_graph.read_only().nodes().len(), 0);
- // TODO: Test NetworkUpdate::NodeFailure, which is not implemented yet.
+
+ {
+ // Get a new network graph since we don't want to track removed nodes in this test with "std"
+ let network_graph = NetworkGraph::new(genesis_hash, &logger);
+
+ // Announce a channel to test permanent node failure
+ let valid_channel_announcement = get_signed_channel_announcement(|_| {}, node_1_privkey, node_2_privkey, &secp_ctx);
+ let short_channel_id = valid_channel_announcement.contents.short_channel_id;
+ let chain_source: Option<&test_utils::TestChainSource> = None;
+ assert!(network_graph.update_channel_from_announcement(&valid_channel_announcement, &chain_source).is_ok());
+ assert!(network_graph.read_only().channels().get(&short_channel_id).is_some());
+
+ // Non-permanent node failure does not delete any nodes or channels
+ network_graph.handle_event(&Event::PaymentPathFailed {
+ payment_id: None,
+ payment_hash: PaymentHash([0; 32]),
+ payment_failed_permanently: false,
+ all_paths_failed: true,
+ path: vec![],
+ network_update: Some(NetworkUpdate::NodeFailure {
+ node_id: node_2_id,
+ is_permanent: false,
+ }),
+ short_channel_id: None,
+ retry: None,
+ error_code: None,
+ error_data: None,
+ });
+
+ assert!(network_graph.read_only().channels().get(&short_channel_id).is_some());
+ assert!(network_graph.read_only().nodes().get(&NodeId::from_pubkey(&node_2_id)).is_some());
+
+ // Permanent node failure deletes node and its channels
+ network_graph.handle_event(&Event::PaymentPathFailed {
+ payment_id: None,
+ payment_hash: PaymentHash([0; 32]),
+ payment_failed_permanently: false,
+ all_paths_failed: true,
+ path: vec![],
+ network_update: Some(NetworkUpdate::NodeFailure {
+ node_id: node_2_id,
+ is_permanent: true,
+ }),
+ short_channel_id: None,
+ retry: None,
+ error_code: None,
+ error_data: None,
+ });
+
+ assert_eq!(network_graph.read_only().nodes().len(), 0);
+ // Channels are also deleted because the associated node has been deleted
+ assert_eq!(network_graph.read_only().channels().len(), 0);
+ }
}
#[test]
fn test_channel_timeouts() {
- // Test the removal of channels with `remove_stale_channels`.
+ // Test the removal of channels with `remove_stale_channels_and_tracking`.
let logger = test_utils::TestLogger::new();
let chain_source = test_utils::TestChainSource::new(Network::Testnet);
let genesis_hash = genesis_block(Network::Testnet).header.block_hash();
assert!(gossip_sync.handle_channel_update(&valid_channel_update).is_ok());
assert!(network_graph.read_only().channels().get(&short_channel_id).unwrap().one_to_two.is_some());
- network_graph.remove_stale_channels_with_time(100 + STALE_CHANNEL_UPDATE_AGE_LIMIT_SECS);
+ network_graph.remove_stale_channels_and_tracking_with_time(100 + STALE_CHANNEL_UPDATE_AGE_LIMIT_SECS);
assert_eq!(network_graph.read_only().channels().len(), 1);
assert_eq!(network_graph.read_only().nodes().len(), 2);
- network_graph.remove_stale_channels_with_time(101 + STALE_CHANNEL_UPDATE_AGE_LIMIT_SECS);
+ network_graph.remove_stale_channels_and_tracking_with_time(101 + STALE_CHANNEL_UPDATE_AGE_LIMIT_SECS);
#[cfg(feature = "std")]
{
// In std mode, a further check is performed before fully removing the channel -
use std::time::{SystemTime, UNIX_EPOCH};
let announcement_time = SystemTime::now().duration_since(UNIX_EPOCH).expect("Time must be > 1970").as_secs();
- network_graph.remove_stale_channels_with_time(announcement_time + 1 + STALE_CHANNEL_UPDATE_AGE_LIMIT_SECS);
+ network_graph.remove_stale_channels_and_tracking_with_time(announcement_time + 1 + STALE_CHANNEL_UPDATE_AGE_LIMIT_SECS);
}
assert_eq!(network_graph.read_only().channels().len(), 0);
assert_eq!(network_graph.read_only().nodes().len(), 0);
+
+ #[cfg(feature = "std")]
+ {
+ use std::time::{SystemTime, UNIX_EPOCH};
+
+ let tracking_time = SystemTime::now().duration_since(UNIX_EPOCH).expect("Time must be > 1970").as_secs();
+
+ // Clear tracked nodes and channels for clean slate
+ network_graph.removed_channels.lock().unwrap().clear();
+ network_graph.removed_nodes.lock().unwrap().clear();
+
+ // Add a channel and nodes from a channel announcement, so our network graph will
+ // now consist of only two nodes and one channel between them.
+ assert!(network_graph.update_channel_from_announcement(
+ &valid_channel_announcement, &chain_source).is_ok());
+
+ // Mark the channel as permanently failed. This will also remove the two nodes
+ // and all of the entries will be tracked as removed.
+ network_graph.channel_failed(short_channel_id, true);
+
+ // Should not remove from tracking if insufficient time has passed
+ network_graph.remove_stale_channels_and_tracking_with_time(
+ tracking_time + REMOVED_ENTRIES_TRACKING_AGE_LIMIT_SECS - 1);
+ assert_eq!(network_graph.removed_channels.lock().unwrap().len(), 1);
+
+ // Provide a later time so that sufficient time has passed
+ network_graph.remove_stale_channels_and_tracking_with_time(
+ tracking_time + REMOVED_ENTRIES_TRACKING_AGE_LIMIT_SECS);
+ assert!(network_graph.removed_channels.lock().unwrap().is_empty());
+ assert!(network_graph.removed_nodes.lock().unwrap().is_empty());
+ }
+
+ #[cfg(not(feature = "std"))]
+ {
+ // When we don't have access to the system clock, the removal-tracking time is only set by the
+ // first call to `remove_stale_channels_and_tracking_with_time`. Hence, a subsequent call will
+ // only remove an entry from tracking once sufficient time has passed after that first call.
+ let removal_time = 1664619654;
+
+ // Clear removed nodes and channels for clean slate
+ network_graph.removed_channels.lock().unwrap().clear();
+ network_graph.removed_nodes.lock().unwrap().clear();
+
+ // Add a channel and nodes from a channel announcement, so our network graph will
+ // now consist of only two nodes and one channel between them.
+ assert!(network_graph.update_channel_from_announcement(
+ &valid_channel_announcement, &chain_source).is_ok());
+
+ // Mark the channel as permanently failed. This will also remove the two nodes
+ // and all of the entries will be tracked as removed.
+ network_graph.channel_failed(short_channel_id, true);
+
+ // The first time we call the following, the channel will have a removal time assigned.
+ network_graph.remove_stale_channels_and_tracking_with_time(removal_time);
+ assert_eq!(network_graph.removed_channels.lock().unwrap().len(), 1);
+
+ // Provide a later time so that sufficient time has passed
+ network_graph.remove_stale_channels_and_tracking_with_time(
+ removal_time + REMOVED_ENTRIES_TRACKING_AGE_LIMIT_SECS);
+ assert!(network_graph.removed_channels.lock().unwrap().is_empty());
+ assert!(network_graph.removed_nodes.lock().unwrap().is_empty());
+ }
}
#[test]
use util::time::Time;
use prelude::*;
-use core::fmt;
+use core::{cmp, fmt};
use core::cell::{RefCell, RefMut};
+use core::convert::TryInto;
use core::ops::{Deref, DerefMut};
use core::time::Duration;
use io::{self, Read};
pub base_penalty_amount_multiplier_msat: u64,
/// A multiplier used in conjunction with the negative `log10` of the channel's success
- /// probability for a payment to determine the liquidity penalty.
+ /// probability for a payment, as determined by our latest estimates of the channel's
+ /// liquidity, to determine the liquidity penalty.
///
/// The penalty is based in part on the knowledge learned from prior successful and unsuccessful
/// payments. This knowledge is decayed over time based on [`liquidity_offset_half_life`]. The
/// uncertainty bounds of the channel liquidity balance. Amounts above the upper bound will
/// result in a `u64::max_value` penalty, however.
///
- /// Default value: 40,000 msat
+ /// Default value: 30,000 msat
///
/// [`liquidity_offset_half_life`]: Self::liquidity_offset_half_life
pub liquidity_penalty_multiplier_msat: u64,
pub liquidity_offset_half_life: Duration,
/// A multiplier used in conjunction with a payment amount and the negative `log10` of the
- /// channel's success probability for the payment to determine the amount penalty.
+ /// channel's success probability for the payment, as determined by our latest estimates of the
+ /// channel's liquidity, to determine the amount penalty.
///
/// The purpose of the amount penalty is to avoid having fees dominate the channel cost (i.e.,
/// fees plus penalty) for large payments. The penalty is computed as the product of this
/// probabilities, the multiplier will have a decreasing effect as the negative `log10` will
/// fall below `1`.
///
- /// Default value: 256 msat
+ /// Default value: 192 msat
pub liquidity_penalty_amount_multiplier_msat: u64,
+ /// A multiplier used in conjunction with the negative `log10` of the channel's success
+ /// probability for the payment, as determined based on the history of our estimates of the
+ /// channel's available liquidity, to determine a penalty.
+ ///
+ /// This penalty is similar to [`liquidity_penalty_multiplier_msat`], however, instead of using
+ /// only our latest estimate for the current liquidity available in the channel, it estimates
+ /// success probability based on the estimated liquidity available in the channel through
+ /// history. Specifically, every time we update our liquidity bounds on a given channel, we
+ /// track which of several buckets those bounds fall into, exponentially decaying the
+ /// probability of each bucket as new samples are added.
+ ///
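+ /// As a rough approximation (ignoring the cap applied at very low success probabilities),
+ /// the resulting penalty is `-log10(success probability)` times this multiplier.
+ ///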
+ /// Default value: 10,000 msat
+ ///
+ /// [`liquidity_penalty_multiplier_msat`]: Self::liquidity_penalty_multiplier_msat
+ pub historical_liquidity_penalty_multiplier_msat: u64,
+
+ /// A multiplier used in conjunction with the payment amount and the negative `log10` of the
+ /// channel's success probability for the payment, as determined based on the history of our
+ /// estimates of the channel's available liquidity, to determine a penalty.
+ ///
+ /// The purpose of the amount penalty is to avoid having fees dominate the channel cost for
+ /// large payments. The penalty is computed as the product of this multiplier and the `2^20`ths
+ /// of the payment amount, weighted by the negative `log10` of the success probability.
+ ///
+ /// This penalty is similar to [`liquidity_penalty_amount_multiplier_msat`], however, instead
+ /// of using only our latest estimate for the current liquidity available in the channel, it
+ /// estimates success probability based on the estimated liquidity available in the channel
+ /// through history. Specifically, every time we update our liquidity bounds on a given
+ /// channel, we track which of several buckets those bounds fall into, exponentially decaying
+ /// the probability of each bucket as new samples are added.
+ ///
+ /// Default value: 64 msat
+ ///
+ /// [`liquidity_penalty_amount_multiplier_msat`]: Self::liquidity_penalty_amount_multiplier_msat
+ pub historical_liquidity_penalty_amount_multiplier_msat: u64,
+
+ /// If we aren't learning any new datapoints for a channel, the historical liquidity bounds
+ /// tracking can simply live on with increasingly stale data. Instead, when a channel has not
+ /// seen a liquidity estimate update for this amount of time, the historical datapoints are
+ /// decayed by half.
+ ///
+ /// Note that after 16 or more half lives all historical data will be completely gone, as the
+ /// buckets are 16-bit integers decayed by one right-shift per half life.
+ ///
+ /// Default value: 14 days
+ pub historical_no_updates_half_life: Duration,
+
/// Manual penalties used for the given nodes. Allows to set a particular penalty for a given
/// node. Note that a manual penalty of `u64::max_value()` means the node would not ever be
/// considered during path finding.
pub considered_impossible_penalty_msat: u64,
}
+/// Tracks the historical state of a distribution as a weighted average of how much time was spent
+/// in each of 8 buckets.
+#[derive(Clone, Copy)]
+struct HistoricalBucketRangeTracker {
+ buckets: [u16; 8],
+}
+
+impl HistoricalBucketRangeTracker {
+ fn new() -> Self { Self { buckets: [0; 8] } }
+ fn track_datapoint(&mut self, bucket_idx: u8) {
+ // We have 8 leaky buckets for min and max liquidity. Each bucket tracks the amount of time
+ // we spend in it as a 16-bit fixed-point number with a 5-bit fractional part.
+ //
+ // Each time we update our liquidity estimate, we add 32 (1.0 in our fixed-point system) to
+ // the buckets for the current min and max liquidity offset positions.
+ //
+ // We then decay each bucket by multiplying by 2047/2048 (avoiding dividing by a
+ // non-power-of-two). This ensures we can't actually overflow the u16 - when we get to
+ // 63,457 adding 32 and decaying by 2047/2048 leaves us back at 63,457.
+ //
+ // In total, this allows us to track data for the last 8,000 or so payments across a given
+ // channel.
+ //
+ // These constants are a balance - we try to fit in 2 bytes per bucket to reduce overhead,
+ // and need to balance having more bits in the fractional part (to ensure decay isn't too
+ // non-linear) against having too few bits in the integer part, which would cause us to not
+ // store very many datapoints.
+ //
+ // The constants were picked experimentally, selecting a decay amount that restricts us
+ // from overflowing buckets without having to cap them manually.
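+ // As a quick integer-math check of the equilibrium claimed above:
+ // (63_457 + 32) * 2047 / 2048 = 129_961_983 / 2048 = 63_457.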
+ debug_assert!(bucket_idx < 8);
+ if bucket_idx < 8 {
+ for e in self.buckets.iter_mut() {
+ *e = ((*e as u32) * 2047 / 2048) as u16;
+ }
+ self.buckets[bucket_idx as usize] = self.buckets[bucket_idx as usize].saturating_add(32);
+ }
+ }
+ /// Decay all buckets by the given number of half-lives. Used to more aggressively remove old
+ /// datapoints as we receive newer information.
+ fn time_decay_data(&mut self, half_lives: u32) {
+ for e in self.buckets.iter_mut() {
+ *e = e.checked_shr(half_lives).unwrap_or(0);
+ }
+ }
+}
+
+impl_writeable_tlv_based!(HistoricalBucketRangeTracker, { (0, buckets, required) });
+
+struct HistoricalMinMaxBuckets<'a> {
+ min_liquidity_offset_history: &'a HistoricalBucketRangeTracker,
+ max_liquidity_offset_history: &'a HistoricalBucketRangeTracker,
+}
+
+impl HistoricalMinMaxBuckets<'_> {
+ #[inline]
+ fn calculate_success_probability_times_billion(&self, required_decays: u32, payment_amt_64th_bucket: u8) -> Option<u64> {
+ // If historical penalties are enabled, calculate the penalty by walking the set of
+ // historical liquidity bucket (min, max) combinations (where min_idx < max_idx) and, for
+ // each, calculate the probability of success given our payment amount, then total the
+ // weighted average probability of success.
+ //
+ // We use a sliding scale to decide which point within a given bucket will be compared to
+ // the amount being sent - for lower-bounds, the amount being sent is compared to the lower
+ // edge of the first bucket (i.e. zero), but compared to the upper 7/8ths of the last
+ // bucket (i.e. 9 times the index, or 63), with each bucket in between increasing the
+ // comparison point by 1/64th. For upper-bounds, the same applies, however with an offset
+ // of 1/64th (i.e. starting at one and ending at 64). This avoids failing to assign
+ // penalties to channels at the edges.
+ //
+ // If we used the bottom edge of buckets, we'd end up never assigning any penalty at all to
+ // such a channel when sending less than ~0.19% of the channel's capacity (e.g. ~200k sats
+ // for a 1 BTC channel!).
+ //
+ // If we used the middle of each bucket we'd never assign any penalty at all when sending
+ // less than 1/16th of a channel's capacity, or 1/8th if we used the top of the bucket.
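+ //
+ // As a concrete (illustrative) reading of that scale: min bucket index 2 is compared at
+ // 2 * 9 = 18/64ths of capacity, while max bucket index 2 is compared at
+ // (7 - 2) * 9 + 1 = 46/64ths, matching `min_64th_bucket`/`max_64th_bucket` below.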
+ let mut total_valid_points_tracked = 0;
+
+ // Rather than actually decaying the individual buckets, which would lose precision, we
+ // simply track whether all buckets would be decayed to zero, in which case we treat it as
+ // if we had no data.
+ let mut is_fully_decayed = true;
+ let mut check_track_bucket_contains_undecayed_points =
+ |bucket_val: u16| if bucket_val.checked_shr(required_decays).unwrap_or(0) > 0 { is_fully_decayed = false; };
+
+ for (min_idx, min_bucket) in self.min_liquidity_offset_history.buckets.iter().enumerate() {
+ check_track_bucket_contains_undecayed_points(*min_bucket);
+ for max_bucket in self.max_liquidity_offset_history.buckets.iter().take(8 - min_idx) {
+ total_valid_points_tracked += (*min_bucket as u64) * (*max_bucket as u64);
+ check_track_bucket_contains_undecayed_points(*max_bucket);
+ }
+ }
+ // If the total number of valid points is smaller than 1.0 (i.e. 32 * 32 in our fixed-point
+ // scheme, as each point is a product of two fixed-point weights), treat it as if we were
+ // fully decayed.
+ if total_valid_points_tracked.checked_shr(required_decays).unwrap_or(0) < 32*32 || is_fully_decayed {
+ return None;
+ }
+
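+ // Illustrative worked example: with a single datapoint of weight 32 in min bucket 0 and
+ // max bucket 0 (i.e. fully-unknown liquidity), total_valid_points_tracked = 32 * 32, the
+ // lone bucket_prob_times_million is 1024 * 1024, and a payment at half the capacity
+ // (payment_amt_64th_bucket = 32) accumulates 1024 * 1024 * (64 - 32) * 1024 / 64
+ // = 536_870_912, i.e. ~50% on the ~10^9 fixed-point scale used here.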
+ let mut cumulative_success_prob_times_billion = 0;
+ for (min_idx, min_bucket) in self.min_liquidity_offset_history.buckets.iter().enumerate() {
+ for (max_idx, max_bucket) in self.max_liquidity_offset_history.buckets.iter().enumerate().take(8 - min_idx) {
+ let bucket_prob_times_million = (*min_bucket as u64) * (*max_bucket as u64)
+ * 1024 * 1024 / total_valid_points_tracked;
+ let min_64th_bucket = min_idx as u8 * 9;
+ let max_64th_bucket = (7 - max_idx as u8) * 9 + 1;
+ if payment_amt_64th_bucket > max_64th_bucket {
+ // Success probability 0, the payment amount is above the max liquidity
+ } else if payment_amt_64th_bucket <= min_64th_bucket {
+ cumulative_success_prob_times_billion += bucket_prob_times_million * 1024;
+ } else {
+ cumulative_success_prob_times_billion += bucket_prob_times_million *
+ ((max_64th_bucket - payment_amt_64th_bucket) as u64) * 1024 /
+ ((max_64th_bucket - min_64th_bucket) as u64);
+ }
+ }
+ }
+
+ Some(cumulative_success_prob_times_billion)
+ }
+}
+
/// Accounting for channel liquidity balance uncertainty.
///
/// Direction is defined in terms of [`NodeId`] partial ordering, where the source node is the
/// Time when the liquidity bounds were last modified.
last_updated: T,
+
+ min_liquidity_offset_history: HistoricalBucketRangeTracker,
+ max_liquidity_offset_history: HistoricalBucketRangeTracker,
}
/// A snapshot of [`ChannelLiquidity`] in one direction assuming a certain channel capacity and
/// decayed with a given half life.
-struct DirectedChannelLiquidity<L: Deref<Target = u64>, T: Time, U: Deref<Target = T>> {
+struct DirectedChannelLiquidity<'a, L: Deref<Target = u64>, BRT: Deref<Target = HistoricalBucketRangeTracker>, T: Time, U: Deref<Target = T>> {
min_liquidity_offset_msat: L,
max_liquidity_offset_msat: L,
+ min_liquidity_offset_history: BRT,
+ max_liquidity_offset_history: BRT,
capacity_msat: u64,
last_updated: U,
now: T,
- half_life: Duration,
+ params: &'a ProbabilisticScoringParameters,
}
impl<G: Deref<Target = NetworkGraph<L>>, L: Deref, T: Time> ProbabilisticScorerUsingTime<G, L, T> where L::Target: Logger {
let log_direction = |source, target| {
if let Some((directed_info, _)) = chan_debug.as_directed_to(target) {
let amt = directed_info.effective_capacity().as_msat();
- let dir_liq = liq.as_directed(source, target, amt, self.params.liquidity_offset_half_life);
+ let dir_liq = liq.as_directed(source, target, amt, &self.params);
log_debug!(self.logger, "Liquidity from {:?} to {:?} via {} is in the range ({}, {})",
source, target, scid, dir_liq.min_liquidity_msat(), dir_liq.max_liquidity_msat());
} else {
if let Some(liq) = self.channel_liquidities.get(&scid) {
if let Some((directed_info, source)) = chan.as_directed_to(target) {
let amt = directed_info.effective_capacity().as_msat();
- let dir_liq = liq.as_directed(source, target, amt, self.params.liquidity_offset_half_life);
+ let dir_liq = liq.as_directed(source, target, amt, &self.params);
return Some((dir_liq.min_liquidity_msat(), dir_liq.max_liquidity_msat()));
}
}
liquidity_penalty_multiplier_msat: 0,
liquidity_offset_half_life: Duration::from_secs(3600),
liquidity_penalty_amount_multiplier_msat: 0,
+ historical_liquidity_penalty_multiplier_msat: 0,
+ historical_liquidity_penalty_amount_multiplier_msat: 0,
+ historical_no_updates_half_life: Duration::from_secs(60 * 60 * 24 * 14),
manual_node_penalties: HashMap::new(),
anti_probing_penalty_msat: 0,
considered_impossible_penalty_msat: 0,
Self {
base_penalty_msat: 500,
base_penalty_amount_multiplier_msat: 8192,
- liquidity_penalty_multiplier_msat: 40_000,
+ liquidity_penalty_multiplier_msat: 30_000,
liquidity_offset_half_life: Duration::from_secs(3600),
- liquidity_penalty_amount_multiplier_msat: 256,
+ liquidity_penalty_amount_multiplier_msat: 192,
+ historical_liquidity_penalty_multiplier_msat: 10_000,
+ historical_liquidity_penalty_amount_multiplier_msat: 64,
+ historical_no_updates_half_life: Duration::from_secs(60 * 60 * 24 * 14),
manual_node_penalties: HashMap::new(),
anti_probing_penalty_msat: 250,
considered_impossible_penalty_msat: 1_0000_0000_000,
Self {
min_liquidity_offset_msat: 0,
max_liquidity_offset_msat: 0,
+ min_liquidity_offset_history: HistoricalBucketRangeTracker::new(),
+ max_liquidity_offset_history: HistoricalBucketRangeTracker::new(),
last_updated: T::now(),
}
}
/// Returns a view of the channel liquidity directed from `source` to `target` assuming
/// `capacity_msat`.
- fn as_directed(
- &self, source: &NodeId, target: &NodeId, capacity_msat: u64, half_life: Duration
- ) -> DirectedChannelLiquidity<&u64, T, &T> {
- let (min_liquidity_offset_msat, max_liquidity_offset_msat) = if source < target {
- (&self.min_liquidity_offset_msat, &self.max_liquidity_offset_msat)
- } else {
- (&self.max_liquidity_offset_msat, &self.min_liquidity_offset_msat)
- };
+ fn as_directed<'a>(
+ &self, source: &NodeId, target: &NodeId, capacity_msat: u64, params: &'a ProbabilisticScoringParameters
+ ) -> DirectedChannelLiquidity<'a, &u64, &HistoricalBucketRangeTracker, T, &T> {
+ let (min_liquidity_offset_msat, max_liquidity_offset_msat, min_liquidity_offset_history, max_liquidity_offset_history) =
+ if source < target {
+ (&self.min_liquidity_offset_msat, &self.max_liquidity_offset_msat,
+ &self.min_liquidity_offset_history, &self.max_liquidity_offset_history)
+ } else {
+ (&self.max_liquidity_offset_msat, &self.min_liquidity_offset_msat,
+ &self.max_liquidity_offset_history, &self.min_liquidity_offset_history)
+ };
DirectedChannelLiquidity {
min_liquidity_offset_msat,
max_liquidity_offset_msat,
+ min_liquidity_offset_history,
+ max_liquidity_offset_history,
capacity_msat,
last_updated: &self.last_updated,
now: T::now(),
- half_life,
+ params,
}
}
/// Returns a mutable view of the channel liquidity directed from `source` to `target` assuming
/// `capacity_msat`.
- fn as_directed_mut(
- &mut self, source: &NodeId, target: &NodeId, capacity_msat: u64, half_life: Duration
- ) -> DirectedChannelLiquidity<&mut u64, T, &mut T> {
- let (min_liquidity_offset_msat, max_liquidity_offset_msat) = if source < target {
- (&mut self.min_liquidity_offset_msat, &mut self.max_liquidity_offset_msat)
- } else {
- (&mut self.max_liquidity_offset_msat, &mut self.min_liquidity_offset_msat)
- };
+ fn as_directed_mut<'a>(
+ &mut self, source: &NodeId, target: &NodeId, capacity_msat: u64, params: &'a ProbabilisticScoringParameters
+ ) -> DirectedChannelLiquidity<'a, &mut u64, &mut HistoricalBucketRangeTracker, T, &mut T> {
+ let (min_liquidity_offset_msat, max_liquidity_offset_msat, min_liquidity_offset_history, max_liquidity_offset_history) =
+ if source < target {
+ (&mut self.min_liquidity_offset_msat, &mut self.max_liquidity_offset_msat,
+ &mut self.min_liquidity_offset_history, &mut self.max_liquidity_offset_history)
+ } else {
+ (&mut self.max_liquidity_offset_msat, &mut self.min_liquidity_offset_msat,
+ &mut self.max_liquidity_offset_history, &mut self.min_liquidity_offset_history)
+ };
DirectedChannelLiquidity {
min_liquidity_offset_msat,
max_liquidity_offset_msat,
+ min_liquidity_offset_history,
+ max_liquidity_offset_history,
capacity_msat,
last_updated: &mut self.last_updated,
now: T::now(),
- half_life,
+ params,
}
}
}
const AMOUNT_PENALTY_DIVISOR: u64 = 1 << 20;
const BASE_AMOUNT_PENALTY_DIVISOR: u64 = 1 << 30;
-impl<L: Deref<Target = u64>, T: Time, U: Deref<Target = T>> DirectedChannelLiquidity<L, T, U> {
+impl<L: Deref<Target = u64>, BRT: Deref<Target = HistoricalBucketRangeTracker>, T: Time, U: Deref<Target = T>> DirectedChannelLiquidity<'_, L, BRT, T, U> {
/// Returns a liquidity penalty for routing the given HTLC `amount_msat` through the channel in
/// this direction.
fn penalty_msat(&self, amount_msat: u64, params: &ProbabilisticScoringParameters) -> u64 {
let max_liquidity_msat = self.max_liquidity_msat();
let min_liquidity_msat = core::cmp::min(self.min_liquidity_msat(), max_liquidity_msat);
- if amount_msat <= min_liquidity_msat {
+
+ let mut res = if amount_msat <= min_liquidity_msat {
0
} else if amount_msat >= max_liquidity_msat {
// Equivalent to hitting the else clause below with the amount equal to the effective
// capacity and without any certainty on the liquidity upper bound, plus the
// impossibility penalty.
let negative_log10_times_2048 = NEGATIVE_LOG10_UPPER_BOUND * 2048;
- self.combined_penalty_msat(amount_msat, negative_log10_times_2048, params)
+ Self::combined_penalty_msat(amount_msat, negative_log10_times_2048,
+ params.liquidity_penalty_multiplier_msat,
+ params.liquidity_penalty_amount_multiplier_msat)
.saturating_add(params.considered_impossible_penalty_msat)
} else {
let numerator = (max_liquidity_msat - amount_msat).saturating_add(1);
} else {
let negative_log10_times_2048 =
approx::negative_log10_times_2048(numerator, denominator);
- self.combined_penalty_msat(amount_msat, negative_log10_times_2048, params)
+ Self::combined_penalty_msat(amount_msat, negative_log10_times_2048,
+ params.liquidity_penalty_multiplier_msat,
+ params.liquidity_penalty_amount_multiplier_msat)
+ }
+ };
+
+ if params.historical_liquidity_penalty_multiplier_msat != 0 ||
+ params.historical_liquidity_penalty_amount_multiplier_msat != 0 {
+ let required_decays = self.now.duration_since(*self.last_updated).as_secs()
+ .checked_div(params.historical_no_updates_half_life.as_secs())
+ .map_or(u32::max_value(), |decays| cmp::min(decays, u32::max_value() as u64) as u32);
+ let payment_amt_64th_bucket = amount_msat * 64 / self.capacity_msat;
+ debug_assert!(payment_amt_64th_bucket <= 64);
+ if payment_amt_64th_bucket > 64 { return res; }
+
+ let buckets = HistoricalMinMaxBuckets {
+ min_liquidity_offset_history: &self.min_liquidity_offset_history,
+ max_liquidity_offset_history: &self.max_liquidity_offset_history,
+ };
+ if let Some(cumulative_success_prob_times_billion) = buckets
+ .calculate_success_probability_times_billion(required_decays, payment_amt_64th_bucket as u8) {
+ let historical_negative_log10_times_2048 = approx::negative_log10_times_2048(cumulative_success_prob_times_billion + 1, 1024 * 1024 * 1024);
+ res = res.saturating_add(Self::combined_penalty_msat(amount_msat,
+ historical_negative_log10_times_2048, params.historical_liquidity_penalty_multiplier_msat,
+ params.historical_liquidity_penalty_amount_multiplier_msat));
+ } else {
+ // If we don't have any valid points (or, once decayed, we have less than a full
+ // point), redo the non-historical calculation with no liquidity bounds tracked and
+ // the historical penalty multipliers.
+ let max_capacity = self.capacity_msat.saturating_sub(amount_msat).saturating_add(1);
+ let negative_log10_times_2048 =
+ approx::negative_log10_times_2048(max_capacity, self.capacity_msat.saturating_add(1));
+ res = res.saturating_add(Self::combined_penalty_msat(amount_msat, negative_log10_times_2048,
+ params.historical_liquidity_penalty_multiplier_msat,
+ params.historical_liquidity_penalty_amount_multiplier_msat));
+ return res;
}
}
+
+ res
}
/// Computes the liquidity penalty from the penalty multipliers.
#[inline(always)]
- fn combined_penalty_msat(
- &self, amount_msat: u64, negative_log10_times_2048: u64,
- params: &ProbabilisticScoringParameters
+ fn combined_penalty_msat(amount_msat: u64, negative_log10_times_2048: u64,
+ liquidity_penalty_multiplier_msat: u64, liquidity_penalty_amount_multiplier_msat: u64,
) -> u64 {
let liquidity_penalty_msat = {
// Upper bound the liquidity penalty to ensure some channel is selected.
- let multiplier_msat = params.liquidity_penalty_multiplier_msat;
+ let multiplier_msat = liquidity_penalty_multiplier_msat;
let max_penalty_msat = multiplier_msat.saturating_mul(NEGATIVE_LOG10_UPPER_BOUND);
(negative_log10_times_2048.saturating_mul(multiplier_msat) / 2048).min(max_penalty_msat)
};
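+		// E.g. (illustrative): a 10% success probability corresponds to
+		// negative_log10_times_2048 = 2048, so a multiplier of 30_000 yields a penalty of
+		// 2048 * 30_000 / 2048 = 30_000 msat (absent the cap above).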
let amount_penalty_msat = negative_log10_times_2048
- .saturating_mul(params.liquidity_penalty_amount_multiplier_msat)
+ .saturating_mul(liquidity_penalty_amount_multiplier_msat)
.saturating_mul(amount_msat) / 2048 / AMOUNT_PENALTY_DIVISOR;
liquidity_penalty_msat.saturating_add(amount_penalty_msat)
fn decayed_offset_msat(&self, offset_msat: u64) -> u64 {
self.now.duration_since(*self.last_updated).as_secs()
- .checked_div(self.half_life.as_secs())
+ .checked_div(self.params.liquidity_offset_half_life.as_secs())
.and_then(|decays| offset_msat.checked_shr(decays as u32))
.unwrap_or(0)
}
}
-impl<L: DerefMut<Target = u64>, T: Time, U: DerefMut<Target = T>> DirectedChannelLiquidity<L, T, U> {
+impl<L: DerefMut<Target = u64>, BRT: DerefMut<Target = HistoricalBucketRangeTracker>, T: Time, U: DerefMut<Target = T>> DirectedChannelLiquidity<'_, L, BRT, T, U> {
/// Adjusts the channel liquidity balance bounds when failing to route `amount_msat`.
fn failed_at_channel<Log: Deref>(&mut self, amount_msat: u64, chan_descr: fmt::Arguments, logger: &Log) where Log::Target: Logger {
- if amount_msat < self.max_liquidity_msat() {
- log_debug!(logger, "Setting max liquidity of {} to {}", chan_descr, amount_msat);
+ let existing_max_msat = self.max_liquidity_msat();
+ if amount_msat < existing_max_msat {
+ log_debug!(logger, "Setting max liquidity of {} from {} to {}", chan_descr, existing_max_msat, amount_msat);
self.set_max_liquidity_msat(amount_msat);
} else {
- log_trace!(logger, "Max liquidity of {} already more than {}", chan_descr, amount_msat);
+ log_trace!(logger, "Max liquidity of {} is {} (already less than or equal to {})",
+ chan_descr, existing_max_msat, amount_msat);
}
}
/// Adjusts the channel liquidity balance bounds when failing to route `amount_msat` downstream.
fn failed_downstream<Log: Deref>(&mut self, amount_msat: u64, chan_descr: fmt::Arguments, logger: &Log) where Log::Target: Logger {
- if amount_msat > self.min_liquidity_msat() {
- log_debug!(logger, "Setting min liquidity of {} to {}", chan_descr, amount_msat);
+ let existing_min_msat = self.min_liquidity_msat();
+ if amount_msat > existing_min_msat {
+ log_debug!(logger, "Setting min liquidity of {} from {} to {}", existing_min_msat, chan_descr, amount_msat);
self.set_min_liquidity_msat(amount_msat);
} else {
- log_trace!(logger, "Min liquidity of {} already less than {}", chan_descr, amount_msat);
+ log_trace!(logger, "Min liquidity of {} is {} (already greater than or equal to {})",
+ chan_descr, existing_min_msat, amount_msat);
}
}
self.set_max_liquidity_msat(max_liquidity_msat);
}
+ fn update_history_buckets(&mut self) {
+ let half_lives = self.now.duration_since(*self.last_updated).as_secs()
+ .checked_div(self.params.historical_no_updates_half_life.as_secs())
+ .map(|v| v.try_into().unwrap_or(u32::max_value())).unwrap_or(u32::max_value());
+ self.min_liquidity_offset_history.time_decay_data(half_lives);
+ self.max_liquidity_offset_history.time_decay_data(half_lives);
+
+ debug_assert!(*self.min_liquidity_offset_msat <= self.capacity_msat);
+ self.min_liquidity_offset_history.track_datapoint(
+ // Ensure the bucket index we pass is in the range [0, 7], even if the liquidity offset
+ // is zero or the channel's capacity, though the second should generally never happen.
+ (self.min_liquidity_offset_msat.saturating_sub(1) * 8 / self.capacity_msat)
+ .try_into().unwrap_or(32)); // 32 is bogus for 8 buckets, and will be ignored
+ debug_assert!(*self.max_liquidity_offset_msat <= self.capacity_msat);
+ self.max_liquidity_offset_history.track_datapoint(
+ // Ensure the bucket index we pass is in the range [0, 7], even if the liquidity offset
+ // is zero or the channel's capacity, though the second should generally never happen.
+ (self.max_liquidity_offset_msat.saturating_sub(1) * 8 / self.capacity_msat)
+ .try_into().unwrap_or(32)); // 32 is bogus for 8 buckets, and will be ignored
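+		// Worked check of the index math above (illustrative): an offset of 1 msat in a
+		// 1_000 msat channel maps to (1 - 1) * 8 / 1_000 = 0 (the lowest bucket), while an
+		// offset equal to the full capacity maps to (1_000 - 1) * 8 / 1_000 = 7 (the highest).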
+ }
+
/// Adjusts the lower bound of the channel liquidity balance in this direction.
fn set_min_liquidity_msat(&mut self, amount_msat: u64) {
*self.min_liquidity_offset_msat = amount_msat;
} else {
self.decayed_offset_msat(*self.max_liquidity_offset_msat)
};
+ self.update_history_buckets();
*self.last_updated = self.now;
}
} else {
self.decayed_offset_msat(*self.min_liquidity_offset_msat)
};
+ self.update_history_buckets();
*self.last_updated = self.now;
}
}
_ => {},
}
- let liquidity_offset_half_life = self.params.liquidity_offset_half_life;
let amount_msat = usage.amount_msat;
let capacity_msat = usage.effective_capacity.as_msat()
.saturating_sub(usage.inflight_htlc_msat);
self.channel_liquidities
.get(&short_channel_id)
.unwrap_or(&ChannelLiquidity::new())
- .as_directed(source, target, capacity_msat, liquidity_offset_half_life)
+ .as_directed(source, target, capacity_msat, &self.params)
.penalty_msat(amount_msat, &self.params)
.saturating_add(anti_probing_penalty_msat)
.saturating_add(base_penalty_msat)
fn payment_path_failed(&mut self, path: &[&RouteHop], short_channel_id: u64) {
let amount_msat = path.split_last().map(|(hop, _)| hop.fee_msat).unwrap_or(0);
- let liquidity_offset_half_life = self.params.liquidity_offset_half_life;
log_trace!(self.logger, "Scoring path through to SCID {} as having failed at {} msat", short_channel_id, amount_msat);
let network_graph = self.network_graph.read_only();
for (hop_idx, hop) in path.iter().enumerate() {
self.channel_liquidities
.entry(hop.short_channel_id)
.or_insert_with(ChannelLiquidity::new)
- .as_directed_mut(source, &target, capacity_msat, liquidity_offset_half_life)
+ .as_directed_mut(source, &target, capacity_msat, &self.params)
.failed_at_channel(amount_msat, format_args!("SCID {}, towards {:?}", hop.short_channel_id, target), &self.logger);
break;
}
self.channel_liquidities
.entry(hop.short_channel_id)
.or_insert_with(ChannelLiquidity::new)
- .as_directed_mut(source, &target, capacity_msat, liquidity_offset_half_life)
+ .as_directed_mut(source, &target, capacity_msat, &self.params)
.failed_downstream(amount_msat, format_args!("SCID {}, towards {:?}", hop.short_channel_id, target), &self.logger);
} else {
log_debug!(self.logger, "Not able to penalize channel with SCID {} as we do not have graph info for it (likely a route-hint last-hop).",
fn payment_path_successful(&mut self, path: &[&RouteHop]) {
let amount_msat = path.split_last().map(|(hop, _)| hop.fee_msat).unwrap_or(0);
- let liquidity_offset_half_life = self.params.liquidity_offset_half_life;
log_trace!(self.logger, "Scoring path through SCID {} as having succeeded at {} msat.",
path.split_last().map(|(hop, _)| hop.short_channel_id).unwrap_or(0), amount_msat);
let network_graph = self.network_graph.read_only();
self.channel_liquidities
.entry(hop.short_channel_id)
.or_insert_with(ChannelLiquidity::new)
- .as_directed_mut(source, &target, capacity_msat, liquidity_offset_half_life)
+ .as_directed_mut(source, &target, capacity_msat, &self.params)
.successful(amount_msat, format_args!("SCID {}, towards {:?}", hop.short_channel_id, target), &self.logger);
} else {
log_debug!(self.logger, "Not able to learn for channel with SCID {} as we do not have graph info for it (likely a route-hint last-hop).",
let duration_since_epoch = T::duration_since_epoch() - self.last_updated.elapsed();
write_tlv_fields!(w, {
(0, self.min_liquidity_offset_msat, required),
+ (1, Some(self.min_liquidity_offset_history), option),
(2, self.max_liquidity_offset_msat, required),
+ (3, Some(self.max_liquidity_offset_history), option),
(4, duration_since_epoch, required),
});
Ok(())
fn read<R: Read>(r: &mut R) -> Result<Self, DecodeError> {
let mut min_liquidity_offset_msat = 0;
let mut max_liquidity_offset_msat = 0;
+ let mut min_liquidity_offset_history = Some(HistoricalBucketRangeTracker::new());
+ let mut max_liquidity_offset_history = Some(HistoricalBucketRangeTracker::new());
let mut duration_since_epoch = Duration::from_secs(0);
read_tlv_fields!(r, {
(0, min_liquidity_offset_msat, required),
+ (1, min_liquidity_offset_history, option),
(2, max_liquidity_offset_msat, required),
+ (3, max_liquidity_offset_history, option),
(4, duration_since_epoch, required),
});
// On rust prior to 1.60 `Instant::duration_since` will panic if time goes backwards.
Ok(Self {
min_liquidity_offset_msat,
max_liquidity_offset_msat,
+ min_liquidity_offset_history: min_liquidity_offset_history.unwrap(),
+ max_liquidity_offset_history: max_liquidity_offset_history.unwrap(),
last_updated,
})
}
#[cfg(test)]
mod tests {
- use super::{ChannelLiquidity, ProbabilisticScoringParameters, ProbabilisticScorerUsingTime};
+ use super::{ChannelLiquidity, HistoricalBucketRangeTracker, ProbabilisticScoringParameters, ProbabilisticScorerUsingTime};
use util::time::Time;
use util::time::tests::SinceEpoch;
let mut scorer = ProbabilisticScorer::new(params, &network_graph, &logger)
.with_channel(42,
ChannelLiquidity {
- min_liquidity_offset_msat: 700, max_liquidity_offset_msat: 100, last_updated
+ min_liquidity_offset_msat: 700, max_liquidity_offset_msat: 100, last_updated,
+ min_liquidity_offset_history: HistoricalBucketRangeTracker::new(),
+ max_liquidity_offset_history: HistoricalBucketRangeTracker::new(),
})
.with_channel(43,
ChannelLiquidity {
- min_liquidity_offset_msat: 700, max_liquidity_offset_msat: 100, last_updated
+ min_liquidity_offset_msat: 700, max_liquidity_offset_msat: 100, last_updated,
+ min_liquidity_offset_history: HistoricalBucketRangeTracker::new(),
+ max_liquidity_offset_history: HistoricalBucketRangeTracker::new(),
});
let source = source_node_id();
let target = target_node_id();
// Update minimum liquidity.
- let liquidity_offset_half_life = scorer.params.liquidity_offset_half_life;
let liquidity = scorer.channel_liquidities.get(&42).unwrap()
- .as_directed(&source, &target, 1_000, liquidity_offset_half_life);
+ .as_directed(&source, &target, 1_000, &scorer.params);
assert_eq!(liquidity.min_liquidity_msat(), 100);
assert_eq!(liquidity.max_liquidity_msat(), 300);
let liquidity = scorer.channel_liquidities.get(&42).unwrap()
- .as_directed(&target, &source, 1_000, liquidity_offset_half_life);
+ .as_directed(&target, &source, 1_000, &scorer.params);
assert_eq!(liquidity.min_liquidity_msat(), 700);
assert_eq!(liquidity.max_liquidity_msat(), 900);
scorer.channel_liquidities.get_mut(&42).unwrap()
- .as_directed_mut(&source, &target, 1_000, liquidity_offset_half_life)
+ .as_directed_mut(&source, &target, 1_000, &scorer.params)
.set_min_liquidity_msat(200);
let liquidity = scorer.channel_liquidities.get(&42).unwrap()
- .as_directed(&source, &target, 1_000, liquidity_offset_half_life);
+ .as_directed(&source, &target, 1_000, &scorer.params);
assert_eq!(liquidity.min_liquidity_msat(), 200);
assert_eq!(liquidity.max_liquidity_msat(), 300);
let liquidity = scorer.channel_liquidities.get(&42).unwrap()
- .as_directed(&target, &source, 1_000, liquidity_offset_half_life);
+ .as_directed(&target, &source, 1_000, &scorer.params);
assert_eq!(liquidity.min_liquidity_msat(), 700);
assert_eq!(liquidity.max_liquidity_msat(), 800);
// Update maximum liquidity.
let liquidity = scorer.channel_liquidities.get(&43).unwrap()
- .as_directed(&target, &recipient, 1_000, liquidity_offset_half_life);
+ .as_directed(&target, &recipient, 1_000, &scorer.params);
assert_eq!(liquidity.min_liquidity_msat(), 700);
assert_eq!(liquidity.max_liquidity_msat(), 900);
let liquidity = scorer.channel_liquidities.get(&43).unwrap()
- .as_directed(&recipient, &target, 1_000, liquidity_offset_half_life);
+ .as_directed(&recipient, &target, 1_000, &scorer.params);
assert_eq!(liquidity.min_liquidity_msat(), 100);
assert_eq!(liquidity.max_liquidity_msat(), 300);
scorer.channel_liquidities.get_mut(&43).unwrap()
- .as_directed_mut(&target, &recipient, 1_000, liquidity_offset_half_life)
+ .as_directed_mut(&target, &recipient, 1_000, &scorer.params)
.set_max_liquidity_msat(200);
let liquidity = scorer.channel_liquidities.get(&43).unwrap()
- .as_directed(&target, &recipient, 1_000, liquidity_offset_half_life);
+ .as_directed(&target, &recipient, 1_000, &scorer.params);
assert_eq!(liquidity.min_liquidity_msat(), 0);
assert_eq!(liquidity.max_liquidity_msat(), 200);
let liquidity = scorer.channel_liquidities.get(&43).unwrap()
- .as_directed(&recipient, &target, 1_000, liquidity_offset_half_life);
+ .as_directed(&recipient, &target, 1_000, &scorer.params);
assert_eq!(liquidity.min_liquidity_msat(), 800);
assert_eq!(liquidity.max_liquidity_msat(), 1000);
}
let mut scorer = ProbabilisticScorer::new(params, &network_graph, &logger)
.with_channel(42,
ChannelLiquidity {
- min_liquidity_offset_msat: 200, max_liquidity_offset_msat: 400, last_updated
+ min_liquidity_offset_msat: 200, max_liquidity_offset_msat: 400, last_updated,
+ min_liquidity_offset_history: HistoricalBucketRangeTracker::new(),
+ max_liquidity_offset_history: HistoricalBucketRangeTracker::new(),
});
let source = source_node_id();
let target = target_node_id();
assert!(source > target);
// Check initial bounds.
- let liquidity_offset_half_life = scorer.params.liquidity_offset_half_life;
let liquidity = scorer.channel_liquidities.get(&42).unwrap()
- .as_directed(&source, &target, 1_000, liquidity_offset_half_life);
+ .as_directed(&source, &target, 1_000, &scorer.params);
assert_eq!(liquidity.min_liquidity_msat(), 400);
assert_eq!(liquidity.max_liquidity_msat(), 800);
let liquidity = scorer.channel_liquidities.get(&42).unwrap()
- .as_directed(&target, &source, 1_000, liquidity_offset_half_life);
+ .as_directed(&target, &source, 1_000, &scorer.params);
assert_eq!(liquidity.min_liquidity_msat(), 200);
assert_eq!(liquidity.max_liquidity_msat(), 600);
// Reset from source to target.
scorer.channel_liquidities.get_mut(&42).unwrap()
- .as_directed_mut(&source, &target, 1_000, liquidity_offset_half_life)
+ .as_directed_mut(&source, &target, 1_000, &scorer.params)
.set_min_liquidity_msat(900);
let liquidity = scorer.channel_liquidities.get(&42).unwrap()
- .as_directed(&source, &target, 1_000, liquidity_offset_half_life);
+ .as_directed(&source, &target, 1_000, &scorer.params);
assert_eq!(liquidity.min_liquidity_msat(), 900);
assert_eq!(liquidity.max_liquidity_msat(), 1_000);
let liquidity = scorer.channel_liquidities.get(&42).unwrap()
- .as_directed(&target, &source, 1_000, liquidity_offset_half_life);
+ .as_directed(&target, &source, 1_000, &scorer.params);
assert_eq!(liquidity.min_liquidity_msat(), 0);
assert_eq!(liquidity.max_liquidity_msat(), 100);
// Reset from target to source.
scorer.channel_liquidities.get_mut(&42).unwrap()
- .as_directed_mut(&target, &source, 1_000, liquidity_offset_half_life)
+ .as_directed_mut(&target, &source, 1_000, &scorer.params)
.set_min_liquidity_msat(400);
let liquidity = scorer.channel_liquidities.get(&42).unwrap()
- .as_directed(&source, &target, 1_000, liquidity_offset_half_life);
+ .as_directed(&source, &target, 1_000, &scorer.params);
assert_eq!(liquidity.min_liquidity_msat(), 0);
assert_eq!(liquidity.max_liquidity_msat(), 600);
let liquidity = scorer.channel_liquidities.get(&42).unwrap()
- .as_directed(&target, &source, 1_000, liquidity_offset_half_life);
+ .as_directed(&target, &source, 1_000, &scorer.params);
assert_eq!(liquidity.min_liquidity_msat(), 400);
assert_eq!(liquidity.max_liquidity_msat(), 1_000);
}
let mut scorer = ProbabilisticScorer::new(params, &network_graph, &logger)
.with_channel(42,
ChannelLiquidity {
- min_liquidity_offset_msat: 200, max_liquidity_offset_msat: 400, last_updated
+ min_liquidity_offset_msat: 200, max_liquidity_offset_msat: 400, last_updated,
+ min_liquidity_offset_history: HistoricalBucketRangeTracker::new(),
+ max_liquidity_offset_history: HistoricalBucketRangeTracker::new(),
});
let source = source_node_id();
let target = target_node_id();
assert!(source > target);
// Check initial bounds.
- let liquidity_offset_half_life = scorer.params.liquidity_offset_half_life;
let liquidity = scorer.channel_liquidities.get(&42).unwrap()
- .as_directed(&source, &target, 1_000, liquidity_offset_half_life);
+ .as_directed(&source, &target, 1_000, &scorer.params);
assert_eq!(liquidity.min_liquidity_msat(), 400);
assert_eq!(liquidity.max_liquidity_msat(), 800);
let liquidity = scorer.channel_liquidities.get(&42).unwrap()
- .as_directed(&target, &source, 1_000, liquidity_offset_half_life);
+ .as_directed(&target, &source, 1_000, &scorer.params);
assert_eq!(liquidity.min_liquidity_msat(), 200);
assert_eq!(liquidity.max_liquidity_msat(), 600);
// Reset from source to target.
scorer.channel_liquidities.get_mut(&42).unwrap()
- .as_directed_mut(&source, &target, 1_000, liquidity_offset_half_life)
+ .as_directed_mut(&source, &target, 1_000, &scorer.params)
.set_max_liquidity_msat(300);
let liquidity = scorer.channel_liquidities.get(&42).unwrap()
- .as_directed(&source, &target, 1_000, liquidity_offset_half_life);
+ .as_directed(&source, &target, 1_000, &scorer.params);
assert_eq!(liquidity.min_liquidity_msat(), 0);
assert_eq!(liquidity.max_liquidity_msat(), 300);
let liquidity = scorer.channel_liquidities.get(&42).unwrap()
- .as_directed(&target, &source, 1_000, liquidity_offset_half_life);
+ .as_directed(&target, &source, 1_000, &scorer.params);
assert_eq!(liquidity.min_liquidity_msat(), 700);
assert_eq!(liquidity.max_liquidity_msat(), 1_000);
// Reset from target to source.
scorer.channel_liquidities.get_mut(&42).unwrap()
- .as_directed_mut(&target, &source, 1_000, liquidity_offset_half_life)
+ .as_directed_mut(&target, &source, 1_000, &scorer.params)
.set_max_liquidity_msat(600);
let liquidity = scorer.channel_liquidities.get(&42).unwrap()
- .as_directed(&source, &target, 1_000, liquidity_offset_half_life);
+ .as_directed(&source, &target, 1_000, &scorer.params);
assert_eq!(liquidity.min_liquidity_msat(), 400);
assert_eq!(liquidity.max_liquidity_msat(), 1_000);
let liquidity = scorer.channel_liquidities.get(&42).unwrap()
- .as_directed(&target, &source, 1_000, liquidity_offset_half_life);
+ .as_directed(&target, &source, 1_000, &scorer.params);
assert_eq!(liquidity.min_liquidity_msat(), 0);
assert_eq!(liquidity.max_liquidity_msat(), 600);
}
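
The asserts in these two hunks pin down the reset semantics: learning a minimum above the current maximum discards the stale maximum (back to full capacity), learning a maximum below the current minimum discards the stale minimum (back to 0), and the opposite direction is always the complement against capacity. A hedged sketch of that invariant with simplified names (the real setters also decay and re-encode the stored offsets):

```rust
struct Bounds { min_msat: u64, max_msat: u64, capacity_msat: u64 }

impl Bounds {
    // If we now know at least `amount` is available, any older claim that less
    // than `amount` was available must be stale, so the upper bound snaps back
    // to full capacity (hence max 800 -> 1_000 after `set_min_liquidity_msat(900)`
    // in the first hunk above).
    fn set_min(&mut self, amount: u64) {
        self.min_msat = amount;
        if amount > self.max_msat { self.max_msat = self.capacity_msat; }
    }
    // Symmetrically, an upper bound below the lower bound resets the latter to 0
    // (hence min 400 -> 0 after `set_max_liquidity_msat(300)` above).
    fn set_max(&mut self, amount: u64) {
        self.max_msat = amount;
        if amount < self.min_msat { self.min_msat = 0; }
    }
}
```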
let scorer = ProbabilisticScorer::new(params, &network_graph, &logger)
.with_channel(42,
ChannelLiquidity {
- min_liquidity_offset_msat: 40, max_liquidity_offset_msat: 40, last_updated
+ min_liquidity_offset_msat: 40, max_liquidity_offset_msat: 40, last_updated,
+ min_liquidity_offset_history: HistoricalBucketRangeTracker::new(),
+ max_liquidity_offset_history: HistoricalBucketRangeTracker::new(),
});
let source = source_node_id();
let target = target_node_id();
let usage = ChannelUsage {
effective_capacity: EffectiveCapacity::Total { capacity_msat: 3_950_000_000, htlc_maximum_msat: Some(1_000) }, ..usage
};
- assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 1985);
+ assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 1983);
let usage = ChannelUsage {
effective_capacity: EffectiveCapacity::Total { capacity_msat: 4_950_000_000, htlc_maximum_msat: Some(1_000) }, ..usage
};
- assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 1639);
+ assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 1637);
let usage = ChannelUsage {
effective_capacity: EffectiveCapacity::Total { capacity_msat: 5_950_000_000, htlc_maximum_msat: Some(1_000) }, ..usage
};
- assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 1607);
+ assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 1606);
let usage = ChannelUsage {
effective_capacity: EffectiveCapacity::Total { capacity_msat: 6_950_000_000, htlc_maximum_msat: Some(1_000) }, ..usage
};
- assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 1262);
+ assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 1331);
let usage = ChannelUsage {
effective_capacity: EffectiveCapacity::Total { capacity_msat: 7_450_000_000, htlc_maximum_msat: Some(1_000) }, ..usage
};
- assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 1262);
+ assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 1387);
let usage = ChannelUsage {
effective_capacity: EffectiveCapacity::Total { capacity_msat: 7_950_000_000, htlc_maximum_msat: Some(1_000) }, ..usage
};
- assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 1262);
+ assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 1379);
let usage = ChannelUsage {
effective_capacity: EffectiveCapacity::Total { capacity_msat: 8_950_000_000, htlc_maximum_msat: Some(1_000) }, ..usage
};
- assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 1262);
+ assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 1363);
let usage = ChannelUsage {
effective_capacity: EffectiveCapacity::Total { capacity_msat: 9_950_000_000, htlc_maximum_msat: Some(1_000) }, ..usage
};
- assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 1262);
+ assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 1355);
}
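
The recomputed expectations above follow the scorer's documented model: the liquidity penalty is the negative log10 of the estimated success probability, scaled by `liquidity_penalty_multiplier_msat`. A floating-point rendering of that model, for orientation only (the real code derives the bounds from decayed offsets and an effective capacity that accounts for `htlc_maximum_msat`, and uses a saturating fixed-point log10 approximation, so its integers differ slightly):

```rust
// Illustrative float version of the documented penalty model; not the upstream
// fixed-point implementation.
fn approx_liquidity_penalty_msat(
    amount_msat: u64, min_liquidity_msat: u64, max_liquidity_msat: u64,
    liquidity_penalty_multiplier_msat: u64,
) -> u64 {
    // P(success) when the channel's liquidity is believed to lie uniformly in
    // [min_liquidity_msat, max_liquidity_msat].
    let numerator = max_liquidity_msat.saturating_sub(amount_msat) as f64;
    let denominator = (max_liquidity_msat - min_liquidity_msat) as f64;
    let success_probability = numerator / denominator;
    (-success_probability.log10() * liquidity_penalty_multiplier_msat as f64) as u64
}
```

For example, with bounds [0, 1_024], an amount of 100, and a multiplier of 1_024 this gives roughly 46 msat, in line with the 47 asserted in the new historical test below (which exercises the same curve through the historical multipliers).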
#[test]
let params = ProbabilisticScoringParameters {
base_penalty_msat: 500, liquidity_penalty_multiplier_msat: 1_000,
- anti_probing_penalty_msat: 0, ..Default::default()
+ anti_probing_penalty_msat: 0, ..ProbabilisticScoringParameters::zero_penalty()
};
let scorer = ProbabilisticScorer::new(params, &network_graph, &logger);
assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 558);
let params = ProbabilisticScoringParameters {
base_penalty_msat: 500, liquidity_penalty_multiplier_msat: 1_000,
base_penalty_amount_multiplier_msat: (1 << 30),
- anti_probing_penalty_msat: 0, ..Default::default()
+ anti_probing_penalty_msat: 0, ..ProbabilisticScoringParameters::zero_penalty()
};
let scorer = ProbabilisticScorer::new(params, &network_graph, &logger);
assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), u64::max_value());
}
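
Switching the test params from `..Default::default()` to `..ProbabilisticScoringParameters::zero_penalty()` pins each assertion to exactly the knobs under test: any penalty field shipped with a non-zero default (as the new historical multipliers presumably are, given these tests now avoid `Default::default()`) would otherwise leak into unrelated expectations. A sketch of the shape of such a test-only constructor (the field list is illustrative, not copied from upstream):

```rust
impl ProbabilisticScoringParameters {
    #[cfg(test)]
    fn zero_penalty() -> Self {
        Self {
            base_penalty_msat: 0,
            base_penalty_amount_multiplier_msat: 0,
            liquidity_penalty_multiplier_msat: 0,
            liquidity_penalty_amount_multiplier_msat: 0,
            historical_liquidity_penalty_multiplier_msat: 0,
            historical_liquidity_penalty_amount_multiplier_msat: 0,
            anti_probing_penalty_msat: 0,
            // Non-penalty knobs (half-lives, etc.) keep their defaults.
            ..Default::default()
        }
    }
}
```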
+ #[test]
+ fn remembers_historical_failures() {
+ let logger = TestLogger::new();
+ let network_graph = network_graph(&logger);
+ let params = ProbabilisticScoringParameters {
+ historical_liquidity_penalty_multiplier_msat: 1024,
+ historical_liquidity_penalty_amount_multiplier_msat: 1024,
+ historical_no_updates_half_life: Duration::from_secs(10),
+ ..ProbabilisticScoringParameters::zero_penalty()
+ };
+ let mut scorer = ProbabilisticScorer::new(params, &network_graph, &logger);
+ let source = source_node_id();
+ let target = target_node_id();
+
+ let usage = ChannelUsage {
+ amount_msat: 100,
+ inflight_htlc_msat: 0,
+ effective_capacity: EffectiveCapacity::Total { capacity_msat: 1_024, htlc_maximum_msat: Some(1_024) },
+ };
+ // With no historical data the normal liquidity penalty calculation is used.
+ assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 47);
+
+ scorer.payment_path_failed(&payment_path_for_amount(1).iter().collect::<Vec<_>>(), 42);
+ assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 2048);
+
+ // Even after we tell the scorer we definitely have enough available liquidity, it will
+ // still remember that a failure occurred in the past and assign a non-zero penalty.
+ scorer.payment_path_failed(&payment_path_for_amount(1000).iter().collect::<Vec<_>>(), 43);
+ assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 198);
+
+ // Advance the time forward 16 half-lives (which the docs claim will ensure all data is
+ // gone), and check that we're back to where we started.
+ SinceEpoch::advance(Duration::from_secs(10 * 16));
+ assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 47);
+ }
+
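
The "16 half-lives" comment is load-bearing: assuming the tracker's documented design of 16-bit bucket counters that halve via a right-shift for each elapsed `historical_no_updates_half_life`, sixteen half-lives shifts even a saturated counter to zero, at which point the scorer falls back to the no-data path and the penalty returns to 47. A sketch of that decay, under those assumptions:

```rust
// Hedged sketch: one u16 bucket decayed by right-shifting once per half-life.
fn decayed_bucket(bucket: u16, elapsed_secs: u64, half_life_secs: u64) -> u16 {
    let half_lives = (elapsed_secs / half_life_secs) as u32;
    // Shifting a u16 by >= 16 bits is not a valid shift, so checked_shr returns
    // None, i.e. the bucket is fully decayed.
    bucket.checked_shr(half_lives).unwrap_or(0)
}

// With the test's 10s half-life, 160s of advancement clears everything:
// decayed_bucket(u16::MAX, 160, 10) == 0, matching the final assert above.
```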
#[test]
fn adds_anti_probing_penalty() {
let logger = TestLogger::new();