//! Utilities for scoring payment channels.
//!
//! [`ProbabilisticScorer`] may be given to [`find_route`] to score payment channels during path
-//! finding when a custom [`Score`] implementation is not needed.
+//! finding when a custom [`ScoreLookUp`] implementation is not needed.
//!
//! # Example
//!
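//! A minimal sketch of plugging a scorer into routing (not compiled here; it assumes the usual
//! `ProbabilisticScorer::new` and [`find_route`] signatures, default parameters, and that
//! `network_graph`, `logger`, `payer`, `route_params` and `random_seed_bytes` are in scope):
//!
//! ```ignore
//! use lightning::routing::router::find_route;
//! use lightning::routing::scoring::{
//!     ProbabilisticScorer, ProbabilisticScoringDecayParameters, ProbabilisticScoringFeeParameters,
//! };
//!
//! // Build a scorer backed by the network graph, using the default penalty parameters.
//! let decay_params = ProbabilisticScoringDecayParameters::default();
//! let scorer = ProbabilisticScorer::new(decay_params, &network_graph, &logger);
//! let score_params = ProbabilisticScoringFeeParameters::default();
//!
//! // `find_route` consults the scorer for a penalty on every candidate channel it considers.
//! let route = find_route(
//!     &payer, &route_params, &network_graph, None, &logger, &scorer, &score_params,
//!     &random_seed_bytes,
//! );
//! ```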
use crate::prelude::*;
use core::{cmp, fmt};
-use core::cell::{RefCell, RefMut};
+use core::cell::{RefCell, RefMut, Ref};
use core::convert::TryInto;
use core::ops::{Deref, DerefMut};
use core::time::Duration;
use crate::io::{self, Read};
-use crate::sync::{Mutex, MutexGuard};
+use crate::sync::{Mutex, MutexGuard, RwLock, RwLockReadGuard, RwLockWriteGuard};
/// We define Score ever-so-slightly differently based on whether we are being built for C bindings
/// or not. For users, `LockableScore` must somehow be writeable to disk. For Rust users, this is
macro_rules! define_score { ($($supertrait: path)*) => {
/// An interface used to score payment channels for path finding.
///
-/// Scoring is in terms of fees willing to be paid in order to avoid routing through a channel.
-pub trait Score $(: $supertrait)* {
+/// `ScoreLookUp` is used to determine the penalty for a given channel.
+///
+/// Scoring is in terms of fees willing to be paid in order to avoid routing through a channel.
+pub trait ScoreLookUp $(: $supertrait)* {
/// A configurable type containing the parameters which are passed, on a per-route-finding-call
/// basis, through to the scorer methods and used to judge the suitability of channels for use.
type ScoreParams;
/// Returns the fee in msats willing to be paid to avoid routing the proposed `usage` through the
/// given channel in the direction from `source` to the `target` node.
fn channel_penalty_msat(
&self, short_channel_id: u64, source: &NodeId, target: &NodeId, usage: ChannelUsage, score_params: &Self::ScoreParams
) -> u64;
+}
+/// `ScoreUpdate` is used to update the scorer's internal state after a payment attempt.
+pub trait ScoreUpdate $(: $supertrait)* {
/// Handles updating channel penalties after failing to route through a channel.
fn payment_path_failed(&mut self, path: &Path, short_channel_id: u64);
/// Handles updating channel penalties after a probe over the given path succeeded.
fn probe_successful(&mut self, path: &Path);
}
-impl<S: Score, T: DerefMut<Target=S> $(+ $supertrait)*> Score for T {
- type ScoreParams = S::ScoreParams;
+impl<SP: Sized, S: ScoreLookUp<ScoreParams = SP>, T: Deref<Target=S> $(+ $supertrait)*> ScoreLookUp for T {
+ type ScoreParams = SP;
fn channel_penalty_msat(
&self, short_channel_id: u64, source: &NodeId, target: &NodeId, usage: ChannelUsage, score_params: &Self::ScoreParams
) -> u64 {
self.deref().channel_penalty_msat(short_channel_id, source, target, usage, score_params)
}
+}
+impl<S: ScoreUpdate, T: DerefMut<Target=S> $(+ $supertrait)*> ScoreUpdate for T {
fn payment_path_failed(&mut self, path: &Path, short_channel_id: u64) {
self.deref_mut().payment_path_failed(path, short_channel_id)
}
#[cfg(c_bindings)]
define_score!(Writeable);
+
#[cfg(not(c_bindings))]
define_score!();
/// A scorer that is accessed under a lock.
///
-/// Needed so that calls to [`Score::channel_penalty_msat`] in [`find_route`] can be made while
-/// having shared ownership of a scorer but without requiring internal locking in [`Score`]
+/// Needed so that calls to [`ScoreLookUp::channel_penalty_msat`] in [`find_route`] can be made while
+/// having shared ownership of a scorer but without requiring internal locking in [`ScoreUpdate`]
/// implementations. Internal locking would be detrimental to route finding performance and could
-/// result in [`Score::channel_penalty_msat`] returning a different value for the same channel.
+/// result in [`ScoreLookUp::channel_penalty_msat`] returning a different value for the same channel.
///
/// [`find_route`]: crate::routing::router::find_route
pub trait LockableScore<'a> {
- /// The [`Score`] type.
- type Score: 'a + Score;
+ /// The [`ScoreUpdate`] type.
+ type ScoreUpdate: 'a + ScoreUpdate;
+ /// The [`ScoreLookUp`] type.
+ type ScoreLookUp: 'a + ScoreLookUp;
+
+ /// The write locked [`ScoreUpdate`] type.
+ type WriteLocked: DerefMut<Target = Self::ScoreUpdate> + Sized;
- /// The locked [`Score`] type.
- type Locked: DerefMut<Target = Self::Score> + Sized;
+ /// The read locked [`ScoreLookUp`] type.
+ type ReadLocked: Deref<Target = Self::ScoreLookUp> + Sized;
- /// Returns the locked scorer.
- fn lock(&'a self) -> Self::Locked;
+ /// Returns a read-locked scorer.
+ fn read_lock(&'a self) -> Self::ReadLocked;
+
+ /// Returns a write-locked scorer.
+ fn write_lock(&'a self) -> Self::WriteLocked;
}
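// Illustrative sketch (not part of the public API): with any `LockableScore` wrapper, such as
// the `Mutex`, `RefCell`, or `RwLock` impls below, payment results are recorded through the
// write lock while routing scores channels through the read lock.
#[allow(dead_code)]
fn lockable_score_usage_sketch<'a, S: LockableScore<'a>>(
	scorer: &'a S, path: &Path, short_channel_id: u64, source: &NodeId, target: &NodeId,
	usage: ChannelUsage, params: &<S::ScoreLookUp as ScoreLookUp>::ScoreParams,
) -> u64 {
	// Record a failure at the given channel, then re-query its penalty.
	scorer.write_lock().payment_path_failed(path, short_channel_id);
	scorer.read_lock().channel_penalty_msat(short_channel_id, source, target, usage, params)
}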
/// Refers to a scorer that is accessible under lock and also writeable to disk
#[cfg(not(c_bindings))]
impl<'a, T> WriteableScore<'a> for T where T: LockableScore<'a> + Writeable {}
#[cfg(not(c_bindings))]
-impl<'a, T: 'a + Score> LockableScore<'a> for Mutex<T> {
- type Score = T;
- type Locked = MutexGuard<'a, T>;
+impl<'a, T: 'a + ScoreLookUp + ScoreUpdate> LockableScore<'a> for Mutex<T> {
+ type ScoreUpdate = T;
+ type ScoreLookUp = T;
+
+ type WriteLocked = MutexGuard<'a, Self::ScoreUpdate>;
+ type ReadLocked = MutexGuard<'a, Self::ScoreLookUp>;
+
+ fn read_lock(&'a self) -> Self::ReadLocked {
+ Mutex::lock(self).unwrap()
+ }
- fn lock(&'a self) -> Self::Locked {
+ fn write_lock(&'a self) -> Self::WriteLocked {
Mutex::lock(self).unwrap()
}
}
#[cfg(not(c_bindings))]
-impl<'a, T: 'a + Score> LockableScore<'a> for RefCell<T> {
- type Score = T;
- type Locked = RefMut<'a, T>;
+impl<'a, T: 'a + ScoreUpdate + ScoreLookUp> LockableScore<'a> for RefCell<T> {
+ type ScoreUpdate = T;
+ type ScoreLookUp = T;
- fn lock(&'a self) -> Self::Locked {
+ type WriteLocked = RefMut<'a, Self::ScoreUpdate>;
+ type ReadLocked = Ref<'a, Self::ScoreLookUp>;
+
+ fn write_lock(&'a self) -> Self::WriteLocked {
self.borrow_mut()
}
+
+ fn read_lock(&'a self) -> Self::ReadLocked {
+ self.borrow()
+ }
+}
+
+#[cfg(not(c_bindings))]
+impl<'a, SP: Sized, T: 'a + ScoreUpdate + ScoreLookUp<ScoreParams = SP>> LockableScore<'a> for RwLock<T> {
+ type ScoreUpdate = T;
+ type ScoreLookUp = T;
+
+ type WriteLocked = RwLockWriteGuard<'a, Self::ScoreUpdate>;
+ type ReadLocked = RwLockReadGuard<'a, Self::ScoreLookUp>;
+
+ fn read_lock(&'a self) -> Self::ReadLocked {
+ RwLock::read(self).unwrap()
+ }
+
+ fn write_lock(&'a self) -> Self::WriteLocked {
+ RwLock::write(self).unwrap()
+ }
}
#[cfg(c_bindings)]
/// A concrete implementation of [`LockableScore`] which supports multi-threading.
-pub struct MultiThreadedLockableScore<T: Score> {
- score: Mutex<T>,
+pub struct MultiThreadedLockableScore<T: ScoreLookUp + ScoreUpdate> {
+ score: RwLock<T>,
}
#[cfg(c_bindings)]
-impl<'a, T: 'a + Score> LockableScore<'a> for MultiThreadedLockableScore<T> {
- type Score = T;
- type Locked = MultiThreadedScoreLock<'a, T>;
+impl<'a, SP: Sized, T: 'a + ScoreLookUp<ScoreParams = SP> + ScoreUpdate> LockableScore<'a> for MultiThreadedLockableScore<T> {
+ type ScoreUpdate = T;
+ type ScoreLookUp = T;
+ type WriteLocked = MultiThreadedScoreLockWrite<'a, Self::ScoreUpdate>;
+ type ReadLocked = MultiThreadedScoreLockRead<'a, Self::ScoreLookUp>;
+
+ fn read_lock(&'a self) -> Self::ReadLocked {
+ MultiThreadedScoreLockRead(self.score.read().unwrap())
+ }
- fn lock(&'a self) -> Self::Locked {
- MultiThreadedScoreLock(Mutex::lock(&self.score).unwrap())
+ fn write_lock(&'a self) -> Self::WriteLocked {
+ MultiThreadedScoreLockWrite(self.score.write().unwrap())
}
}
#[cfg(c_bindings)]
-impl<T: Score> Writeable for MultiThreadedLockableScore<T> {
+impl<T: ScoreUpdate + ScoreLookUp> Writeable for MultiThreadedLockableScore<T> {
fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
- self.lock().write(writer)
+ self.score.read().unwrap().write(writer)
}
}
#[cfg(c_bindings)]
-impl<'a, T: 'a + Score> WriteableScore<'a> for MultiThreadedLockableScore<T> {}
+impl<'a, T: 'a + ScoreUpdate + ScoreLookUp> WriteableScore<'a> for MultiThreadedLockableScore<T> {}
#[cfg(c_bindings)]
-impl<T: Score> MultiThreadedLockableScore<T> {
+impl<T: ScoreLookUp + ScoreUpdate> MultiThreadedLockableScore<T> {
/// Creates a new [`MultiThreadedLockableScore`] given an underlying scorer.
pub fn new(score: T) -> Self {
- MultiThreadedLockableScore { score: Mutex::new(score) }
+ MultiThreadedLockableScore { score: RwLock::new(score) }
}
}
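// Usage sketch (hypothetical; it assumes a concrete scorer constructor such as
// `FixedPenaltyScorer::with_penalty`, which is not shown here):
//
//     let scorer = MultiThreadedLockableScore::new(FixedPenaltyScorer::with_penalty(1_000));
//     scorer.write_lock().payment_path_failed(&path, 42);
//     let penalty = scorer.read_lock().channel_penalty_msat(42, &source, &target, usage, &());
//
// Because the scorer sits behind an `RwLock`, any number of threads may score channels
// concurrently; exclusive access is only taken while recording payment results.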
#[cfg(c_bindings)]
/// A read-locked `MultiThreadedLockableScore`.
-pub struct MultiThreadedScoreLock<'a, T: Score>(MutexGuard<'a, T>);
+pub struct MultiThreadedScoreLockRead<'a, T: ScoreLookUp>(RwLockReadGuard<'a, T>);
#[cfg(c_bindings)]
-impl<'a, T: 'a + Score> Writeable for MultiThreadedScoreLock<'a, T> {
- fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
- self.0.write(writer)
+/// A write-locked `MultiThreadedLockableScore`.
+pub struct MultiThreadedScoreLockWrite<'a, T: ScoreUpdate>(RwLockWriteGuard<'a, T>);
+
+#[cfg(c_bindings)]
+impl<'a, T: 'a + ScoreLookUp> Deref for MultiThreadedScoreLockRead<'a, T> {
+ type Target = T;
+
+ fn deref(&self) -> &Self::Target {
+ self.0.deref()
}
}
#[cfg(c_bindings)]
-impl<'a, T: 'a + Score> DerefMut for MultiThreadedScoreLock<'a, T> {
- fn deref_mut(&mut self) -> &mut Self::Target {
- self.0.deref_mut()
- }
+impl<'a, T: 'a + ScoreUpdate> Writeable for MultiThreadedScoreLockWrite<'a, T> {
+ fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
+ self.0.write(writer)
+ }
}
#[cfg(c_bindings)]
-impl<'a, T: 'a + Score> Deref for MultiThreadedScoreLock<'a, T> {
+impl<'a, T: 'a + ScoreUpdate> Deref for MultiThreadedScoreLockWrite<'a, T> {
type Target = T;
- fn deref(&self) -> &Self::Target {
- self.0.deref()
- }
+ fn deref(&self) -> &Self::Target {
+ self.0.deref()
+ }
}
+#[cfg(c_bindings)]
+impl<'a, T: 'a + ScoreUpdate> DerefMut for MultiThreadedScoreLockWrite<'a, T> {
+ fn deref_mut(&mut self) -> &mut Self::Target {
+ self.0.deref_mut()
+ }
+}
-/// Proposed use of a channel passed as a parameter to [`Score::channel_penalty_msat`].
+/// Proposed use of a channel passed as a parameter to [`ScoreLookUp::channel_penalty_msat`].
#[derive(Clone, Copy, Debug, PartialEq)]
pub struct ChannelUsage {
/// The amount to send through the channel, denominated in millisatoshis.
}
#[derive(Clone)]
-/// [`Score`] implementation that uses a fixed penalty.
+/// [`ScoreLookUp`] implementation that uses a fixed penalty.
pub struct FixedPenaltyScorer {
penalty_msat: u64,
}
}
}
-impl Score for FixedPenaltyScorer {
+impl ScoreLookUp for FixedPenaltyScorer {
type ScoreParams = ();
fn channel_penalty_msat(&self, _: u64, _: &NodeId, _: &NodeId, _: ChannelUsage, _score_params: &Self::ScoreParams) -> u64 {
self.penalty_msat
}
+}
+impl ScoreUpdate for FixedPenaltyScorer {
fn payment_path_failed(&mut self, _path: &Path, _short_channel_id: u64) {}
fn payment_path_successful(&mut self, _path: &Path) {}
#[cfg(feature = "no-std")]
type ConfiguredTime = Eternity;
-/// [`Score`] implementation using channel success probability distributions.
+/// [`ScoreLookUp`] implementation using channel success probability distributions.
///
/// Channels are tracked with upper and lower liquidity bounds - when an HTLC fails at a channel,
/// we learn that the upper-bound on the available liquidity is lower than the amount of the HTLC.
/// [`historical_liquidity_penalty_amount_multiplier_msat`]: ProbabilisticScoringFeeParameters::historical_liquidity_penalty_amount_multiplier_msat
pub type ProbabilisticScorer<G, L> = ProbabilisticScorerUsingTime::<G, L, ConfiguredTime>;
-/// Probabilistic [`Score`] implementation.
+/// Probabilistic [`ScoreLookUp`] implementation.
///
/// This is not exported to bindings users; generally, all users should use the [`ProbabilisticScorer`] type alias.
pub struct ProbabilisticScorerUsingTime<G: Deref<Target = NetworkGraph<L>>, L: Deref, T: Time>
let amt = directed_info.effective_capacity().as_msat();
let dir_liq = liq.as_directed(source, target, 0, amt, self.decay_params);
- let (min_buckets, max_buckets, _) = dir_liq.liquidity_history
+ let (min_buckets, max_buckets) = dir_liq.liquidity_history
.get_decayed_buckets(now, *dir_liq.last_updated,
- self.decay_params.historical_no_updates_half_life);
+ self.decay_params.historical_no_updates_half_life)
+ .unwrap_or(([0; 32], [0; 32]));
log_debug!(self.logger, core::concat!(
"Liquidity from {} to {} via {} is in the range ({}, {}).\n",
/// in the top and bottom bucket, and roughly with similar (recent) frequency.
///
/// Because the datapoints are decayed slowly over time, values will eventually return to
- /// `Some(([0; 32], [0; 32]))`.
+ /// `Some(([1; 32], [1; 32]))` and then to `None` once no datapoints remain.
///
/// In order to fetch a single success probability from the buckets provided here, as used in
/// the scoring model, see [`Self::historical_estimated_payment_success_probability`].
let amt = directed_info.effective_capacity().as_msat();
let dir_liq = liq.as_directed(source, target, 0, amt, self.decay_params);
- let (min_buckets, mut max_buckets, _) = dir_liq.liquidity_history
- .get_decayed_buckets(dir_liq.now, *dir_liq.last_updated,
- self.decay_params.historical_no_updates_half_life);
+ let (min_buckets, mut max_buckets) =
+ dir_liq.liquidity_history.get_decayed_buckets(
+ dir_liq.now, *dir_liq.last_updated,
+ self.decay_params.historical_no_updates_half_life
+ )?;
+
// Note that the liquidity buckets are an offset from the edge, so we reverse
// the max-bucket order to get the probabilities from zero.
max_buckets.reverse();
log_trace!(logger, "Max liquidity of {} is {} (already less than or equal to {})",
chan_descr, existing_max_msat, amount_msat);
}
- self.update_history_buckets();
+ self.update_history_buckets(0);
}
/// Adjusts the channel liquidity balance bounds when failing to route `amount_msat` downstream.
log_trace!(logger, "Min liquidity of {} is {} (already greater than or equal to {})",
chan_descr, existing_min_msat, amount_msat);
}
- self.update_history_buckets();
+ self.update_history_buckets(0);
}
/// Adjusts the channel liquidity balance bounds when successfully routing `amount_msat`.
let max_liquidity_msat = self.max_liquidity_msat().checked_sub(amount_msat).unwrap_or(0);
log_debug!(logger, "Subtracting {} from max liquidity of {} (setting it to {})", amount_msat, chan_descr, max_liquidity_msat);
self.set_max_liquidity_msat(max_liquidity_msat);
- self.update_history_buckets();
+ self.update_history_buckets(amount_msat);
}
- fn update_history_buckets(&mut self) {
+ /// Updates the history buckets for this channel. Because the history buckets track what we now
+ /// know about the channel's state *prior to our payment* (i.e. what we assume is "steady
+ /// state"), we allow the caller to set an offset applied to our liquidity bounds which
+ /// represents the amount of the successful payment we just made.
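+ /// For example, if we just routed 100_000 msat successfully and our post-payment bounds are
+ /// (min: 50_000, max: 300_000), the pre-payment steady state we want to record is
+ /// (min: 150_000, max: 400_000), so the caller passes `bucket_offset_msat = 100_000` and the
+ /// min- and max-offsets below are shifted by that amount in opposite directions.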
+ fn update_history_buckets(&mut self, bucket_offset_msat: u64) {
let half_lives = self.now.duration_since(*self.last_updated).as_secs()
.checked_div(self.decay_params.historical_no_updates_half_life.as_secs())
.map(|v| v.try_into().unwrap_or(u32::max_value())).unwrap_or(u32::max_value());
let min_liquidity_offset_msat = self.decayed_offset_msat(*self.min_liquidity_offset_msat);
self.liquidity_history.min_liquidity_offset_history.track_datapoint(
- min_liquidity_offset_msat, self.capacity_msat
+ min_liquidity_offset_msat + bucket_offset_msat, self.capacity_msat
);
let max_liquidity_offset_msat = self.decayed_offset_msat(*self.max_liquidity_offset_msat);
self.liquidity_history.max_liquidity_offset_history.track_datapoint(
- max_liquidity_offset_msat, self.capacity_msat
+ max_liquidity_offset_msat.saturating_sub(bucket_offset_msat), self.capacity_msat
);
}
}
}
-impl<G: Deref<Target = NetworkGraph<L>>, L: Deref, T: Time> Score for ProbabilisticScorerUsingTime<G, L, T> where L::Target: Logger {
+impl<G: Deref<Target = NetworkGraph<L>>, L: Deref, T: Time> ScoreLookUp for ProbabilisticScorerUsingTime<G, L, T> where L::Target: Logger {
type ScoreParams = ProbabilisticScoringFeeParameters;
fn channel_penalty_msat(
&self, short_channel_id: u64, source: &NodeId, target: &NodeId, usage: ChannelUsage, score_params: &ProbabilisticScoringFeeParameters
.saturating_add(anti_probing_penalty_msat)
.saturating_add(base_penalty_msat)
}
+}
+impl<G: Deref<Target = NetworkGraph<L>>, L: Deref, T: Time> ScoreUpdate for ProbabilisticScorerUsingTime<G, L, T> where L::Target: Logger {
fn payment_path_failed(&mut self, path: &Path, short_channel_id: u64) {
let amount_msat = path.final_value_msat();
log_trace!(self.logger, "Scoring path through to SCID {} as having failed at {} msat", short_channel_id, amount_msat);
buckets: [u16; 32],
}
+ /// Buckets are stored in fixed point numbers with a 5 bit fractional part. Thus, the value
+ /// "one" is 32, or this constant.
+ pub const BUCKET_FIXED_POINT_ONE: u16 = 32;
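+ // For example, a stored bucket value of 48 represents 1.5 datapoints-worth of weight and 16
+ // represents half a datapoint; keeping a fractional part lets repeated decay shrink old data
+ // gradually instead of snapping straight to zero.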
+
impl HistoricalBucketRangeTracker {
pub(super) fn new() -> Self { Self { buckets: [0; 32] } }
pub(super) fn track_datapoint(&mut self, liquidity_offset_msat: u64, capacity_msat: u64) {
*e = ((*e as u32) * 2047 / 2048) as u16;
}
let bucket = pos_to_bucket(pos);
- self.buckets[bucket] = self.buckets[bucket].saturating_add(32);
+ self.buckets[bucket] = self.buckets[bucket].saturating_add(BUCKET_FIXED_POINT_ONE);
}
}
/// Decay all buckets by the given number of half-lives. Used to more aggressively remove old
}
impl<D: Deref<Target = HistoricalBucketRangeTracker>> HistoricalMinMaxBuckets<D> {
- #[inline]
pub(super) fn get_decayed_buckets<T: Time>(&self, now: T, last_updated: T, half_life: Duration)
- -> ([u16; 32], [u16; 32], u32) {
- let required_decays = now.duration_since(last_updated).as_secs()
- .checked_div(half_life.as_secs())
- .map_or(u32::max_value(), |decays| cmp::min(decays, u32::max_value() as u64) as u32);
+ -> Option<([u16; 32], [u16; 32])> {
+ let (_, required_decays) = self.get_total_valid_points(now, last_updated, half_life)?;
+
let mut min_buckets = *self.min_liquidity_offset_history;
min_buckets.time_decay_data(required_decays);
let mut max_buckets = *self.max_liquidity_offset_history;
max_buckets.time_decay_data(required_decays);
- (min_buckets.buckets, max_buckets.buckets, required_decays)
+ Some((min_buckets.buckets, max_buckets.buckets))
+ }
+ #[inline]
+ pub(super) fn get_total_valid_points<T: Time>(&self, now: T, last_updated: T, half_life: Duration)
+ -> Option<(u64, u32)> {
+ let required_decays = now.duration_since(last_updated).as_secs()
+ .checked_div(half_life.as_secs())
+ .map_or(u32::max_value(), |decays| cmp::min(decays, u32::max_value() as u64) as u32);
+
+ let mut total_valid_points_tracked = 0;
+ for (min_idx, min_bucket) in self.min_liquidity_offset_history.buckets.iter().enumerate() {
+ for max_bucket in self.max_liquidity_offset_history.buckets.iter().take(32 - min_idx) {
+ total_valid_points_tracked += (*min_bucket as u64) * (*max_bucket as u64);
+ }
+ }
+
+ // If the total valid points is smaller than 1.0 (i.e. 32 * 32 = 1024 in our fixed-point
+ // scheme, as each tracked point is the product of a min- and a max-bucket weight), treat
+ // it as if we were fully decayed and report that no valid data remains.
+ const FULLY_DECAYED: u16 = BUCKET_FIXED_POINT_ONE * BUCKET_FIXED_POINT_ONE;
+ if total_valid_points_tracked.checked_shr(required_decays).unwrap_or(0) < FULLY_DECAYED.into() {
+ return None;
+ }
+
+ Some((total_valid_points_tracked, required_decays))
}
#[inline]
// state). For each pair, we calculate the probability as if the bucket's corresponding
// min- and max- liquidity bounds were our current liquidity bounds and then multiply
// that probability by the weight of the selected buckets.
- let mut total_valid_points_tracked = 0;
-
let payment_pos = amount_to_pos(amount_msat, capacity_msat);
if payment_pos >= POSITION_TICKS { return None; }
// Check whether, once decayed, less than a full point of valid data remains and treat it as
// if we had no data. We don't actually use the decayed buckets below, though, as that would
// lose precision.
- let (decayed_min_buckets, decayed_max_buckets, required_decays) =
- self.get_decayed_buckets(now, last_updated, half_life);
- if decayed_min_buckets.iter().all(|v| *v == 0) || decayed_max_buckets.iter().all(|v| *v == 0) {
- return None;
- }
+ let (total_valid_points_tracked, _)
+ = self.get_total_valid_points(now, last_updated, half_life)?;
- for (min_idx, min_bucket) in self.min_liquidity_offset_history.buckets.iter().enumerate() {
- for max_bucket in self.max_liquidity_offset_history.buckets.iter().take(32 - min_idx) {
- total_valid_points_tracked += (*min_bucket as u64) * (*max_bucket as u64);
+ let mut cumulative_success_prob_times_billion = 0;
+ // Special-case the 0th min bucket - it generally means we failed a payment, so only
+ // consider the highest (i.e. largest-offset-from-max-capacity) max bucket for all
+ // points against the 0th min bucket. This avoids the case where we fail to route
+ // increasingly lower values over a channel, but treat each failure as a separate
+ // datapoint, many of which may have relatively high maximum-available-liquidity
+ // values, which will result in us thinking we have some nontrivial probability of
+ // routing up to that amount.
+ if self.min_liquidity_offset_history.buckets[0] != 0 {
+ let mut highest_max_bucket_with_points = 0; // The highest max-bucket with any data
+ let mut total_max_points = 0; // Total points in max-buckets to consider
+ for (max_idx, max_bucket) in self.max_liquidity_offset_history.buckets.iter().enumerate() {
+ if *max_bucket >= BUCKET_FIXED_POINT_ONE {
+ highest_max_bucket_with_points = cmp::max(highest_max_bucket_with_points, max_idx);
+ }
+ total_max_points += *max_bucket as u64;
+ }
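+ // Assuming the channel's liquidity is uniformly distributed up to the end of the highest
+ // max-bucket with data, the chance that a payment at `payment_pos` fits is the fraction of
+ // positions at or above it, i.e. (max_bucket_end_pos - payment_pos) / (max_bucket_end_pos + 1).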
+ let max_bucket_end_pos = BUCKET_START_POS[32 - highest_max_bucket_with_points] - 1;
+ if payment_pos < max_bucket_end_pos {
+ let bucket_prob_times_billion =
+ (self.min_liquidity_offset_history.buckets[0] as u64) * total_max_points
+ * 1024 * 1024 * 1024 / total_valid_points_tracked;
+ cumulative_success_prob_times_billion += bucket_prob_times_billion *
+ ((max_bucket_end_pos - payment_pos) as u64) /
+ // Add an additional one in the divisor as the payment bucket has been
+ // rounded down.
+ (max_bucket_end_pos + 1) as u64;
}
- }
- // If the total valid points is smaller than 1.0 (i.e. 32 in our fixed-point scheme), treat
- // it as if we were fully decayed.
- if total_valid_points_tracked.checked_shr(required_decays).unwrap_or(0) < 32*32 {
- return None;
}
- let mut cumulative_success_prob_times_billion = 0;
- for (min_idx, min_bucket) in self.min_liquidity_offset_history.buckets.iter().enumerate() {
+ for (min_idx, min_bucket) in self.min_liquidity_offset_history.buckets.iter().enumerate().skip(1) {
let min_bucket_start_pos = BUCKET_START_POS[min_idx];
for (max_idx, max_bucket) in self.max_liquidity_offset_history.buckets.iter().enumerate().take(32 - min_idx) {
let max_bucket_end_pos = BUCKET_START_POS[32 - max_idx] - 1;
use crate::ln::msgs::{ChannelAnnouncement, ChannelUpdate, UnsignedChannelAnnouncement, UnsignedChannelUpdate};
use crate::routing::gossip::{EffectiveCapacity, NetworkGraph, NodeId};
use crate::routing::router::{BlindedTail, Path, RouteHop};
- use crate::routing::scoring::{ChannelUsage, Score};
+ use crate::routing::scoring::{ChannelUsage, ScoreLookUp, ScoreUpdate};
use crate::util::ser::{ReadableArgs, Writeable};
use crate::util::test_utils::{self, TestLogger};
channel_features: channelmanager::provided_channel_features(&config),
fee_msat,
cltv_expiry_delta: 18,
+ maybe_announced_channel: true,
}
}
// Even after we tell the scorer we definitely have enough available liquidity, it will
// still remember that there was some failure in the past, and assign a non-0 penalty.
scorer.payment_path_failed(&payment_path_for_amount(1000), 43);
- assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, ¶ms), 198);
+ assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, ¶ms), 32);
// The first points should be decayed just slightly and the last bucket has a new point.
assert_eq!(scorer.historical_estimated_channel_liquidity_probabilities(42, &target),
Some(([31, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0],
// simply check bounds here.
let five_hundred_prob =
scorer.historical_estimated_payment_success_probability(42, &target, 500).unwrap();
- assert!(five_hundred_prob > 0.5);
- assert!(five_hundred_prob < 0.52);
+ assert!(five_hundred_prob > 0.66);
+ assert!(five_hundred_prob < 0.68);
let one_prob =
scorer.historical_estimated_payment_success_probability(42, &target, 1).unwrap();
- assert!(one_prob < 0.95);
- assert!(one_prob > 0.90);
+ assert!(one_prob < 1.0);
+ assert!(one_prob > 0.95);
// Advance the time forward 16 half-lives (which the docs claim will ensure all data is
// gone), and check that we're back to where we started.
// Once fully decayed we report that we have no data: fewer than a full point remains in the
// buckets, so the historical probabilities return `None`.
assert_eq!(scorer.historical_estimated_channel_liquidity_probabilities(42, &target),
- Some(([0; 32], [0; 32])));
+ None);
assert_eq!(scorer.historical_estimated_payment_success_probability(42, &target, 1), None);
let mut usage = ChannelUsage {
scorer.payment_path_failed(&payment_path_for_amount(1), 42);
assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, ¶ms), 2048);
usage.inflight_htlc_msat = 0;
- assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, ¶ms), 409);
+ assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, ¶ms), 866);
let usage = ChannelUsage {
amount_msat: 1,
assert_eq!(scorer.historical_estimated_channel_liquidity_probabilities(42, &target),
Some(([63, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[32, 31, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])));
- assert!(scorer.historical_estimated_payment_success_probability(42, &target, amount_msat)
- .unwrap() > 0.24);
- assert!(scorer.historical_estimated_payment_success_probability(42, &target, amount_msat)
- .unwrap() < 0.25);
+ assert_eq!(scorer.historical_estimated_payment_success_probability(42, &target, amount_msat),
+ Some(0.0));
}
}