X-Git-Url: http://git.bitcoin.ninja/index.cgi?a=blobdiff_plain;f=lightning%2Fsrc%2Frouting%2Fscoring.rs;h=5fdbf9ae3a9c8eff91ba0948ec86d8f9f3f7bdec;hb=refs%2Fheads%2F2023-04-expose-success-prob;hp=e60e4879b3d5e25ca121ed6d1a543b120317282f;hpb=3a643df99797ee2dd5cc19a6f9d090212b1c7963;p=rust-lightning

diff --git a/lightning/src/routing/scoring.rs b/lightning/src/routing/scoring.rs
index e60e4879..5fdbf9ae 100644
--- a/lightning/src/routing/scoring.rs
+++ b/lightning/src/routing/scoring.rs
@@ -19,8 +19,8 @@
 //! #
 //! # use lightning::routing::gossip::NetworkGraph;
 //! # use lightning::routing::router::{RouteParameters, find_route};
-//! # use lightning::routing::scoring::{ProbabilisticScorer, ProbabilisticScoringParameters};
-//! # use lightning::chain::keysinterface::KeysManager;
+//! # use lightning::routing::scoring::{ProbabilisticScorer, ProbabilisticScoringFeeParameters, ProbabilisticScoringDecayParameters};
+//! # use lightning::sign::KeysManager;
 //! # use lightning::util::logger::{Logger, Record};
 //! # use bitcoin::secp256k1::PublicKey;
 //! #
@@ -32,18 +32,20 @@
 //! # let logger = FakeLogger {};
 //! #
 //! // Use the default channel penalties.
-//! let params = ProbabilisticScoringParameters::default();
-//! let scorer = ProbabilisticScorer::new(params, &network_graph, &logger);
+//! let params = ProbabilisticScoringFeeParameters::default();
+//! let decay_params = ProbabilisticScoringDecayParameters::default();
+//! let scorer = ProbabilisticScorer::new(decay_params, &network_graph, &logger);
 //!
 //! // Or use custom channel penalties.
-//! let params = ProbabilisticScoringParameters {
-//! 	liquidity_penalty_multiplier_msat: 2 * 1000,
-//! 	..ProbabilisticScoringParameters::default()
+//! let params = ProbabilisticScoringFeeParameters {
+//! 	liquidity_penalty_multiplier_msat: 2 * 1000,
+//! 	..ProbabilisticScoringFeeParameters::default()
 //! };
-//! let scorer = ProbabilisticScorer::new(params, &network_graph, &logger);
+//! let decay_params = ProbabilisticScoringDecayParameters::default();
+//! let scorer = ProbabilisticScorer::new(decay_params, &network_graph, &logger);
 //! # let random_seed_bytes = [42u8; 32];
 //!
-//! let route = find_route(&payer, &route_params, &network_graph, None, &logger, &scorer, &random_seed_bytes);
+//! let route = find_route(&payer, &route_params, &network_graph, None, &logger, &scorer, &params, &random_seed_bytes);
 //! # }
 //! ```
 //!
@@ -86,6 +88,10 @@ macro_rules! define_score { ($($supertrait: path)*) => {
 ///
 /// Scoring is in terms of fees willing to be paid in order to avoid routing through a channel.
 pub trait Score $(: $supertrait)* {
+	/// A configurable type holding the parameters which are passed through to the scorer methods
+	/// on a per-routefinding-call basis, and which determine how suitable a channel is considered
+	/// for use.
+	type ScoreParams;
 	/// Returns the fee in msats willing to be paid to avoid routing `send_amt_msat` through the
 	/// given channel in the direction from `source` to `target`.
 	///
@@ -95,7 +101,7 @@ pub trait Score $(: $supertrait)* {
 	/// [`u64::max_value`] is given to indicate sufficient capacity for the invoice's full amount.
 	/// Thus, implementations should be overflow-safe.
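
To make the new calling convention concrete, here is a rough sketch reusing the placeholder bindings from the example above (`payer`, `route_params`, `network_graph`, `logger`, `scorer`, `random_seed_bytes`). Only the fee knobs change between calls; the decay parameters stay fixed inside the scorer:

    // Hypothetical illustration, not part of the diff: a retry that weights
    // reliability more heavily simply passes different fee parameters, while
    // reusing the same scorer (and thus its learned liquidity state).
    let mut retry_params = ProbabilisticScoringFeeParameters::default();
    retry_params.liquidity_penalty_multiplier_msat *= 2;
    let retry_route = find_route(&payer, &route_params, &network_graph, None, &logger,
        &scorer, &retry_params, &random_seed_bytes);
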
 	fn channel_penalty_msat(
-		&self, short_channel_id: u64, source: &NodeId, target: &NodeId, usage: ChannelUsage
+		&self, short_channel_id: u64, source: &NodeId, target: &NodeId, usage: ChannelUsage, score_params: &Self::ScoreParams
 	) -> u64;
 
 	/// Handles updating channel penalties after failing to route through a channel.
@@ -112,10 +118,11 @@ pub trait Score $(: $supertrait)* {
 }
 
 impl<S: Score, T: DerefMut<Target=S> $(+ $supertrait)*> Score for T {
+	type ScoreParams = S::ScoreParams;
 	fn channel_penalty_msat(
-		&self, short_channel_id: u64, source: &NodeId, target: &NodeId, usage: ChannelUsage
+		&self, short_channel_id: u64, source: &NodeId, target: &NodeId, usage: ChannelUsage, score_params: &Self::ScoreParams
 	) -> u64 {
-		self.deref().channel_penalty_msat(short_channel_id, source, target, usage)
+		self.deref().channel_penalty_msat(short_channel_id, source, target, usage, score_params)
 	}
 
 	fn payment_path_failed(&mut self, path: &Path, short_channel_id: u64) {
@@ -150,8 +157,11 @@ define_score!();
 ///
 /// [`find_route`]: crate::routing::router::find_route
 pub trait LockableScore<'a> {
+	/// The [`Score`] type.
+	type Score: 'a + Score;
+
 	/// The locked [`Score`] type.
-	type Locked: 'a + Score;
+	type Locked: DerefMut<Target = Self::Score> + Sized;
 
 	/// Returns the locked scorer.
 	fn lock(&'a self) -> Self::Locked;
@@ -165,61 +175,38 @@ pub trait WriteableScore<'a>: LockableScore<'a> + Writeable {}
 #[cfg(not(c_bindings))]
 impl<'a, T> WriteableScore<'a> for T where T: LockableScore<'a> + Writeable {}
 
-/// This is not exported to bindings users
+#[cfg(not(c_bindings))]
 impl<'a, T: 'a + Score> LockableScore<'a> for Mutex<T> {
+	type Score = T;
 	type Locked = MutexGuard<'a, T>;
 
-	fn lock(&'a self) -> MutexGuard<'a, T> {
+	fn lock(&'a self) -> Self::Locked {
 		Mutex::lock(self).unwrap()
 	}
 }
 
+#[cfg(not(c_bindings))]
 impl<'a, T: 'a + Score> LockableScore<'a> for RefCell<T> {
+	type Score = T;
 	type Locked = RefMut<'a, T>;
 
-	fn lock(&'a self) -> RefMut<'a, T> {
+	fn lock(&'a self) -> Self::Locked {
 		self.borrow_mut()
 	}
 }
 
 #[cfg(c_bindings)]
 /// A concrete implementation of [`LockableScore`] which supports multi-threading.
-pub struct MultiThreadedLockableScore<S: Score> {
-	score: Mutex<S>,
-}
-#[cfg(c_bindings)]
-/// A locked `MultiThreadedLockableScore`.
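
As an aside, a minimal hypothetical implementor of the reshaped trait (assuming the non-`c_bindings` build, where `Score` has no `Writeable` supertrait); a scorer that needs no per-call tuning uses the unit type for `ScoreParams`:

    struct FlatScorer { penalty_msat: u64 }

    impl Score for FlatScorer {
        // No per-call configuration is needed, so the parameter type is `()`.
        type ScoreParams = ();
        fn channel_penalty_msat(
            &self, _scid: u64, _source: &NodeId, _target: &NodeId, _usage: ChannelUsage,
            _score_params: &Self::ScoreParams
        ) -> u64 { self.penalty_msat }
        fn payment_path_failed(&mut self, _path: &Path, _scid: u64) {}
        fn payment_path_successful(&mut self, _path: &Path) {}
        fn probe_failed(&mut self, _path: &Path, _scid: u64) {}
        fn probe_successful(&mut self, _path: &Path) {}
    }
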
-pub struct MultiThreadedScoreLock<'a, S: Score>(MutexGuard<'a, S>);
-#[cfg(c_bindings)]
-impl<'a, T: Score + 'a> Score for MultiThreadedScoreLock<'a, T> {
-	fn channel_penalty_msat(&self, scid: u64, source: &NodeId, target: &NodeId, usage: ChannelUsage) -> u64 {
-		self.0.channel_penalty_msat(scid, source, target, usage)
-	}
-	fn payment_path_failed(&mut self, path: &Path, short_channel_id: u64) {
-		self.0.payment_path_failed(path, short_channel_id)
-	}
-	fn payment_path_successful(&mut self, path: &Path) {
-		self.0.payment_path_successful(path)
-	}
-	fn probe_failed(&mut self, path: &Path, short_channel_id: u64) {
-		self.0.probe_failed(path, short_channel_id)
-	}
-	fn probe_successful(&mut self, path: &Path) {
-		self.0.probe_successful(path)
-	}
-}
-#[cfg(c_bindings)]
-impl<'a, T: Score + 'a> Writeable for MultiThreadedScoreLock<'a, T> {
-	fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
-		self.0.write(writer)
-	}
+pub struct MultiThreadedLockableScore<T: Score> {
+	score: Mutex<T>,
 }
 
 #[cfg(c_bindings)]
-impl<'a, T: Score + 'a> LockableScore<'a> for MultiThreadedLockableScore<T> {
+impl<'a, T: 'a + Score> LockableScore<'a> for MultiThreadedLockableScore<T> {
+	type Score = T;
 	type Locked = MultiThreadedScoreLock<'a, T>;
 
-	fn lock(&'a self) -> MultiThreadedScoreLock<'a, T> {
+	fn lock(&'a self) -> Self::Locked {
 		MultiThreadedScoreLock(Mutex::lock(&self.score).unwrap())
 	}
 }
@@ -232,7 +219,7 @@ impl<T: Score> Writeable for MultiThreadedLockableScore<T> {
 }
 
 #[cfg(c_bindings)]
-impl<'a, T: Score + 'a> WriteableScore<'a> for MultiThreadedLockableScore<T> {}
+impl<'a, T: 'a + Score> WriteableScore<'a> for MultiThreadedLockableScore<T> {}
 
 #[cfg(c_bindings)]
 impl<T: Score> MultiThreadedLockableScore<T> {
@@ -243,21 +230,34 @@ impl<T: Score> MultiThreadedLockableScore<T> {
 }
 
 #[cfg(c_bindings)]
-/// This is not exported to bindings users
-impl<'a, T: Writeable> Writeable for RefMut<'a, T> {
+/// A locked `MultiThreadedLockableScore`.
+pub struct MultiThreadedScoreLock<'a, T: Score>(MutexGuard<'a, T>);
+
+#[cfg(c_bindings)]
+impl<'a, T: 'a + Score> Writeable for MultiThreadedScoreLock<'a, T> {
 	fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
-		T::write(&**self, writer)
+		self.0.write(writer)
 	}
 }
 
 #[cfg(c_bindings)]
-/// This is not exported to bindings users
-impl<'a, S: Writeable> Writeable for MutexGuard<'a, S> {
-	fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
-		S::write(&**self, writer)
-	}
+impl<'a, T: 'a + Score> DerefMut for MultiThreadedScoreLock<'a, T> {
+	fn deref_mut(&mut self) -> &mut Self::Target {
+		self.0.deref_mut()
+	}
+}
+
+#[cfg(c_bindings)]
+impl<'a, T: 'a + Score> Deref for MultiThreadedScoreLock<'a, T> {
+	type Target = T;
+
+	fn deref(&self) -> &Self::Target {
+		self.0.deref()
+	}
 }
 
+
 /// Proposed use of a channel passed as a parameter to [`Score::channel_penalty_msat`].
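
A sketch of what the `Deref`/`DerefMut`-based `Locked` type buys callers: any `LockableScore`'s guard can be used directly as the scorer, without the old per-guard `Score` forwarding impls (names here are placeholders):

    fn note_failure<'a, S: LockableScore<'a>>(scorer: &'a S, path: &Path, scid: u64) {
        // `lock()` returns a guard that derefs to `S::Score`, so `Score`
        // methods are called straight through it; the lock is released when
        // the guard is dropped.
        let mut locked = scorer.lock();
        locked.payment_path_failed(path, scid);
    }
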
 #[derive(Clone, Copy, Debug, PartialEq)]
 pub struct ChannelUsage {
@@ -286,7 +286,8 @@ impl FixedPenaltyScorer {
 }
 
 impl Score for FixedPenaltyScorer {
-	fn channel_penalty_msat(&self, _: u64, _: &NodeId, _: &NodeId, _: ChannelUsage) -> u64 {
+	type ScoreParams = ();
+	fn channel_penalty_msat(&self, _: u64, _: &NodeId, _: &NodeId, _: ChannelUsage, _score_params: &Self::ScoreParams) -> u64 {
 		self.penalty_msat
 	}
 
@@ -316,7 +317,7 @@ impl ReadableArgs<u64> for FixedPenaltyScorer {
 }
 
 #[cfg(not(feature = "no-std"))]
-type ConfiguredTime = std::time::Instant;
+type ConfiguredTime = crate::util::time::MonotonicTime;
 #[cfg(feature = "no-std")]
 use crate::util::time::Eternity;
 #[cfg(feature = "no-std")]
@@ -353,11 +354,11 @@ type ConfiguredTime = Eternity;
 /// behavior.
 ///
 /// [1]: https://arxiv.org/abs/2107.05322
-/// [`liquidity_penalty_multiplier_msat`]: ProbabilisticScoringParameters::liquidity_penalty_multiplier_msat
-/// [`liquidity_penalty_amount_multiplier_msat`]: ProbabilisticScoringParameters::liquidity_penalty_amount_multiplier_msat
-/// [`liquidity_offset_half_life`]: ProbabilisticScoringParameters::liquidity_offset_half_life
-/// [`historical_liquidity_penalty_multiplier_msat`]: ProbabilisticScoringParameters::historical_liquidity_penalty_multiplier_msat
-/// [`historical_liquidity_penalty_amount_multiplier_msat`]: ProbabilisticScoringParameters::historical_liquidity_penalty_amount_multiplier_msat
+/// [`liquidity_penalty_multiplier_msat`]: ProbabilisticScoringFeeParameters::liquidity_penalty_multiplier_msat
+/// [`liquidity_penalty_amount_multiplier_msat`]: ProbabilisticScoringFeeParameters::liquidity_penalty_amount_multiplier_msat
+/// [`liquidity_offset_half_life`]: ProbabilisticScoringDecayParameters::liquidity_offset_half_life
+/// [`historical_liquidity_penalty_multiplier_msat`]: ProbabilisticScoringFeeParameters::historical_liquidity_penalty_multiplier_msat
+/// [`historical_liquidity_penalty_amount_multiplier_msat`]: ProbabilisticScoringFeeParameters::historical_liquidity_penalty_amount_multiplier_msat
 pub type ProbabilisticScorer<G, L> = ProbabilisticScorerUsingTime::<G, L, ConfiguredTime>;
 
 /// Probabilistic [`Score`] implementation.
@@ -365,7 +366,7 @@ pub type ProbabilisticScorer<G, L> = ProbabilisticScorerUsingTime::<G, L, ConfiguredTime>;
 pub struct ProbabilisticScorerUsingTime<G: Deref<Target = NetworkGraph<L>>, L: Deref, T: Time>
 where L::Target: Logger {
-	params: ProbabilisticScoringParameters,
+	decay_params: ProbabilisticScoringDecayParameters,
 	network_graph: G,
 	logger: L,
 	// TODO: Remove entries of closed channels.
@@ -380,7 +381,7 @@ where L::Target: Logger {
 /// The penalty applied to any channel by the [`ProbabilisticScorer`] is the sum of each of the
 /// parameters here.
 #[derive(Clone)]
-pub struct ProbabilisticScoringParameters {
+pub struct ProbabilisticScoringFeeParameters {
 	/// A fixed penalty in msats to apply to each channel.
 	///
 	/// Default value: 500 msat
@@ -415,30 +416,9 @@ pub struct ProbabilisticScoringParameters {
 	///
 	/// Default value: 30,000 msat
 	///
-	/// [`liquidity_offset_half_life`]: Self::liquidity_offset_half_life
+	/// [`liquidity_offset_half_life`]: ProbabilisticScoringDecayParameters::liquidity_offset_half_life
 	pub liquidity_penalty_multiplier_msat: u64,
 
-	/// Whenever this amount of time elapses since the last update to a channel's liquidity bounds,
-	/// the distance from the bounds to "zero" is cut in half. In other words, the lower-bound on
-	/// the available liquidity is halved and the upper-bound moves half-way to the channel's total
-	/// capacity.
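
For orientation, a hypothetical query against the fixed-penalty scorer; `with_penalty`, the `ChannelUsage` field set, and the `source`/`target` node ids are assumed from parts of the file this diff elides:

    let usage = ChannelUsage {
        amount_msat: 1_000_000,
        inflight_htlc_msat: 0,
        effective_capacity: EffectiveCapacity::Total {
            capacity_msat: 500_000_000, htlc_maximum_msat: 250_000_000,
        },
    };
    let scorer = FixedPenaltyScorer::with_penalty(800);
    // The penalty ignores the usage entirely; note the new trailing `&()` params.
    assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &()), 800);
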
-	///
-	/// Because halving the liquidity bounds grows the uncertainty on the channel's liquidity,
-	/// the penalty for an amount within the new bounds may change. See the [`ProbabilisticScorer`]
-	/// struct documentation for more info on the way the liquidity bounds are used.
-	///
-	/// For example, if the channel's capacity is 1 million sats, and the current upper and lower
-	/// liquidity bounds are 200,000 sats and 600,000 sats, after this amount of time the upper
-	/// and lower liquidity bounds will be decayed to 100,000 and 800,000 sats.
-	///
-	/// Default value: 6 hours
-	///
-	/// # Note
-	///
-	/// When built with the `no-std` feature, time will never elapse. Therefore, the channel
-	/// liquidity knowledge will never decay except when the bounds cross.
-	pub liquidity_offset_half_life: Duration,
-
 	/// A multiplier used in conjunction with a payment amount and the negative `log10` of the
 	/// channel's success probability for the payment, as determined by our latest estimates of the
 	/// channel's liquidity, to determine the amount penalty.
@@ -495,16 +475,6 @@ pub struct ProbabilisticScoringParameters {
 	/// [`liquidity_penalty_amount_multiplier_msat`]: Self::liquidity_penalty_amount_multiplier_msat
 	pub historical_liquidity_penalty_amount_multiplier_msat: u64,
 
-	/// If we aren't learning any new datapoints for a channel, the historical liquidity bounds
-	/// tracking can simply live on with increasingly stale data. Instead, when a channel has not
-	/// seen a liquidity estimate update for this amount of time, the historical datapoints are
-	/// decayed by half.
-	///
-	/// Note that after 16 or more half lives all historical data will be completely gone.
-	///
-	/// Default value: 14 days
-	pub historical_no_updates_half_life: Duration,
-
 	/// Manual penalties used for the given nodes. Allows setting a particular penalty for a given
 	/// node. Note that a manual penalty of `u64::max_value()` means the node would not ever be
 	/// considered during path finding.
@@ -513,9 +483,10 @@ pub struct ProbabilisticScoringParameters {
 	pub manual_node_penalties: HashMap<NodeId, u64>,
 
 	/// This penalty is applied when `htlc_maximum_msat` is equal to or larger than half of the
-	/// channel's capacity, which makes us prefer nodes with a smaller `htlc_maximum_msat`. We
-	/// treat such nodes preferentially as this makes balance discovery attacks harder to execute,
-	/// thereby creating an incentive to restrict `htlc_maximum_msat` and improve privacy.
+	/// channel's capacity (i.e., `htlc_maximum_msat >= channel_capacity / 2`), which makes us
+	/// prefer nodes with a smaller `htlc_maximum_msat`. We treat such nodes preferentially
+	/// as this makes balance discovery attacks harder to execute, thereby creating an incentive
+	/// to restrict `htlc_maximum_msat` and improve privacy.
 	///
 	/// Default value: 250 msat
 	pub anti_probing_penalty_msat: u64,
@@ -540,144 +511,133 @@ pub struct ProbabilisticScoringParameters {
 	pub considered_impossible_penalty_msat: u64,
 }
 
-/// Tracks the historical state of a distribution as a weighted average of how much time was spent
-/// in each of 8 buckets.
-#[derive(Clone, Copy)]
-struct HistoricalBucketRangeTracker {
-	buckets: [u16; 8],
+impl Default for ProbabilisticScoringFeeParameters {
+	fn default() -> Self {
+		Self {
+			base_penalty_msat: 500,
+			base_penalty_amount_multiplier_msat: 8192,
+			liquidity_penalty_multiplier_msat: 30_000,
+			liquidity_penalty_amount_multiplier_msat: 192,
+			manual_node_penalties: HashMap::new(),
+			anti_probing_penalty_msat: 250,
+			considered_impossible_penalty_msat: 1_0000_0000_000,
+			historical_liquidity_penalty_multiplier_msat: 10_000,
+			historical_liquidity_penalty_amount_multiplier_msat: 64,
+		}
+	}
 }
 
-impl HistoricalBucketRangeTracker {
-	fn new() -> Self { Self { buckets: [0; 8] } }
-	fn track_datapoint(&mut self, liquidity_offset_msat: u64, capacity_msat: u64) {
-		// We have 8 leaky buckets for min and max liquidity. Each bucket tracks the amount of time
-		// we spend in each bucket as a 16-bit fixed-point number with a 5 bit fractional part.
-		//
-		// Each time we update our liquidity estimate, we add 32 (1.0 in our fixed-point system) to
-		// the buckets for the current min and max liquidity offset positions.
-		//
-		// We then decay each bucket by multiplying by 2047/2048 (avoiding dividing by a
-		// non-power-of-two). This ensures we can't actually overflow the u16 - when we get to
-		// 63,457 adding 32 and decaying by 2047/2048 leaves us back at 63,457.
-		//
-		// In total, this allows us to track data for the last 8,000 or so payments across a given
-		// channel.
-		//
-		// These constants are a balance - we try to fit in 2 bytes per bucket to reduce overhead,
-		// and need to balance having more bits in the decimal part (to ensure decay isn't too
-		// non-linear) with having too few bits in the mantissa, causing us to not store very many
-		// datapoints.
-		//
-		// The constants were picked experimentally, selecting a decay amount that restricts us
-		// from overflowing buckets without having to cap them manually.
-
-		// Ensure the bucket index is in the range [0, 7], even if the liquidity offset is zero or
-		// the channel's capacity, though the second should generally never happen.
-		debug_assert!(liquidity_offset_msat <= capacity_msat);
-		let bucket_idx: u8 = (liquidity_offset_msat * 8 / capacity_msat.saturating_add(1))
-			.try_into().unwrap_or(32); // 32 is bogus for 8 buckets, and will be ignored
-		debug_assert!(bucket_idx < 8);
-		if bucket_idx < 8 {
-			for e in self.buckets.iter_mut() {
-				*e = ((*e as u32) * 2047 / 2048) as u16;
-			}
-			self.buckets[bucket_idx as usize] = self.buckets[bucket_idx as usize].saturating_add(32);
-		}
+impl ProbabilisticScoringFeeParameters {
+	/// Marks the node with the given `node_id` as banned,
+	/// i.e., it will be avoided during path finding.
+	pub fn add_banned(&mut self, node_id: &NodeId) {
+		self.manual_node_penalties.insert(*node_id, u64::max_value());
 	}
-	/// Decay all buckets by the given number of half-lives. Used to more aggressively remove old
-	/// datapoints as we receive newer information.
-	fn time_decay_data(&mut self, half_lives: u32) {
-		for e in self.buckets.iter_mut() {
-			*e = e.checked_shr(half_lives).unwrap_or(0);
+
+	/// Marks all nodes in the given list as banned, i.e.,
+	/// they will be avoided during path finding.
+	pub fn add_banned_from_list(&mut self, node_ids: Vec<NodeId>) {
+		for id in node_ids {
+			self.manual_node_penalties.insert(id, u64::max_value());
 		}
 	}
-}
 
-impl_writeable_tlv_based!(HistoricalBucketRangeTracker, { (0, buckets, required) });
+	/// Removes the node with the given `node_id` from the list of nodes to avoid.
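
A hypothetical use of the ban helpers above (continued just below; `node_a`/`node_b` stand in for real `NodeId`s). Note the penalties now travel with the fee parameters into each routing call rather than living on the scorer:

    let mut params = ProbabilisticScoringFeeParameters::default();
    params.add_banned(&node_a);                 // u64::max_value(): never considered
    params.set_manual_penalty(&node_b, 50_000); // strongly, but not absolutely, avoided
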
+	pub fn remove_banned(&mut self, node_id: &NodeId) {
+		self.manual_node_penalties.remove(node_id);
+	}
 
-struct HistoricalMinMaxBuckets<'a> {
-	min_liquidity_offset_history: &'a HistoricalBucketRangeTracker,
-	max_liquidity_offset_history: &'a HistoricalBucketRangeTracker,
-}
+	/// Sets a manual penalty for the given node.
+	pub fn set_manual_penalty(&mut self, node_id: &NodeId, penalty: u64) {
+		self.manual_node_penalties.insert(*node_id, penalty);
+	}
 
-impl HistoricalMinMaxBuckets<'_> {
-	#[inline]
-	fn get_decayed_buckets<T: Time>(&self, now: T, last_updated: T, half_life: Duration)
-	-> ([u16; 8], [u16; 8], u32) {
-		let required_decays = now.duration_since(last_updated).as_secs()
-			.checked_div(half_life.as_secs())
-			.map_or(u32::max_value(), |decays| cmp::min(decays, u32::max_value() as u64) as u32);
-		let mut min_buckets = *self.min_liquidity_offset_history;
-		min_buckets.time_decay_data(required_decays);
-		let mut max_buckets = *self.max_liquidity_offset_history;
-		max_buckets.time_decay_data(required_decays);
-		(min_buckets.buckets, max_buckets.buckets, required_decays)
+	/// Removes the node with the given `node_id` from the list of manual penalties.
+	pub fn remove_manual_penalty(&mut self, node_id: &NodeId) {
+		self.manual_node_penalties.remove(node_id);
 	}
 
-	#[inline]
-	fn calculate_success_probability_times_billion<T: Time>(
-		&self, now: T, last_updated: T, half_life: Duration, payment_amt_64th_bucket: u8)
-	-> Option<u64> {
-		// If historical penalties are enabled, calculate the penalty by walking the set of
-		// historical liquidity bucket (min, max) combinations (where min_idx < max_idx) and, for
-		// each, calculate the probability of success given our payment amount, then total the
-		// weighted average probability of success.
-		//
-		// We use a sliding scale to decide which point within a given bucket will be compared to
-		// the amount being sent - for lower-bounds, the amount being sent is compared to the lower
-		// edge of the first bucket (i.e. zero), but compared to the upper 7/8ths of the last
-		// bucket (i.e. 9 times the index, or 63), with each bucket in between increasing the
-		// comparison point by 1/64th. For upper-bounds, the same applies, however with an offset
-		// of 1/64th (i.e. starting at one and ending at 64). This avoids failing to assign
-		// penalties to channels at the edges.
-		//
-		// If we used the bottom edge of buckets, we'd end up never assigning any penalty at all to
-		// such a channel when sending less than ~0.19% of the channel's capacity (e.g. ~200k sats
-		// for a 1 BTC channel!).
-		//
-		// If we used the middle of each bucket we'd never assign any penalty at all when sending
-		// less than 1/16th of a channel's capacity, or 1/8th if we used the top of the bucket.
-		let mut total_valid_points_tracked = 0;
-
-		// Check if all our buckets are zero, once decayed and treat it as if we had no data. We
-		// don't actually use the decayed buckets, though, as that would lose precision.
-		let (decayed_min_buckets, decayed_max_buckets, required_decays) =
-			self.get_decayed_buckets(now, last_updated, half_life);
-		if decayed_min_buckets.iter().all(|v| *v == 0) || decayed_max_buckets.iter().all(|v| *v == 0) {
-			return None;
-		}
+	/// Clears the list of manual penalties that are applied during path finding.
+	pub fn clear_manual_penalties(&mut self) {
+		self.manual_node_penalties = HashMap::new();
+	}
+}
 
-		for (min_idx, min_bucket) in self.min_liquidity_offset_history.buckets.iter().enumerate() {
-			for max_bucket in self.max_liquidity_offset_history.buckets.iter().take(8 - min_idx) {
-				total_valid_points_tracked += (*min_bucket as u64) * (*max_bucket as u64);
-			}
-		}
-		// If the total valid points is smaller than 1.0 (i.e. 32 in our fixed-point scheme), treat
-		// it as if we were fully decayed.
-		if total_valid_points_tracked.checked_shr(required_decays).unwrap_or(0) < 32*32 {
-			return None;
+#[cfg(test)]
+impl ProbabilisticScoringFeeParameters {
+	fn zero_penalty() -> Self {
+		Self {
+			base_penalty_msat: 0,
+			base_penalty_amount_multiplier_msat: 0,
+			liquidity_penalty_multiplier_msat: 0,
+			liquidity_penalty_amount_multiplier_msat: 0,
+			historical_liquidity_penalty_multiplier_msat: 0,
+			historical_liquidity_penalty_amount_multiplier_msat: 0,
+			manual_node_penalties: HashMap::new(),
+			anti_probing_penalty_msat: 0,
+			considered_impossible_penalty_msat: 0,
 		}
+	}
+}
 
-		let mut cumulative_success_prob_times_billion = 0;
-		for (min_idx, min_bucket) in self.min_liquidity_offset_history.buckets.iter().enumerate() {
-			for (max_idx, max_bucket) in self.max_liquidity_offset_history.buckets.iter().enumerate().take(8 - min_idx) {
-				let bucket_prob_times_million = (*min_bucket as u64) * (*max_bucket as u64)
-					* 1024 * 1024 / total_valid_points_tracked;
-				let min_64th_bucket = min_idx as u8 * 9;
-				let max_64th_bucket = (7 - max_idx as u8) * 9 + 1;
-				if payment_amt_64th_bucket > max_64th_bucket {
-					// Success probability 0, the payment amount is above the max liquidity
-				} else if payment_amt_64th_bucket <= min_64th_bucket {
-					cumulative_success_prob_times_billion += bucket_prob_times_million * 1024;
-				} else {
-					cumulative_success_prob_times_billion += bucket_prob_times_million *
-						((max_64th_bucket - payment_amt_64th_bucket) as u64) * 1024 /
-						((max_64th_bucket - min_64th_bucket) as u64);
-				}
-			}
+/// Parameters for configuring [`ProbabilisticScorer`].
+///
+/// Used to configure decay parameters that are static throughout the lifetime of the scorer.
+/// These decay parameters affect the channel's penalty score and are not changed on a
+/// per-route-penalty call.
+#[derive(Copy, Clone)]
+pub struct ProbabilisticScoringDecayParameters {
+	/// If we aren't learning any new datapoints for a channel, the historical liquidity bounds
+	/// tracking can simply live on with increasingly stale data. Instead, when a channel has not
+	/// seen a liquidity estimate update for this amount of time, the historical datapoints are
+	/// decayed by half. For an example of `historical_no_updates_half_life` in use, see
+	/// [`historical_estimated_channel_liquidity_probabilities`].
+	///
+	/// Note that after 16 or more half lives all historical data will be completely gone.
+	///
+	/// Default value: 14 days
+	///
+	/// [`historical_estimated_channel_liquidity_probabilities`]: ProbabilisticScorerUsingTime::historical_estimated_channel_liquidity_probabilities
+	pub historical_no_updates_half_life: Duration,
 
+	/// Whenever this amount of time elapses since the last update to a channel's liquidity bounds,
+	/// the distance from the bounds to "zero" is cut in half. In other words, the lower-bound on
+	/// the available liquidity is halved and the upper-bound moves half-way to the channel's total
+	/// capacity.
+	///
+	/// Because halving the liquidity bounds grows the uncertainty on the channel's liquidity,
+	/// the penalty for an amount within the new bounds may change. See the [`ProbabilisticScorer`]
+	/// struct documentation for more info on the way the liquidity bounds are used.
+	///
+	/// For example, if the channel's capacity is 1 million sats, and the current upper and lower
+	/// liquidity bounds are 200,000 sats and 600,000 sats, after this amount of time the upper
+	/// and lower liquidity bounds will be decayed to 100,000 and 800,000 sats.
+	///
+	/// Default value: 6 hours
+	///
+	/// # Note
+	///
+	/// When built with the `no-std` feature, time will never elapse. Therefore, the channel
+	/// liquidity knowledge will never decay except when the bounds cross.
+	pub liquidity_offset_half_life: Duration,
+}
+
+impl Default for ProbabilisticScoringDecayParameters {
+	fn default() -> Self {
+		Self {
+			liquidity_offset_half_life: Duration::from_secs(6 * 60 * 60),
+			historical_no_updates_half_life: Duration::from_secs(60 * 60 * 24 * 14),
 		}
+	}
+}
 
-		Some(cumulative_success_prob_times_billion)
+#[cfg(test)]
+impl ProbabilisticScoringDecayParameters {
+	fn zero_penalty() -> Self {
+		Self {
+			liquidity_offset_half_life: Duration::from_secs(6 * 60 * 60),
+			historical_no_updates_half_life: Duration::from_secs(60 * 60 * 24 * 14),
+		}
 	}
 }
 
@@ -702,24 +662,23 @@ struct ChannelLiquidity<T: Time> {
 /// A snapshot of [`ChannelLiquidity`] in one direction assuming a certain channel capacity and
 /// decayed with a given half life.
-struct DirectedChannelLiquidity<'a, L: Deref<Target = u64>, BRT: Deref<Target = HistoricalBucketRangeTracker>, T: Time, U: Deref<Target = T>> {
+struct DirectedChannelLiquidity<L: Deref<Target = u64>, BRT: Deref<Target = HistoricalBucketRangeTracker>, T: Time, U: Deref<Target = T>> {
 	min_liquidity_offset_msat: L,
 	max_liquidity_offset_msat: L,
-	min_liquidity_offset_history: BRT,
-	max_liquidity_offset_history: BRT,
+	liquidity_history: HistoricalMinMaxBuckets<BRT>,
 	inflight_htlc_msat: u64,
 	capacity_msat: u64,
 	last_updated: U,
 	now: T,
-	params: &'a ProbabilisticScoringParameters,
+	decay_params: ProbabilisticScoringDecayParameters,
 }
 
 impl<G: Deref<Target = NetworkGraph<L>>, L: Deref, T: Time> ProbabilisticScorerUsingTime<G, L, T> where L::Target: Logger {
 	/// Creates a new scorer using the given scoring parameters for sending payments from a node
 	/// through a network graph.
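
A sketch of constructing a scorer with non-default decay behavior, per the fields just documented (`network_graph` and `logger` are the usual placeholders):

    // Decay parameters are fixed for the scorer's lifetime; forgetting learned
    // bounds faster than the 6-hour default requires building a new scorer.
    let decay_params = ProbabilisticScoringDecayParameters {
        liquidity_offset_half_life: Duration::from_secs(60 * 60), // 1 hour
        ..ProbabilisticScoringDecayParameters::default()
    };
    let scorer = ProbabilisticScorer::new(decay_params, &network_graph, &logger);
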
- pub fn new(params: ProbabilisticScoringParameters, network_graph: G, logger: L) -> Self { + pub fn new(decay_params: ProbabilisticScoringDecayParameters, network_graph: G, logger: L) -> Self { Self { - params, + decay_params, network_graph, logger, channel_liquidities: HashMap::new(), @@ -745,26 +704,38 @@ impl>, L: Deref, T: Time> ProbabilisticScorerU let log_direction = |source, target| { if let Some((directed_info, _)) = chan_debug.as_directed_to(target) { let amt = directed_info.effective_capacity().as_msat(); - let dir_liq = liq.as_directed(source, target, 0, amt, &self.params); + let dir_liq = liq.as_directed(source, target, 0, amt, self.decay_params); - let buckets = HistoricalMinMaxBuckets { - min_liquidity_offset_history: &dir_liq.min_liquidity_offset_history, - max_liquidity_offset_history: &dir_liq.max_liquidity_offset_history, - }; - let (min_buckets, max_buckets, _) = buckets.get_decayed_buckets(now, - *dir_liq.last_updated, self.params.historical_no_updates_half_life); + let (min_buckets, max_buckets) = dir_liq.liquidity_history + .get_decayed_buckets(now, *dir_liq.last_updated, + self.decay_params.historical_no_updates_half_life) + .unwrap_or(([0; 32], [0; 32])); log_debug!(self.logger, core::concat!( "Liquidity from {} to {} via {} is in the range ({}, {}).\n", - "\tHistorical min liquidity octile relative probabilities: {} {} {} {} {} {} {} {}\n", - "\tHistorical max liquidity octile relative probabilities: {} {} {} {} {} {} {} {}"), + "\tHistorical min liquidity bucket relative probabilities:\n", + "\t\t{} {} {} {} {} {} {} {} {} {} {} {} {} {} {} {} {} {} {} {} {} {} {} {} {} {} {} {} {} {} {} {}\n", + "\tHistorical max liquidity bucket relative probabilities:\n", + "\t\t{} {} {} {} {} {} {} {} {} {} {} {} {} {} {} {} {} {} {} {} {} {} {} {} {} {} {} {} {} {} {} {}"), source, target, scid, dir_liq.min_liquidity_msat(), dir_liq.max_liquidity_msat(), - min_buckets[0], min_buckets[1], min_buckets[2], min_buckets[3], - min_buckets[4], min_buckets[5], min_buckets[6], min_buckets[7], + min_buckets[ 0], min_buckets[ 1], min_buckets[ 2], min_buckets[ 3], + min_buckets[ 4], min_buckets[ 5], min_buckets[ 6], min_buckets[ 7], + min_buckets[ 8], min_buckets[ 9], min_buckets[10], min_buckets[11], + min_buckets[12], min_buckets[13], min_buckets[14], min_buckets[15], + min_buckets[16], min_buckets[17], min_buckets[18], min_buckets[19], + min_buckets[20], min_buckets[21], min_buckets[22], min_buckets[23], + min_buckets[24], min_buckets[25], min_buckets[26], min_buckets[27], + min_buckets[28], min_buckets[29], min_buckets[30], min_buckets[31], // Note that the liquidity buckets are an offset from the edge, so we // inverse the max order to get the probabilities from zero. 
- max_buckets[7], max_buckets[6], max_buckets[5], max_buckets[4], - max_buckets[3], max_buckets[2], max_buckets[1], max_buckets[0]); + max_buckets[31], max_buckets[30], max_buckets[29], max_buckets[28], + max_buckets[27], max_buckets[26], max_buckets[25], max_buckets[24], + max_buckets[23], max_buckets[22], max_buckets[21], max_buckets[20], + max_buckets[19], max_buckets[18], max_buckets[17], max_buckets[16], + max_buckets[15], max_buckets[14], max_buckets[13], max_buckets[12], + max_buckets[11], max_buckets[10], max_buckets[ 9], max_buckets[ 8], + max_buckets[ 7], max_buckets[ 6], max_buckets[ 5], max_buckets[ 4], + max_buckets[ 3], max_buckets[ 2], max_buckets[ 1], max_buckets[ 0]); } else { log_debug!(self.logger, "No amount known for SCID {} from {:?} to {:?}", scid, source, target); } @@ -787,7 +758,7 @@ impl>, L: Deref, T: Time> ProbabilisticScorerU if let Some(liq) = self.channel_liquidities.get(&scid) { if let Some((directed_info, source)) = chan.as_directed_to(target) { let amt = directed_info.effective_capacity().as_msat(); - let dir_liq = liq.as_directed(source, target, 0, amt, &self.params); + let dir_liq = liq.as_directed(source, target, 0, amt, self.decay_params); return Some((dir_liq.min_liquidity_msat(), dir_liq.max_liquidity_msat())); } } @@ -798,40 +769,45 @@ impl>, L: Deref, T: Time> ProbabilisticScorerU /// Query the historical estimated minimum and maximum liquidity available for sending a /// payment over the channel with `scid` towards the given `target` node. /// - /// Returns two sets of 8 buckets. The first set describes the octiles for lower-bound - /// liquidity estimates, the second set describes the octiles for upper-bound liquidity - /// estimates. Each bucket describes the relative frequency at which we've seen a liquidity - /// bound in the octile relative to the channel's total capacity, on an arbitrary scale. - /// Because the values are slowly decayed, more recent data points are weighted more heavily - /// than older datapoints. + /// Returns two sets of 32 buckets. The first set describes the lower-bound liquidity history, + /// the second set describes the upper-bound liquidity history. Each bucket describes the + /// relative frequency at which we've seen a liquidity bound in the bucket's range relative to + /// the channel's total capacity, on an arbitrary scale. Because the values are slowly decayed, + /// more recent data points are weighted more heavily than older datapoints. /// - /// When scoring, the estimated probability that an upper-/lower-bound lies in a given octile - /// relative to the channel's total capacity is calculated by dividing that bucket's value with - /// the total of all buckets for the given bound. + /// Note that the range of each bucket varies by its location to provide more granular results + /// at the edges of a channel's capacity, where it is more likely to sit. /// - /// For example, a value of `[0, 0, 0, 0, 0, 0, 32]` indicates that we believe the probability - /// of a bound being in the top octile to be 100%, and have never (recently) seen it in any - /// other octiles. A value of `[31, 0, 0, 0, 0, 0, 0, 32]` indicates we've seen the bound being - /// both in the top and bottom octile, and roughly with similar (recent) frequency. + /// When scoring, the estimated probability that an upper-/lower-bound lies in a given bucket + /// is calculated by dividing that bucket's value with the total value of all buckets. 
+ /// + /// For example, using a lower bucket count for illustrative purposes, a value of + /// `[0, 0, 0, ..., 0, 32]` indicates that we believe the probability of a bound being very + /// close to the channel's capacity to be 100%, and have never (recently) seen it in any other + /// bucket. A value of `[31, 0, 0, ..., 0, 0, 32]` indicates we've seen the bound being both + /// in the top and bottom bucket, and roughly with similar (recent) frequency. /// /// Because the datapoints are decayed slowly over time, values will eventually return to - /// `Some(([0; 8], [0; 8]))`. + /// `Some(([1; 32], [1; 32]))` and then to `None` once no datapoints remain. + /// + /// In order to fetch a single success probability from the buckets provided here, as used in + /// the scoring model, see [`Self::historical_estimated_payment_success_probability`]. pub fn historical_estimated_channel_liquidity_probabilities(&self, scid: u64, target: &NodeId) - -> Option<([u16; 8], [u16; 8])> { + -> Option<([u16; 32], [u16; 32])> { let graph = self.network_graph.read_only(); if let Some(chan) = graph.channels().get(&scid) { if let Some(liq) = self.channel_liquidities.get(&scid) { if let Some((directed_info, source)) = chan.as_directed_to(target) { let amt = directed_info.effective_capacity().as_msat(); - let dir_liq = liq.as_directed(source, target, 0, amt, &self.params); - - let buckets = HistoricalMinMaxBuckets { - min_liquidity_offset_history: &dir_liq.min_liquidity_offset_history, - max_liquidity_offset_history: &dir_liq.max_liquidity_offset_history, - }; - let (min_buckets, mut max_buckets, _) = buckets.get_decayed_buckets(T::now(), - *dir_liq.last_updated, self.params.historical_no_updates_half_life); + let dir_liq = liq.as_directed(source, target, 0, amt, self.decay_params); + + let (min_buckets, mut max_buckets) = + dir_liq.liquidity_history.get_decayed_buckets( + dir_liq.now, *dir_liq.last_updated, + self.decay_params.historical_no_updates_half_life + )?; + // Note that the liquidity buckets are an offset from the edge, so we inverse // the max order to get the probabilities from zero. max_buckets.reverse(); @@ -842,75 +818,32 @@ impl>, L: Deref, T: Time> ProbabilisticScorerU None } - /// Marks the node with the given `node_id` as banned, i.e., - /// it will be avoided during path finding. - pub fn add_banned(&mut self, node_id: &NodeId) { - self.params.manual_node_penalties.insert(*node_id, u64::max_value()); - } - - /// Removes the node with the given `node_id` from the list of nodes to avoid. - pub fn remove_banned(&mut self, node_id: &NodeId) { - self.params.manual_node_penalties.remove(node_id); - } - - /// Sets a manual penalty for the given node. - pub fn set_manual_penalty(&mut self, node_id: &NodeId, penalty: u64) { - self.params.manual_node_penalties.insert(*node_id, penalty); - } - - /// Removes the node with the given `node_id` from the list of manual penalties. - pub fn remove_manual_penalty(&mut self, node_id: &NodeId) { - self.params.manual_node_penalties.remove(node_id); - } - - /// Clears the list of manual penalties that are applied during path finding. 
-	pub fn clear_manual_penalties(&mut self) {
-		self.params.manual_node_penalties = HashMap::new();
-	}
-}
-
-impl ProbabilisticScoringParameters {
-	#[cfg(test)]
-	fn zero_penalty() -> Self {
-		Self {
-			base_penalty_msat: 0,
-			base_penalty_amount_multiplier_msat: 0,
-			liquidity_penalty_multiplier_msat: 0,
-			liquidity_offset_half_life: Duration::from_secs(6 * 60 * 60),
-			liquidity_penalty_amount_multiplier_msat: 0,
-			historical_liquidity_penalty_multiplier_msat: 0,
-			historical_liquidity_penalty_amount_multiplier_msat: 0,
-			historical_no_updates_half_life: Duration::from_secs(60 * 60 * 24 * 14),
-			manual_node_penalties: HashMap::new(),
-			anti_probing_penalty_msat: 0,
-			considered_impossible_penalty_msat: 0,
-		}
-	}
+	/// Query the probability of payment success sending the given `amount_msat` over the channel
+	/// with `scid` towards the given `target` node, based on the historical estimated liquidity
+	/// bounds.
+	///
+	/// These are the same bounds as returned by
+	/// [`Self::historical_estimated_channel_liquidity_probabilities`] (but not those returned by
+	/// [`Self::estimated_channel_liquidity_range`]).
+	pub fn historical_estimated_payment_success_probability(
+		&self, scid: u64, target: &NodeId, amount_msat: u64)
+	-> Option<f64> {
+		let graph = self.network_graph.read_only();
 
-	/// Marks all nodes in the given list as banned, i.e.,
-	/// they will be avoided during path finding.
-	pub fn add_banned_from_list(&mut self, node_ids: Vec<NodeId>) {
-		for id in node_ids {
-			self.manual_node_penalties.insert(id, u64::max_value());
-		}
-	}
-}
+		if let Some(chan) = graph.channels().get(&scid) {
+			if let Some(liq) = self.channel_liquidities.get(&scid) {
+				if let Some((directed_info, source)) = chan.as_directed_to(target) {
+					let capacity_msat = directed_info.effective_capacity().as_msat();
+					let dir_liq = liq.as_directed(source, target, 0, capacity_msat, self.decay_params);
 
-impl Default for ProbabilisticScoringParameters {
-	fn default() -> Self {
-		Self {
-			base_penalty_msat: 500,
-			base_penalty_amount_multiplier_msat: 8192,
-			liquidity_penalty_multiplier_msat: 30_000,
-			liquidity_offset_half_life: Duration::from_secs(6 * 60 * 60),
-			liquidity_penalty_amount_multiplier_msat: 192,
-			historical_liquidity_penalty_multiplier_msat: 10_000,
-			historical_liquidity_penalty_amount_multiplier_msat: 64,
-			historical_no_updates_half_life: Duration::from_secs(60 * 60 * 24 * 14),
-			manual_node_penalties: HashMap::new(),
-			anti_probing_penalty_msat: 250,
-			considered_impossible_penalty_msat: 1_0000_0000_000,
+					return dir_liq.liquidity_history.calculate_success_probability_times_billion(
+						dir_liq.now, *dir_liq.last_updated,
+						self.decay_params.historical_no_updates_half_life, amount_msat, capacity_msat
+					).map(|p| p as f64 / (1024 * 1024 * 1024) as f64);
+				}
+			}
 		}
+		None
 	}
 }
 
@@ -928,10 +861,9 @@ impl<T: Time> ChannelLiquidity<T> {
 	/// Returns a view of the channel liquidity directed from `source` to `target` assuming
 	/// `capacity_msat`.
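
A sketch of querying the two historical views together (placeholder `scid`/`target`); both return `None` until some payment results have been recorded for the channel:

    if let Some((min_hist, max_hist)) =
        scorer.historical_estimated_channel_liquidity_probabilities(scid, &target)
    {
        // 32 relative-frequency buckets per bound, on an arbitrary scale.
        let _ = (min_hist[0], max_hist[31]);
    }
    if let Some(p) = scorer.historical_estimated_payment_success_probability(scid, &target, 250_000) {
        assert!(p >= 0.0 && p <= 1.0); // a fraction of a billion, converted to f64
    }
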
-	fn as_directed<'a>(
-		&self, source: &NodeId, target: &NodeId, inflight_htlc_msat: u64, capacity_msat: u64,
-		params: &'a ProbabilisticScoringParameters
-	) -> DirectedChannelLiquidity<'a, &u64, &HistoricalBucketRangeTracker, T, &T> {
+	fn as_directed(
+		&self, source: &NodeId, target: &NodeId, inflight_htlc_msat: u64, capacity_msat: u64, decay_params: ProbabilisticScoringDecayParameters
+	) -> DirectedChannelLiquidity<&u64, &HistoricalBucketRangeTracker, T, &T> {
 		let (min_liquidity_offset_msat, max_liquidity_offset_msat, min_liquidity_offset_history, max_liquidity_offset_history) =
 			if source < target {
 				(&self.min_liquidity_offset_msat, &self.max_liquidity_offset_msat,
@@ -944,22 +876,23 @@ impl<T: Time> ChannelLiquidity<T> {
 		DirectedChannelLiquidity {
 			min_liquidity_offset_msat,
 			max_liquidity_offset_msat,
-			min_liquidity_offset_history,
-			max_liquidity_offset_history,
+			liquidity_history: HistoricalMinMaxBuckets {
+				min_liquidity_offset_history,
+				max_liquidity_offset_history,
+			},
 			inflight_htlc_msat,
 			capacity_msat,
 			last_updated: &self.last_updated,
 			now: T::now(),
-			params,
+			decay_params: decay_params,
 		}
 	}
 
 	/// Returns a mutable view of the channel liquidity directed from `source` to `target` assuming
 	/// `capacity_msat`.
-	fn as_directed_mut<'a>(
-		&mut self, source: &NodeId, target: &NodeId, inflight_htlc_msat: u64, capacity_msat: u64,
-		params: &'a ProbabilisticScoringParameters
-	) -> DirectedChannelLiquidity<'a, &mut u64, &mut HistoricalBucketRangeTracker, T, &mut T> {
+	fn as_directed_mut(
+		&mut self, source: &NodeId, target: &NodeId, inflight_htlc_msat: u64, capacity_msat: u64, decay_params: ProbabilisticScoringDecayParameters
+	) -> DirectedChannelLiquidity<&mut u64, &mut HistoricalBucketRangeTracker, T, &mut T> {
 		let (min_liquidity_offset_msat, max_liquidity_offset_msat, min_liquidity_offset_history, max_liquidity_offset_history) =
 			if source < target {
 				(&mut self.min_liquidity_offset_msat, &mut self.max_liquidity_offset_msat,
@@ -972,13 +905,15 @@ impl<T: Time> ChannelLiquidity<T> {
 		DirectedChannelLiquidity {
 			min_liquidity_offset_msat,
 			max_liquidity_offset_msat,
-			min_liquidity_offset_history,
-			max_liquidity_offset_history,
+			liquidity_history: HistoricalMinMaxBuckets {
+				min_liquidity_offset_history,
+				max_liquidity_offset_history,
+			},
 			inflight_htlc_msat,
 			capacity_msat,
 			last_updated: &mut self.last_updated,
 			now: T::now(),
-			params,
+			decay_params: decay_params,
 		}
 	}
 }
@@ -995,10 +930,11 @@ const PRECISION_LOWER_BOUND_DENOMINATOR: u64 = approx::LOWER_BITS_BOUND;
 const AMOUNT_PENALTY_DIVISOR: u64 = 1 << 20;
 const BASE_AMOUNT_PENALTY_DIVISOR: u64 = 1 << 30;
 
-impl<L: Deref<Target = u64>, BRT: Deref<Target = HistoricalBucketRangeTracker>, T: Time, U: Deref<Target = T>> DirectedChannelLiquidity<'_, L, BRT, T, U> {
+impl<L: Deref<Target = u64>, BRT: Deref<Target = HistoricalBucketRangeTracker>, T: Time, U: Deref<Target = T>> DirectedChannelLiquidity< L, BRT, T, U> {
 	/// Returns a liquidity penalty for routing the given HTLC `amount_msat` through the channel in
 	/// this direction.
-	fn penalty_msat(&self, amount_msat: u64, params: &ProbabilisticScoringParameters) -> u64 {
+	fn penalty_msat(&self, amount_msat: u64, score_params: &ProbabilisticScoringFeeParameters) -> u64 {
+		let available_capacity = self.available_capacity();
 		let max_liquidity_msat = self.max_liquidity_msat();
 		let min_liquidity_msat = core::cmp::min(self.min_liquidity_msat(), max_liquidity_msat);
 
@@ -1010,9 +946,9 @@ impl<L: Deref<Target = u64>, BRT: Deref
 			// impossibility penalty.
let negative_log10_times_2048 = NEGATIVE_LOG10_UPPER_BOUND * 2048; Self::combined_penalty_msat(amount_msat, negative_log10_times_2048, - params.liquidity_penalty_multiplier_msat, - params.liquidity_penalty_amount_multiplier_msat) - .saturating_add(params.considered_impossible_penalty_msat) + score_params.liquidity_penalty_multiplier_msat, + score_params.liquidity_penalty_amount_multiplier_msat) + .saturating_add(score_params.considered_impossible_penalty_msat) } else { let numerator = (max_liquidity_msat - amount_msat).saturating_add(1); let denominator = (max_liquidity_msat - min_liquidity_msat).saturating_add(1); @@ -1025,38 +961,30 @@ impl, BRT: Deref, let negative_log10_times_2048 = approx::negative_log10_times_2048(numerator, denominator); Self::combined_penalty_msat(amount_msat, negative_log10_times_2048, - params.liquidity_penalty_multiplier_msat, - params.liquidity_penalty_amount_multiplier_msat) + score_params.liquidity_penalty_multiplier_msat, + score_params.liquidity_penalty_amount_multiplier_msat) } }; - if params.historical_liquidity_penalty_multiplier_msat != 0 || - params.historical_liquidity_penalty_amount_multiplier_msat != 0 { - let payment_amt_64th_bucket = if amount_msat < u64::max_value() / 64 { - amount_msat * 64 / self.capacity_msat.saturating_add(1) - } else { - // Only use 128-bit arithmetic when multiplication will overflow to avoid 128-bit - // division. This branch should only be hit in fuzz testing since the amount would - // need to be over 2.88 million BTC in practice. - ((amount_msat as u128) * 64 / (self.capacity_msat as u128).saturating_add(1)) - .try_into().unwrap_or(65) - }; - #[cfg(not(fuzzing))] - debug_assert!(payment_amt_64th_bucket <= 64); - if payment_amt_64th_bucket > 64 { return res; } + if amount_msat >= available_capacity { + // We're trying to send more than the capacity, use a max penalty. 
+ res = res.saturating_add(Self::combined_penalty_msat(amount_msat, + NEGATIVE_LOG10_UPPER_BOUND * 2048, + score_params.historical_liquidity_penalty_multiplier_msat, + score_params.historical_liquidity_penalty_amount_multiplier_msat)); + return res; + } - let buckets = HistoricalMinMaxBuckets { - min_liquidity_offset_history: &self.min_liquidity_offset_history, - max_liquidity_offset_history: &self.max_liquidity_offset_history, - }; - if let Some(cumulative_success_prob_times_billion) = buckets + if score_params.historical_liquidity_penalty_multiplier_msat != 0 || + score_params.historical_liquidity_penalty_amount_multiplier_msat != 0 { + if let Some(cumulative_success_prob_times_billion) = self.liquidity_history .calculate_success_probability_times_billion(self.now, *self.last_updated, - params.historical_no_updates_half_life, payment_amt_64th_bucket as u8) + self.decay_params.historical_no_updates_half_life, amount_msat, self.capacity_msat) { let historical_negative_log10_times_2048 = approx::negative_log10_times_2048(cumulative_success_prob_times_billion + 1, 1024 * 1024 * 1024); res = res.saturating_add(Self::combined_penalty_msat(amount_msat, - historical_negative_log10_times_2048, params.historical_liquidity_penalty_multiplier_msat, - params.historical_liquidity_penalty_amount_multiplier_msat)); + historical_negative_log10_times_2048, score_params.historical_liquidity_penalty_multiplier_msat, + score_params.historical_liquidity_penalty_amount_multiplier_msat)); } else { // If we don't have any valid points (or, once decayed, we have less than a full // point), redo the non-historical calculation with no liquidity bounds tracked and @@ -1067,8 +995,8 @@ impl, BRT: Deref, let negative_log10_times_2048 = approx::negative_log10_times_2048(numerator, denominator); res = res.saturating_add(Self::combined_penalty_msat(amount_msat, negative_log10_times_2048, - params.historical_liquidity_penalty_multiplier_msat, - params.historical_liquidity_penalty_amount_multiplier_msat)); + score_params.historical_liquidity_penalty_multiplier_msat, + score_params.historical_liquidity_penalty_amount_multiplier_msat)); } } @@ -1077,15 +1005,15 @@ impl, BRT: Deref, /// Computes the liquidity penalty from the penalty multipliers. #[inline(always)] - fn combined_penalty_msat(amount_msat: u64, negative_log10_times_2048: u64, + fn combined_penalty_msat(amount_msat: u64, mut negative_log10_times_2048: u64, liquidity_penalty_multiplier_msat: u64, liquidity_penalty_amount_multiplier_msat: u64, ) -> u64 { - let liquidity_penalty_msat = { - // Upper bound the liquidity penalty to ensure some channel is selected. - let multiplier_msat = liquidity_penalty_multiplier_msat; - let max_penalty_msat = multiplier_msat.saturating_mul(NEGATIVE_LOG10_UPPER_BOUND); - (negative_log10_times_2048.saturating_mul(multiplier_msat) / 2048).min(max_penalty_msat) - }; + negative_log10_times_2048 = + negative_log10_times_2048.min(NEGATIVE_LOG10_UPPER_BOUND * 2048); + + // Upper bound the liquidity penalty to ensure some channel is selected. + let liquidity_penalty_msat = negative_log10_times_2048 + .saturating_mul(liquidity_penalty_multiplier_msat) / 2048; let amount_penalty_msat = negative_log10_times_2048 .saturating_mul(liquidity_penalty_amount_multiplier_msat) .saturating_mul(amount_msat) / 2048 / AMOUNT_PENALTY_DIVISOR; @@ -1094,30 +1022,48 @@ impl, BRT: Deref, } /// Returns the lower bound of the channel liquidity balance in this direction. 
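
Putting the hunks above together: the liquidity penalty is the negative `log10` of the estimated success probability times a multiplier, with the probability taken linearly between the tracked bounds. An illustrative floating-point re-derivation (the shipped code uses the fixed-point `approx::negative_log10_times_2048` instead):

    fn liquidity_penalty_sketch(
        amount_msat: u64, min_liquidity_msat: u64, max_liquidity_msat: u64,
        liquidity_penalty_multiplier_msat: u64,
    ) -> u64 {
        // Probability the channel can forward `amount_msat`, assuming its true
        // available liquidity is uniform between the tracked bounds.
        let numerator = (max_liquidity_msat - amount_msat) as f64 + 1.0;
        let denominator = (max_liquidity_msat - min_liquidity_msat) as f64 + 1.0;
        let success_prob = numerator / denominator;
        (-success_prob.log10() * liquidity_penalty_multiplier_msat as f64) as u64
    }
    // Bounds (0, 1_000_000) and a 500_000 msat payment give P ~= 0.5, so the
    // default 30_000 msat multiplier yields a penalty of ~9_000 msat.
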
+	#[inline(always)]
 	fn min_liquidity_msat(&self) -> u64 {
 		self.decayed_offset_msat(*self.min_liquidity_offset_msat)
 	}
 
 	/// Returns the upper bound of the channel liquidity balance in this direction.
+	#[inline(always)]
 	fn max_liquidity_msat(&self) -> u64 {
 		self.available_capacity()
 			.saturating_sub(self.decayed_offset_msat(*self.max_liquidity_offset_msat))
 	}
 
 	/// Returns the capacity minus the in-flight HTLCs in this direction.
+	#[inline(always)]
 	fn available_capacity(&self) -> u64 {
 		self.capacity_msat.saturating_sub(self.inflight_htlc_msat)
 	}
 
 	fn decayed_offset_msat(&self, offset_msat: u64) -> u64 {
-		self.now.duration_since(*self.last_updated).as_secs()
-			.checked_div(self.params.liquidity_offset_half_life.as_secs())
-			.and_then(|decays| offset_msat.checked_shr(decays as u32))
-			.unwrap_or(0)
+		let half_life = self.decay_params.liquidity_offset_half_life.as_secs();
+		if half_life != 0 {
+			// Decay the offset by the appropriate number of half lives. If half of the next half
+			// life has passed, approximate an additional three-quarter life to help smooth out the
+			// decay.
+			let elapsed_time = self.now.duration_since(*self.last_updated).as_secs();
+			let half_decays = elapsed_time / (half_life / 2);
+			let decays = half_decays / 2;
+			let decayed_offset_msat = offset_msat.checked_shr(decays as u32).unwrap_or(0);
+			if half_decays % 2 == 0 {
+				decayed_offset_msat
+			} else {
+				// 11_585 / 16_384 ~= core::f64::consts::FRAC_1_SQRT_2
+				// 16_384 == 2^14
+				(decayed_offset_msat as u128 * 11_585 / 16_384) as u64
+			}
+		} else {
+			0
+		}
 	}
 }
 
-impl<L: DerefMut<Target = u64>, BRT: DerefMut<Target = HistoricalBucketRangeTracker>, T: Time, U: DerefMut<Target = T>> DirectedChannelLiquidity<'_, L, BRT, T, U> {
+impl<L: DerefMut<Target = u64>, BRT: DerefMut<Target = HistoricalBucketRangeTracker>, T: Time, U: DerefMut<Target = T>> DirectedChannelLiquidity<L, BRT, T, U> {
 	/// Adjusts the channel liquidity balance bounds when failing to route `amount_msat`.
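
A self-contained sketch of the decay rule above, including the odd-half-life step where the offset is scaled by roughly 1/sqrt(2) (`11_585 / 16_384 ≈ 0.7071`):

    fn decayed_offset_sketch(offset_msat: u64, elapsed_secs: u64, half_life_secs: u64) -> u64 {
        // Count elapsed *half* half-lives, halve once per full half-life, and
        // apply an extra ~1/sqrt(2) step when a half half-life is left over.
        let half_decays = elapsed_secs / (half_life_secs / 2);
        let decayed = offset_msat.checked_shr((half_decays / 2) as u32).unwrap_or(0);
        if half_decays % 2 == 0 { decayed }
        else { (decayed as u128 * 11_585 / 16_384) as u64 }
    }
    // E.g. a 1_000_000 msat offset with a 6 h half-life: after 9 h we have seen
    // three half half-lives, i.e. one full halving plus the sqrt(2) step,
    // giving ~353_546 msat.
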
 	fn failed_at_channel<Log: Deref>(&mut self, amount_msat: u64, chan_descr: fmt::Arguments, logger: &Log) where Log::Target: Logger {
 		let existing_max_msat = self.max_liquidity_msat();
@@ -1128,7 +1074,7 @@ impl, BRT: DerefMut, BRT: DerefMut, BRT: DerefMut, BRT: DerefMut
 impl<G: Deref<Target = NetworkGraph<L>>, L: Deref, T: Time> Score for ProbabilisticScorerUsingTime<G, L, T> where L::Target: Logger {
+	type ScoreParams = ProbabilisticScoringFeeParameters;
 	fn channel_penalty_msat(
-		&self, short_channel_id: u64, source: &NodeId, target: &NodeId, usage: ChannelUsage
+		&self, short_channel_id: u64, source: &NodeId, target: &NodeId, usage: ChannelUsage, score_params: &ProbabilisticScoringFeeParameters
 	) -> u64 {
-		if let Some(penalty) = self.params.manual_node_penalties.get(target) {
+		if let Some(penalty) = score_params.manual_node_penalties.get(target) {
 			return *penalty;
 		}
 
-		let base_penalty_msat = self.params.base_penalty_msat.saturating_add(
-			self.params.base_penalty_amount_multiplier_msat
+		let base_penalty_msat = score_params.base_penalty_msat.saturating_add(
+			score_params.base_penalty_amount_multiplier_msat
 				.saturating_mul(usage.amount_msat) / BASE_AMOUNT_PENALTY_DIVISOR);
 
 		let mut anti_probing_penalty_msat = 0;
 		match usage.effective_capacity {
-			EffectiveCapacity::ExactLiquidity { liquidity_msat } => {
-				if usage.amount_msat > liquidity_msat {
+			EffectiveCapacity::ExactLiquidity { liquidity_msat: amount_msat } |
+				EffectiveCapacity::HintMaxHTLC { amount_msat } =>
+			{
+				if usage.amount_msat > amount_msat {
 					return u64::max_value();
 				} else {
 					return base_penalty_msat;
@@ -1215,7 +1168,7 @@ impl<G: Deref<Target = NetworkGraph<L>>, L: Deref, T: Time> Score for Probabilis
 			},
 			EffectiveCapacity::Total { capacity_msat, htlc_maximum_msat } => {
 				if htlc_maximum_msat >= capacity_msat/2 {
-					anti_probing_penalty_msat = self.params.anti_probing_penalty_msat;
+					anti_probing_penalty_msat = score_params.anti_probing_penalty_msat;
 				}
 			},
 			_ => {},
@@ -1227,8 +1180,8 @@ impl<G: Deref<Target = NetworkGraph<L>>, L: Deref, T: Time> Score for Probabilis
 		self.channel_liquidities
 			.get(&short_channel_id)
 			.unwrap_or(&ChannelLiquidity::new())
-			.as_directed(source, target, inflight_htlc_msat, capacity_msat, &self.params)
-			.penalty_msat(amount_msat, &self.params)
+			.as_directed(source, target, inflight_htlc_msat, capacity_msat, self.decay_params)
+			.penalty_msat(amount_msat, score_params)
 			.saturating_add(anti_probing_penalty_msat)
 			.saturating_add(base_penalty_msat)
 	}
@@ -1255,13 +1208,13 @@ impl<G: Deref<Target = NetworkGraph<L>>, L: Deref, T: Time> Score for Probabilis
 				self.channel_liquidities
 					.entry(hop.short_channel_id)
 					.or_insert_with(ChannelLiquidity::new)
-					.as_directed_mut(source, &target, 0, capacity_msat, &self.params)
+					.as_directed_mut(source, &target, 0, capacity_msat, self.decay_params)
 					.failed_at_channel(amount_msat, format_args!("SCID {}, towards {:?}", hop.short_channel_id, target), &self.logger);
 			} else {
 				self.channel_liquidities
 					.entry(hop.short_channel_id)
 					.or_insert_with(ChannelLiquidity::new)
-					.as_directed_mut(source, &target, 0, capacity_msat, &self.params)
+					.as_directed_mut(source, &target, 0, capacity_msat, self.decay_params)
 					.failed_downstream(amount_msat, format_args!("SCID {}, towards {:?}", hop.short_channel_id, target), &self.logger);
 			}
 		} else {
@@ -1289,7 +1242,7 @@ impl<G: Deref<Target = NetworkGraph<L>>, L: Deref, T: Time> Score for Probabilis
 				self.channel_liquidities
 					.entry(hop.short_channel_id)
 					.or_insert_with(ChannelLiquidity::new)
-					.as_directed_mut(source, &target, 0, capacity_msat, &self.params)
+					.as_directed_mut(source, &target, 0, capacity_msat, self.decay_params)
 					.successful(amount_msat, format_args!("SCID {}, towards {:?}", hop.short_channel_id, target),
 					&self.logger);
 			} else {
 				log_debug!(self.logger, "Not able to learn for channel with SCID {} as we do not have graph info for it (likely a route-hint last-hop).",
@@ -1617,6 +1570,294 @@ mod approx {
 	}
 }
 
+mod bucketed_history {
+	use super::*;
+
+	// Because liquidity is often skewed heavily in one direction, we store historical state
+	// distribution in buckets of different size. For backwards compatibility, buckets of size 1/8th
+	// must fit evenly into the buckets here.
+	//
+	// The smallest bucket is 2^-14th of the channel; for each of our 32 buckets here we define the
+	// width of the bucket in 2^14'ths of the channel. This increases exponentially until we reach
+	// a full 16th of the channel's capacity, which is repeated a few times for backwards
+	// compatibility. The four middle buckets represent full octiles of the channel's capacity.
+	//
+	// For a 1 BTC channel, this lets us differentiate between failures in the bottom 6k sats, or
+	// between the 12,000th sat and 24,000th sat, while only needing to store and operate on 32
+	// buckets in total.
+
+	const BUCKET_START_POS: [u16; 33] = [
+		0, 1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024, 2048, 3072, 4096, 6144, 8192, 10240, 12288,
+		13312, 14336, 15360, 15872, 16128, 16256, 16320, 16352, 16368, 16376, 16380, 16382, 16383, 16384,
+	];
+
+	const LEGACY_TO_BUCKET_RANGE: [(u8, u8); 8] = [
+		(0, 12), (12, 14), (14, 15), (15, 16), (16, 17), (17, 18), (18, 20), (20, 32)
+	];
+
+	const POSITION_TICKS: u16 = 1 << 14;
+
+	fn pos_to_bucket(pos: u16) -> usize {
+		for bucket in 0..32 {
+			if pos < BUCKET_START_POS[bucket + 1] {
+				return bucket;
+			}
+		}
+		debug_assert!(false);
+		return 32;
+	}
+
+	#[cfg(test)]
+	#[test]
+	fn check_bucket_maps() {
+		const BUCKET_WIDTH_IN_16384S: [u16; 32] = [
+			1, 1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024, 1024, 1024, 2048, 2048,
+			2048, 2048, 1024, 1024, 1024, 512, 256, 128, 64, 32, 16, 8, 4, 2, 1, 1];
+
+		let mut min_size_iter = 0;
+		let mut legacy_bucket_iter = 0;
+		for (bucket, width) in BUCKET_WIDTH_IN_16384S.iter().enumerate() {
+			assert_eq!(BUCKET_START_POS[bucket], min_size_iter);
+			for i in 0..*width {
+				assert_eq!(pos_to_bucket(min_size_iter + i) as usize, bucket);
+			}
+			min_size_iter += *width;
+			if min_size_iter % (POSITION_TICKS / 8) == 0 {
+				assert_eq!(LEGACY_TO_BUCKET_RANGE[legacy_bucket_iter].1 as usize, bucket + 1);
+				if legacy_bucket_iter + 1 < 8 {
+					assert_eq!(LEGACY_TO_BUCKET_RANGE[legacy_bucket_iter + 1].0 as usize, bucket + 1);
+				}
+				legacy_bucket_iter += 1;
+			}
+		}
+		assert_eq!(BUCKET_START_POS[32], POSITION_TICKS);
+		assert_eq!(min_size_iter, POSITION_TICKS);
+	}
+
+	#[inline]
+	fn amount_to_pos(amount_msat: u64, capacity_msat: u64) -> u16 {
+		let pos = if amount_msat < u64::max_value() / (POSITION_TICKS as u64) {
+			(amount_msat * (POSITION_TICKS as u64) / capacity_msat.saturating_add(1))
+				.try_into().unwrap_or(POSITION_TICKS)
+		} else {
+			// Only use 128-bit arithmetic when multiplication will overflow to avoid 128-bit
+			// division. This branch should only be hit in fuzz testing since the amount would
+			// need to be over 2.88 million BTC in practice.
+			((amount_msat as u128) * (POSITION_TICKS as u128)
+					/ (capacity_msat as u128).saturating_add(1))
+				.try_into().unwrap_or(POSITION_TICKS)
+		};
+		// If we are running in a client that doesn't validate gossip, it's possible for a channel's
+		// capacity to change due to a `channel_update` message which, if received while a payment
+		// is in-flight, could cause this to fail. Thus, we only assert in test.
+		#[cfg(test)]
+		debug_assert!(pos < POSITION_TICKS);
+		pos
+	}
+
+	/// Prior to LDK 0.0.117 we used eight buckets which were split evenly across the eight
+	/// octiles. This was changed to use 32 buckets for accuracy reasons in 0.0.117, however we
+	/// support reading the legacy values here for backwards compatibility.
+	pub(super) struct LegacyHistoricalBucketRangeTracker {
+		buckets: [u16; 8],
+	}
+
+	impl LegacyHistoricalBucketRangeTracker {
+		pub(crate) fn into_current(&self) -> HistoricalBucketRangeTracker {
+			let mut buckets = [0; 32];
+			for (idx, legacy_bucket) in self.buckets.iter().enumerate() {
+				let mut new_val = *legacy_bucket;
+				let (start, end) = LEGACY_TO_BUCKET_RANGE[idx];
+				new_val /= (end - start) as u16;
+				for i in start..end {
+					buckets[i as usize] = new_val;
+				}
+			}
+			HistoricalBucketRangeTracker { buckets }
+		}
+	}
+
+	/// Tracks the historical state of a distribution as a weighted average of how much time was spent
+	/// in each of 32 buckets.
+	#[derive(Clone, Copy)]
+	pub(super) struct HistoricalBucketRangeTracker {
+		buckets: [u16; 32],
+	}
+
+	/// Buckets are stored in fixed point numbers with a 5 bit fractional part. Thus, the value
+	/// "one" is 32, or this constant.
+	pub const BUCKET_FIXED_POINT_ONE: u16 = 32;
+
+	impl HistoricalBucketRangeTracker {
+		pub(super) fn new() -> Self { Self { buckets: [0; 32] } }
+		pub(super) fn track_datapoint(&mut self, liquidity_offset_msat: u64, capacity_msat: u64) {
+			// We have 32 leaky buckets for min and max liquidity. Each bucket tracks the amount of time
+			// we spend in each bucket as a 16-bit fixed-point number with a 5 bit fractional part.
+			//
+			// Each time we update our liquidity estimate, we add 32 (1.0 in our fixed-point system) to
+			// the buckets for the current min and max liquidity offset positions.
+			//
+			// We then decay each bucket by multiplying by 2047/2048 (avoiding dividing by a
+			// non-power-of-two). This ensures we can't actually overflow the u16 - when we get to
+			// 63,457 adding 32 and decaying by 2047/2048 leaves us back at 63,457.
+			//
+			// In total, this allows us to track data for the last 8,000 or so payments across a given
+			// channel.
+			//
+			// These constants are a balance - we try to fit in 2 bytes per bucket to reduce overhead,
+			// and need to balance having more bits in the decimal part (to ensure decay isn't too
+			// non-linear) with having too few bits in the mantissa, causing us to not store very many
+			// datapoints.
+			//
+			// The constants were picked experimentally, selecting a decay amount that restricts us
+			// from overflowing buckets without having to cap them manually.
+
+			let pos: u16 = amount_to_pos(liquidity_offset_msat, capacity_msat);
+			if pos < POSITION_TICKS {
+				for e in self.buckets.iter_mut() {
+					*e = ((*e as u32) * 2047 / 2048) as u16;
+				}
+				let bucket = pos_to_bucket(pos);
+				self.buckets[bucket] = self.buckets[bucket].saturating_add(BUCKET_FIXED_POINT_ONE);
+			}
+		}
+		/// Decay all buckets by the given number of half-lives. Used to more aggressively remove old
+		/// datapoints as we receive newer information.
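// ---- Editor's aside (illustration only, not part of the diff) ---------------
// The "can't overflow the u16" claim above can be checked directly: each
// update first leaks every bucket by 2047/2048 and then adds 1.0 (== 32) to
// the bucket that was hit, so a bucket hit on every update converges to a
// fixed point rather than saturating.
#[cfg(test)]
#[test]
fn leaky_bucket_equilibrium() {
	let mut bucket = 0u16;
	for _ in 0..20_000 {
		bucket = ((bucket as u32) * 2047 / 2048) as u16; // decay...
		bucket = bucket.saturating_add(32);              // ...then record the hit
	}
	// Integer truncation parks the sequence at 63_457 + 32 == 63_489, matching
	// the constant quoted in the comment above and well inside u16 range.
	assert_eq!(bucket, 63_489);
}
// ---- end aside ---------------------------------------------------------------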
+		#[inline]
+		pub(super) fn time_decay_data(&mut self, half_lives: u32) {
+			for e in self.buckets.iter_mut() {
+				*e = e.checked_shr(half_lives).unwrap_or(0);
+			}
+		}
+	}
+
+	impl_writeable_tlv_based!(HistoricalBucketRangeTracker, { (0, buckets, required) });
+	impl_writeable_tlv_based!(LegacyHistoricalBucketRangeTracker, { (0, buckets, required) });
+
+	/// A set of buckets representing the history of where we've seen the minimum- and maximum-
+	/// liquidity bounds for a given channel.
+	pub(super) struct HistoricalMinMaxBuckets<D: Deref<Target = HistoricalBucketRangeTracker>> {
+		/// Buckets tracking where and how often we've seen the minimum liquidity bound for a
+		/// channel.
+		pub(super) min_liquidity_offset_history: D,
+		/// Buckets tracking where and how often we've seen the maximum liquidity bound for a
+		/// channel.
+		pub(super) max_liquidity_offset_history: D,
+	}
+
+	impl<D: Deref<Target = HistoricalBucketRangeTracker>> HistoricalMinMaxBuckets<D> {
+		pub(super) fn get_decayed_buckets<T: Time>(&self, now: T, last_updated: T, half_life: Duration)
+		-> Option<([u16; 32], [u16; 32])> {
+			let (_, required_decays) = self.get_total_valid_points(now, last_updated, half_life)?;
+
+			let mut min_buckets = *self.min_liquidity_offset_history;
+			min_buckets.time_decay_data(required_decays);
+			let mut max_buckets = *self.max_liquidity_offset_history;
+			max_buckets.time_decay_data(required_decays);
+			Some((min_buckets.buckets, max_buckets.buckets))
+		}
+		#[inline]
+		pub(super) fn get_total_valid_points<T: Time>(&self, now: T, last_updated: T, half_life: Duration)
+		-> Option<(u64, u32)> {
+			let required_decays = now.duration_since(last_updated).as_secs()
+				.checked_div(half_life.as_secs())
+				.map_or(u32::max_value(), |decays| cmp::min(decays, u32::max_value() as u64) as u32);
+
+			let mut total_valid_points_tracked = 0;
+			for (min_idx, min_bucket) in self.min_liquidity_offset_history.buckets.iter().enumerate() {
+				for max_bucket in self.max_liquidity_offset_history.buckets.iter().take(32 - min_idx) {
+					total_valid_points_tracked += (*min_bucket as u64) * (*max_bucket as u64);
+				}
+			}
+
+			// If the total valid points is smaller than 1.0 (i.e. 32 * 32 == 1024 in our fixed-point
+			// scheme), treat it as if we were fully decayed.
+			const FULLY_DECAYED: u16 = BUCKET_FIXED_POINT_ONE * BUCKET_FIXED_POINT_ONE;
+			if total_valid_points_tracked.checked_shr(required_decays).unwrap_or(0) < FULLY_DECAYED.into() {
+				return None;
+			}
+
+			Some((total_valid_points_tracked, required_decays))
+		}
+
+		#[inline]
+		pub(super) fn calculate_success_probability_times_billion<T: Time>(
+			&self, now: T, last_updated: T, half_life: Duration, amount_msat: u64, capacity_msat: u64)
+		-> Option<u64> {
+			// If historical penalties are enabled, we try to calculate a probability of success
+			// given our historical distribution of min- and max-liquidity bounds in a channel.
+			// To do so, we walk the set of historical liquidity bucket (min, max) combinations
+			// (where min_idx < max_idx, as having a minimum above our maximum is an invalid
+			// state). For each pair, we calculate the probability as if the bucket's corresponding
+			// min- and max- liquidity bounds were our current liquidity bounds and then multiply
+			// that probability by the weight of the selected buckets.
+			let payment_pos = amount_to_pos(amount_msat, capacity_msat);
+			if payment_pos >= POSITION_TICKS { return None; }
+
+			// Check if all our buckets are zero once decayed, and treat it as if we had no data. We
+			// don't actually use the decayed buckets, though, as that would lose precision.
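// ---- Editor's aside (illustration only, not part of the diff) ---------------
// How the cutoff just below works: every valid (min, max) bucket pair
// contributes min * max points, and FULLY_DECAYED is 1.0 * 1.0 in fixed point
// (32 * 32 == 1024). A single fresh observation therefore sits exactly at the
// threshold, and one elapsed half-life (a right shift by one) drops it below:
#[cfg(test)]
#[test]
fn valid_points_cutoff_example() {
	let total_valid_points: u64 = 32 * 32; // one fully-weighted datapoint
	assert!(total_valid_points.checked_shr(0).unwrap_or(0) >= 1024); // kept
	assert!(total_valid_points.checked_shr(1).unwrap_or(0) < 1024);  // -> None
}
// ---- end aside ---------------------------------------------------------------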
+ let (total_valid_points_tracked, _) + = self.get_total_valid_points(now, last_updated, half_life)?; + + let mut cumulative_success_prob_times_billion = 0; + // Special-case the 0th min bucket - it generally means we failed a payment, so only + // consider the highest (i.e. largest-offset-from-max-capacity) max bucket for all + // points against the 0th min bucket. This avoids the case where we fail to route + // increasingly lower values over a channel, but treat each failure as a separate + // datapoint, many of which may have relatively high maximum-available-liquidity + // values, which will result in us thinking we have some nontrivial probability of + // routing up to that amount. + if self.min_liquidity_offset_history.buckets[0] != 0 { + let mut highest_max_bucket_with_points = 0; // The highest max-bucket with any data + let mut total_max_points = 0; // Total points in max-buckets to consider + for (max_idx, max_bucket) in self.max_liquidity_offset_history.buckets.iter().enumerate() { + if *max_bucket >= BUCKET_FIXED_POINT_ONE { + highest_max_bucket_with_points = cmp::max(highest_max_bucket_with_points, max_idx); + } + total_max_points += *max_bucket as u64; + } + let max_bucket_end_pos = BUCKET_START_POS[32 - highest_max_bucket_with_points] - 1; + if payment_pos < max_bucket_end_pos { + let bucket_prob_times_billion = + (self.min_liquidity_offset_history.buckets[0] as u64) * total_max_points + * 1024 * 1024 * 1024 / total_valid_points_tracked; + cumulative_success_prob_times_billion += bucket_prob_times_billion * + ((max_bucket_end_pos - payment_pos) as u64) / + // Add an additional one in the divisor as the payment bucket has been + // rounded down. + (max_bucket_end_pos + 1) as u64; + } + } + + for (min_idx, min_bucket) in self.min_liquidity_offset_history.buckets.iter().enumerate().skip(1) { + let min_bucket_start_pos = BUCKET_START_POS[min_idx]; + for (max_idx, max_bucket) in self.max_liquidity_offset_history.buckets.iter().enumerate().take(32 - min_idx) { + let max_bucket_end_pos = BUCKET_START_POS[32 - max_idx] - 1; + // Note that this multiply can only barely not overflow - two 16 bit ints plus + // 30 bits is 62 bits. + let bucket_prob_times_billion = (*min_bucket as u64) * (*max_bucket as u64) + * 1024 * 1024 * 1024 / total_valid_points_tracked; + if payment_pos >= max_bucket_end_pos { + // Success probability 0, the payment amount may be above the max liquidity + break; + } else if payment_pos < min_bucket_start_pos { + cumulative_success_prob_times_billion += bucket_prob_times_billion; + } else { + cumulative_success_prob_times_billion += bucket_prob_times_billion * + ((max_bucket_end_pos - payment_pos) as u64) / + // Add an additional one in the divisor as the payment bucket has been + // rounded down. 
+							((max_bucket_end_pos - min_bucket_start_pos + 1) as u64);
+					}
+				}
+			}
+
+			Some(cumulative_success_prob_times_billion)
+		}
+	}
+}
+use bucketed_history::{LegacyHistoricalBucketRangeTracker, HistoricalBucketRangeTracker, HistoricalMinMaxBuckets};
+
 impl<G: Deref<Target = NetworkGraph<L>>, L: Deref, T: Time> Writeable for ProbabilisticScorerUsingTime<G, L, T> where L::Target: Logger {
 	#[inline]
 	fn write<W: Writer>(&self, w: &mut W) -> Result<(), io::Error> {
@@ -1628,18 +1869,18 @@ impl<G: Deref<Target = NetworkGraph<L>>, L: Deref, T: Time> Writeable for Probab
 }
 
 impl<G: Deref<Target = NetworkGraph<L>>, L: Deref, T: Time>
-ReadableArgs<(ProbabilisticScoringParameters, G, L)> for ProbabilisticScorerUsingTime<G, L, T> where L::Target: Logger {
+ReadableArgs<(ProbabilisticScoringDecayParameters, G, L)> for ProbabilisticScorerUsingTime<G, L, T> where L::Target: Logger {
 	#[inline]
 	fn read<R: Read>(
-		r: &mut R, args: (ProbabilisticScoringParameters, G, L)
+		r: &mut R, args: (ProbabilisticScoringDecayParameters, G, L)
 	) -> Result<Self, DecodeError> {
-		let (params, network_graph, logger) = args;
+		let (decay_params, network_graph, logger) = args;
 		let mut channel_liquidities = HashMap::new();
 		read_tlv_fields!(r, {
 			(0, channel_liquidities, required),
 		});
 		Ok(Self {
-			params,
+			decay_params,
 			network_graph,
 			logger,
 			channel_liquidities,
@@ -1653,10 +1894,12 @@ impl<T: Time> Writeable for ChannelLiquidity<T> {
 		let duration_since_epoch = T::duration_since_epoch() - self.last_updated.elapsed();
 		write_tlv_fields!(w, {
 			(0, self.min_liquidity_offset_msat, required),
-			(1, Some(self.min_liquidity_offset_history), option),
+			// 1 was the min_liquidity_offset_history in octile form
 			(2, self.max_liquidity_offset_msat, required),
-			(3, Some(self.max_liquidity_offset_history), option),
+			// 3 was the max_liquidity_offset_history in octile form
 			(4, duration_since_epoch, required),
+			(5, Some(self.min_liquidity_offset_history), option),
+			(7, Some(self.max_liquidity_offset_history), option),
 		});
 		Ok(())
 	}
@@ -1667,15 +1910,19 @@ impl<T: Time> Readable for ChannelLiquidity<T> {
 	fn read<R: Read>(r: &mut R) -> Result<Self, DecodeError> {
 		let mut min_liquidity_offset_msat = 0;
 		let mut max_liquidity_offset_msat = 0;
-		let mut min_liquidity_offset_history = Some(HistoricalBucketRangeTracker::new());
-		let mut max_liquidity_offset_history = Some(HistoricalBucketRangeTracker::new());
+		let mut legacy_min_liq_offset_history: Option<LegacyHistoricalBucketRangeTracker> = None;
+		let mut legacy_max_liq_offset_history: Option<LegacyHistoricalBucketRangeTracker> = None;
+		let mut min_liquidity_offset_history: Option<HistoricalBucketRangeTracker> = None;
+		let mut max_liquidity_offset_history: Option<HistoricalBucketRangeTracker> = None;
 		let mut duration_since_epoch = Duration::from_secs(0);
 		read_tlv_fields!(r, {
 			(0, min_liquidity_offset_msat, required),
-			(1, min_liquidity_offset_history, option),
+			(1, legacy_min_liq_offset_history, option),
 			(2, max_liquidity_offset_msat, required),
-			(3, max_liquidity_offset_history, option),
+			(3, legacy_max_liq_offset_history, option),
 			(4, duration_since_epoch, required),
+			(5, min_liquidity_offset_history, option),
+			(7, max_liquidity_offset_history, option),
 		});
 		// On rust prior to 1.60 `Instant::duration_since` will panic if time goes backwards.
 		// We write `last_updated` as wallclock time even though it's ultimately an `Instant` (which
@@ -1689,6 +1936,20 @@ impl<T: Time> Readable for ChannelLiquidity<T> {
 		let last_updated = if wall_clock_now > duration_since_epoch {
 			now - (wall_clock_now - duration_since_epoch)
 		} else { now };
+		if min_liquidity_offset_history.is_none() {
+			if let Some(legacy_buckets) = legacy_min_liq_offset_history {
+				min_liquidity_offset_history = Some(legacy_buckets.into_current());
+			} else {
+				min_liquidity_offset_history = Some(HistoricalBucketRangeTracker::new());
+			}
+		}
+		if max_liquidity_offset_history.is_none() {
+			if let Some(legacy_buckets) = legacy_max_liq_offset_history {
+				max_liquidity_offset_history = Some(legacy_buckets.into_current());
+			} else {
+				max_liquidity_offset_history = Some(HistoricalBucketRangeTracker::new());
+			}
+		}
 		Ok(Self {
 			min_liquidity_offset_msat,
 			max_liquidity_offset_msat,
@@ -1701,7 +1962,7 @@ impl<T: Time> Readable for ChannelLiquidity<T> {
 
 #[cfg(test)]
 mod tests {
-	use super::{ChannelLiquidity, HistoricalBucketRangeTracker, ProbabilisticScoringParameters, ProbabilisticScorerUsingTime};
+	use super::{ChannelLiquidity, HistoricalBucketRangeTracker, ProbabilisticScoringFeeParameters, ProbabilisticScoringDecayParameters, ProbabilisticScorerUsingTime};
 	use crate::blinded_path::{BlindedHop, BlindedPath};
 	use crate::util::config::UserConfig;
 	use crate::util::time::Time;
@@ -1817,20 +2078,20 @@ mod tests {
 		let chain_source: Option<&crate::util::test_utils::TestChainSource> = None;
 		network_graph.update_channel_from_announcement(
 			&signed_announcement, &chain_source).unwrap();
-		update_channel(network_graph, short_channel_id, node_1_key, 0, 1_000);
-		update_channel(network_graph, short_channel_id, node_2_key, 1, 0);
+		update_channel(network_graph, short_channel_id, node_1_key, 0, 1_000, 100);
+		update_channel(network_graph, short_channel_id, node_2_key, 1, 0, 100);
 	}
 
 	fn update_channel(
 		network_graph: &mut NetworkGraph<&TestLogger>, short_channel_id: u64, node_key: SecretKey,
-		flags: u8, htlc_maximum_msat: u64
+		flags: u8, htlc_maximum_msat: u64, timestamp: u32,
 	) {
 		let genesis_hash = genesis_block(Network::Testnet).header.block_hash();
 		let secp_ctx = Secp256k1::new();
 		let unsigned_update = UnsignedChannelUpdate {
 			chain_hash: genesis_hash,
 			short_channel_id,
-			timestamp: 100,
+			timestamp,
 			flags,
 			cltv_expiry_delta: 18,
 			htlc_minimum_msat: 0,
@@ -1874,8 +2135,8 @@ mod tests {
 		let logger = TestLogger::new();
 		let last_updated = SinceEpoch::now();
 		let network_graph = network_graph(&logger);
-		let params = ProbabilisticScoringParameters::default();
-		let mut scorer = ProbabilisticScorer::new(params, &network_graph, &logger)
+		let decay_params = ProbabilisticScoringDecayParameters::default();
+		let mut scorer = ProbabilisticScorer::new(decay_params, &network_graph, &logger)
			.with_channel(42, ChannelLiquidity {
				min_liquidity_offset_msat: 700, max_liquidity_offset_msat: 100, last_updated,
@@ -1897,52 +2158,52 @@ mod tests {
 
 		// Update minimum liquidity.
let liquidity = scorer.channel_liquidities.get(&42).unwrap() - .as_directed(&source, &target, 0, 1_000, &scorer.params); + .as_directed(&source, &target, 0, 1_000, decay_params); assert_eq!(liquidity.min_liquidity_msat(), 100); assert_eq!(liquidity.max_liquidity_msat(), 300); let liquidity = scorer.channel_liquidities.get(&42).unwrap() - .as_directed(&target, &source, 0, 1_000, &scorer.params); + .as_directed(&target, &source, 0, 1_000, decay_params); assert_eq!(liquidity.min_liquidity_msat(), 700); assert_eq!(liquidity.max_liquidity_msat(), 900); scorer.channel_liquidities.get_mut(&42).unwrap() - .as_directed_mut(&source, &target, 0, 1_000, &scorer.params) + .as_directed_mut(&source, &target, 0, 1_000, decay_params) .set_min_liquidity_msat(200); let liquidity = scorer.channel_liquidities.get(&42).unwrap() - .as_directed(&source, &target, 0, 1_000, &scorer.params); + .as_directed(&source, &target, 0, 1_000, decay_params); assert_eq!(liquidity.min_liquidity_msat(), 200); assert_eq!(liquidity.max_liquidity_msat(), 300); let liquidity = scorer.channel_liquidities.get(&42).unwrap() - .as_directed(&target, &source, 0, 1_000, &scorer.params); + .as_directed(&target, &source, 0, 1_000, decay_params); assert_eq!(liquidity.min_liquidity_msat(), 700); assert_eq!(liquidity.max_liquidity_msat(), 800); // Update maximum liquidity. let liquidity = scorer.channel_liquidities.get(&43).unwrap() - .as_directed(&target, &recipient, 0, 1_000, &scorer.params); + .as_directed(&target, &recipient, 0, 1_000, decay_params); assert_eq!(liquidity.min_liquidity_msat(), 700); assert_eq!(liquidity.max_liquidity_msat(), 900); let liquidity = scorer.channel_liquidities.get(&43).unwrap() - .as_directed(&recipient, &target, 0, 1_000, &scorer.params); + .as_directed(&recipient, &target, 0, 1_000, decay_params); assert_eq!(liquidity.min_liquidity_msat(), 100); assert_eq!(liquidity.max_liquidity_msat(), 300); scorer.channel_liquidities.get_mut(&43).unwrap() - .as_directed_mut(&target, &recipient, 0, 1_000, &scorer.params) + .as_directed_mut(&target, &recipient, 0, 1_000, decay_params) .set_max_liquidity_msat(200); let liquidity = scorer.channel_liquidities.get(&43).unwrap() - .as_directed(&target, &recipient, 0, 1_000, &scorer.params); + .as_directed(&target, &recipient, 0, 1_000, decay_params); assert_eq!(liquidity.min_liquidity_msat(), 0); assert_eq!(liquidity.max_liquidity_msat(), 200); let liquidity = scorer.channel_liquidities.get(&43).unwrap() - .as_directed(&recipient, &target, 0, 1_000, &scorer.params); + .as_directed(&recipient, &target, 0, 1_000, decay_params); assert_eq!(liquidity.min_liquidity_msat(), 800); assert_eq!(liquidity.max_liquidity_msat(), 1000); } @@ -1952,8 +2213,8 @@ mod tests { let logger = TestLogger::new(); let last_updated = SinceEpoch::now(); let network_graph = network_graph(&logger); - let params = ProbabilisticScoringParameters::default(); - let mut scorer = ProbabilisticScorer::new(params, &network_graph, &logger) + let decay_params = ProbabilisticScoringDecayParameters::default(); + let mut scorer = ProbabilisticScorer::new(decay_params, &network_graph, &logger) .with_channel(42, ChannelLiquidity { min_liquidity_offset_msat: 200, max_liquidity_offset_msat: 400, last_updated, @@ -1966,42 +2227,42 @@ mod tests { // Check initial bounds. 
let liquidity = scorer.channel_liquidities.get(&42).unwrap() - .as_directed(&source, &target, 0, 1_000, &scorer.params); + .as_directed(&source, &target, 0, 1_000, decay_params); assert_eq!(liquidity.min_liquidity_msat(), 400); assert_eq!(liquidity.max_liquidity_msat(), 800); let liquidity = scorer.channel_liquidities.get(&42).unwrap() - .as_directed(&target, &source, 0, 1_000, &scorer.params); + .as_directed(&target, &source, 0, 1_000, decay_params); assert_eq!(liquidity.min_liquidity_msat(), 200); assert_eq!(liquidity.max_liquidity_msat(), 600); // Reset from source to target. scorer.channel_liquidities.get_mut(&42).unwrap() - .as_directed_mut(&source, &target, 0, 1_000, &scorer.params) + .as_directed_mut(&source, &target, 0, 1_000, decay_params) .set_min_liquidity_msat(900); let liquidity = scorer.channel_liquidities.get(&42).unwrap() - .as_directed(&source, &target, 0, 1_000, &scorer.params); + .as_directed(&source, &target, 0, 1_000, decay_params); assert_eq!(liquidity.min_liquidity_msat(), 900); assert_eq!(liquidity.max_liquidity_msat(), 1_000); let liquidity = scorer.channel_liquidities.get(&42).unwrap() - .as_directed(&target, &source, 0, 1_000, &scorer.params); + .as_directed(&target, &source, 0, 1_000, decay_params); assert_eq!(liquidity.min_liquidity_msat(), 0); assert_eq!(liquidity.max_liquidity_msat(), 100); // Reset from target to source. scorer.channel_liquidities.get_mut(&42).unwrap() - .as_directed_mut(&target, &source, 0, 1_000, &scorer.params) + .as_directed_mut(&target, &source, 0, 1_000, decay_params) .set_min_liquidity_msat(400); let liquidity = scorer.channel_liquidities.get(&42).unwrap() - .as_directed(&source, &target, 0, 1_000, &scorer.params); + .as_directed(&source, &target, 0, 1_000, decay_params); assert_eq!(liquidity.min_liquidity_msat(), 0); assert_eq!(liquidity.max_liquidity_msat(), 600); let liquidity = scorer.channel_liquidities.get(&42).unwrap() - .as_directed(&target, &source, 0, 1_000, &scorer.params); + .as_directed(&target, &source, 0, 1_000, decay_params); assert_eq!(liquidity.min_liquidity_msat(), 400); assert_eq!(liquidity.max_liquidity_msat(), 1_000); } @@ -2011,8 +2272,8 @@ mod tests { let logger = TestLogger::new(); let last_updated = SinceEpoch::now(); let network_graph = network_graph(&logger); - let params = ProbabilisticScoringParameters::default(); - let mut scorer = ProbabilisticScorer::new(params, &network_graph, &logger) + let decay_params = ProbabilisticScoringDecayParameters::default(); + let mut scorer = ProbabilisticScorer::new(decay_params, &network_graph, &logger) .with_channel(42, ChannelLiquidity { min_liquidity_offset_msat: 200, max_liquidity_offset_msat: 400, last_updated, @@ -2025,42 +2286,42 @@ mod tests { // Check initial bounds. let liquidity = scorer.channel_liquidities.get(&42).unwrap() - .as_directed(&source, &target, 0, 1_000, &scorer.params); + .as_directed(&source, &target, 0, 1_000, decay_params); assert_eq!(liquidity.min_liquidity_msat(), 400); assert_eq!(liquidity.max_liquidity_msat(), 800); let liquidity = scorer.channel_liquidities.get(&42).unwrap() - .as_directed(&target, &source, 0, 1_000, &scorer.params); + .as_directed(&target, &source, 0, 1_000, decay_params); assert_eq!(liquidity.min_liquidity_msat(), 200); assert_eq!(liquidity.max_liquidity_msat(), 600); // Reset from source to target. 
scorer.channel_liquidities.get_mut(&42).unwrap() - .as_directed_mut(&source, &target, 0, 1_000, &scorer.params) + .as_directed_mut(&source, &target, 0, 1_000, decay_params) .set_max_liquidity_msat(300); let liquidity = scorer.channel_liquidities.get(&42).unwrap() - .as_directed(&source, &target, 0, 1_000, &scorer.params); + .as_directed(&source, &target, 0, 1_000, decay_params); assert_eq!(liquidity.min_liquidity_msat(), 0); assert_eq!(liquidity.max_liquidity_msat(), 300); let liquidity = scorer.channel_liquidities.get(&42).unwrap() - .as_directed(&target, &source, 0, 1_000, &scorer.params); + .as_directed(&target, &source, 0, 1_000, decay_params); assert_eq!(liquidity.min_liquidity_msat(), 700); assert_eq!(liquidity.max_liquidity_msat(), 1_000); // Reset from target to source. scorer.channel_liquidities.get_mut(&42).unwrap() - .as_directed_mut(&target, &source, 0, 1_000, &scorer.params) + .as_directed_mut(&target, &source, 0, 1_000, decay_params) .set_max_liquidity_msat(600); let liquidity = scorer.channel_liquidities.get(&42).unwrap() - .as_directed(&source, &target, 0, 1_000, &scorer.params); + .as_directed(&source, &target, 0, 1_000, decay_params); assert_eq!(liquidity.min_liquidity_msat(), 400); assert_eq!(liquidity.max_liquidity_msat(), 1_000); let liquidity = scorer.channel_liquidities.get(&42).unwrap() - .as_directed(&target, &source, 0, 1_000, &scorer.params); + .as_directed(&target, &source, 0, 1_000, decay_params); assert_eq!(liquidity.min_liquidity_msat(), 0); assert_eq!(liquidity.max_liquidity_msat(), 600); } @@ -2069,11 +2330,12 @@ mod tests { fn increased_penalty_nearing_liquidity_upper_bound() { let logger = TestLogger::new(); let network_graph = network_graph(&logger); - let params = ProbabilisticScoringParameters { + let params = ProbabilisticScoringFeeParameters { liquidity_penalty_multiplier_msat: 1_000, - ..ProbabilisticScoringParameters::zero_penalty() + ..ProbabilisticScoringFeeParameters::zero_penalty() }; - let scorer = ProbabilisticScorer::new(params, &network_graph, &logger); + let decay_params = ProbabilisticScoringDecayParameters::default(); + let scorer = ProbabilisticScorer::new(decay_params, &network_graph, &logger); let source = source_node_id(); let target = target_node_id(); @@ -2082,32 +2344,32 @@ mod tests { inflight_htlc_msat: 0, effective_capacity: EffectiveCapacity::Total { capacity_msat: 1_024_000, htlc_maximum_msat: 1_000 }, }; - assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 0); + assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, ¶ms), 0); let usage = ChannelUsage { amount_msat: 10_240, ..usage }; - assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 0); + assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, ¶ms), 0); let usage = ChannelUsage { amount_msat: 102_400, ..usage }; - assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 47); + assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, ¶ms), 47); let usage = ChannelUsage { amount_msat: 1_023_999, ..usage }; - assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 2_000); + assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, ¶ms), 2_000); let usage = ChannelUsage { amount_msat: 128, inflight_htlc_msat: 0, effective_capacity: EffectiveCapacity::Total { capacity_msat: 1_024, htlc_maximum_msat: 1_000 }, }; - assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 58); + assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, ¶ms), 58); let 
usage = ChannelUsage { amount_msat: 256, ..usage }; - assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 125); + assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, ¶ms), 125); let usage = ChannelUsage { amount_msat: 374, ..usage }; - assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 198); + assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, ¶ms), 198); let usage = ChannelUsage { amount_msat: 512, ..usage }; - assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 300); + assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, ¶ms), 300); let usage = ChannelUsage { amount_msat: 640, ..usage }; - assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 425); + assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, ¶ms), 425); let usage = ChannelUsage { amount_msat: 768, ..usage }; - assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 602); + assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, ¶ms), 602); let usage = ChannelUsage { amount_msat: 896, ..usage }; - assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 902); + assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, ¶ms), 902); } #[test] @@ -2115,12 +2377,15 @@ mod tests { let logger = TestLogger::new(); let last_updated = SinceEpoch::now(); let network_graph = network_graph(&logger); - let params = ProbabilisticScoringParameters { + let params = ProbabilisticScoringFeeParameters { liquidity_penalty_multiplier_msat: 1_000, considered_impossible_penalty_msat: u64::max_value(), - ..ProbabilisticScoringParameters::zero_penalty() + ..ProbabilisticScoringFeeParameters::zero_penalty() + }; + let decay_params = ProbabilisticScoringDecayParameters { + ..ProbabilisticScoringDecayParameters::zero_penalty() }; - let scorer = ProbabilisticScorer::new(params, &network_graph, &logger) + let scorer = ProbabilisticScorer::new(decay_params, &network_graph, &logger) .with_channel(42, ChannelLiquidity { min_liquidity_offset_msat: 40, max_liquidity_offset_msat: 40, last_updated, @@ -2135,23 +2400,23 @@ mod tests { inflight_htlc_msat: 0, effective_capacity: EffectiveCapacity::Total { capacity_msat: 100, htlc_maximum_msat: 1_000 }, }; - assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 0); + assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, ¶ms), 0); let usage = ChannelUsage { amount_msat: 50, ..usage }; - assert_ne!(scorer.channel_penalty_msat(42, &source, &target, usage), 0); - assert_ne!(scorer.channel_penalty_msat(42, &source, &target, usage), u64::max_value()); + assert_ne!(scorer.channel_penalty_msat(42, &source, &target, usage, ¶ms), 0); + assert_ne!(scorer.channel_penalty_msat(42, &source, &target, usage, ¶ms), u64::max_value()); let usage = ChannelUsage { amount_msat: 61, ..usage }; - assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), u64::max_value()); + assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, ¶ms), u64::max_value()); } #[test] fn does_not_further_penalize_own_channel() { let logger = TestLogger::new(); let network_graph = network_graph(&logger); - let params = ProbabilisticScoringParameters { + let params = ProbabilisticScoringFeeParameters { liquidity_penalty_multiplier_msat: 1_000, - ..ProbabilisticScoringParameters::zero_penalty() + ..ProbabilisticScoringFeeParameters::zero_penalty() }; - let mut scorer = ProbabilisticScorer::new(params, &network_graph, &logger); + let mut 
scorer = ProbabilisticScorer::new(ProbabilisticScoringDecayParameters::default(), &network_graph, &logger); let sender = sender_node_id(); let source = source_node_id(); let usage = ChannelUsage { @@ -2162,24 +2427,24 @@ mod tests { let failed_path = payment_path_for_amount(500); let successful_path = payment_path_for_amount(200); - assert_eq!(scorer.channel_penalty_msat(41, &sender, &source, usage), 301); + assert_eq!(scorer.channel_penalty_msat(41, &sender, &source, usage, ¶ms), 301); scorer.payment_path_failed(&failed_path, 41); - assert_eq!(scorer.channel_penalty_msat(41, &sender, &source, usage), 301); + assert_eq!(scorer.channel_penalty_msat(41, &sender, &source, usage, ¶ms), 301); scorer.payment_path_successful(&successful_path); - assert_eq!(scorer.channel_penalty_msat(41, &sender, &source, usage), 301); + assert_eq!(scorer.channel_penalty_msat(41, &sender, &source, usage, ¶ms), 301); } #[test] fn sets_liquidity_lower_bound_on_downstream_failure() { let logger = TestLogger::new(); let network_graph = network_graph(&logger); - let params = ProbabilisticScoringParameters { + let params = ProbabilisticScoringFeeParameters { liquidity_penalty_multiplier_msat: 1_000, - ..ProbabilisticScoringParameters::zero_penalty() + ..ProbabilisticScoringFeeParameters::zero_penalty() }; - let mut scorer = ProbabilisticScorer::new(params, &network_graph, &logger); + let mut scorer = ProbabilisticScorer::new(ProbabilisticScoringDecayParameters::default(), &network_graph, &logger); let source = source_node_id(); let target = target_node_id(); let path = payment_path_for_amount(500); @@ -2189,32 +2454,32 @@ mod tests { inflight_htlc_msat: 0, effective_capacity: EffectiveCapacity::Total { capacity_msat: 1_000, htlc_maximum_msat: 1_000 }, }; - assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 128); + assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, ¶ms), 128); let usage = ChannelUsage { amount_msat: 500, ..usage }; - assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 301); + assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, ¶ms), 301); let usage = ChannelUsage { amount_msat: 750, ..usage }; - assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 602); + assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, ¶ms), 602); scorer.payment_path_failed(&path, 43); let usage = ChannelUsage { amount_msat: 250, ..usage }; - assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 0); + assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, ¶ms), 0); let usage = ChannelUsage { amount_msat: 500, ..usage }; - assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 0); + assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, ¶ms), 0); let usage = ChannelUsage { amount_msat: 750, ..usage }; - assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 300); + assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, ¶ms), 300); } #[test] fn sets_liquidity_upper_bound_on_failure() { let logger = TestLogger::new(); let network_graph = network_graph(&logger); - let params = ProbabilisticScoringParameters { + let params = ProbabilisticScoringFeeParameters { liquidity_penalty_multiplier_msat: 1_000, considered_impossible_penalty_msat: u64::max_value(), - ..ProbabilisticScoringParameters::zero_penalty() + ..ProbabilisticScoringFeeParameters::zero_penalty() }; - let mut scorer = ProbabilisticScorer::new(params, &network_graph, &logger); + let mut scorer = 
ProbabilisticScorer::new(ProbabilisticScoringDecayParameters::default(), &network_graph, &logger); let source = source_node_id(); let target = target_node_id(); let path = payment_path_for_amount(500); @@ -2224,20 +2489,20 @@ mod tests { inflight_htlc_msat: 0, effective_capacity: EffectiveCapacity::Total { capacity_msat: 1_000, htlc_maximum_msat: 1_000 }, }; - assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 128); + assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, ¶ms), 128); let usage = ChannelUsage { amount_msat: 500, ..usage }; - assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 301); + assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, ¶ms), 301); let usage = ChannelUsage { amount_msat: 750, ..usage }; - assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 602); + assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, ¶ms), 602); scorer.payment_path_failed(&path, 42); let usage = ChannelUsage { amount_msat: 250, ..usage }; - assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 300); + assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, ¶ms), 300); let usage = ChannelUsage { amount_msat: 500, ..usage }; - assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), u64::max_value()); + assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, ¶ms), u64::max_value()); let usage = ChannelUsage { amount_msat: 750, ..usage }; - assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), u64::max_value()); + assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, ¶ms), u64::max_value()); } #[test] @@ -2274,39 +2539,39 @@ mod tests { let node_c = NodeId::from_pubkey(&pub_c); let node_d = NodeId::from_pubkey(&pub_d); - let params = ProbabilisticScoringParameters { + let params = ProbabilisticScoringFeeParameters { liquidity_penalty_multiplier_msat: 1_000, - ..ProbabilisticScoringParameters::zero_penalty() + ..ProbabilisticScoringFeeParameters::zero_penalty() }; - let mut scorer = ProbabilisticScorer::new(params, &network_graph, &logger); + let mut scorer = ProbabilisticScorer::new(ProbabilisticScoringDecayParameters::default(), &network_graph, &logger); let usage = ChannelUsage { amount_msat: 250, inflight_htlc_msat: 0, effective_capacity: EffectiveCapacity::Total { capacity_msat: 1_000, htlc_maximum_msat: 1_000 }, }; - assert_eq!(scorer.channel_penalty_msat(42, &node_a, &node_b, usage), 128); + assert_eq!(scorer.channel_penalty_msat(42, &node_a, &node_b, usage, ¶ms), 128); // Note that a default liquidity bound is used for B -> C as no channel exists - assert_eq!(scorer.channel_penalty_msat(43, &node_b, &node_c, usage), 128); - assert_eq!(scorer.channel_penalty_msat(44, &node_c, &node_d, usage), 128); + assert_eq!(scorer.channel_penalty_msat(43, &node_b, &node_c, usage, ¶ms), 128); + assert_eq!(scorer.channel_penalty_msat(44, &node_c, &node_d, usage, ¶ms), 128); scorer.payment_path_failed(&Path { hops: path, blinded_tail: None }, 43); - assert_eq!(scorer.channel_penalty_msat(42, &node_a, &node_b, usage), 80); + assert_eq!(scorer.channel_penalty_msat(42, &node_a, &node_b, usage, ¶ms), 80); // Note that a default liquidity bound is used for B -> C as no channel exists - assert_eq!(scorer.channel_penalty_msat(43, &node_b, &node_c, usage), 128); - assert_eq!(scorer.channel_penalty_msat(44, &node_c, &node_d, usage), 128); + assert_eq!(scorer.channel_penalty_msat(43, &node_b, &node_c, usage, ¶ms), 128); + 
assert_eq!(scorer.channel_penalty_msat(44, &node_c, &node_d, usage, ¶ms), 128); } #[test] fn reduces_liquidity_upper_bound_along_path_on_success() { let logger = TestLogger::new(); let network_graph = network_graph(&logger); - let params = ProbabilisticScoringParameters { + let params = ProbabilisticScoringFeeParameters { liquidity_penalty_multiplier_msat: 1_000, - ..ProbabilisticScoringParameters::zero_penalty() + ..ProbabilisticScoringFeeParameters::zero_penalty() }; - let mut scorer = ProbabilisticScorer::new(params, &network_graph, &logger); + let mut scorer = ProbabilisticScorer::new(ProbabilisticScoringDecayParameters::default(), &network_graph, &logger); let sender = sender_node_id(); let source = source_node_id(); let target = target_node_id(); @@ -2317,28 +2582,31 @@ mod tests { effective_capacity: EffectiveCapacity::Total { capacity_msat: 1_000, htlc_maximum_msat: 1_000 }, }; - assert_eq!(scorer.channel_penalty_msat(41, &sender, &source, usage), 128); - assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 128); - assert_eq!(scorer.channel_penalty_msat(43, &target, &recipient, usage), 128); + assert_eq!(scorer.channel_penalty_msat(41, &sender, &source, usage, ¶ms), 128); + assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, ¶ms), 128); + assert_eq!(scorer.channel_penalty_msat(43, &target, &recipient, usage, ¶ms), 128); scorer.payment_path_successful(&payment_path_for_amount(500)); - assert_eq!(scorer.channel_penalty_msat(41, &sender, &source, usage), 128); - assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 300); - assert_eq!(scorer.channel_penalty_msat(43, &target, &recipient, usage), 300); + assert_eq!(scorer.channel_penalty_msat(41, &sender, &source, usage, ¶ms), 128); + assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, ¶ms), 300); + assert_eq!(scorer.channel_penalty_msat(43, &target, &recipient, usage, ¶ms), 300); } #[test] fn decays_liquidity_bounds_over_time() { let logger = TestLogger::new(); let network_graph = network_graph(&logger); - let params = ProbabilisticScoringParameters { + let params = ProbabilisticScoringFeeParameters { liquidity_penalty_multiplier_msat: 1_000, - liquidity_offset_half_life: Duration::from_secs(10), considered_impossible_penalty_msat: u64::max_value(), - ..ProbabilisticScoringParameters::zero_penalty() + ..ProbabilisticScoringFeeParameters::zero_penalty() }; - let mut scorer = ProbabilisticScorer::new(params, &network_graph, &logger); + let decay_params = ProbabilisticScoringDecayParameters { + liquidity_offset_half_life: Duration::from_secs(10), + ..ProbabilisticScoringDecayParameters::zero_penalty() + }; + let mut scorer = ProbabilisticScorer::new(decay_params, &network_graph, &logger); let source = source_node_id(); let target = target_node_id(); @@ -2347,77 +2615,94 @@ mod tests { inflight_htlc_msat: 0, effective_capacity: EffectiveCapacity::Total { capacity_msat: 1_024, htlc_maximum_msat: 1_024 }, }; - assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 0); + assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, ¶ms), 0); let usage = ChannelUsage { amount_msat: 1_023, ..usage }; - assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 2_000); + assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, ¶ms), 2_000); scorer.payment_path_failed(&payment_path_for_amount(768), 42); scorer.payment_path_failed(&payment_path_for_amount(128), 43); + // Initial penalties let usage = ChannelUsage { amount_msat: 128, ..usage }; - 
assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 0); + assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, ¶ms), 0); let usage = ChannelUsage { amount_msat: 256, ..usage }; - assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 93); + assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, ¶ms), 93); let usage = ChannelUsage { amount_msat: 768, ..usage }; - assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 1_479); + assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, ¶ms), 1_479); let usage = ChannelUsage { amount_msat: 896, ..usage }; - assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), u64::max_value()); + assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, ¶ms), u64::max_value()); - SinceEpoch::advance(Duration::from_secs(9)); + // No decay + SinceEpoch::advance(Duration::from_secs(4)); let usage = ChannelUsage { amount_msat: 128, ..usage }; - assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 0); + assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, ¶ms), 0); let usage = ChannelUsage { amount_msat: 256, ..usage }; - assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 93); + assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, ¶ms), 93); let usage = ChannelUsage { amount_msat: 768, ..usage }; - assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 1_479); + assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, ¶ms), 1_479); let usage = ChannelUsage { amount_msat: 896, ..usage }; - assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), u64::max_value()); + assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, ¶ms), u64::max_value()); + // Half decay (i.e., three-quarter life) SinceEpoch::advance(Duration::from_secs(1)); + let usage = ChannelUsage { amount_msat: 128, ..usage }; + assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, ¶ms), 22); + let usage = ChannelUsage { amount_msat: 256, ..usage }; + assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, ¶ms), 106); + let usage = ChannelUsage { amount_msat: 768, ..usage }; + assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, ¶ms), 916); + let usage = ChannelUsage { amount_msat: 896, ..usage }; + assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, ¶ms), u64::max_value()); + + // One decay (i.e., half life) + SinceEpoch::advance(Duration::from_secs(5)); let usage = ChannelUsage { amount_msat: 64, ..usage }; - assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 0); + assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, ¶ms), 0); let usage = ChannelUsage { amount_msat: 128, ..usage }; - assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 34); + assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, ¶ms), 34); let usage = ChannelUsage { amount_msat: 896, ..usage }; - assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 1_970); + assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, ¶ms), 1_970); let usage = ChannelUsage { amount_msat: 960, ..usage }; - assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), u64::max_value()); + assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, ¶ms), u64::max_value()); // Fully decay liquidity lower bound. 
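// ---- Editor's aside (illustration only, not part of the diff) ---------------
// The "three-quarter life" numbers above come from decaying offsets by whole
// half-lives with a right shift, then approximating an odd remaining half of a
// half-life by multiplying with 11_585 / 16_384 ~= 1/sqrt(2). This sketch is
// the editor's reading of `decayed_offset_msat`, not a copy of it:
#[cfg(test)]
fn decayed_offset(offset_msat: u64, elapsed_secs: u64, half_life_secs: u64) -> u64 {
	let half_decays = elapsed_secs / (half_life_secs / 2);
	let decayed = offset_msat.checked_shr((half_decays / 2) as u32).unwrap_or(0);
	if half_decays % 2 == 0 { decayed } else {
		// 11_585 / 16_384 ~= FRAC_1_SQRT_2, with 16_384 == 2^14
		(decayed as u128 * 11_585 / 16_384) as u64
	}
}
#[cfg(test)]
#[test]
fn three_quarter_life_example() {
	// Five seconds into a ten-second half-life: one "half decay", i.e. a
	// factor of ~0.707, which is what produces the intermediate penalties
	// checked at the five-second mark above...
	assert_eq!(decayed_offset(256, 5, 10), 181);
	// ...and at ten seconds, exactly one half-life: a plain shift by one.
	assert_eq!(decayed_offset(256, 10, 10), 128);
}
// ---- end aside ---------------------------------------------------------------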
SinceEpoch::advance(Duration::from_secs(10 * 7)); let usage = ChannelUsage { amount_msat: 0, ..usage }; - assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 0); + assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, ¶ms), 0); let usage = ChannelUsage { amount_msat: 1, ..usage }; - assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 0); + assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, ¶ms), 0); let usage = ChannelUsage { amount_msat: 1_023, ..usage }; - assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 2_000); + assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, ¶ms), 2_000); let usage = ChannelUsage { amount_msat: 1_024, ..usage }; - assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), u64::max_value()); + assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, ¶ms), u64::max_value()); // Fully decay liquidity upper bound. SinceEpoch::advance(Duration::from_secs(10)); let usage = ChannelUsage { amount_msat: 0, ..usage }; - assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 0); + assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, ¶ms), 0); let usage = ChannelUsage { amount_msat: 1_024, ..usage }; - assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), u64::max_value()); + assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, ¶ms), u64::max_value()); SinceEpoch::advance(Duration::from_secs(10)); let usage = ChannelUsage { amount_msat: 0, ..usage }; - assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 0); + assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, ¶ms), 0); let usage = ChannelUsage { amount_msat: 1_024, ..usage }; - assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), u64::max_value()); + assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, ¶ms), u64::max_value()); } #[test] fn decays_liquidity_bounds_without_shift_overflow() { let logger = TestLogger::new(); let network_graph = network_graph(&logger); - let params = ProbabilisticScoringParameters { + let params = ProbabilisticScoringFeeParameters { liquidity_penalty_multiplier_msat: 1_000, + ..ProbabilisticScoringFeeParameters::zero_penalty() + }; + let decay_params = ProbabilisticScoringDecayParameters { liquidity_offset_half_life: Duration::from_secs(10), - ..ProbabilisticScoringParameters::zero_penalty() + ..ProbabilisticScoringDecayParameters::default() }; - let mut scorer = ProbabilisticScorer::new(params, &network_graph, &logger); + let mut scorer = ProbabilisticScorer::new(decay_params, &network_graph, &logger); let source = source_node_id(); let target = target_node_id(); let usage = ChannelUsage { @@ -2425,30 +2710,33 @@ mod tests { inflight_htlc_msat: 0, effective_capacity: EffectiveCapacity::Total { capacity_msat: 1_024, htlc_maximum_msat: 1_000 }, }; - assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 125); + assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, ¶ms), 125); scorer.payment_path_failed(&payment_path_for_amount(512), 42); - assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 281); + assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, ¶ms), 281); // An unchecked right shift 64 bits or more in DirectedChannelLiquidity::decayed_offset_msat // would cause an overflow. 
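// ---- Editor's aside (illustration only, not part of the diff) ---------------
// Why `checked_shr` matters in `decayed_offset_msat`: shifting a u64 by 64 or
// more bits panics in debug builds, and in release builds the shift amount
// wraps, which would leave the offset erroneously undecayed. `checked_shr`
// reports the overflow instead, so it can be treated as "fully decayed":
#[cfg(test)]
#[test]
fn checked_shr_guards_large_shifts() {
	let offset: u64 = 281;
	assert_eq!(offset.checked_shr(1), Some(140));       // small shifts act like `>>`
	assert_eq!(offset.checked_shr(64), None);           // 64 half-lives' worth
	assert_eq!(offset.checked_shr(64).unwrap_or(0), 0); // treated as fully decayed
}
// ---- end aside ---------------------------------------------------------------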
SinceEpoch::advance(Duration::from_secs(10 * 64)); - assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 125); + assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, ¶ms), 125); SinceEpoch::advance(Duration::from_secs(10)); - assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 125); + assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, ¶ms), 125); } #[test] fn restricts_liquidity_bounds_after_decay() { let logger = TestLogger::new(); let network_graph = network_graph(&logger); - let params = ProbabilisticScoringParameters { + let params = ProbabilisticScoringFeeParameters { liquidity_penalty_multiplier_msat: 1_000, + ..ProbabilisticScoringFeeParameters::zero_penalty() + }; + let decay_params = ProbabilisticScoringDecayParameters { liquidity_offset_half_life: Duration::from_secs(10), - ..ProbabilisticScoringParameters::zero_penalty() + ..ProbabilisticScoringDecayParameters::default() }; - let mut scorer = ProbabilisticScorer::new(params, &network_graph, &logger); + let mut scorer = ProbabilisticScorer::new(decay_params, &network_graph, &logger); let source = source_node_id(); let target = target_node_id(); let usage = ChannelUsage { @@ -2457,43 +2745,46 @@ mod tests { effective_capacity: EffectiveCapacity::Total { capacity_msat: 1_024, htlc_maximum_msat: 1_000 }, }; - assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 300); + assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, ¶ms), 300); // More knowledge gives higher confidence (256, 768), meaning a lower penalty. scorer.payment_path_failed(&payment_path_for_amount(768), 42); scorer.payment_path_failed(&payment_path_for_amount(256), 43); - assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 281); + assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, ¶ms), 281); // Decaying knowledge gives less confidence (128, 896), meaning a higher penalty. SinceEpoch::advance(Duration::from_secs(10)); - assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 291); + assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, ¶ms), 291); // Reducing the upper bound gives more confidence (128, 832) that the payment amount (512) // is closer to the upper bound, meaning a higher penalty. scorer.payment_path_successful(&payment_path_for_amount(64)); - assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 331); + assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, ¶ms), 331); // Increasing the lower bound gives more confidence (256, 832) that the payment amount (512) // is closer to the lower bound, meaning a lower penalty. scorer.payment_path_failed(&payment_path_for_amount(256), 43); - assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 245); + assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, ¶ms), 245); // Further decaying affects the lower bound more than the upper bound (128, 928). 
 		SinceEpoch::advance(Duration::from_secs(10));
-		assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 280);
+		assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), 280);
 	}
 
 	#[test]
 	fn restores_persisted_liquidity_bounds() {
 		let logger = TestLogger::new();
 		let network_graph = network_graph(&logger);
-		let params = ProbabilisticScoringParameters {
+		let params = ProbabilisticScoringFeeParameters {
 			liquidity_penalty_multiplier_msat: 1_000,
-			liquidity_offset_half_life: Duration::from_secs(10),
 			considered_impossible_penalty_msat: u64::max_value(),
-			..ProbabilisticScoringParameters::zero_penalty()
+			..ProbabilisticScoringFeeParameters::zero_penalty()
+		};
+		let decay_params = ProbabilisticScoringDecayParameters {
+			liquidity_offset_half_life: Duration::from_secs(10),
+			..ProbabilisticScoringDecayParameters::default()
 		};
-		let mut scorer = ProbabilisticScorer::new(params.clone(), &network_graph, &logger);
+		let mut scorer = ProbabilisticScorer::new(decay_params, &network_graph, &logger);
 		let source = source_node_id();
 		let target = target_node_id();
 		let usage = ChannelUsage {
@@ -2503,34 +2794,37 @@ mod tests {
 		};
 
 		scorer.payment_path_failed(&payment_path_for_amount(500), 42);
-		assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), u64::max_value());
+		assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), u64::max_value());
 
 		SinceEpoch::advance(Duration::from_secs(10));
-		assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 473);
+		assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), 473);
 
 		scorer.payment_path_failed(&payment_path_for_amount(250), 43);
-		assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 300);
+		assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), 300);
 
 		let mut serialized_scorer = Vec::new();
 		scorer.write(&mut serialized_scorer).unwrap();
 
 		let mut serialized_scorer = io::Cursor::new(&serialized_scorer);
 		let deserialized_scorer =
-			<ProbabilisticScorer>::read(&mut serialized_scorer, (params, &network_graph, &logger)).unwrap();
-		assert_eq!(deserialized_scorer.channel_penalty_msat(42, &source, &target, usage), 300);
+			<ProbabilisticScorer>::read(&mut serialized_scorer, (decay_params, &network_graph, &logger)).unwrap();
+		assert_eq!(deserialized_scorer.channel_penalty_msat(42, &source, &target, usage, &params), 300);
 	}
 
 	#[test]
 	fn decays_persisted_liquidity_bounds() {
 		let logger = TestLogger::new();
 		let network_graph = network_graph(&logger);
-		let params = ProbabilisticScoringParameters {
+		let params = ProbabilisticScoringFeeParameters {
 			liquidity_penalty_multiplier_msat: 1_000,
-			liquidity_offset_half_life: Duration::from_secs(10),
 			considered_impossible_penalty_msat: u64::max_value(),
-			..ProbabilisticScoringParameters::zero_penalty()
+			..ProbabilisticScoringFeeParameters::zero_penalty()
+		};
+		let decay_params = ProbabilisticScoringDecayParameters {
+			liquidity_offset_half_life: Duration::from_secs(10),
+			..ProbabilisticScoringDecayParameters::zero_penalty()
 		};
-		let mut scorer = ProbabilisticScorer::new(params.clone(), &network_graph, &logger);
+		let mut scorer = ProbabilisticScorer::new(decay_params, &network_graph, &logger);
 		let source = source_node_id();
 		let target = target_node_id();
 		let usage = ChannelUsage {
@@ -2540,7 +2834,7 @@ mod tests {
 		};
 
 		scorer.payment_path_failed(&payment_path_for_amount(500), 42);
-		assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), u64::max_value());
+		assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), u64::max_value());
 
 		let mut serialized_scorer = Vec::new();
 		scorer.write(&mut serialized_scorer).unwrap();
@@ -2549,14 +2843,14 @@ mod tests {
 		let mut serialized_scorer = io::Cursor::new(&serialized_scorer);
 		let deserialized_scorer =
-			<ProbabilisticScorer>::read(&mut serialized_scorer, (params, &network_graph, &logger)).unwrap();
-		assert_eq!(deserialized_scorer.channel_penalty_msat(42, &source, &target, usage), 473);
+			<ProbabilisticScorer>::read(&mut serialized_scorer, (decay_params, &network_graph, &logger)).unwrap();
+		assert_eq!(deserialized_scorer.channel_penalty_msat(42, &source, &target, usage, &params), 473);
 
 		scorer.payment_path_failed(&payment_path_for_amount(250), 43);
-		assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 300);
+		assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), 300);
 
 		SinceEpoch::advance(Duration::from_secs(10));
-		assert_eq!(deserialized_scorer.channel_penalty_msat(42, &source, &target, usage), 365);
+		assert_eq!(deserialized_scorer.channel_penalty_msat(42, &source, &target, usage, &params), 365);
 	}
 
 	#[test]
@@ -2565,8 +2859,8 @@ mod tests {
 		// 50k sat reserve).
 		let logger = TestLogger::new();
 		let network_graph = network_graph(&logger);
-		let params = ProbabilisticScoringParameters::default();
-		let scorer = ProbabilisticScorer::new(params, &network_graph, &logger);
+		let params = ProbabilisticScoringFeeParameters::default();
+		let scorer = ProbabilisticScorer::new(ProbabilisticScoringDecayParameters::default(), &network_graph, &logger);
 		let source = source_node_id();
 		let target = target_node_id();
 
 		let usage = ChannelUsage {
 			amount_msat: 100_000_000,
 			inflight_htlc_msat: 0,
 			effective_capacity: EffectiveCapacity::Total { capacity_msat: 950_000_000, htlc_maximum_msat: 1_000 },
 		};
-		assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 4375);
+		assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), 4375);
 		let usage = ChannelUsage {
 			effective_capacity: EffectiveCapacity::Total { capacity_msat: 1_950_000_000, htlc_maximum_msat: 1_000 }, ..usage
 		};
-		assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 2739);
+		assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), 2739);
 		let usage = ChannelUsage {
 			effective_capacity: EffectiveCapacity::Total { capacity_msat: 2_950_000_000, htlc_maximum_msat: 1_000 }, ..usage
 		};
-		assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 2236);
+		assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), 2236);
 		let usage = ChannelUsage {
 			effective_capacity: EffectiveCapacity::Total { capacity_msat: 3_950_000_000, htlc_maximum_msat: 1_000 }, ..usage
 		};
-		assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 1983);
+		assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), 1983);
 		let usage = ChannelUsage {
 			effective_capacity: EffectiveCapacity::Total { capacity_msat: 4_950_000_000, htlc_maximum_msat: 1_000 }, ..usage
 		};
-		assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 1637);
+		assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), 1637);
 		let usage = ChannelUsage {
 			effective_capacity: EffectiveCapacity::Total { capacity_msat: 5_950_000_000, htlc_maximum_msat: 1_000 }, ..usage
 		};
-		assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 1606);
+		assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), 1606);
 		let usage = ChannelUsage {
 			effective_capacity: EffectiveCapacity::Total { capacity_msat: 6_950_000_000, htlc_maximum_msat: 1_000 }, ..usage
 		};
-
assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 1331); + assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, ¶ms), 1331); let usage = ChannelUsage { effective_capacity: EffectiveCapacity::Total { capacity_msat: 7_450_000_000, htlc_maximum_msat: 1_000 }, ..usage }; - assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 1387); + assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, ¶ms), 1387); let usage = ChannelUsage { effective_capacity: EffectiveCapacity::Total { capacity_msat: 7_950_000_000, htlc_maximum_msat: 1_000 }, ..usage }; - assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 1379); + assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, ¶ms), 1379); let usage = ChannelUsage { effective_capacity: EffectiveCapacity::Total { capacity_msat: 8_950_000_000, htlc_maximum_msat: 1_000 }, ..usage }; - assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 1363); + assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, ¶ms), 1363); let usage = ChannelUsage { effective_capacity: EffectiveCapacity::Total { capacity_msat: 9_950_000_000, htlc_maximum_msat: 1_000 }, ..usage }; - assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 1355); + assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, ¶ms), 1355); } #[test] @@ -2630,28 +2924,28 @@ mod tests { effective_capacity: EffectiveCapacity::Total { capacity_msat: 1_024, htlc_maximum_msat: 1_000 }, }; - let params = ProbabilisticScoringParameters { + let params = ProbabilisticScoringFeeParameters { liquidity_penalty_multiplier_msat: 1_000, - ..ProbabilisticScoringParameters::zero_penalty() + ..ProbabilisticScoringFeeParameters::zero_penalty() }; - let scorer = ProbabilisticScorer::new(params, &network_graph, &logger); - assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 58); + let scorer = ProbabilisticScorer::new(ProbabilisticScoringDecayParameters::default(), &network_graph, &logger); + assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, ¶ms), 58); - let params = ProbabilisticScoringParameters { + let params = ProbabilisticScoringFeeParameters { base_penalty_msat: 500, liquidity_penalty_multiplier_msat: 1_000, - anti_probing_penalty_msat: 0, ..ProbabilisticScoringParameters::zero_penalty() + anti_probing_penalty_msat: 0, ..ProbabilisticScoringFeeParameters::zero_penalty() }; - let scorer = ProbabilisticScorer::new(params, &network_graph, &logger); - assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 558); + let scorer = ProbabilisticScorer::new(ProbabilisticScoringDecayParameters::default(), &network_graph, &logger); + assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, ¶ms), 558); - let params = ProbabilisticScoringParameters { + let params = ProbabilisticScoringFeeParameters { base_penalty_msat: 500, liquidity_penalty_multiplier_msat: 1_000, base_penalty_amount_multiplier_msat: (1 << 30), - anti_probing_penalty_msat: 0, ..ProbabilisticScoringParameters::zero_penalty() + anti_probing_penalty_msat: 0, ..ProbabilisticScoringFeeParameters::zero_penalty() }; - let scorer = ProbabilisticScorer::new(params, &network_graph, &logger); - assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 558 + 128); + let scorer = ProbabilisticScorer::new(ProbabilisticScoringDecayParameters::default(), &network_graph, &logger); + assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, ¶ms), 558 + 128); } #[test] @@ 
@@ -2666,21 +2960,21 @@ mod tests {
			effective_capacity: EffectiveCapacity::Total { capacity_msat: 1_024_000, htlc_maximum_msat: 1_000 },
		};

-		let params = ProbabilisticScoringParameters {
+		let params = ProbabilisticScoringFeeParameters {
			liquidity_penalty_multiplier_msat: 1_000,
			liquidity_penalty_amount_multiplier_msat: 0,
-			..ProbabilisticScoringParameters::zero_penalty()
+			..ProbabilisticScoringFeeParameters::zero_penalty()
		};
-		let scorer = ProbabilisticScorer::new(params, &network_graph, &logger);
-		assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 300);
+		let scorer = ProbabilisticScorer::new(ProbabilisticScoringDecayParameters::default(), &network_graph, &logger);
+		assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), 300);

-		let params = ProbabilisticScoringParameters {
+		let params = ProbabilisticScoringFeeParameters {
			liquidity_penalty_multiplier_msat: 1_000,
			liquidity_penalty_amount_multiplier_msat: 256,
-			..ProbabilisticScoringParameters::zero_penalty()
+			..ProbabilisticScoringFeeParameters::zero_penalty()
		};
-		let scorer = ProbabilisticScorer::new(params, &network_graph, &logger);
-		assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 337);
+		let scorer = ProbabilisticScorer::new(ProbabilisticScoringDecayParameters::default(), &network_graph, &logger);
+		assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), 337);
	}

	#[test]
@@ -2695,23 +2989,24 @@ mod tests {
			effective_capacity: EffectiveCapacity::Infinite,
		};

-		let params = ProbabilisticScoringParameters {
+		let params = ProbabilisticScoringFeeParameters {
			liquidity_penalty_multiplier_msat: 40_000,
-			..ProbabilisticScoringParameters::zero_penalty()
+			..ProbabilisticScoringFeeParameters::zero_penalty()
		};
-		let scorer = ProbabilisticScorer::new(params, &network_graph, &logger);
-		assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 80_000);
+		let decay_params = ProbabilisticScoringDecayParameters::zero_penalty();
+		let scorer = ProbabilisticScorer::new(decay_params, &network_graph, &logger);
+		assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), 80_000);
	}

	#[test]
	fn accounts_for_inflight_htlc_usage() {
		let logger = TestLogger::new();
		let network_graph = network_graph(&logger);
-		let params = ProbabilisticScoringParameters {
+		let params = ProbabilisticScoringFeeParameters {
			considered_impossible_penalty_msat: u64::max_value(),
-			..ProbabilisticScoringParameters::zero_penalty()
+			..ProbabilisticScoringFeeParameters::zero_penalty()
		};
-		let scorer = ProbabilisticScorer::new(params, &network_graph, &logger);
+		let scorer = ProbabilisticScorer::new(ProbabilisticScoringDecayParameters::default(), &network_graph, &logger);
		let source = source_node_id();
		let target = target_node_id();

@@ -2720,18 +3015,18 @@ mod tests {
			inflight_htlc_msat: 0,
			effective_capacity: EffectiveCapacity::Total { capacity_msat: 1_000, htlc_maximum_msat: 1_000 },
		};
-		assert_ne!(scorer.channel_penalty_msat(42, &source, &target, usage), u64::max_value());
+		assert_ne!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), u64::max_value());

		let usage = ChannelUsage { inflight_htlc_msat: 251, ..usage };
-		assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), u64::max_value());
+		assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), u64::max_value());
	}

	#[test]
	fn removes_uncertainity_when_exact_liquidity_known() {
		let logger = TestLogger::new();
		let network_graph = network_graph(&logger);
-		let params = ProbabilisticScoringParameters::default();
-		let scorer = ProbabilisticScorer::new(params.clone(), &network_graph, &logger);
+		let params = ProbabilisticScoringFeeParameters::default();
+		let scorer = ProbabilisticScorer::new(ProbabilisticScoringDecayParameters::default(), &network_graph, &logger);
		let source = source_node_id();
		let target = target_node_id();

@@ -2741,27 +3036,29 @@ mod tests {
			inflight_htlc_msat: 0,
			effective_capacity: EffectiveCapacity::ExactLiquidity { liquidity_msat: 1_000 },
		};
-		assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), base_penalty_msat);
+		assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), base_penalty_msat);

		let usage = ChannelUsage { amount_msat: 1_000, ..usage };
-		assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), base_penalty_msat);
+		assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), base_penalty_msat);

		let usage = ChannelUsage { amount_msat: 1_001, ..usage };
-		assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), u64::max_value());
+		assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), u64::max_value());
	}

	#[test]
	fn remembers_historical_failures() {
		let logger = TestLogger::new();
		let network_graph = network_graph(&logger);
-		let params = ProbabilisticScoringParameters {
-			liquidity_offset_half_life: Duration::from_secs(60 * 60),
+		let params = ProbabilisticScoringFeeParameters {
			historical_liquidity_penalty_multiplier_msat: 1024,
			historical_liquidity_penalty_amount_multiplier_msat: 1024,
+			..ProbabilisticScoringFeeParameters::zero_penalty()
+		};
+		let decay_params = ProbabilisticScoringDecayParameters {
+			liquidity_offset_half_life: Duration::from_secs(60 * 60),
			historical_no_updates_half_life: Duration::from_secs(10),
-			..ProbabilisticScoringParameters::zero_penalty()
		};
-		let mut scorer = ProbabilisticScorer::new(params, &network_graph, &logger);
+		let mut scorer = ProbabilisticScorer::new(decay_params, &network_graph, &logger);
		let source = source_node_id();
		let target = target_node_id();

@@ -2770,49 +3067,78 @@ mod tests {
			inflight_htlc_msat: 0,
			effective_capacity: EffectiveCapacity::Total { capacity_msat: 1_024, htlc_maximum_msat: 1_024 },
		};
+		let usage_1 = ChannelUsage {
+			amount_msat: 1,
+			inflight_htlc_msat: 0,
+			effective_capacity: EffectiveCapacity::Total { capacity_msat: 1_024, htlc_maximum_msat: 1_024 },
+		};
+
		// With no historical data the normal liquidity penalty calculation is used.
-		assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 47);
+		assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), 47);
		assert_eq!(scorer.historical_estimated_channel_liquidity_probabilities(42, &target), None);
+		assert_eq!(scorer.historical_estimated_payment_success_probability(42, &target, 42),
+			None);

		scorer.payment_path_failed(&payment_path_for_amount(1), 42);
-		assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 2048);
-		// The "it failed" increment is 32, where the probability should lie fully in the first
-		// octile.
+		assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), 2048);
+		assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage_1, &params), 128);
+		// The "it failed" increment is 32, where the probability should lie several buckets into
+		// the first octile.
		assert_eq!(scorer.historical_estimated_channel_liquidity_probabilities(42, &target),
-			Some(([32, 0, 0, 0, 0, 0, 0, 0], [32, 0, 0, 0, 0, 0, 0, 0])));
+			Some(([32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
+				[0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])));
+		assert!(scorer.historical_estimated_payment_success_probability(42, &target, 1)
+			.unwrap() > 0.35);
+		assert_eq!(scorer.historical_estimated_payment_success_probability(42, &target, 500),
+			Some(0.0));

		// Even after we tell the scorer we definitely have enough available liquidity, it will
		// still remember that there was some failure in the past, and assign a non-0 penalty.
		scorer.payment_path_failed(&payment_path_for_amount(1000), 43);
-		assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 198);
-		// The first octile should be decayed just slightly and the last octile has a new point.
+		assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), 32);
+		// The first points should be decayed just slightly and the last bucket has a new point.
		assert_eq!(scorer.historical_estimated_channel_liquidity_probabilities(42, &target),
-			Some(([31, 0, 0, 0, 0, 0, 0, 32], [31, 0, 0, 0, 0, 0, 0, 32])));
+			Some(([31, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0],
+				[0, 0, 0, 0, 0, 0, 31, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 32])));
+
+		// The exact success probability is a bit complicated and involves integer rounding, so we
+		// simply check bounds here.
+		let five_hundred_prob =
+			scorer.historical_estimated_payment_success_probability(42, &target, 500).unwrap();
+		assert!(five_hundred_prob > 0.66);
+		assert!(five_hundred_prob < 0.68);
+		let one_prob =
+			scorer.historical_estimated_payment_success_probability(42, &target, 1).unwrap();
+		assert!(one_prob < 1.0);
+		assert!(one_prob > 0.95);

		// Advance the time forward 16 half-lives (which the docs claim will ensure all data is
		// gone), and check that we're back to where we started.
		SinceEpoch::advance(Duration::from_secs(10 * 16));
-		assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 47);
+		assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), 47);
		// Once fully decayed we still have data internally, but it's all-0s and is thus reported
		// as no data (`None`). In the future we may remove the data entirely instead.
		assert_eq!(scorer.historical_estimated_channel_liquidity_probabilities(42, &target),
-			Some(([0; 8], [0; 8])));
+			None);
+		assert_eq!(scorer.historical_estimated_payment_success_probability(42, &target, 1), None);

-		let usage = ChannelUsage {
+		let mut usage = ChannelUsage {
			amount_msat: 100,
			inflight_htlc_msat: 1024,
			effective_capacity: EffectiveCapacity::Total { capacity_msat: 1_024, htlc_maximum_msat: 1_024 },
		};
		scorer.payment_path_failed(&payment_path_for_amount(1), 42);
-		assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 409);
+		assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), 2048);
+		usage.inflight_htlc_msat = 0;
+		assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), 866);

		let usage = ChannelUsage {
			amount_msat: 1,
			inflight_htlc_msat: 0,
-			effective_capacity: EffectiveCapacity::MaximumHTLC { amount_msat: 0 },
+			effective_capacity: EffectiveCapacity::AdvertisedMaxHTLC { amount_msat: 0 },
		};
-		assert_eq!(scorer.channel_penalty_msat(42, &target, &source, usage), 2048);
+		assert_eq!(scorer.channel_penalty_msat(42, &target, &source, usage, &params), 2048);

		// Advance to decay all liquidity offsets to zero.
		SinceEpoch::advance(Duration::from_secs(60 * 60 * 10));
@@ -2833,11 +3159,11 @@ mod tests {
		let network_graph = network_graph(&logger);
		let source = source_node_id();
		let target = target_node_id();
-		let params = ProbabilisticScoringParameters {
+		let params = ProbabilisticScoringFeeParameters {
			anti_probing_penalty_msat: 500,
-			..ProbabilisticScoringParameters::zero_penalty()
+			..ProbabilisticScoringFeeParameters::zero_penalty()
		};
-		let scorer = ProbabilisticScorer::new(params, &network_graph, &logger);
+		let scorer = ProbabilisticScorer::new(ProbabilisticScoringDecayParameters::default(), &network_graph, &logger);

		// Check we receive no penalty for a low htlc_maximum_msat.
		let usage = ChannelUsage {
			amount_msat: 100_000,
			inflight_htlc_msat: 0,
			effective_capacity: EffectiveCapacity::Total { capacity_msat: 1_024_000, htlc_maximum_msat: 1_000 },
		};
-		assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 0);
+		assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), 0);

		// Check we receive anti-probing penalty for htlc_maximum_msat == channel_capacity.
		let usage = ChannelUsage {
			amount_msat: 100_000,
			inflight_htlc_msat: 0,
			effective_capacity: EffectiveCapacity::Total { capacity_msat: 1_024_000, htlc_maximum_msat: 1_024_000 },
		};
-		assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 500);
+		assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), 500);

		// Check we receive anti-probing penalty for htlc_maximum_msat == channel_capacity/2.
		let usage = ChannelUsage {
			amount_msat: 100_000,
			inflight_htlc_msat: 0,
			effective_capacity: EffectiveCapacity::Total { capacity_msat: 1_024_000, htlc_maximum_msat: 512_000 },
		};
-		assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 500);
+		assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), 500);

		// Check we receive no anti-probing penalty for htlc_maximum_msat == channel_capacity/2 - 1.
		let usage = ChannelUsage {
@@ -2869,7 +3195,7 @@ mod tests {
			inflight_htlc_msat: 0,
			effective_capacity: EffectiveCapacity::Total { capacity_msat: 1_024_000, htlc_maximum_msat: 511_999 },
		};
-		assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 0);
+		assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), 0);
	}

	#[test]
@@ -2877,12 +3203,12 @@ mod tests {
		// Make sure we'll account for a blinded path's final_value_msat in scoring
		let logger = TestLogger::new();
		let network_graph = network_graph(&logger);
-		let params = ProbabilisticScoringParameters {
+		let params = ProbabilisticScoringFeeParameters {
			liquidity_penalty_multiplier_msat: 1_000,
-			liquidity_offset_half_life: Duration::from_secs(10),
-			..ProbabilisticScoringParameters::zero_penalty()
+			..ProbabilisticScoringFeeParameters::zero_penalty()
		};
-		let mut scorer = ProbabilisticScorer::new(params, &network_graph, &logger);
+		let decay_params = ProbabilisticScoringDecayParameters::default();
+		let mut scorer = ProbabilisticScorer::new(decay_params, &network_graph, &logger);
		let source = source_node_id();
		let target = target_node_id();
		let usage = ChannelUsage {
@@ -2890,7 +3216,7 @@ mod tests {
			inflight_htlc_msat: 0,
			effective_capacity: EffectiveCapacity::Total { capacity_msat: 1_024, htlc_maximum_msat: 1_000 },
		};
-		assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 300);
+		assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), 300);

		let mut path = payment_path_for_amount(768);
		let recipient_hop = path.hops.pop().unwrap();
@@ -2917,8 +3243,81 @@ mod tests {
		scorer.payment_path_failed(&path, 43);

		let liquidity = scorer.channel_liquidities.get(&42).unwrap()
-			.as_directed(&source, &target, 0, 1_000, &scorer.params);
+			.as_directed(&source, &target, 0, 1_000, decay_params);
		assert_eq!(liquidity.min_liquidity_msat(), 256);
		assert_eq!(liquidity.max_liquidity_msat(), 768);
	}
+
+	#[test]
+	fn realistic_historical_failures() {
+		// The motivation for the unequal-sized buckets came largely from attempting to pay 10k
+		// sats over a one-bitcoin channel. This tests that case explicitly, ensuring that we
+		// score it properly.
+		let logger = TestLogger::new();
+		let mut network_graph = network_graph(&logger);
+		let params = ProbabilisticScoringFeeParameters {
+			historical_liquidity_penalty_multiplier_msat: 1024,
+			historical_liquidity_penalty_amount_multiplier_msat: 1024,
+			..ProbabilisticScoringFeeParameters::zero_penalty()
+		};
+		let decay_params = ProbabilisticScoringDecayParameters {
+			liquidity_offset_half_life: Duration::from_secs(60 * 60),
+			historical_no_updates_half_life: Duration::from_secs(10),
+			..ProbabilisticScoringDecayParameters::default()
+		};
+
+		let capacity_msat = 100_000_000_000;
+		update_channel(&mut network_graph, 42, source_privkey(), 0, capacity_msat, 200);
+		update_channel(&mut network_graph, 42, target_privkey(), 1, capacity_msat, 200);
+
+		let mut scorer = ProbabilisticScorer::new(decay_params, &network_graph, &logger);
+		let source = source_node_id();
+		let target = target_node_id();
+
+		let mut amount_msat = 10_000_000;
+		let usage = ChannelUsage {
+			amount_msat,
+			inflight_htlc_msat: 0,
+			effective_capacity: EffectiveCapacity::Total { capacity_msat, htlc_maximum_msat: capacity_msat },
+		};
+		// With no historical data the normal liquidity penalty calculation is used, which in
+		// this case yields a negligible penalty.
+		assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), 0);
+		assert_eq!(scorer.historical_estimated_channel_liquidity_probabilities(42, &target),
+			None);
+		assert_eq!(scorer.historical_estimated_payment_success_probability(42, &target, 42),
+			None);
+
+		// Fail to pay once, and then check the buckets and penalty.
+		scorer.payment_path_failed(&payment_path_for_amount(amount_msat), 42);
+		// The penalty should be the maximum penalty, as the payment we're scoring lands in the
+		// same bucket as the only max-liquidity datapoint.
+		assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params),
+			2048 + 2048 * amount_msat / super::AMOUNT_PENALTY_DIVISOR);
+		// The "it failed" increment is 32, which we should apply to the first upper-bound (between
+		// 6k sats and 12k sats).
+		assert_eq!(scorer.historical_estimated_channel_liquidity_probabilities(42, &target),
+			Some(([32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
+				[0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])));
+		// The success probability estimate itself should be zero.
+		assert_eq!(scorer.historical_estimated_payment_success_probability(42, &target, amount_msat),
+			Some(0.0));
+
+		// Now test again with the amount in the bottom bucket.
+		amount_msat /= 2;
+		// The new amount is entirely within the only minimum bucket with any points, so the
+		// probability we assign is 1/2.
+		assert_eq!(scorer.historical_estimated_payment_success_probability(42, &target, amount_msat),
+			Some(0.5));
+
+		// ...but once we see a failure, we consider the payment to be substantially less likely.
+		// We still look at the second max bucket, which now shows 31, though the reported success
+		// probability estimate rounds down to zero here.
+		scorer.payment_path_failed(&payment_path_for_amount(amount_msat), 42);
+		assert_eq!(scorer.historical_estimated_channel_liquidity_probabilities(42, &target),
+			Some(([63, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
+				[32, 31, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])));
+		assert_eq!(scorer.historical_estimated_payment_success_probability(42, &target, amount_msat),
+			Some(0.0));
+	}
}
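Taken together, the test hunks above all exercise one API change: decay behavior (`ProbabilisticScoringDecayParameters`) is now fixed when the scorer is constructed, while the penalty knobs (`ProbabilisticScoringFeeParameters`) are passed by reference into every scoring call. A minimal sketch of the new calling convention, assuming this module's test helpers (`network_graph`, `TestLogger`, `source_node_id`, `target_node_id`) are in scope; it is not itself part of the diff:

	// Fee/penalty knobs now live in their own struct...
	let params = ProbabilisticScoringFeeParameters {
		liquidity_penalty_multiplier_msat: 1_000,
		..ProbabilisticScoringFeeParameters::zero_penalty()
	};
	// ...while decay parameters are baked in at construction time.
	let decay_params = ProbabilisticScoringDecayParameters::default();
	let scorer = ProbabilisticScorer::new(decay_params, &network_graph, &logger);
	let usage = ChannelUsage {
		amount_msat: 100,
		inflight_htlc_msat: 0,
		effective_capacity: EffectiveCapacity::Total { capacity_msat: 1_024, htlc_maximum_msat: 1_000 },
	};
	// Fee parameters ride along on each call, so a single long-lived scorer can
	// serve callers that want different penalty settings per routefinding run.
	let penalty_msat = scorer.channel_penalty_msat(42, &source, &target, usage, &params);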
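The decay assertions in `remembers_historical_failures` (a bucket weight of 32 slipping to 31 after a short interval, and all history reported as `None` after 16 half-lives of `historical_no_updates_half_life`) follow from the historical bucket counters being roughly halved once per half-life. A back-of-the-envelope sketch of that arithmetic, assuming whole half-lives only; the scorer's real decay is fractional fixed-point, which is how 32 can decay to 31 after less than one full half-life:

	// Illustrative only: coarse integer halving of a historical bucket weight.
	fn decayed_bucket_weight(weight: u64, elapsed_half_lives: u32) -> u64 {
		// Each elapsed half-life halves the weight (shift capped to avoid overflow UB).
		weight >> elapsed_half_lives.min(63)
	}

	fn main() {
		assert_eq!(decayed_bucket_weight(32, 0), 32); // fresh "it failed" increment
		assert_eq!(decayed_bucket_weight(32, 6), 0);  // a weight of 32 survives at most 5 halvings
		assert_eq!(decayed_bucket_weight(32, 16), 0); // so 16 half-lives certainly clear it
	}

Under this model, `SinceEpoch::advance(Duration::from_secs(10 * 16))` with the test's 10-second half-life amounts to 16 halvings, which zeroes every bucket and is why the accessors then report `None`.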