1 // This file is Copyright its original authors, visible in version control
4 // This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
5 // or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
6 // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
7 // You may not use this file except in accordance with one or both of these
10 //! Utilities for scoring payment channels.
12 //! [`ProbabilisticScorer`] may be given to [`find_route`] to score payment channels during path
13 //! finding when a custom [`ScoreLookUp`] implementation is not needed.
18 //! # extern crate bitcoin;
20 //! # use lightning::routing::gossip::NetworkGraph;
21 //! # use lightning::routing::router::{RouteParameters, find_route};
22 //! # use lightning::routing::scoring::{ProbabilisticScorer, ProbabilisticScoringFeeParameters, ProbabilisticScoringDecayParameters};
23 //! # use lightning::sign::KeysManager;
24 //! # use lightning::util::logger::{Logger, Record};
25 //! # use bitcoin::secp256k1::PublicKey;
27 //! # struct FakeLogger {};
28 //! # impl Logger for FakeLogger {
29 //! # fn log(&self, record: Record) { unimplemented!() }
31 //! # fn find_scored_route(payer: PublicKey, route_params: RouteParameters, network_graph: NetworkGraph<&FakeLogger>) {
32 //! # let logger = FakeLogger {};
34 //! // Use the default channel penalties.
35 //! let params = ProbabilisticScoringFeeParameters::default();
36 //! let decay_params = ProbabilisticScoringDecayParameters::default();
37 //! let scorer = ProbabilisticScorer::new(decay_params, &network_graph, &logger);
39 //! // Or use custom channel penalties.
40 //! let params = ProbabilisticScoringFeeParameters {
41 //! liquidity_penalty_multiplier_msat: 2 * 1000,
42 //! ..ProbabilisticScoringFeeParameters::default()
44 //! let decay_params = ProbabilisticScoringDecayParameters::default();
45 //! let scorer = ProbabilisticScorer::new(decay_params, &network_graph, &logger);
46 //! # let random_seed_bytes = [42u8; 32];
//! let route = find_route(&payer, &route_params, &network_graph, None, &logger, &scorer, &params, &random_seed_bytes);
54 //! Persisting when built with feature `no-std` and restoring without it, or vice versa, uses
55 //! different types and thus is undefined.
57 //! [`find_route`]: crate::routing::router::find_route
59 use crate::ln::msgs::DecodeError;
60 use crate::routing::gossip::{EffectiveCapacity, NetworkGraph, NodeId};
61 use crate::routing::router::{Path, CandidateRouteHop};
62 use crate::routing::log_approx;
63 use crate::util::ser::{Readable, ReadableArgs, Writeable, Writer};
64 use crate::util::logger::Logger;
66 use crate::prelude::*;
68 use core::cell::{RefCell, RefMut, Ref};
69 use core::convert::TryInto;
70 use core::ops::{Deref, DerefMut};
71 use core::time::Duration;
72 use crate::io::{self, Read};
73 use crate::sync::{Mutex, MutexGuard, RwLock, RwLockReadGuard, RwLockWriteGuard};
/// We define Score ever-so-slightly differently based on whether we are being built for C bindings
/// or not. For users, `LockableScore` must somehow be writeable to disk. For Rust users, this is
/// no problem - you move a `Score` that implements `Writeable` into a `Mutex`, lock it, and now
/// you have the original, concrete, `Score` type, which presumably implements `Writeable`.
///
/// For C users, once you've moved the `Score` into a `LockableScore` all you have after locking it
/// is an opaque trait object with an opaque pointer with no type info. Users could take the unsafe
/// approach of blindly casting that opaque pointer to a concrete type and calling `Writeable` from
/// there, but other languages downstream of the C bindings (e.g. Java) can't even do that.
/// Instead, we really want `Score` and `LockableScore` to implement `Writeable` directly, which we
/// do here by defining `Score` differently for `cfg(c_bindings)`.
macro_rules! define_score { ($($supertrait: path)*) => {
/// An interface used to score payment channels for path finding.
///
/// `ScoreLookUp` is used to determine the penalty for a given channel.
///
/// Scoring is in terms of fees willing to be paid in order to avoid routing through a channel.
pub trait ScoreLookUp {
	/// A configurable type which should contain various passed-in parameters for configuring the scorer,
	/// on a per-routefinding-call basis through to the scorer methods,
	/// which are used to determine the parameters for the suitability of channels for use.
	/// Returns the fee in msats willing to be paid to avoid routing `send_amt_msat` through the
	/// given channel in the direction from `source` to `target`.
	///
	/// The channel's capacity (less any other MPP parts that are also being considered for use in
	/// the same payment) is given by `capacity_msat`. It may be determined from various sources
	/// such as a chain data, network gossip, or invoice hints. For invoice hints, a capacity near
	/// [`u64::max_value`] is given to indicate sufficient capacity for the invoice's full amount.
	/// Thus, implementations should be overflow-safe.
	fn channel_penalty_msat(
		&self, candidate: &CandidateRouteHop, usage: ChannelUsage, score_params: &Self::ScoreParams

/// `ScoreUpdate` is used to update the scorer's internal state after a payment attempt.
pub trait ScoreUpdate {
	/// Handles updating channel penalties after failing to route through a channel.
	fn payment_path_failed(&mut self, path: &Path, short_channel_id: u64, duration_since_epoch: Duration);

	/// Handles updating channel penalties after successfully routing along a path.
	fn payment_path_successful(&mut self, path: &Path, duration_since_epoch: Duration);

	/// Handles updating channel penalties after a probe over the given path failed.
	fn probe_failed(&mut self, path: &Path, short_channel_id: u64, duration_since_epoch: Duration);

	/// Handles updating channel penalties after a probe over the given path succeeded.
	fn probe_successful(&mut self, path: &Path, duration_since_epoch: Duration);

	/// Scorers may wish to reduce their certainty of channel liquidity information over time.
	/// Thus, this method is provided to allow scorers to observe the passage of time - the holder
	/// of this object should call this method regularly (generally via the
	/// `lightning-background-processor` crate).
	fn decay_liquidity_certainty(&mut self, duration_since_epoch: Duration);

/// A trait which can both lookup and update routing channel penalty scores.
///
/// This is used in places where both bounds are required and implemented for all types which
/// implement [`ScoreLookUp`] and [`ScoreUpdate`].
///
/// Bindings users may need to manually implement this for their custom scoring implementations.
pub trait Score : ScoreLookUp + ScoreUpdate $(+ $supertrait)* {}

// Blanket impl: for Rust users, anything which can look up and update scores is a `Score`.
#[cfg(not(c_bindings))]
impl<T: ScoreLookUp + ScoreUpdate $(+ $supertrait)*> Score for T {}

// Any (smart) pointer to a `ScoreLookUp` can itself be used as a `ScoreLookUp` by delegating.
#[cfg(not(c_bindings))]
impl<S: ScoreLookUp, T: Deref<Target=S>> ScoreLookUp for T {
	type ScoreParams = S::ScoreParams;
	fn channel_penalty_msat(
		&self, candidate: &CandidateRouteHop, usage: ChannelUsage, score_params: &Self::ScoreParams
		self.deref().channel_penalty_msat(candidate, usage, score_params)

// Likewise, any mutable (smart) pointer to a `ScoreUpdate` is a `ScoreUpdate` by delegation.
#[cfg(not(c_bindings))]
impl<S: ScoreUpdate, T: DerefMut<Target=S>> ScoreUpdate for T {
	fn payment_path_failed(&mut self, path: &Path, short_channel_id: u64, duration_since_epoch: Duration) {
		self.deref_mut().payment_path_failed(path, short_channel_id, duration_since_epoch)

	fn payment_path_successful(&mut self, path: &Path, duration_since_epoch: Duration) {
		self.deref_mut().payment_path_successful(path, duration_since_epoch)

	fn probe_failed(&mut self, path: &Path, short_channel_id: u64, duration_since_epoch: Duration) {
		self.deref_mut().probe_failed(path, short_channel_id, duration_since_epoch)

	fn probe_successful(&mut self, path: &Path, duration_since_epoch: Duration) {
		self.deref_mut().probe_successful(path, duration_since_epoch)

	fn decay_liquidity_certainty(&mut self, duration_since_epoch: Duration) {
		self.deref_mut().decay_liquidity_certainty(duration_since_epoch)

define_score!(Writeable);
#[cfg(not(c_bindings))]
/// A scorer that is accessed under a lock.
///
/// Needed so that calls to [`ScoreLookUp::channel_penalty_msat`] in [`find_route`] can be made while
/// having shared ownership of a scorer but without requiring internal locking in [`ScoreUpdate`]
/// implementations. Internal locking would be detrimental to route finding performance and could
/// result in [`ScoreLookUp::channel_penalty_msat`] returning a different value for the same channel.
///
/// [`find_route`]: crate::routing::router::find_route
pub trait LockableScore<'a> {
	/// The [`ScoreUpdate`] type.
	type ScoreUpdate: 'a + ScoreUpdate;
	/// The [`ScoreLookUp`] type.
	type ScoreLookUp: 'a + ScoreLookUp;

	/// The write locked [`ScoreUpdate`] type.
	type WriteLocked: DerefMut<Target = Self::ScoreUpdate> + Sized;

	/// The read locked [`ScoreLookUp`] type.
	type ReadLocked: Deref<Target = Self::ScoreLookUp> + Sized;

	/// Returns read locked scorer.
	fn read_lock(&'a self) -> Self::ReadLocked;

	/// Returns write locked scorer.
	fn write_lock(&'a self) -> Self::WriteLocked;
/// Refers to a scorer that is accessible under lock and also writeable to disk
///
/// We need this trait to be able to pass in a scorer to `lightning-background-processor` that will enable us to
/// use the Persister to persist it.
pub trait WriteableScore<'a>: LockableScore<'a> + Writeable {}

// Blanket impl: any lockable scorer which is also serializable is a `WriteableScore`.
#[cfg(not(c_bindings))]
impl<'a, T> WriteableScore<'a> for T where T: LockableScore<'a> + Writeable {}
#[cfg(not(c_bindings))]
// A `Mutex`-wrapped `Score` is lockable. As a `Mutex` only offers exclusive access, both the
// read and the write lock resolve to the same exclusive `MutexGuard`.
impl<'a, T: Score + 'a> LockableScore<'a> for Mutex<T> {
	type ScoreUpdate = T;
	type ScoreLookUp = T;

	type WriteLocked = MutexGuard<'a, Self::ScoreUpdate>;
	type ReadLocked = MutexGuard<'a, Self::ScoreLookUp>;

	fn read_lock(&'a self) -> Self::ReadLocked {
		// Panics if the lock is poisoned, i.e. a previous holder panicked.
		Mutex::lock(self).unwrap()

	fn write_lock(&'a self) -> Self::WriteLocked {
		Mutex::lock(self).unwrap()
#[cfg(not(c_bindings))]
// A `RefCell`-wrapped `Score` is lockable for single-threaded (`!Sync`) use. "Locking" here
// presumably maps to `RefCell` borrows, which panic on conflicting borrows -- TODO(review):
// confirm the elided method bodies.
impl<'a, T: Score + 'a> LockableScore<'a> for RefCell<T> {
	type ScoreUpdate = T;
	type ScoreLookUp = T;

	type WriteLocked = RefMut<'a, Self::ScoreUpdate>;
	type ReadLocked = Ref<'a, Self::ScoreLookUp>;

	fn write_lock(&'a self) -> Self::WriteLocked {

	fn read_lock(&'a self) -> Self::ReadLocked {
251 #[cfg(not(c_bindings))]
252 impl<'a, T: Score + 'a> LockableScore<'a> for RwLock<T> {
253 type ScoreUpdate = T;
254 type ScoreLookUp = T;
256 type WriteLocked = RwLockWriteGuard<'a, Self::ScoreLookUp>;
257 type ReadLocked = RwLockReadGuard<'a, Self::ScoreUpdate>;
259 fn read_lock(&'a self) -> Self::ReadLocked {
260 RwLock::read(self).unwrap()
263 fn write_lock(&'a self) -> Self::WriteLocked {
264 RwLock::write(self).unwrap()
/// A concrete implementation of [`LockableScore`] which supports multi-threading.
pub struct MultiThreadedLockableScore<T: Score> {

// The `RwLock` guards are wrapped in the newtypes below so that score-trait impls can be
// provided on the guards themselves.
impl<'a, T: Score + 'a> LockableScore<'a> for MultiThreadedLockableScore<T> {
	type ScoreUpdate = T;
	type ScoreLookUp = T;
	type WriteLocked = MultiThreadedScoreLockWrite<'a, Self::ScoreUpdate>;
	type ReadLocked = MultiThreadedScoreLockRead<'a, Self::ScoreLookUp>;

	fn read_lock(&'a self) -> Self::ReadLocked {
		MultiThreadedScoreLockRead(self.score.read().unwrap())

	fn write_lock(&'a self) -> Self::WriteLocked {
		MultiThreadedScoreLockWrite(self.score.write().unwrap())

// Serialization takes the read lock, so concurrent updates are excluded while writing out.
impl<T: Score> Writeable for MultiThreadedLockableScore<T> {
	fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
		self.score.read().unwrap().write(writer)

impl<'a, T: Score + 'a> WriteableScore<'a> for MultiThreadedLockableScore<T> {}

impl<T: Score> MultiThreadedLockableScore<T> {
	/// Creates a new [`MultiThreadedLockableScore`] given an underlying [`Score`].
	pub fn new(score: T) -> Self {
		MultiThreadedLockableScore { score: RwLock::new(score) }
/// A read-locked [`MultiThreadedLockableScore`], providing [`ScoreLookUp`] access to the
/// underlying scorer.
pub struct MultiThreadedScoreLockRead<'a, T: Score>(RwLockReadGuard<'a, T>);

/// A write-locked [`MultiThreadedLockableScore`], providing [`ScoreUpdate`] access to the
/// underlying scorer.
pub struct MultiThreadedScoreLockWrite<'a, T: Score>(RwLockWriteGuard<'a, T>);

// The read guard derefs to the underlying scorer so it can be used wherever a `ScoreLookUp`
// reference is expected.
impl<'a, T: 'a + Score> Deref for MultiThreadedScoreLockRead<'a, T> {
	fn deref(&self) -> &Self::Target {

// Score lookups on the read guard delegate to the locked scorer.
impl<'a, T: Score> ScoreLookUp for MultiThreadedScoreLockRead<'a, T> {
	type ScoreParams = T::ScoreParams;
	fn channel_penalty_msat(&self, candidate:&CandidateRouteHop, usage: ChannelUsage, score_params: &Self::ScoreParams
		self.0.channel_penalty_msat(candidate, usage, score_params)
// Serializing the write guard writes the underlying scorer directly.
impl<'a, T: Score> Writeable for MultiThreadedScoreLockWrite<'a, T> {
	fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {

impl<'a, T: 'a + Score> Deref for MultiThreadedScoreLockWrite<'a, T> {
	fn deref(&self) -> &Self::Target {

impl<'a, T: 'a + Score> DerefMut for MultiThreadedScoreLockWrite<'a, T> {
	fn deref_mut(&mut self) -> &mut Self::Target {

// All `ScoreUpdate` operations on the write guard delegate to the locked scorer.
impl<'a, T: Score> ScoreUpdate for MultiThreadedScoreLockWrite<'a, T> {
	fn payment_path_failed(&mut self, path: &Path, short_channel_id: u64, duration_since_epoch: Duration) {
		self.0.payment_path_failed(path, short_channel_id, duration_since_epoch)

	fn payment_path_successful(&mut self, path: &Path, duration_since_epoch: Duration) {
		self.0.payment_path_successful(path, duration_since_epoch)

	fn probe_failed(&mut self, path: &Path, short_channel_id: u64, duration_since_epoch: Duration) {
		self.0.probe_failed(path, short_channel_id, duration_since_epoch)

	fn probe_successful(&mut self, path: &Path, duration_since_epoch: Duration) {
		self.0.probe_successful(path, duration_since_epoch)

	fn decay_liquidity_certainty(&mut self, duration_since_epoch: Duration) {
		self.0.decay_liquidity_certainty(duration_since_epoch)
/// Proposed use of a channel passed as a parameter to [`ScoreLookUp::channel_penalty_msat`].
#[derive(Clone, Copy, Debug, PartialEq)]
pub struct ChannelUsage {
	/// The amount to send through the channel, denominated in millisatoshis.
	pub amount_msat: u64,

	/// Total amount, denominated in millisatoshis, already allocated to send through the channel
	/// as part of a multi-path payment.
	pub inflight_htlc_msat: u64,

	/// The effective capacity of the channel.
	pub effective_capacity: EffectiveCapacity,
/// [`ScoreLookUp`] implementation that uses a fixed penalty.
pub struct FixedPenaltyScorer {

impl FixedPenaltyScorer {
	/// Creates a new scorer using `penalty_msat`.
	pub fn with_penalty(penalty_msat: u64) -> Self {
		Self { penalty_msat }

// The penalty is constant: it ignores the candidate hop and the proposed usage entirely.
impl ScoreLookUp for FixedPenaltyScorer {
	// No per-routefinding-call parameters are needed to compute a fixed penalty.
	type ScoreParams = ();
	fn channel_penalty_msat(&self, _: &CandidateRouteHop, _: ChannelUsage, _score_params: &Self::ScoreParams) -> u64 {
// Payment and probe results cannot change a fixed penalty, so all updates are no-ops.
impl ScoreUpdate for FixedPenaltyScorer {
	fn payment_path_failed(&mut self, _path: &Path, _short_channel_id: u64, _duration_since_epoch: Duration) {}

	fn payment_path_successful(&mut self, _path: &Path, _duration_since_epoch: Duration) {}

	fn probe_failed(&mut self, _path: &Path, _short_channel_id: u64, _duration_since_epoch: Duration) {}

	fn probe_successful(&mut self, _path: &Path, _duration_since_epoch: Duration) {}

	fn decay_liquidity_certainty(&mut self, _duration_since_epoch: Duration) {}

impl Writeable for FixedPenaltyScorer {
	// Note: the penalty itself is not serialized -- only an empty TLV stream is written. The
	// penalty is re-supplied at read time via `ReadableArgs<u64>` below.
	fn write<W: Writer>(&self, w: &mut W) -> Result<(), io::Error> {
		write_tlv_fields!(w, {});

impl ReadableArgs<u64> for FixedPenaltyScorer {
	fn read<R: Read>(r: &mut R, penalty_msat: u64) -> Result<Self, DecodeError> {
		read_tlv_fields!(r, {});
		Ok(Self { penalty_msat })
/// [`ScoreLookUp`] implementation using channel success probability distributions.
///
/// Channels are tracked with upper and lower liquidity bounds - when an HTLC fails at a channel,
/// we learn that the upper-bound on the available liquidity is lower than the amount of the HTLC.
/// When a payment is forwarded through a channel (but fails later in the route), we learn the
/// lower-bound on the channel's available liquidity must be at least the value of the HTLC.
///
/// These bounds are then used to determine a success probability using the formula from
/// *Optimally Reliable & Cheap Payment Flows on the Lightning Network* by Rene Pickhardt
/// and Stefan Richter [[1]] (i.e. `(upper_bound - payment_amount) / (upper_bound - lower_bound)`).
///
/// This probability is combined with the [`liquidity_penalty_multiplier_msat`] and
/// [`liquidity_penalty_amount_multiplier_msat`] parameters to calculate a concrete penalty in
/// milli-satoshis. The penalties, when added across all hops, have the property of being linear in
/// terms of the entire path's success probability. This allows the router to directly compare
/// penalties for different paths. See the documentation of those parameters for the exact formulas.
///
/// The liquidity bounds are decayed by halving them every [`liquidity_offset_half_life`].
///
/// Further, we track the history of our upper and lower liquidity bounds for each channel,
/// allowing us to assign a second penalty (using [`historical_liquidity_penalty_multiplier_msat`]
/// and [`historical_liquidity_penalty_amount_multiplier_msat`]) based on the same probability
/// formula, but using the history of a channel rather than our latest estimates for the liquidity
///
/// [1]: https://arxiv.org/abs/2107.05322
/// [`liquidity_penalty_multiplier_msat`]: ProbabilisticScoringFeeParameters::liquidity_penalty_multiplier_msat
/// [`liquidity_penalty_amount_multiplier_msat`]: ProbabilisticScoringFeeParameters::liquidity_penalty_amount_multiplier_msat
/// [`liquidity_offset_half_life`]: ProbabilisticScoringDecayParameters::liquidity_offset_half_life
/// [`historical_liquidity_penalty_multiplier_msat`]: ProbabilisticScoringFeeParameters::historical_liquidity_penalty_multiplier_msat
/// [`historical_liquidity_penalty_amount_multiplier_msat`]: ProbabilisticScoringFeeParameters::historical_liquidity_penalty_amount_multiplier_msat
pub struct ProbabilisticScorer<G: Deref<Target = NetworkGraph<L>>, L: Deref>
where L::Target: Logger {
	// Static decay configuration, fixed at construction.
	decay_params: ProbabilisticScoringDecayParameters,
	// Learned per-channel liquidity estimates, keyed by channel ID.
	channel_liquidities: HashMap<u64, ChannelLiquidity>,
/// Parameters for configuring [`ProbabilisticScorer`].
///
/// Used to configure base, liquidity, and amount penalties, the sum of which comprises the channel
/// penalty (i.e., the amount in msats willing to be paid to avoid routing through the channel).
///
/// The penalty applied to any channel by the [`ProbabilisticScorer`] is the sum of each of the
pub struct ProbabilisticScoringFeeParameters {
	/// A fixed penalty in msats to apply to each channel.
	///
	/// Default value: 500 msat
	pub base_penalty_msat: u64,

	/// A multiplier used with the total amount flowing over a channel to calculate a fixed penalty
	/// applied to each channel, in excess of the [`base_penalty_msat`].
	///
	/// The purpose of the amount penalty is to avoid having fees dominate the channel cost (i.e.,
	/// fees plus penalty) for large payments. The penalty is computed as the product of this
	/// multiplier and `2^30`ths of the total amount flowing over a channel (i.e. the payment
	/// amount plus the amount of any other HTLCs flowing we sent over the same channel).
	///
	/// ie `base_penalty_amount_multiplier_msat * amount_msat / 2^30`
	///
	/// Default value: 8,192 msat
	///
	/// [`base_penalty_msat`]: Self::base_penalty_msat
	pub base_penalty_amount_multiplier_msat: u64,

	/// A multiplier used in conjunction with the negative `log10` of the channel's success
	/// probability for a payment, as determined by our latest estimates of the channel's
	/// liquidity, to determine the liquidity penalty.
	///
	/// The penalty is based in part on the knowledge learned from prior successful and unsuccessful
	/// payments. This knowledge is decayed over time based on [`liquidity_offset_half_life`]. The
	/// penalty is effectively limited to `2 * liquidity_penalty_multiplier_msat` (corresponding to
	/// lower bounding the success probability to `0.01`) when the amount falls within the
	/// uncertainty bounds of the channel liquidity balance. Amounts above the upper bound will
	/// result in a `u64::max_value` penalty, however.
	///
	/// `-log10(success_probability) * liquidity_penalty_multiplier_msat`
	///
	/// Default value: 30,000 msat
	///
	/// [`liquidity_offset_half_life`]: ProbabilisticScoringDecayParameters::liquidity_offset_half_life
	pub liquidity_penalty_multiplier_msat: u64,

	/// A multiplier used in conjunction with the total amount flowing over a channel and the
	/// negative `log10` of the channel's success probability for the payment, as determined by our
	/// latest estimates of the channel's liquidity, to determine the amount penalty.
	///
	/// The purpose of the amount penalty is to avoid having fees dominate the channel cost (i.e.,
	/// fees plus penalty) for large payments. The penalty is computed as the product of this
	/// multiplier and `2^20`ths of the amount flowing over this channel, weighted by the negative
	/// `log10` of the success probability.
	///
	/// `-log10(success_probability) * liquidity_penalty_amount_multiplier_msat * amount_msat / 2^20`
	///
	/// In practice, this means for 0.1 success probability (`-log10(0.1) == 1`) each `2^20`th of
	/// the amount will result in a penalty of the multiplier. And, as the success probability
	/// decreases, the negative `log10` weighting will increase dramatically. For higher success
	/// probabilities, the multiplier will have a decreasing effect as the negative `log10` will
	///
	/// Default value: 192 msat
	pub liquidity_penalty_amount_multiplier_msat: u64,

	/// A multiplier used in conjunction with the negative `log10` of the channel's success
	/// probability for the payment, as determined based on the history of our estimates of the
	/// channel's available liquidity, to determine a penalty.
	///
	/// This penalty is similar to [`liquidity_penalty_multiplier_msat`], however, instead of using
	/// only our latest estimate for the current liquidity available in the channel, it estimates
	/// success probability based on the estimated liquidity available in the channel through
	/// history. Specifically, every time we update our liquidity bounds on a given channel, we
	/// track which of several buckets those bounds fall into, exponentially decaying the
	/// probability of each bucket as new samples are added.
	///
	/// Default value: 10,000 msat
	///
	/// [`liquidity_penalty_multiplier_msat`]: Self::liquidity_penalty_multiplier_msat
	pub historical_liquidity_penalty_multiplier_msat: u64,

	/// A multiplier used in conjunction with the total amount flowing over a channel and the
	/// negative `log10` of the channel's success probability for the payment, as determined based
	/// on the history of our estimates of the channel's available liquidity, to determine a
	///
	/// The purpose of the amount penalty is to avoid having fees dominate the channel cost for
	/// large payments. The penalty is computed as the product of this multiplier and `2^20`ths
	/// of the amount flowing over this channel, weighted by the negative `log10` of the success
	///
	/// This penalty is similar to [`liquidity_penalty_amount_multiplier_msat`], however, instead
	/// of using only our latest estimate for the current liquidity available in the channel, it
	/// estimates success probability based on the estimated liquidity available in the channel
	/// through history. Specifically, every time we update our liquidity bounds on a given
	/// channel, we track which of several buckets those bounds fall into, exponentially decaying
	/// the probability of each bucket as new samples are added.
	///
	/// Default value: 64 msat
	///
	/// [`liquidity_penalty_amount_multiplier_msat`]: Self::liquidity_penalty_amount_multiplier_msat
	pub historical_liquidity_penalty_amount_multiplier_msat: u64,

	/// Manual penalties used for the given nodes. Allows to set a particular penalty for a given
	/// node. Note that a manual penalty of `u64::max_value()` means the node would not ever be
	/// considered during path finding.
	///
	/// This is not exported to bindings users
	pub manual_node_penalties: HashMap<NodeId, u64>,

	/// This penalty is applied when `htlc_maximum_msat` is equal to or larger than half of the
	/// channel's capacity, (ie. htlc_maximum_msat >= 0.5 * channel_capacity) which makes us
	/// prefer nodes with a smaller `htlc_maximum_msat`. We treat such nodes preferentially
	/// as this makes balance discovery attacks harder to execute, thereby creating an incentive
	/// to restrict `htlc_maximum_msat` and improve privacy.
	///
	/// Default value: 250 msat
	pub anti_probing_penalty_msat: u64,

	/// This penalty is applied when the total amount flowing over a channel exceeds our current
	/// estimate of the channel's available liquidity. The total amount is the amount of the
	/// current HTLC plus any HTLCs which we've sent over the same channel.
	///
	/// Note that in this case all other penalties, including the
	/// [`liquidity_penalty_multiplier_msat`] and [`liquidity_penalty_amount_multiplier_msat`]-based
	/// penalties, as well as the [`base_penalty_msat`] and the [`anti_probing_penalty_msat`], if
	/// applicable, are still included in the overall penalty.
	///
	/// If you wish to avoid creating paths with such channels entirely, setting this to a value of
	/// `u64::max_value()` will guarantee that.
	///
	/// Default value: 1_0000_0000_000 msat (1 Bitcoin)
	///
	/// [`liquidity_penalty_multiplier_msat`]: Self::liquidity_penalty_multiplier_msat
	/// [`liquidity_penalty_amount_multiplier_msat`]: Self::liquidity_penalty_amount_multiplier_msat
	/// [`base_penalty_msat`]: Self::base_penalty_msat
	/// [`anti_probing_penalty_msat`]: Self::anti_probing_penalty_msat
	pub considered_impossible_penalty_msat: u64,

	/// In order to calculate most of the scores above, we must first convert a lower and upper
	/// bound on the available liquidity in a channel into the probability that we think a payment
	/// will succeed. That probability is derived from a Probability Density Function for where we
	/// think the liquidity in a channel likely lies, given such bounds.
	///
	/// If this flag is set, that PDF is simply a constant - we assume that the actual available
	/// liquidity in a channel is just as likely to be at any point between our lower and upper
	///
	/// If this flag is *not* set, that PDF is `(x - 0.5*capacity) ^ 2`. That is, we use an
	/// exponential curve which expects the liquidity of a channel to lie "at the edges". This
	/// matches experimental results - most routing nodes do not aggressively rebalance their
	/// channels and flows in the network are often unbalanced, leaving liquidity usually
	///
	/// Thus, for the "best" routes, leave this flag `false`. However, the flag does imply a number
	/// of floating-point multiplications in the hottest routing code, which may lead to routing
	/// performance degradation on some machines.
	///
	/// Default value: false
	pub linear_success_probability: bool,
impl Default for ProbabilisticScoringFeeParameters {
	// Default values match those documented on each field of the struct above.
	fn default() -> Self {
			base_penalty_msat: 500,
			base_penalty_amount_multiplier_msat: 8192,
			liquidity_penalty_multiplier_msat: 30_000,
			liquidity_penalty_amount_multiplier_msat: 192,
			manual_node_penalties: HashMap::new(),
			anti_probing_penalty_msat: 250,
			considered_impossible_penalty_msat: 1_0000_0000_000,
			historical_liquidity_penalty_multiplier_msat: 10_000,
			historical_liquidity_penalty_amount_multiplier_msat: 64,
			linear_success_probability: false,
impl ProbabilisticScoringFeeParameters {
	/// Marks the node with the given `node_id` as banned,
	/// i.e it will be avoided during path finding.
	pub fn add_banned(&mut self, node_id: &NodeId) {
		// A maximal manual penalty is treated as a ban (see `manual_node_penalties` docs).
		self.manual_node_penalties.insert(*node_id, u64::max_value());

	/// Marks all nodes in the given list as banned, i.e.,
	/// they will be avoided during path finding.
	pub fn add_banned_from_list(&mut self, node_ids: Vec<NodeId>) {
			self.manual_node_penalties.insert(id, u64::max_value());

	/// Removes the node with the given `node_id` from the list of nodes to avoid.
	pub fn remove_banned(&mut self, node_id: &NodeId) {
		self.manual_node_penalties.remove(node_id);

	/// Sets a manual penalty for the given node.
	///
	/// A penalty of `u64::max_value()` is equivalent to banning the node entirely.
	pub fn set_manual_penalty(&mut self, node_id: &NodeId, penalty: u64) {
		self.manual_node_penalties.insert(*node_id, penalty);

	/// Removes the node with the given `node_id` from the list of manual penalties.
	pub fn remove_manual_penalty(&mut self, node_id: &NodeId) {
		self.manual_node_penalties.remove(node_id);

	/// Clears the list of manual penalties that are applied during path finding.
	pub fn clear_manual_penalties(&mut self) {
		self.manual_node_penalties = HashMap::new();
impl ProbabilisticScoringFeeParameters {
	// Returns parameters with every penalty zeroed out and a linear success-probability PDF,
	// i.e. a configuration under which the scorer contributes no penalty at all.
	fn zero_penalty() -> Self {
			base_penalty_msat: 0,
			base_penalty_amount_multiplier_msat: 0,
			liquidity_penalty_multiplier_msat: 0,
			liquidity_penalty_amount_multiplier_msat: 0,
			historical_liquidity_penalty_multiplier_msat: 0,
			historical_liquidity_penalty_amount_multiplier_msat: 0,
			manual_node_penalties: HashMap::new(),
			anti_probing_penalty_msat: 0,
			considered_impossible_penalty_msat: 0,
			linear_success_probability: true,
/// Parameters for configuring [`ProbabilisticScorer`].
///
/// Used to configure decay parameters that are static throughout the lifetime of the scorer.
/// these decay parameters affect the score of the channel penalty and are not changed on a
/// per-route penalty cost call.
#[derive(Copy, Clone)]
pub struct ProbabilisticScoringDecayParameters {
	/// If we aren't learning any new datapoints for a channel, the historical liquidity bounds
	/// tracking can simply live on with increasingly stale data. Instead, when a channel has not
	/// seen a liquidity estimate update for this amount of time, the historical datapoints are
	///
	/// For an example of historical_no_updates_half_life being used see [`historical_estimated_channel_liquidity_probabilities`]
	///
	/// Note that after 16 or more half lives all historical data will be completely gone.
	///
	/// Default value: 14 days
	///
	/// [`historical_estimated_channel_liquidity_probabilities`]: ProbabilisticScorer::historical_estimated_channel_liquidity_probabilities
	pub historical_no_updates_half_life: Duration,

	/// Whenever this amount of time elapses since the last update to a channel's liquidity bounds,
	/// the distance from the bounds to "zero" is cut in half. In other words, the lower-bound on
	/// the available liquidity is halved and the upper-bound moves half-way to the channel's total
	///
	/// Because halving the liquidity bounds grows the uncertainty on the channel's liquidity,
	/// the penalty for an amount within the new bounds may change. See the [`ProbabilisticScorer`]
	/// struct documentation for more info on the way the liquidity bounds are used.
	///
	/// For example, if the channel's capacity is 1 million sats, and the current upper and lower
	/// liquidity bounds are 200,000 sats and 600,000 sats, after this amount of time the upper
	/// and lower liquidity bounds will be decayed to 100,000 and 800,000 sats.
	///
	/// Default value: 6 hours
	///
	/// When built with the `no-std` feature, time will never elapse. Therefore, the channel
	/// liquidity knowledge will never decay except when the bounds cross.
	pub liquidity_offset_half_life: Duration,
759 impl Default for ProbabilisticScoringDecayParameters {
760 fn default() -> Self {
// 6 hours and 14 days respectively — these match the "Default value" lines documented on
// the struct's fields above.
762 liquidity_offset_half_life: Duration::from_secs(6 * 60 * 60),
763 historical_no_updates_half_life: Duration::from_secs(60 * 60 * 24 * 14),
769 impl ProbabilisticScoringDecayParameters {
// Constructor whose decay half-lives are identical to `default()`. The "zero penalty" name
// presumably refers to the companion fee parameters being zeroed elsewhere (the relevant
// lines are not visible here) — TODO confirm against the fee-parameter counterpart.
770 fn zero_penalty() -> Self {
772 liquidity_offset_half_life: Duration::from_secs(6 * 60 * 60),
773 historical_no_updates_half_life: Duration::from_secs(60 * 60 * 24 * 14),
778 /// Accounting for channel liquidity balance uncertainty.
780 /// Direction is defined in terms of [`NodeId`] partial ordering, where the source node is the
781 /// first node in the ordering of the channel's counterparties. Thus, swapping the two liquidity
782 /// offset fields gives the opposite direction.
783 struct ChannelLiquidity {
784 /// Lower channel liquidity bound in terms of an offset from zero.
785 min_liquidity_offset_msat: u64,
787 /// Upper channel liquidity bound in terms of an offset from the effective capacity.
788 max_liquidity_offset_msat: u64,
// Bucketed datapoint trackers fed by `update_history_buckets`; they back the historical
// scoring model (see `liquidity_history` on the directed view).
790 min_liquidity_offset_history: HistoricalBucketRangeTracker,
791 max_liquidity_offset_history: HistoricalBucketRangeTracker,
793 /// Time when the liquidity bounds were last modified as an offset since the unix epoch.
794 last_updated: Duration,
796 /// Time when the historical liquidity bounds were last modified as an offset against the unix
798 offset_history_last_updated: Duration,
801 /// A snapshot of [`ChannelLiquidity`] in one direction assuming a certain channel capacity and
802 /// decayed with a given half life.
803 struct DirectedChannelLiquidity<L: Deref<Target = u64>, BRT: Deref<Target = HistoricalBucketRangeTracker>, T: Deref<Target = Duration>> {
// Generic over `Deref`/`DerefMut` targets so the same type serves both as a read-only view
// (built from `&` references by `as_directed`) and a mutable view (built from `&mut`
// references by `as_directed_mut`) over the parent `ChannelLiquidity`'s fields.
804 min_liquidity_offset_msat: L,
805 max_liquidity_offset_msat: L,
806 liquidity_history: HistoricalMinMaxBuckets<BRT>,
809 offset_history_last_updated: T,
812 impl<G: Deref<Target = NetworkGraph<L>>, L: Deref> ProbabilisticScorer<G, L> where L::Target: Logger {
813 /// Creates a new scorer using the given scoring parameters for sending payments from a node
814 /// through a network graph.
815 pub fn new(decay_params: ProbabilisticScoringDecayParameters, network_graph: G, logger: L) -> Self {
// Start with no per-channel knowledge; estimates are learned later via the `ScoreUpdate`
// callbacks (payment/probe successes and failures).
820 channel_liquidities: HashMap::new(),
/// Builder-style helper seeding the scorer with a liquidity estimate for `short_channel_id`.
///
/// Panics if an estimate for the channel is already present.
825 fn with_channel(mut self, short_channel_id: u64, liquidity: ChannelLiquidity) -> Self {
826 assert!(self.channel_liquidities.insert(short_channel_id, liquidity).is_none());
830 /// Dump the contents of this scorer into the configured logger.
832 /// Note that this writes roughly one line per channel for which we have a liquidity estimate,
833 /// which may be a substantial amount of log output.
834 pub fn debug_log_liquidity_stats(&self) {
835 let graph = self.network_graph.read_only();
836 for (scid, liq) in self.channel_liquidities.iter() {
837 if let Some(chan_debug) = graph.channels().get(scid) {
// Logs the current liquidity bounds plus all 32 historical min/max buckets for one
// direction of the channel; invoked once per direction below.
838 let log_direction = |source, target| {
839 if let Some((directed_info, _)) = chan_debug.as_directed_to(target) {
840 let amt = directed_info.effective_capacity().as_msat();
841 let dir_liq = liq.as_directed(source, target, amt);
843 let min_buckets = &dir_liq.liquidity_history.min_liquidity_offset_history.buckets;
844 let max_buckets = &dir_liq.liquidity_history.max_liquidity_offset_history.buckets;
846 log_debug!(self.logger, core::concat!(
847 "Liquidity from {} to {} via {} is in the range ({}, {}).\n",
848 "\tHistorical min liquidity bucket relative probabilities:\n",
849 "\t\t{} {} {} {} {} {} {} {} {} {} {} {} {} {} {} {} {} {} {} {} {} {} {} {} {} {} {} {} {} {} {} {}\n",
850 "\tHistorical max liquidity bucket relative probabilities:\n",
851 "\t\t{} {} {} {} {} {} {} {} {} {} {} {} {} {} {} {} {} {} {} {} {} {} {} {} {} {} {} {} {} {} {} {}"),
852 source, target, scid, dir_liq.min_liquidity_msat(), dir_liq.max_liquidity_msat(),
853 min_buckets[ 0], min_buckets[ 1], min_buckets[ 2], min_buckets[ 3],
854 min_buckets[ 4], min_buckets[ 5], min_buckets[ 6], min_buckets[ 7],
855 min_buckets[ 8], min_buckets[ 9], min_buckets[10], min_buckets[11],
856 min_buckets[12], min_buckets[13], min_buckets[14], min_buckets[15],
857 min_buckets[16], min_buckets[17], min_buckets[18], min_buckets[19],
858 min_buckets[20], min_buckets[21], min_buckets[22], min_buckets[23],
859 min_buckets[24], min_buckets[25], min_buckets[26], min_buckets[27],
860 min_buckets[28], min_buckets[29], min_buckets[30], min_buckets[31],
861 // Note that the liquidity buckets are an offset from the edge, so we
862 // inverse the max order to get the probabilities from zero.
863 max_buckets[31], max_buckets[30], max_buckets[29], max_buckets[28],
864 max_buckets[27], max_buckets[26], max_buckets[25], max_buckets[24],
865 max_buckets[23], max_buckets[22], max_buckets[21], max_buckets[20],
866 max_buckets[19], max_buckets[18], max_buckets[17], max_buckets[16],
867 max_buckets[15], max_buckets[14], max_buckets[13], max_buckets[12],
868 max_buckets[11], max_buckets[10], max_buckets[ 9], max_buckets[ 8],
869 max_buckets[ 7], max_buckets[ 6], max_buckets[ 5], max_buckets[ 4],
870 max_buckets[ 3], max_buckets[ 2], max_buckets[ 1], max_buckets[ 0]);
872 log_debug!(self.logger, "No amount known for SCID {} from {:?} to {:?}", scid, source, target);
// Log each direction of the channel, defined by the node-id ordering of its parties.
876 log_direction(&chan_debug.node_one, &chan_debug.node_two);
877 log_direction(&chan_debug.node_two, &chan_debug.node_one);
879 log_debug!(self.logger, "No network graph entry for SCID {}", scid);
884 /// Query the estimated minimum and maximum liquidity available for sending a payment over the
885 /// channel with `scid` towards the given `target` node.
// Returns `None` when the channel is absent from the network graph, we hold no liquidity
// data for it, or `target` is not a party to the channel (the fall-through `None` return is
// not visible in this view — TODO confirm).
886 pub fn estimated_channel_liquidity_range(&self, scid: u64, target: &NodeId) -> Option<(u64, u64)> {
887 let graph = self.network_graph.read_only();
889 if let Some(chan) = graph.channels().get(&scid) {
890 if let Some(liq) = self.channel_liquidities.get(&scid) {
891 if let Some((directed_info, source)) = chan.as_directed_to(target) {
892 let amt = directed_info.effective_capacity().as_msat();
893 let dir_liq = liq.as_directed(source, target, amt);
894 return Some((dir_liq.min_liquidity_msat(), dir_liq.max_liquidity_msat()));
901 /// Query the historical estimated minimum and maximum liquidity available for sending a
902 /// payment over the channel with `scid` towards the given `target` node.
904 /// Returns two sets of 32 buckets. The first set describes the lower-bound liquidity history,
905 /// the second set describes the upper-bound liquidity history. Each bucket describes the
906 /// relative frequency at which we've seen a liquidity bound in the bucket's range relative to
907 /// the channel's total capacity, on an arbitrary scale. Because the values are slowly decayed,
908 /// more recent data points are weighted more heavily than older datapoints.
910 /// Note that the range of each bucket varies by its location to provide more granular results
911 /// at the edges of a channel's capacity, where it is more likely to sit.
913 /// When scoring, the estimated probability that an upper-/lower-bound lies in a given bucket
914 /// is calculated by dividing that bucket's value with the total value of all buckets.
916 /// For example, using a lower bucket count for illustrative purposes, a value of
917 /// `[0, 0, 0, ..., 0, 32]` indicates that we believe the probability of a bound being very
918 /// close to the channel's capacity to be 100%, and have never (recently) seen it in any other
919 /// bucket. A value of `[31, 0, 0, ..., 0, 0, 32]` indicates we've seen the bound being both
920 /// in the top and bottom bucket, and roughly with similar (recent) frequency.
922 /// Because the datapoints are decayed slowly over time, values will eventually return to
923 /// `Some(([0; 32], [0; 32]))` or `None` if no data remains for a channel.
925 /// In order to fetch a single success probability from the buckets provided here, as used in
926 /// the scoring model, see [`Self::historical_estimated_payment_success_probability`].
927 pub fn historical_estimated_channel_liquidity_probabilities(&self, scid: u64, target: &NodeId)
928 -> Option<([u16; 32], [u16; 32])> {
929 let graph = self.network_graph.read_only();
931 if let Some(chan) = graph.channels().get(&scid) {
932 if let Some(liq) = self.channel_liquidities.get(&scid) {
933 if let Some((directed_info, source)) = chan.as_directed_to(target) {
934 let amt = directed_info.effective_capacity().as_msat();
935 let dir_liq = liq.as_directed(source, target, amt);
937 let min_buckets = dir_liq.liquidity_history.min_liquidity_offset_history.buckets;
938 let mut max_buckets = dir_liq.liquidity_history.max_liquidity_offset_history.buckets;
940 // Note that the liquidity buckets are an offset from the edge, so we inverse
941 // the max order to get the probabilities from zero.
942 max_buckets.reverse();
943 return Some((min_buckets, max_buckets));
950 /// Query the probability of payment success sending the given `amount_msat` over the channel
951 /// with `scid` towards the given `target` node, based on the historical estimated liquidity
954 /// These are the same bounds as returned by
955 /// [`Self::historical_estimated_channel_liquidity_probabilities`] (but not those returned by
956 /// [`Self::estimated_channel_liquidity_range`]).
957 pub fn historical_estimated_payment_success_probability(
958 &self, scid: u64, target: &NodeId, amount_msat: u64, params: &ProbabilisticScoringFeeParameters)
960 let graph = self.network_graph.read_only();
962 if let Some(chan) = graph.channels().get(&scid) {
963 if let Some(liq) = self.channel_liquidities.get(&scid) {
964 if let Some((directed_info, source)) = chan.as_directed_to(target) {
965 let capacity_msat = directed_info.effective_capacity().as_msat();
966 let dir_liq = liq.as_directed(source, target, capacity_msat);
968 return dir_liq.liquidity_history.calculate_success_probability_times_billion(
969 ¶ms, amount_msat, capacity_msat
970 ).map(|p| p as f64 / (1024 * 1024 * 1024) as f64);
978 impl ChannelLiquidity {
// Fresh, fully-uncertain estimate: both offsets zero (bounds at 0 and full capacity) and
// empty history buckets, with both timestamps set to `last_updated`.
979 fn new(last_updated: Duration) -> Self {
981 min_liquidity_offset_msat: 0,
982 max_liquidity_offset_msat: 0,
983 min_liquidity_offset_history: HistoricalBucketRangeTracker::new(),
984 max_liquidity_offset_history: HistoricalBucketRangeTracker::new(),
986 offset_history_last_updated: last_updated,
990 /// Returns a view of the channel liquidity directed from `source` to `target` assuming
993 &self, source: &NodeId, target: &NodeId, capacity_msat: u64,
994 ) -> DirectedChannelLiquidity<&u64, &HistoricalBucketRangeTracker, &Duration> {
// The min/max offsets and histories are swapped for the reverse direction; the selecting
// condition (by `NodeId` ordering, as in `as_directed_mut`) is not visible in this view.
995 let (min_liquidity_offset_msat, max_liquidity_offset_msat, min_liquidity_offset_history, max_liquidity_offset_history) =
997 (&self.min_liquidity_offset_msat, &self.max_liquidity_offset_msat,
998 &self.min_liquidity_offset_history, &self.max_liquidity_offset_history)
1000 (&self.max_liquidity_offset_msat, &self.min_liquidity_offset_msat,
1001 &self.max_liquidity_offset_history, &self.min_liquidity_offset_history)
1004 DirectedChannelLiquidity {
1005 min_liquidity_offset_msat,
1006 max_liquidity_offset_msat,
1007 liquidity_history: HistoricalMinMaxBuckets {
1008 min_liquidity_offset_history,
1009 max_liquidity_offset_history,
1012 last_updated: &self.last_updated,
1013 offset_history_last_updated: &self.offset_history_last_updated,
1017 /// Returns a mutable view of the channel liquidity directed from `source` to `target` assuming
1018 /// `capacity_msat`.
1020 &mut self, source: &NodeId, target: &NodeId, capacity_msat: u64,
1021 ) -> DirectedChannelLiquidity<&mut u64, &mut HistoricalBucketRangeTracker, &mut Duration> {
// Direction is picked by `NodeId` ordering: when `source < target` this is the "forward"
// direction and fields are used as-is; otherwise min/max roles are swapped.
1022 let (min_liquidity_offset_msat, max_liquidity_offset_msat, min_liquidity_offset_history, max_liquidity_offset_history) =
1023 if source < target {
1024 (&mut self.min_liquidity_offset_msat, &mut self.max_liquidity_offset_msat,
1025 &mut self.min_liquidity_offset_history, &mut self.max_liquidity_offset_history)
1027 (&mut self.max_liquidity_offset_msat, &mut self.min_liquidity_offset_msat,
1028 &mut self.max_liquidity_offset_history, &mut self.min_liquidity_offset_history)
1031 DirectedChannelLiquidity {
1032 min_liquidity_offset_msat,
1033 max_liquidity_offset_msat,
1034 liquidity_history: HistoricalMinMaxBuckets {
1035 min_liquidity_offset_history,
1036 max_liquidity_offset_history,
1039 last_updated: &mut self.last_updated,
1040 offset_history_last_updated: &mut self.offset_history_last_updated,
// Exponentially decays `offset` by 0.5^(elapsed / half_life) since `last_updated`. The
// branch for a zero half-life (offset presumably returned unchanged) is not visible here.
1044 fn decayed_offset(&self, offset: u64, duration_since_epoch: Duration,
1045 decay_params: ProbabilisticScoringDecayParameters
1047 let half_life = decay_params.liquidity_offset_half_life.as_secs_f64();
1048 if half_life != 0.0 {
1049 let elapsed_time = duration_since_epoch.saturating_sub(self.last_updated).as_secs_f64();
1050 ((offset as f64) * powf64(0.5, elapsed_time / half_life)) as u64
1057 /// Bounds `-log10` to avoid excessive liquidity penalties for payments with low success
1059 const NEGATIVE_LOG10_UPPER_BOUND: u64 = 2;
1061 /// The rough cutoff at which our precision falls off and we should stop bothering to try to log a
1062 /// ratio, as X in 1/X.
1063 const PRECISION_LOWER_BOUND_DENOMINATOR: u64 = log_approx::LOWER_BITS_BOUND;
1065 /// The divisor used when computing the amount penalty.
1066 const AMOUNT_PENALTY_DIVISOR: u64 = 1 << 20;
/// The divisor used when computing the base amount penalty (applied to
/// `base_penalty_amount_multiplier_msat` in `channel_penalty_msat`).
1067 const BASE_AMOUNT_PENALTY_DIVISOR: u64 = 1 << 30;
/// Cubes three `f64` values, without `powi` because it requires `std` (dunno why).
fn three_f64_pow_3(a: f64, b: f64, c: f64) -> (f64, f64, f64) {
	// Two multiplications per value — exactly what `powi(3)` would do, but `no-std` friendly.
	let cube = |x: f64| x * x * x;
	(cube(a), cube(b), cube(c))
}
1075 /// Given liquidity bounds, calculates the success probability (in the form of a numerator and
1076 /// denominator) of an HTLC. This is a key assumption in our scoring models.
1078 /// Must not return a numerator or denominator greater than 2^31 for arguments less than 2^31.
1080 /// min_zero_implies_no_successes signals that a `min_liquidity_msat` of 0 means we've not
1081 /// (recently) seen an HTLC successfully complete over this channel.
1083 fn success_probability(
1084 amount_msat: u64, min_liquidity_msat: u64, max_liquidity_msat: u64, capacity_msat: u64,
1085 params: &ProbabilisticScoringFeeParameters, min_zero_implies_no_successes: bool,
// Caller contract: min <= amount < max <= capacity. Violations are bugs in the caller.
1087 debug_assert!(min_liquidity_msat <= amount_msat);
1088 debug_assert!(amount_msat < max_liquidity_msat);
1089 debug_assert!(max_liquidity_msat <= capacity_msat);
1091 let (numerator, mut denominator) =
1092 if params.linear_success_probability {
// Uniform-liquidity model: P(success) = (max - amt) / (max - min); +1 avoids a zero
// denominator.
1093 (max_liquidity_msat - amount_msat,
1094 (max_liquidity_msat - min_liquidity_msat).saturating_add(1))
1096 let capacity = capacity_msat as f64;
1097 let min = (min_liquidity_msat as f64) / capacity;
1098 let max = (max_liquidity_msat as f64) / capacity;
1099 let amount = (amount_msat as f64) / capacity;
1101 // Assume the channel has a probability density function of (x - 0.5)^2 for values from
1102 // 0 to 1 (where 1 is the channel's full capacity). The success probability given some
1103 // liquidity bounds is thus the integral under the curve from the amount to maximum
1104 // estimated liquidity, divided by the same integral from the minimum to the maximum
1105 // estimated liquidity bounds.
1107 // Because the integral from x to y is simply (y - 0.5)^3 - (x - 0.5)^3, we can
1108 // calculate the cumulative density function between the min/max bounds trivially. Note
1109 // that we don't bother to normalize the CDF to total to 1, as it will come out in the
1110 // division of num / den.
1111 let (max_pow, amt_pow, min_pow) = three_f64_pow_3(max - 0.5, amount - 0.5, min - 0.5);
1112 let num = max_pow - amt_pow;
1113 let den = max_pow - min_pow;
1115 // Because our numerator and denominator max out at 0.5^3 we need to multiply them by
1116 // quite a large factor to get something useful (ideally in the 2^30 range).
1117 const BILLIONISH: f64 = 1024.0 * 1024.0 * 1024.0;
1118 let numerator = (num * BILLIONISH) as u64 + 1;
1119 let denominator = (den * BILLIONISH) as u64 + 1;
1120 debug_assert!(numerator <= 1 << 30, "Got large numerator ({}) from float {}.", numerator, num);
1121 debug_assert!(denominator <= 1 << 30, "Got large denominator ({}) from float {}.", denominator, den);
1122 (numerator, denominator)
// The `denominator < u64::MAX / 21` guard ensures the scaling below cannot overflow.
1125 if min_zero_implies_no_successes && min_liquidity_msat == 0 &&
1126 denominator < u64::max_value() / 21
1128 // If we have no knowledge of the channel, scale probability down by ~75%
1129 // Note that we prefer to increase the denominator rather than decrease the numerator as
1130 // the denominator is more likely to be larger and thus provide greater precision. This is
1131 // mostly an overoptimization but makes a large difference in tests.
1132 denominator = denominator * 21 / 16
1135 (numerator, denominator)
1138 impl<L: Deref<Target = u64>, BRT: Deref<Target = HistoricalBucketRangeTracker>, T: Deref<Target = Duration>>
1139 DirectedChannelLiquidity< L, BRT, T> {
1140 /// Returns a liquidity penalty for routing the given HTLC `amount_msat` through the channel in
1142 fn penalty_msat(&self, amount_msat: u64, score_params: &ProbabilisticScoringFeeParameters) -> u64 {
1143 let available_capacity = self.capacity_msat;
1144 let max_liquidity_msat = self.max_liquidity_msat();
// Clamp min to max so the bounds are always ordered for the arithmetic below.
1145 let min_liquidity_msat = core::cmp::min(self.min_liquidity_msat(), max_liquidity_msat);
1147 let mut res = if amount_msat <= min_liquidity_msat {
1149 } else if amount_msat >= max_liquidity_msat {
1150 // Equivalent to hitting the else clause below with the amount equal to the effective
1151 // capacity and without any certainty on the liquidity upper bound, plus the
1152 // impossibility penalty.
1153 let negative_log10_times_2048 = NEGATIVE_LOG10_UPPER_BOUND * 2048;
1154 Self::combined_penalty_msat(amount_msat, negative_log10_times_2048,
1155 score_params.liquidity_penalty_multiplier_msat,
1156 score_params.liquidity_penalty_amount_multiplier_msat)
1157 .saturating_add(score_params.considered_impossible_penalty_msat)
1159 let (numerator, denominator) = success_probability(amount_msat,
1160 min_liquidity_msat, max_liquidity_msat, available_capacity, score_params, false);
1161 if denominator - numerator < denominator / PRECISION_LOWER_BOUND_DENOMINATOR {
1162 // If the failure probability is < 1.5625% (as 1 - numerator/denominator < 1/64),
1163 // don't bother trying to use the log approximation as it gets too noisy to be
1164 // particularly helpful, instead just round down to 0.
1167 let negative_log10_times_2048 =
1168 log_approx::negative_log10_times_2048(numerator, denominator);
1169 Self::combined_penalty_msat(amount_msat, negative_log10_times_2048,
1170 score_params.liquidity_penalty_multiplier_msat,
1171 score_params.liquidity_penalty_amount_multiplier_msat)
1175 if amount_msat >= available_capacity {
1176 // We're trying to send more than the capacity, use a max penalty.
1177 res = res.saturating_add(Self::combined_penalty_msat(amount_msat,
1178 NEGATIVE_LOG10_UPPER_BOUND * 2048,
1179 score_params.historical_liquidity_penalty_multiplier_msat,
1180 score_params.historical_liquidity_penalty_amount_multiplier_msat));
// Historical component is only computed when its multipliers are non-zero, avoiding the
// bucket math entirely for configurations that disable it.
1184 if score_params.historical_liquidity_penalty_multiplier_msat != 0 ||
1185 score_params.historical_liquidity_penalty_amount_multiplier_msat != 0 {
1186 if let Some(cumulative_success_prob_times_billion) = self.liquidity_history
1187 .calculate_success_probability_times_billion(
1188 score_params, amount_msat, self.capacity_msat)
1190 let historical_negative_log10_times_2048 =
1191 log_approx::negative_log10_times_2048(cumulative_success_prob_times_billion + 1, 1024 * 1024 * 1024);
1192 res = res.saturating_add(Self::combined_penalty_msat(amount_msat,
1193 historical_negative_log10_times_2048, score_params.historical_liquidity_penalty_multiplier_msat,
1194 score_params.historical_liquidity_penalty_amount_multiplier_msat));
1196 // If we don't have any valid points (or, once decayed, we have less than a full
1197 // point), redo the non-historical calculation with no liquidity bounds tracked and
1198 // the historical penalty multipliers.
1199 let (numerator, denominator) = success_probability(amount_msat, 0,
1200 available_capacity, available_capacity, score_params, true);
1201 let negative_log10_times_2048 =
1202 log_approx::negative_log10_times_2048(numerator, denominator);
1203 res = res.saturating_add(Self::combined_penalty_msat(amount_msat, negative_log10_times_2048,
1204 score_params.historical_liquidity_penalty_multiplier_msat,
1205 score_params.historical_liquidity_penalty_amount_multiplier_msat));
1212 /// Computes the liquidity penalty from the penalty multipliers.
1214 fn combined_penalty_msat(amount_msat: u64, mut negative_log10_times_2048: u64,
1215 liquidity_penalty_multiplier_msat: u64, liquidity_penalty_amount_multiplier_msat: u64,
1217 negative_log10_times_2048 =
1218 negative_log10_times_2048.min(NEGATIVE_LOG10_UPPER_BOUND * 2048);
1220 // Upper bound the liquidity penalty to ensure some channel is selected.
1221 let liquidity_penalty_msat = negative_log10_times_2048
1222 .saturating_mul(liquidity_penalty_multiplier_msat) / 2048;
1223 let amount_penalty_msat = negative_log10_times_2048
1224 .saturating_mul(liquidity_penalty_amount_multiplier_msat)
1225 .saturating_mul(amount_msat) / 2048 / AMOUNT_PENALTY_DIVISOR;
1227 liquidity_penalty_msat.saturating_add(amount_penalty_msat)
1230 /// Returns the lower bound of the channel liquidity balance in this direction.
1232 fn min_liquidity_msat(&self) -> u64 {
1233 *self.min_liquidity_offset_msat
1236 /// Returns the upper bound of the channel liquidity balance in this direction.
// Upper bound = capacity minus the offset-from-capacity; saturates at zero.
1238 fn max_liquidity_msat(&self) -> u64 {
1240 .saturating_sub(*self.max_liquidity_offset_msat)
1244 impl<L: DerefMut<Target = u64>, BRT: DerefMut<Target = HistoricalBucketRangeTracker>, T: DerefMut<Target = Duration>>
1245 DirectedChannelLiquidity<L, BRT, T> {
1246 /// Adjusts the channel liquidity balance bounds when failing to route `amount_msat`.
1247 fn failed_at_channel<Log: Deref>(
1248 &mut self, amount_msat: u64, duration_since_epoch: Duration, chan_descr: fmt::Arguments, logger: &Log
1249 ) where Log::Target: Logger {
1250 let existing_max_msat = self.max_liquidity_msat();
// A failure at this channel proves its liquidity is below `amount_msat`; only tighten the
// upper bound if that is new information.
1251 if amount_msat < existing_max_msat {
1252 log_debug!(logger, "Setting max liquidity of {} from {} to {}", chan_descr, existing_max_msat, amount_msat);
1253 self.set_max_liquidity_msat(amount_msat, duration_since_epoch);
1255 log_trace!(logger, "Max liquidity of {} is {} (already less than or equal to {})",
1256 chan_descr, existing_max_msat, amount_msat);
// History buckets are refreshed even when the bounds didn't move.
1258 self.update_history_buckets(0, duration_since_epoch);
1261 /// Adjusts the channel liquidity balance bounds when failing to route `amount_msat` downstream.
1262 fn failed_downstream<Log: Deref>(
1263 &mut self, amount_msat: u64, duration_since_epoch: Duration, chan_descr: fmt::Arguments, logger: &Log
1264 ) where Log::Target: Logger {
1265 let existing_min_msat = self.min_liquidity_msat();
1266 if amount_msat > existing_min_msat {
1267 log_debug!(logger, "Setting min liquidity of {} from {} to {}", existing_min_msat, chan_descr, amount_msat);
1268 self.set_min_liquidity_msat(amount_msat, duration_since_epoch);
1270 log_trace!(logger, "Min liquidity of {} is {} (already greater than or equal to {})",
1271 chan_descr, existing_min_msat, amount_msat);
1273 self.update_history_buckets(0, duration_since_epoch);
1276 /// Adjusts the channel liquidity balance bounds when successfully routing `amount_msat`.
1277 fn successful<Log: Deref>(&mut self,
1278 amount_msat: u64, duration_since_epoch: Duration, chan_descr: fmt::Arguments, logger: &Log
1279 ) where Log::Target: Logger {
// A success consumed `amount_msat` of liquidity, so the remaining upper bound shrinks by
// that amount (floored at zero).
1280 let max_liquidity_msat = self.max_liquidity_msat().checked_sub(amount_msat).unwrap_or(0);
1281 log_debug!(logger, "Subtracting {} from max liquidity of {} (setting it to {})", amount_msat, chan_descr, max_liquidity_msat);
1282 self.set_max_liquidity_msat(max_liquidity_msat, duration_since_epoch);
1283 self.update_history_buckets(amount_msat, duration_since_epoch);
1286 /// Updates the history buckets for this channel. Because the history buckets track what we now
1287 /// know about the channel's state *prior to our payment* (i.e. what we assume is "steady
1288 /// state"), we allow the caller to set an offset applied to our liquidity bounds which
1289 /// represents the amount of the successful payment we just made.
1290 fn update_history_buckets(&mut self, bucket_offset_msat: u64, duration_since_epoch: Duration) {
1291 self.liquidity_history.min_liquidity_offset_history.track_datapoint(
1292 *self.min_liquidity_offset_msat + bucket_offset_msat, self.capacity_msat
1294 self.liquidity_history.max_liquidity_offset_history.track_datapoint(
1295 self.max_liquidity_offset_msat.saturating_sub(bucket_offset_msat), self.capacity_msat
1297 *self.offset_history_last_updated = duration_since_epoch;
1300 /// Adjusts the lower bound of the channel liquidity balance in this direction.
1301 fn set_min_liquidity_msat(&mut self, amount_msat: u64, duration_since_epoch: Duration) {
1302 *self.min_liquidity_offset_msat = amount_msat;
// If the new lower bound crosses the upper bound, reset the upper bound to full capacity
// (a zero offset) — the bounds must stay ordered.
1303 if amount_msat > self.max_liquidity_msat() {
1304 *self.max_liquidity_offset_msat = 0;
1306 *self.last_updated = duration_since_epoch;
1309 /// Adjusts the upper bound of the channel liquidity balance in this direction.
1310 fn set_max_liquidity_msat(&mut self, amount_msat: u64, duration_since_epoch: Duration) {
// Stored as an offset from capacity; similarly resets the lower bound if crossed.
1311 *self.max_liquidity_offset_msat = self.capacity_msat.checked_sub(amount_msat).unwrap_or(0);
1312 if amount_msat < *self.min_liquidity_offset_msat {
1313 *self.min_liquidity_offset_msat = 0;
1315 *self.last_updated = duration_since_epoch;
1319 impl<G: Deref<Target = NetworkGraph<L>>, L: Deref> ScoreLookUp for ProbabilisticScorer<G, L> where L::Target: Logger {
1320 type ScoreParams = ProbabilisticScoringFeeParameters;
// Total penalty = base penalty + liquidity-model penalty + anti-probing penalty, with
// manual per-node overrides and hard capacity limits short-circuiting below.
1321 fn channel_penalty_msat(
1322 &self, candidate: &CandidateRouteHop, usage: ChannelUsage, score_params: &ProbabilisticScoringFeeParameters
1324 let (scid, target) = match candidate {
1325 CandidateRouteHop::PublicHop { info, short_channel_id } => {
1326 (short_channel_id, info.target())
1330 let source = candidate.source();
// Manual override: presumably returns the configured penalty here (the return statement
// is not visible in this view) — TODO confirm.
1331 if let Some(penalty) = score_params.manual_node_penalties.get(&target) {
1335 let base_penalty_msat = score_params.base_penalty_msat.saturating_add(
1336 score_params.base_penalty_amount_multiplier_msat
1337 .saturating_mul(usage.amount_msat) / BASE_AMOUNT_PENALTY_DIVISOR);
1339 let mut anti_probing_penalty_msat = 0;
1340 match usage.effective_capacity {
// Exact-liquidity/HTLC-max hints: either the amount fits (base penalty only) or it
// cannot possibly be routed (max penalty).
1341 EffectiveCapacity::ExactLiquidity { liquidity_msat: amount_msat } |
1342 EffectiveCapacity::HintMaxHTLC { amount_msat } =>
1344 if usage.amount_msat > amount_msat {
1345 return u64::max_value();
1347 return base_penalty_msat;
1350 EffectiveCapacity::Total { capacity_msat, htlc_maximum_msat } => {
// A channel advertising an HTLC max of at least half its capacity is penalized to
// discourage its use for probing-based liquidity disclosure.
1351 if htlc_maximum_msat >= capacity_msat/2 {
1352 anti_probing_penalty_msat = score_params.anti_probing_penalty_msat;
// In-flight HTLCs count against the liquidity we'd consume.
1358 let amount_msat = usage.amount_msat.saturating_add(usage.inflight_htlc_msat);
1359 let capacity_msat = usage.effective_capacity.as_msat();
1360 self.channel_liquidities
1362 .unwrap_or(&ChannelLiquidity::new(Duration::ZERO))
1363 .as_directed(&source, &target, capacity_msat)
1364 .penalty_msat(amount_msat, score_params)
1365 .saturating_add(anti_probing_penalty_msat)
1366 .saturating_add(base_penalty_msat)
1370 impl<G: Deref<Target = NetworkGraph<L>>, L: Deref> ScoreUpdate for ProbabilisticScorer<G, L> where L::Target: Logger {
1371 fn payment_path_failed(&mut self, path: &Path, short_channel_id: u64, duration_since_epoch: Duration) {
1372 let amount_msat = path.final_value_msat();
1373 log_trace!(self.logger, "Scoring path through to SCID {} as having failed at {} msat", short_channel_id, amount_msat);
1374 let network_graph = self.network_graph.read_only();
1375 for (hop_idx, hop) in path.hops.iter().enumerate() {
1376 let target = NodeId::from_pubkey(&hop.pubkey);
1377 let channel_directed_from_source = network_graph.channels()
1378 .get(&hop.short_channel_id)
1379 .and_then(|channel| channel.as_directed_to(&target));
1381 let at_failed_channel = hop.short_channel_id == short_channel_id;
// First-hop failures carry no routing information: the local channel state is directly
// observable, so nothing is learned — only a warning is emitted.
1382 if at_failed_channel && hop_idx == 0 {
1383 log_warn!(self.logger, "Payment failed at the first hop - we do not attempt to learn channel info in such cases as we can directly observe local state.\n\tBecause we know the local state, we should generally not see failures here - this may be an indication that your channel peer on channel {} is broken and you may wish to close the channel.", hop.short_channel_id);
1386 // Only score announced channels.
1387 if let Some((channel, source)) = channel_directed_from_source {
1388 let capacity_msat = channel.effective_capacity().as_msat();
// The failing channel lowers its upper bound; every hop before it successfully
// forwarded, which raises their lower bounds via `failed_downstream`.
1389 if at_failed_channel {
1390 self.channel_liquidities
1391 .entry(hop.short_channel_id)
1392 .or_insert_with(|| ChannelLiquidity::new(duration_since_epoch))
1393 .as_directed_mut(source, &target, capacity_msat)
1394 .failed_at_channel(amount_msat, duration_since_epoch,
1395 format_args!("SCID {}, towards {:?}", hop.short_channel_id, target), &self.logger);
1397 self.channel_liquidities
1398 .entry(hop.short_channel_id)
1399 .or_insert_with(|| ChannelLiquidity::new(duration_since_epoch))
1400 .as_directed_mut(source, &target, capacity_msat)
1401 .failed_downstream(amount_msat, duration_since_epoch,
1402 format_args!("SCID {}, towards {:?}", hop.short_channel_id, target), &self.logger);
1405 log_debug!(self.logger, "Not able to penalize channel with SCID {} as we do not have graph info for it (likely a route-hint last-hop).",
1406 hop.short_channel_id);
// Hops after the failing channel were never reached, so nothing is learned about them.
1408 if at_failed_channel { break; }
1412 fn payment_path_successful(&mut self, path: &Path, duration_since_epoch: Duration) {
1413 let amount_msat = path.final_value_msat();
1414 log_trace!(self.logger, "Scoring path through SCID {} as having succeeded at {} msat.",
1415 path.hops.split_last().map(|(hop, _)| hop.short_channel_id).unwrap_or(0), amount_msat);
1416 let network_graph = self.network_graph.read_only();
// Every hop on a successful path forwarded `amount_msat`, so each channel's liquidity
// estimate is reduced accordingly.
1417 for hop in &path.hops {
1418 let target = NodeId::from_pubkey(&hop.pubkey);
1419 let channel_directed_from_source = network_graph.channels()
1420 .get(&hop.short_channel_id)
1421 .and_then(|channel| channel.as_directed_to(&target));
1423 // Only score announced channels.
1424 if let Some((channel, source)) = channel_directed_from_source {
1425 let capacity_msat = channel.effective_capacity().as_msat();
1426 self.channel_liquidities
1427 .entry(hop.short_channel_id)
1428 .or_insert_with(|| ChannelLiquidity::new(duration_since_epoch))
1429 .as_directed_mut(source, &target, capacity_msat)
1430 .successful(amount_msat, duration_since_epoch,
1431 format_args!("SCID {}, towards {:?}", hop.short_channel_id, target), &self.logger);
1433 log_debug!(self.logger, "Not able to learn for channel with SCID {} as we do not have graph info for it (likely a route-hint last-hop).",
1434 hop.short_channel_id);
1439 fn probe_failed(&mut self, path: &Path, short_channel_id: u64, duration_since_epoch: Duration) {
1440 self.payment_path_failed(path, short_channel_id, duration_since_epoch)
// A successful probe is scored as a path "failure" at the sentinel SCID `u64::max_value()`,
// which matches no hop's SCID — so `at_failed_channel` is false for every hop and all hops
// get `failed_downstream`, i.e. each is credited with having forwarded the probe amount.
1443 fn probe_successful(&mut self, path: &Path, duration_since_epoch: Duration) {
1444 self.payment_path_failed(path, u64::max_value(), duration_since_epoch)
1447 fn decay_liquidity_certainty(&mut self, duration_since_epoch: Duration) {
1448 let decay_params = self.decay_params;
// `retain` both decays each channel's data and drops channels with no information left.
1449 self.channel_liquidities.retain(|_scid, liquidity| {
1450 liquidity.min_liquidity_offset_msat =
1451 liquidity.decayed_offset(liquidity.min_liquidity_offset_msat, duration_since_epoch, decay_params);
1452 liquidity.max_liquidity_offset_msat =
1453 liquidity.decayed_offset(liquidity.max_liquidity_offset_msat, duration_since_epoch, decay_params);
1454 liquidity.last_updated = duration_since_epoch;
1457 duration_since_epoch.saturating_sub(liquidity.offset_history_last_updated);
1458 if elapsed_time > decay_params.historical_no_updates_half_life {
1459 let half_life = decay_params.historical_no_updates_half_life.as_secs_f64();
1460 if half_life != 0.0 {
// Net bucket scaling is 1024 / 2048^(elapsed/half_life): exactly one half-life of
// staleness halves each bucket (1024/2048 == 1/2); more halves it further.
1461 let divisor = powf64(2048.0, elapsed_time.as_secs_f64() / half_life) as u64;
1462 for bucket in liquidity.min_liquidity_offset_history.buckets.iter_mut() {
1463 *bucket = ((*bucket as u64) * 1024 / divisor) as u16;
1465 for bucket in liquidity.max_liquidity_offset_history.buckets.iter_mut() {
1466 *bucket = ((*bucket as u64) * 1024 / divisor) as u16;
1468 liquidity.offset_history_last_updated = duration_since_epoch;
// Keep the entry only if any live bound offset or any historical bucket is non-zero.
1471 liquidity.min_liquidity_offset_msat != 0 || liquidity.max_liquidity_offset_msat != 0 ||
1472 liquidity.min_liquidity_offset_history.buckets != [0; 32] ||
1473 liquidity.max_liquidity_offset_history.buckets != [0; 32]
// Marker impl tying `ScoreLookUp` + `ScoreUpdate` together as a full `Score`.
1479 impl<G: Deref<Target = NetworkGraph<L>>, L: Deref> Score for ProbabilisticScorer<G, L>
1480 where L::Target: Logger {}
#[cfg(feature = "std")]
// `f64` power helper used for history decay; std builds get the full-precision version.
fn powf64(n: f64, exp: f64) -> f64 {
// (std body elided from this extract)
#[cfg(not(feature = "std"))]
fn powf64(n: f64, exp: f64) -> f64 {
// `libm` is used here with a 32-bit `powf`, so no-std builds pay a precision cost: inputs are
// truncated to `f32` before exponentiation. Acceptable for scoring heuristics.
libm::powf(n as f32, exp as f32) as f64
1492 mod bucketed_history {
1495 // Because liquidity is often skewed heavily in one direction, we store historical state
1496 // distribution in buckets of different size. For backwards compatibility, buckets of size 1/8th
1497 // must fit evenly into the buckets here.
1499 // The smallest bucket is 2^-14th of the channel, for each of our 32 buckets here we define the
1500 // width of the bucket in 2^14'ths of the channel. This increases exponentially until we reach
// a full 16th of the channel's capacity, which is repeated a few times for backwards
1502 // compatibility. The four middle buckets represent full octiles of the channel's capacity.
// For a 1 BTC channel, this lets us differentiate between failures in the bottom 6k sats, or
1505 // between the 12,000th sat and 24,000th sat, while only needing to store and operate on 32
1506 // buckets in total.
// Start position (in 1/16384ths of channel capacity) of each of the 32 buckets; the 33rd entry
// marks the exclusive end of the last bucket, so bucket i covers
// [BUCKET_START_POS[i], BUCKET_START_POS[i + 1]).
const BUCKET_START_POS: [u16; 33] = [
0, 1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024, 2048, 3072, 4096, 6144, 8192, 10240, 12288,
13312, 14336, 15360, 15872, 16128, 16256, 16320, 16352, 16368, 16376, 16380, 16382, 16383, 16384,
// Maps each legacy (pre-0.0.117) 8-bucket index to the half-open range of 32-bucket indices it
// covers; consistency with the tables above is asserted in `check_bucket_maps`.
const LEGACY_TO_BUCKET_RANGE: [(u8, u8); 8] = [
(0, 12), (12, 14), (14, 15), (15, 16), (16, 17), (17, 18), (18, 20), (20, 32)
1517 const POSITION_TICKS: u16 = 1 << 14;
/// Maps a position (in 1/16384ths of channel capacity) to the index of the bucket containing it.
fn pos_to_bucket(pos: u16) -> usize {
// Linear scan over the 32 bucket boundaries; bucket i ends at BUCKET_START_POS[i + 1].
for bucket in 0..32 {
if pos < BUCKET_START_POS[bucket + 1] {
// Unreachable for pos < POSITION_TICKS since BUCKET_START_POS[32] == 16384
// (asserted in `check_bucket_maps`).
debug_assert!(false);
// Sanity test verifying that `BUCKET_START_POS`, `pos_to_bucket` and `LEGACY_TO_BUCKET_RANGE`
// are mutually consistent with the explicit per-bucket widths listed below.
// NOTE(review): the `#[test]` attribute appears elided from this extract.
fn check_bucket_maps() {
const BUCKET_WIDTH_IN_16384S: [u16; 32] = [
1, 1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024, 1024, 1024, 2048, 2048,
2048, 2048, 1024, 1024, 1024, 512, 256, 128, 64, 32, 16, 8, 4, 2, 1, 1];
let mut min_size_iter = 0;
let mut legacy_bucket_iter = 0;
for (bucket, width) in BUCKET_WIDTH_IN_16384S.iter().enumerate() {
// Every position within this bucket's width must map back to this bucket.
assert_eq!(BUCKET_START_POS[bucket], min_size_iter);
for i in 0..*width {
assert_eq!(pos_to_bucket(min_size_iter + i) as usize, bucket);
min_size_iter += *width;
// At each legacy octile boundary, the legacy range table must end/begin on this bucket.
if min_size_iter % (POSITION_TICKS / 8) == 0 {
assert_eq!(LEGACY_TO_BUCKET_RANGE[legacy_bucket_iter].1 as usize, bucket + 1);
if legacy_bucket_iter + 1 < 8 {
assert_eq!(LEGACY_TO_BUCKET_RANGE[legacy_bucket_iter + 1].0 as usize, bucket + 1);
legacy_bucket_iter += 1;
assert_eq!(BUCKET_START_POS[32], POSITION_TICKS);
assert_eq!(min_size_iter, POSITION_TICKS);
/// Converts an amount (relative to `capacity_msat`) into a position in 1/16384ths of capacity.
/// Saturates to `POSITION_TICKS` (out-of-range) when the amount exceeds the capacity.
fn amount_to_pos(amount_msat: u64, capacity_msat: u64) -> u16 {
let pos = if amount_msat < u64::max_value() / (POSITION_TICKS as u64) {
// Dividing by `capacity + 1` ensures an amount equal to the full capacity still maps to
// a position strictly below POSITION_TICKS (and avoids dividing by zero).
(amount_msat * (POSITION_TICKS as u64) / capacity_msat.saturating_add(1))
.try_into().unwrap_or(POSITION_TICKS)
// Only use 128-bit arithmetic when multiplication will overflow to avoid 128-bit
// division. This branch should only be hit in fuzz testing since the amount would
// need to be over 2.88 million BTC in practice.
((amount_msat as u128) * (POSITION_TICKS as u128)
/ (capacity_msat as u128).saturating_add(1))
.try_into().unwrap_or(POSITION_TICKS)
// If we are running in a client that doesn't validate gossip, it's possible for a channel's
// capacity to change due to a `channel_update` message which, if received while a payment
// is in-flight, could cause this to fail. Thus, we only assert in test.
debug_assert!(pos < POSITION_TICKS);
/// Prior to LDK 0.0.117 we used eight buckets which were split evenly across the eight
1578 /// octiles. This was changed to use 32 buckets for accuracy reasons in 0.0.117, however we
1579 /// support reading the legacy values here for backwards compatibility.
1580 pub(super) struct LegacyHistoricalBucketRangeTracker {
impl LegacyHistoricalBucketRangeTracker {
/// Converts a legacy 8-bucket tracker into the current 32-bucket form by spreading each
/// legacy bucket's weight evenly across the new buckets it maps to.
pub(crate) fn into_current(&self) -> HistoricalBucketRangeTracker {
let mut buckets = [0; 32];
for (idx, legacy_bucket) in self.buckets.iter().enumerate() {
let mut new_val = *legacy_bucket;
let (start, end) = LEGACY_TO_BUCKET_RANGE[idx];
// Integer division drops the remainder, slightly under-weighting legacy values that
// don't divide evenly by the number of new buckets they cover.
new_val /= (end - start) as u16;
for i in start..end {
buckets[i as usize] = new_val;
HistoricalBucketRangeTracker { buckets }
1599 /// Tracks the historical state of a distribution as a weighted average of how much time was spent
1600 /// in each of 32 buckets.
#[derive(Clone, Copy)]
pub(super) struct HistoricalBucketRangeTracker {
// Fixed-point weights (5 fractional bits — see `BUCKET_FIXED_POINT_ONE` below), one per
// liquidity bucket.
pub(super) buckets: [u16; 32],
/// Buckets are stored in fixed point numbers with a 5 bit fractional part. Thus, the value
/// "one" is 32, or this constant.
pub const BUCKET_FIXED_POINT_ONE: u16 = 32;
1610 impl HistoricalBucketRangeTracker {
1611 pub(super) fn new() -> Self { Self { buckets: [0; 32] } }
/// Records one observation of the given liquidity offset (relative to `capacity_msat`),
/// decaying all existing bucket weights slightly so the tracker behaves as a leaky history.
pub(super) fn track_datapoint(&mut self, liquidity_offset_msat: u64, capacity_msat: u64) {
// We have 32 leaky buckets for min and max liquidity. Each bucket tracks the amount of time
// we spend in each bucket as a 16-bit fixed-point number with a 5 bit fractional part.
//
// Each time we update our liquidity estimate, we add 32 (1.0 in our fixed-point system) to
// the buckets for the current min and max liquidity offset positions.
//
// We then decay each bucket by multiplying by 2047/2048 (avoiding dividing by a
// non-power-of-two). This ensures we can't actually overflow the u16 - when we get to
// 63,457 adding 32 and decaying by 2047/2048 leaves us back at 63,457.
//
// In total, this allows us to track data for the last 8,000 or so payments across a given
// These constants are a balance - we try to fit in 2 bytes per bucket to reduce overhead,
// and need to balance having more bits in the decimal part (to ensure decay isn't too
// non-linear) with having too few bits in the mantissa, causing us to not store very many
//
// The constants were picked experimentally, selecting a decay amount that restricts us
// from overflowing buckets without having to cap them manually.
let pos: u16 = amount_to_pos(liquidity_offset_msat, capacity_msat);
// Out-of-range positions (`amount_to_pos` saturates to POSITION_TICKS) are not recorded.
if pos < POSITION_TICKS {
// Decay every bucket, then credit one fixed-point unit to the bucket holding `pos`.
for e in self.buckets.iter_mut() {
*e = ((*e as u32) * 2047 / 2048) as u16;
let bucket = pos_to_bucket(pos);
self.buckets[bucket] = self.buckets[bucket].saturating_add(BUCKET_FIXED_POINT_ONE);
// (De)serialize both tracker types as a single TLV holding the raw bucket array.
impl_writeable_tlv_based!(HistoricalBucketRangeTracker, { (0, buckets, required) });
impl_writeable_tlv_based!(LegacyHistoricalBucketRangeTracker, { (0, buckets, required) });
/// A set of buckets representing the history of where we've seen the minimum- and maximum-
/// liquidity bounds for a given channel.
pub(super) struct HistoricalMinMaxBuckets<D: Deref<Target = HistoricalBucketRangeTracker>> {
/// Buckets tracking where and how often we've seen the minimum liquidity bound for a channel.
pub(super) min_liquidity_offset_history: D,
/// Buckets tracking where and how often we've seen the maximum liquidity bound for a channel.
pub(super) max_liquidity_offset_history: D,
impl<D: Deref<Target = HistoricalBucketRangeTracker>> HistoricalMinMaxBuckets<D> {
/// Estimates, from the historical min/max liquidity bucket distributions alone, the
/// probability (scaled by one billion) that a payment of `amount_msat` will succeed.
/// Returns `None` when the amount is out of range or too little history remains.
/// NOTE(review): the tail of this signature (remaining parameters and return type) appears
/// elided from this extract.
pub(super) fn calculate_success_probability_times_billion(
&self, params: &ProbabilisticScoringFeeParameters, amount_msat: u64,
// If historical penalties are enabled, we try to calculate a probability of success
// given our historical distribution of min- and max-liquidity bounds in a channel.
// To do so, we walk the set of historical liquidity bucket (min, max) combinations
// (where min_idx < max_idx, as having a minimum above our maximum is an invalid
// state). For each pair, we calculate the probability as if the bucket's corresponding
// min- and max- liquidity bounds were our current liquidity bounds and then multiply
// that probability by the weight of the selected buckets.
let payment_pos = amount_to_pos(amount_msat, capacity_msat);
if payment_pos >= POSITION_TICKS { return None; }
// Total the weight of every valid (min, max) bucket pair; used for normalization below.
let mut total_valid_points_tracked = 0;
for (min_idx, min_bucket) in self.min_liquidity_offset_history.buckets.iter().enumerate() {
for max_bucket in self.max_liquidity_offset_history.buckets.iter().take(32 - min_idx) {
total_valid_points_tracked += (*min_bucket as u64) * (*max_bucket as u64);
// If the total valid points is smaller than 1.0 (i.e. 32 in our fixed-point scheme),
// treat it as if we were fully decayed.
// (As a product of two fixed-point weights, "1.0" here is 32 * 32.)
const FULLY_DECAYED: u16 = BUCKET_FIXED_POINT_ONE * BUCKET_FIXED_POINT_ONE;
if total_valid_points_tracked < FULLY_DECAYED.into() {
let mut cumulative_success_prob_times_billion = 0;
// Special-case the 0th min bucket - it generally means we failed a payment, so only
// consider the highest (i.e. largest-offset-from-max-capacity) max bucket for all
// points against the 0th min bucket. This avoids the case where we fail to route
// increasingly lower values over a channel, but treat each failure as a separate
// datapoint, many of which may have relatively high maximum-available-liquidity
// values, which will result in us thinking we have some nontrivial probability of
// routing up to that amount.
if self.min_liquidity_offset_history.buckets[0] != 0 {
let mut highest_max_bucket_with_points = 0; // The highest max-bucket with any data
let mut total_max_points = 0; // Total points in max-buckets to consider
for (max_idx, max_bucket) in self.max_liquidity_offset_history.buckets.iter().enumerate() {
if *max_bucket >= BUCKET_FIXED_POINT_ONE {
highest_max_bucket_with_points = cmp::max(highest_max_bucket_with_points, max_idx);
total_max_points += *max_bucket as u64;
let max_bucket_end_pos = BUCKET_START_POS[32 - highest_max_bucket_with_points] - 1;
if payment_pos < max_bucket_end_pos {
let (numerator, denominator) = success_probability(payment_pos as u64, 0,
max_bucket_end_pos as u64, POSITION_TICKS as u64 - 1, params, true);
let bucket_prob_times_billion =
(self.min_liquidity_offset_history.buckets[0] as u64) * total_max_points
* 1024 * 1024 * 1024 / total_valid_points_tracked;
cumulative_success_prob_times_billion += bucket_prob_times_billion *
numerator / denominator;
// General case: weight each remaining (min, max) bucket pair by its share of the total
// points and accumulate the corresponding success probability.
for (min_idx, min_bucket) in self.min_liquidity_offset_history.buckets.iter().enumerate().skip(1) {
let min_bucket_start_pos = BUCKET_START_POS[min_idx];
for (max_idx, max_bucket) in self.max_liquidity_offset_history.buckets.iter().enumerate().take(32 - min_idx) {
let max_bucket_end_pos = BUCKET_START_POS[32 - max_idx] - 1;
// Note that this multiply can only barely not overflow - two 16 bit ints plus
// 30 bits is 62 bits.
let bucket_prob_times_billion = (*min_bucket as u64) * (*max_bucket as u64)
* 1024 * 1024 * 1024 / total_valid_points_tracked;
if payment_pos >= max_bucket_end_pos {
// Success probability 0, the payment amount may be above the max liquidity
} else if payment_pos < min_bucket_start_pos {
// Payment is below this pair's minimum bound: count the pair's full weight
// as success.
cumulative_success_prob_times_billion += bucket_prob_times_billion;
let (numerator, denominator) = success_probability(payment_pos as u64,
min_bucket_start_pos as u64, max_bucket_end_pos as u64,
POSITION_TICKS as u64 - 1, params, true);
cumulative_success_prob_times_billion += bucket_prob_times_billion *
numerator / denominator;
Some(cumulative_success_prob_times_billion)
1745 use bucketed_history::{LegacyHistoricalBucketRangeTracker, HistoricalBucketRangeTracker, HistoricalMinMaxBuckets};
impl<G: Deref<Target = NetworkGraph<L>>, L: Deref> Writeable for ProbabilisticScorer<G, L> where L::Target: Logger {
fn write<W: Writer>(&self, w: &mut W) -> Result<(), io::Error> {
// Only the learned per-channel liquidity map is persisted; the graph, logger and decay
// parameters are supplied again at read time (see the `ReadableArgs` impl).
write_tlv_fields!(w, {
(0, self.channel_liquidities, required),
impl<G: Deref<Target = NetworkGraph<L>>, L: Deref>
ReadableArgs<(ProbabilisticScoringDecayParameters, G, L)> for ProbabilisticScorer<G, L> where L::Target: Logger {
// NOTE(review): the `fn read<R: Read>(` signature opening appears elided from this extract.
r: &mut R, args: (ProbabilisticScoringDecayParameters, G, L)
) -> Result<Self, DecodeError> {
// Rebuild the scorer from the caller-supplied parameters/graph/logger plus the persisted
// per-channel liquidity map.
let (decay_params, network_graph, logger) = args;
let mut channel_liquidities = HashMap::new();
read_tlv_fields!(r, {
(0, channel_liquidities, required),
channel_liquidities,
impl Writeable for ChannelLiquidity {
fn write<W: Writer>(&self, w: &mut W) -> Result<(), io::Error> {
// Types 5/7 are always-present values wrapped in `Some` so they serialize with the
// odd-TLV (optional) encoding — presumably so older readers can skip them; confirm
// against the TLV conventions used elsewhere in this crate.
write_tlv_fields!(w, {
(0, self.min_liquidity_offset_msat, required),
// 1 was the min_liquidity_offset_history in octile form
(2, self.max_liquidity_offset_msat, required),
// 3 was the max_liquidity_offset_history in octile form
(4, self.last_updated, required),
(5, Some(self.min_liquidity_offset_history), option),
(7, Some(self.max_liquidity_offset_history), option),
(9, self.offset_history_last_updated, required),
impl Readable for ChannelLiquidity {
fn read<R: Read>(r: &mut R) -> Result<Self, DecodeError> {
let mut min_liquidity_offset_msat = 0;
let mut max_liquidity_offset_msat = 0;
// Types 1/3 hold the pre-0.0.117 8-bucket history; converted below when present.
let mut legacy_min_liq_offset_history: Option<LegacyHistoricalBucketRangeTracker> = None;
let mut legacy_max_liq_offset_history: Option<LegacyHistoricalBucketRangeTracker> = None;
let mut min_liquidity_offset_history: Option<HistoricalBucketRangeTracker> = None;
let mut max_liquidity_offset_history: Option<HistoricalBucketRangeTracker> = None;
let mut last_updated = Duration::from_secs(0);
let mut offset_history_last_updated = None;
read_tlv_fields!(r, {
(0, min_liquidity_offset_msat, required),
(1, legacy_min_liq_offset_history, option),
(2, max_liquidity_offset_msat, required),
(3, legacy_max_liq_offset_history, option),
(4, last_updated, required),
(5, min_liquidity_offset_history, option),
(7, max_liquidity_offset_history, option),
(9, offset_history_last_updated, option),
// Prefer the current 32-bucket history; fall back to converting the legacy form, and
// finally to an empty tracker, so the unwraps below can never panic.
if min_liquidity_offset_history.is_none() {
if let Some(legacy_buckets) = legacy_min_liq_offset_history {
min_liquidity_offset_history = Some(legacy_buckets.into_current());
min_liquidity_offset_history = Some(HistoricalBucketRangeTracker::new());
if max_liquidity_offset_history.is_none() {
if let Some(legacy_buckets) = legacy_max_liq_offset_history {
max_liquidity_offset_history = Some(legacy_buckets.into_current());
max_liquidity_offset_history = Some(HistoricalBucketRangeTracker::new());
min_liquidity_offset_msat,
max_liquidity_offset_msat,
min_liquidity_offset_history: min_liquidity_offset_history.unwrap(),
max_liquidity_offset_history: max_liquidity_offset_history.unwrap(),
// Serializations predating this field reuse `last_updated`.
offset_history_last_updated: offset_history_last_updated.unwrap_or(last_updated),
1843 use super::{ChannelLiquidity, HistoricalBucketRangeTracker, ProbabilisticScoringFeeParameters, ProbabilisticScoringDecayParameters, ProbabilisticScorer};
1844 use crate::blinded_path::{BlindedHop, BlindedPath};
1845 use crate::util::config::UserConfig;
1847 use crate::ln::channelmanager;
1848 use crate::ln::msgs::{ChannelAnnouncement, ChannelUpdate, UnsignedChannelAnnouncement, UnsignedChannelUpdate};
1849 use crate::routing::gossip::{EffectiveCapacity, NetworkGraph, NodeId};
1850 use crate::routing::router::{BlindedTail, Path, RouteHop, CandidateRouteHop};
1851 use crate::routing::scoring::{ChannelUsage, ScoreLookUp, ScoreUpdate};
1852 use crate::util::ser::{ReadableArgs, Writeable};
1853 use crate::util::test_utils::{self, TestLogger};
1855 use bitcoin::blockdata::constants::ChainHash;
1856 use bitcoin::hashes::Hash;
1857 use bitcoin::hashes::sha256d::Hash as Sha256dHash;
1858 use bitcoin::network::constants::Network;
1859 use bitcoin::secp256k1::{PublicKey, Secp256k1, SecretKey};
1860 use core::time::Duration;
// Deterministic test keys: source (42) and target (43) are the scored channel's endpoints.
fn source_privkey() -> SecretKey {
SecretKey::from_slice(&[42; 32]).unwrap()
fn target_privkey() -> SecretKey {
SecretKey::from_slice(&[43; 32]).unwrap()
fn source_pubkey() -> PublicKey {
let secp_ctx = Secp256k1::new();
PublicKey::from_secret_key(&secp_ctx, &source_privkey())
fn target_pubkey() -> PublicKey {
let secp_ctx = Secp256k1::new();
PublicKey::from_secret_key(&secp_ctx, &target_privkey())
fn source_node_id() -> NodeId {
NodeId::from_pubkey(&source_pubkey())
fn target_node_id() -> NodeId {
NodeId::from_pubkey(&target_pubkey())
// `ProbabilisticScorer` tests
// Additional deterministic keys for the payment's sender (41) and recipient (45).
fn sender_privkey() -> SecretKey {
SecretKey::from_slice(&[41; 32]).unwrap()
fn recipient_privkey() -> SecretKey {
SecretKey::from_slice(&[45; 32]).unwrap()
fn sender_pubkey() -> PublicKey {
let secp_ctx = Secp256k1::new();
PublicKey::from_secret_key(&secp_ctx, &sender_privkey())
fn recipient_pubkey() -> PublicKey {
let secp_ctx = Secp256k1::new();
PublicKey::from_secret_key(&secp_ctx, &recipient_privkey())
fn recipient_node_id() -> NodeId {
NodeId::from_pubkey(&recipient_pubkey())
// Builds a three-node test graph: source -(SCID 42)- target -(SCID 43)- recipient.
fn network_graph(logger: &TestLogger) -> NetworkGraph<&TestLogger> {
let mut network_graph = NetworkGraph::new(Network::Testnet, logger);
add_channel(&mut network_graph, 42, source_privkey(), target_privkey());
add_channel(&mut network_graph, 43, target_privkey(), recipient_privkey());
// NOTE(review): the `fn add_channel(` signature opening appears elided from this extract.
network_graph: &mut NetworkGraph<&TestLogger>, short_channel_id: u64, node_1_key: SecretKey,
node_2_key: SecretKey
// Announce the channel signed by both node keys and both bitcoin keys, then install a
// `channel_update` in each direction.
let genesis_hash = ChainHash::using_genesis_block(Network::Testnet);
let node_1_secret = &SecretKey::from_slice(&[39; 32]).unwrap();
let node_2_secret = &SecretKey::from_slice(&[40; 32]).unwrap();
let secp_ctx = Secp256k1::new();
let unsigned_announcement = UnsignedChannelAnnouncement {
features: channelmanager::provided_channel_features(&UserConfig::default()),
chain_hash: genesis_hash,
node_id_1: NodeId::from_pubkey(&PublicKey::from_secret_key(&secp_ctx, &node_1_key)),
node_id_2: NodeId::from_pubkey(&PublicKey::from_secret_key(&secp_ctx, &node_2_key)),
bitcoin_key_1: NodeId::from_pubkey(&PublicKey::from_secret_key(&secp_ctx, &node_1_secret)),
bitcoin_key_2: NodeId::from_pubkey(&PublicKey::from_secret_key(&secp_ctx, &node_2_secret)),
excess_data: Vec::new(),
let msghash = hash_to_message!(&Sha256dHash::hash(&unsigned_announcement.encode()[..])[..]);
let signed_announcement = ChannelAnnouncement {
node_signature_1: secp_ctx.sign_ecdsa(&msghash, &node_1_key),
node_signature_2: secp_ctx.sign_ecdsa(&msghash, &node_2_key),
bitcoin_signature_1: secp_ctx.sign_ecdsa(&msghash, &node_1_secret),
bitcoin_signature_2: secp_ctx.sign_ecdsa(&msghash, &node_2_secret),
contents: unsigned_announcement,
// No chain source: the announcement is accepted without on-chain UTXO validation.
let chain_source: Option<&crate::util::test_utils::TestChainSource> = None;
network_graph.update_channel_from_announcement(
&signed_announcement, &chain_source).unwrap();
update_channel(network_graph, short_channel_id, node_1_key, 0, 1_000, 100);
update_channel(network_graph, short_channel_id, node_2_key, 1, 0, 100);
// NOTE(review): the `fn update_channel(` signature opening appears elided from this extract.
network_graph: &mut NetworkGraph<&TestLogger>, short_channel_id: u64, node_key: SecretKey,
flags: u8, htlc_maximum_msat: u64, timestamp: u32,
// Sign and apply a single `channel_update` for one direction of the channel.
let genesis_hash = ChainHash::using_genesis_block(Network::Testnet);
let secp_ctx = Secp256k1::new();
let unsigned_update = UnsignedChannelUpdate {
chain_hash: genesis_hash,
cltv_expiry_delta: 18,
htlc_minimum_msat: 0,
fee_proportional_millionths: 0,
excess_data: Vec::new(),
let msghash = hash_to_message!(&Sha256dHash::hash(&unsigned_update.encode()[..])[..]);
let signed_update = ChannelUpdate {
signature: secp_ctx.sign_ecdsa(&msghash, &node_key),
contents: unsigned_update,
network_graph.update_channel(&signed_update).unwrap();
// Builds a `RouteHop` over the given channel with default-config features and a fixed CLTV delta.
fn path_hop(pubkey: PublicKey, short_channel_id: u64, fee_msat: u64) -> RouteHop {
let config = UserConfig::default();
node_features: channelmanager::provided_node_features(&config),
channel_features: channelmanager::provided_channel_features(&config),
cltv_expiry_delta: 18,
maybe_announced_channel: true,
// Builds the standard three-hop test path (SCIDs 41, 42, 43) delivering `amount_msat`.
fn payment_path_for_amount(amount_msat: u64) -> Path {
path_hop(source_pubkey(), 41, 1),
path_hop(target_pubkey(), 42, 2),
path_hop(recipient_pubkey(), 43, amount_msat),
], blinded_tail: None,
2004 fn liquidity_bounds_directed_from_lowest_node_id() {
2005 let logger = TestLogger::new();
2006 let last_updated = Duration::ZERO;
2007 let offset_history_last_updated = Duration::ZERO;
2008 let network_graph = network_graph(&logger);
2009 let decay_params = ProbabilisticScoringDecayParameters::default();
2010 let mut scorer = ProbabilisticScorer::new(decay_params, &network_graph, &logger)
2013 min_liquidity_offset_msat: 700, max_liquidity_offset_msat: 100,
2014 last_updated, offset_history_last_updated,
2015 min_liquidity_offset_history: HistoricalBucketRangeTracker::new(),
2016 max_liquidity_offset_history: HistoricalBucketRangeTracker::new(),
2020 min_liquidity_offset_msat: 700, max_liquidity_offset_msat: 100,
2021 last_updated, offset_history_last_updated,
2022 min_liquidity_offset_history: HistoricalBucketRangeTracker::new(),
2023 max_liquidity_offset_history: HistoricalBucketRangeTracker::new(),
2025 let source = source_node_id();
2026 let target = target_node_id();
2027 let recipient = recipient_node_id();
2028 assert!(source > target);
2029 assert!(target < recipient);
2031 // Update minimum liquidity.
2033 let liquidity = scorer.channel_liquidities.get(&42).unwrap()
2034 .as_directed(&source, &target, 1_000);
2035 assert_eq!(liquidity.min_liquidity_msat(), 100);
2036 assert_eq!(liquidity.max_liquidity_msat(), 300);
2038 let liquidity = scorer.channel_liquidities.get(&42).unwrap()
2039 .as_directed(&target, &source, 1_000);
2040 assert_eq!(liquidity.min_liquidity_msat(), 700);
2041 assert_eq!(liquidity.max_liquidity_msat(), 900);
2043 scorer.channel_liquidities.get_mut(&42).unwrap()
2044 .as_directed_mut(&source, &target, 1_000)
2045 .set_min_liquidity_msat(200, Duration::ZERO);
2047 let liquidity = scorer.channel_liquidities.get(&42).unwrap()
2048 .as_directed(&source, &target, 1_000);
2049 assert_eq!(liquidity.min_liquidity_msat(), 200);
2050 assert_eq!(liquidity.max_liquidity_msat(), 300);
2052 let liquidity = scorer.channel_liquidities.get(&42).unwrap()
2053 .as_directed(&target, &source, 1_000);
2054 assert_eq!(liquidity.min_liquidity_msat(), 700);
2055 assert_eq!(liquidity.max_liquidity_msat(), 800);
2057 // Update maximum liquidity.
2059 let liquidity = scorer.channel_liquidities.get(&43).unwrap()
2060 .as_directed(&target, &recipient, 1_000);
2061 assert_eq!(liquidity.min_liquidity_msat(), 700);
2062 assert_eq!(liquidity.max_liquidity_msat(), 900);
2064 let liquidity = scorer.channel_liquidities.get(&43).unwrap()
2065 .as_directed(&recipient, &target, 1_000);
2066 assert_eq!(liquidity.min_liquidity_msat(), 100);
2067 assert_eq!(liquidity.max_liquidity_msat(), 300);
2069 scorer.channel_liquidities.get_mut(&43).unwrap()
2070 .as_directed_mut(&target, &recipient, 1_000)
2071 .set_max_liquidity_msat(200, Duration::ZERO);
2073 let liquidity = scorer.channel_liquidities.get(&43).unwrap()
2074 .as_directed(&target, &recipient, 1_000);
2075 assert_eq!(liquidity.min_liquidity_msat(), 0);
2076 assert_eq!(liquidity.max_liquidity_msat(), 200);
2078 let liquidity = scorer.channel_liquidities.get(&43).unwrap()
2079 .as_directed(&recipient, &target, 1_000);
2080 assert_eq!(liquidity.min_liquidity_msat(), 800);
2081 assert_eq!(liquidity.max_liquidity_msat(), 1000);
2085 fn resets_liquidity_upper_bound_when_crossed_by_lower_bound() {
2086 let logger = TestLogger::new();
2087 let last_updated = Duration::ZERO;
2088 let offset_history_last_updated = Duration::ZERO;
2089 let network_graph = network_graph(&logger);
2090 let decay_params = ProbabilisticScoringDecayParameters::default();
2091 let mut scorer = ProbabilisticScorer::new(decay_params, &network_graph, &logger)
2094 min_liquidity_offset_msat: 200, max_liquidity_offset_msat: 400,
2095 last_updated, offset_history_last_updated,
2096 min_liquidity_offset_history: HistoricalBucketRangeTracker::new(),
2097 max_liquidity_offset_history: HistoricalBucketRangeTracker::new(),
2099 let source = source_node_id();
2100 let target = target_node_id();
2101 assert!(source > target);
2103 // Check initial bounds.
2104 let liquidity = scorer.channel_liquidities.get(&42).unwrap()
2105 .as_directed(&source, &target, 1_000);
2106 assert_eq!(liquidity.min_liquidity_msat(), 400);
2107 assert_eq!(liquidity.max_liquidity_msat(), 800);
2109 let liquidity = scorer.channel_liquidities.get(&42).unwrap()
2110 .as_directed(&target, &source, 1_000);
2111 assert_eq!(liquidity.min_liquidity_msat(), 200);
2112 assert_eq!(liquidity.max_liquidity_msat(), 600);
2114 // Reset from source to target.
2115 scorer.channel_liquidities.get_mut(&42).unwrap()
2116 .as_directed_mut(&source, &target, 1_000)
2117 .set_min_liquidity_msat(900, Duration::ZERO);
2119 let liquidity = scorer.channel_liquidities.get(&42).unwrap()
2120 .as_directed(&source, &target, 1_000);
2121 assert_eq!(liquidity.min_liquidity_msat(), 900);
2122 assert_eq!(liquidity.max_liquidity_msat(), 1_000);
2124 let liquidity = scorer.channel_liquidities.get(&42).unwrap()
2125 .as_directed(&target, &source, 1_000);
2126 assert_eq!(liquidity.min_liquidity_msat(), 0);
2127 assert_eq!(liquidity.max_liquidity_msat(), 100);
2129 // Reset from target to source.
2130 scorer.channel_liquidities.get_mut(&42).unwrap()
2131 .as_directed_mut(&target, &source, 1_000)
2132 .set_min_liquidity_msat(400, Duration::ZERO);
2134 let liquidity = scorer.channel_liquidities.get(&42).unwrap()
2135 .as_directed(&source, &target, 1_000);
2136 assert_eq!(liquidity.min_liquidity_msat(), 0);
2137 assert_eq!(liquidity.max_liquidity_msat(), 600);
2139 let liquidity = scorer.channel_liquidities.get(&42).unwrap()
2140 .as_directed(&target, &source, 1_000);
2141 assert_eq!(liquidity.min_liquidity_msat(), 400);
2142 assert_eq!(liquidity.max_liquidity_msat(), 1_000);
2146 fn resets_liquidity_lower_bound_when_crossed_by_upper_bound() {
2147 let logger = TestLogger::new();
2148 let last_updated = Duration::ZERO;
2149 let offset_history_last_updated = Duration::ZERO;
2150 let network_graph = network_graph(&logger);
2151 let decay_params = ProbabilisticScoringDecayParameters::default();
2152 let mut scorer = ProbabilisticScorer::new(decay_params, &network_graph, &logger)
2155 min_liquidity_offset_msat: 200, max_liquidity_offset_msat: 400,
2156 last_updated, offset_history_last_updated,
2157 min_liquidity_offset_history: HistoricalBucketRangeTracker::new(),
2158 max_liquidity_offset_history: HistoricalBucketRangeTracker::new(),
2160 let source = source_node_id();
2161 let target = target_node_id();
2162 assert!(source > target);
2164 // Check initial bounds.
2165 let liquidity = scorer.channel_liquidities.get(&42).unwrap()
2166 .as_directed(&source, &target, 1_000);
2167 assert_eq!(liquidity.min_liquidity_msat(), 400);
2168 assert_eq!(liquidity.max_liquidity_msat(), 800);
2170 let liquidity = scorer.channel_liquidities.get(&42).unwrap()
2171 .as_directed(&target, &source, 1_000);
2172 assert_eq!(liquidity.min_liquidity_msat(), 200);
2173 assert_eq!(liquidity.max_liquidity_msat(), 600);
2175 // Reset from source to target.
2176 scorer.channel_liquidities.get_mut(&42).unwrap()
2177 .as_directed_mut(&source, &target, 1_000)
2178 .set_max_liquidity_msat(300, Duration::ZERO);
2180 let liquidity = scorer.channel_liquidities.get(&42).unwrap()
2181 .as_directed(&source, &target, 1_000);
2182 assert_eq!(liquidity.min_liquidity_msat(), 0);
2183 assert_eq!(liquidity.max_liquidity_msat(), 300);
2185 let liquidity = scorer.channel_liquidities.get(&42).unwrap()
2186 .as_directed(&target, &source, 1_000);
2187 assert_eq!(liquidity.min_liquidity_msat(), 700);
2188 assert_eq!(liquidity.max_liquidity_msat(), 1_000);
2190 // Reset from target to source.
2191 scorer.channel_liquidities.get_mut(&42).unwrap()
2192 .as_directed_mut(&target, &source, 1_000)
2193 .set_max_liquidity_msat(600, Duration::ZERO);
2195 let liquidity = scorer.channel_liquidities.get(&42).unwrap()
2196 .as_directed(&source, &target, 1_000);
2197 assert_eq!(liquidity.min_liquidity_msat(), 400);
2198 assert_eq!(liquidity.max_liquidity_msat(), 1_000);
2200 let liquidity = scorer.channel_liquidities.get(&42).unwrap()
2201 .as_directed(&target, &source, 1_000);
2202 assert_eq!(liquidity.min_liquidity_msat(), 0);
2203 assert_eq!(liquidity.max_liquidity_msat(), 600);
2207 fn increased_penalty_nearing_liquidity_upper_bound() {
2208 let logger = TestLogger::new();
2209 let network_graph = network_graph(&logger);
2210 let params = ProbabilisticScoringFeeParameters {
2211 liquidity_penalty_multiplier_msat: 1_000,
2212 ..ProbabilisticScoringFeeParameters::zero_penalty()
2214 let decay_params = ProbabilisticScoringDecayParameters::default();
2215 let scorer = ProbabilisticScorer::new(decay_params, &network_graph, &logger);
2216 let source = source_node_id();
2218 let usage = ChannelUsage {
2220 inflight_htlc_msat: 0,
2221 effective_capacity: EffectiveCapacity::Total { capacity_msat: 1_024_000, htlc_maximum_msat: 1_000 },
2223 let network_graph = network_graph.read_only();
2224 let channel = network_graph.channel(42).unwrap();
2225 let (info, _) = channel.as_directed_from(&source).unwrap();
2226 let candidate = CandidateRouteHop::PublicHop {
2228 short_channel_id: 42,
2230 assert_eq!(scorer.channel_penalty_msat(&candidate, usage, ¶ms), 0);
2231 let usage = ChannelUsage { amount_msat: 10_240, ..usage };
2232 assert_eq!(scorer.channel_penalty_msat(&candidate, usage, ¶ms), 0);
2233 let usage = ChannelUsage { amount_msat: 102_400, ..usage };
2234 assert_eq!(scorer.channel_penalty_msat(&candidate, usage, ¶ms), 47);
2235 let usage = ChannelUsage { amount_msat: 1_023_999, ..usage };
2236 assert_eq!(scorer.channel_penalty_msat(&candidate, usage, ¶ms), 2_000);
2238 let usage = ChannelUsage {
2240 inflight_htlc_msat: 0,
2241 effective_capacity: EffectiveCapacity::Total { capacity_msat: 1_024, htlc_maximum_msat: 1_000 },
2243 assert_eq!(scorer.channel_penalty_msat(&candidate, usage, ¶ms), 58);
2244 let usage = ChannelUsage { amount_msat: 256, ..usage };
2245 assert_eq!(scorer.channel_penalty_msat(&candidate, usage, ¶ms), 125);
2246 let usage = ChannelUsage { amount_msat: 374, ..usage };
2247 assert_eq!(scorer.channel_penalty_msat(&candidate, usage, ¶ms), 198);
2248 let usage = ChannelUsage { amount_msat: 512, ..usage };
2249 assert_eq!(scorer.channel_penalty_msat(&candidate, usage, ¶ms), 300);
2250 let usage = ChannelUsage { amount_msat: 640, ..usage };
2251 assert_eq!(scorer.channel_penalty_msat(&candidate, usage, ¶ms), 425);
2252 let usage = ChannelUsage { amount_msat: 768, ..usage };
2253 assert_eq!(scorer.channel_penalty_msat(&candidate, usage, ¶ms), 602);
2254 let usage = ChannelUsage { amount_msat: 896, ..usage };
2255 assert_eq!(scorer.channel_penalty_msat(&candidate, usage, ¶ms), 902);
/// Verifies that amounts entirely outside a channel's known liquidity bounds are scored with
/// constant penalties: zero below the lower bound (success is certain) and
/// `considered_impossible_penalty_msat` above the upper bound.
#[test]
fn constant_penalty_outside_liquidity_bounds() {
	let logger = TestLogger::new();
	let last_updated = Duration::ZERO;
	let offset_history_last_updated = Duration::ZERO;
	let network_graph = network_graph(&logger);
	let params = ProbabilisticScoringFeeParameters {
		liquidity_penalty_multiplier_msat: 1_000,
		considered_impossible_penalty_msat: u64::max_value(),
		..ProbabilisticScoringFeeParameters::zero_penalty()
	};
	let decay_params = ProbabilisticScoringDecayParameters {
		..ProbabilisticScoringDecayParameters::zero_penalty()
	};
	// Seed channel 42 with known liquidity offsets of 40 msat from either end.
	let scorer = ProbabilisticScorer::new(decay_params, &network_graph, &logger)
		.with_channel(42,
			ChannelLiquidity {
				min_liquidity_offset_msat: 40, max_liquidity_offset_msat: 40,
				last_updated, offset_history_last_updated,
				min_liquidity_offset_history: HistoricalBucketRangeTracker::new(),
				max_liquidity_offset_history: HistoricalBucketRangeTracker::new(),
			});
	let source = source_node_id();

	let usage = ChannelUsage {
		amount_msat: 39,
		inflight_htlc_msat: 0,
		effective_capacity: EffectiveCapacity::Total { capacity_msat: 100, htlc_maximum_msat: 1_000 },
	};
	let channel = network_graph.read_only().channel(42).unwrap().to_owned();
	let (info, _) = channel.as_directed_from(&source).unwrap();
	let candidate = CandidateRouteHop::PublicHop {
		info,
		short_channel_id: 42,
	};
	// Below the lower bound: no penalty.
	assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 0);
	// Within the bounds: some finite, non-zero penalty.
	let usage = ChannelUsage { amount_msat: 50, ..usage };
	assert_ne!(scorer.channel_penalty_msat(&candidate, usage, &params), 0);
	assert_ne!(scorer.channel_penalty_msat(&candidate, usage, &params), u64::max_value());
	// Above the upper bound: considered impossible.
	let usage = ChannelUsage { amount_msat: 61, ..usage };
	assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), u64::max_value());
}
/// Verifies that failures and successes reported on our own first-hop channel (scid 41) do not
/// change its penalty, since we always know our own channel's liquidity exactly.
#[test]
fn does_not_further_penalize_own_channel() {
	let logger = TestLogger::new();
	let network_graph = network_graph(&logger);
	let params = ProbabilisticScoringFeeParameters {
		liquidity_penalty_multiplier_msat: 1_000,
		..ProbabilisticScoringFeeParameters::zero_penalty()
	};
	let mut scorer = ProbabilisticScorer::new(ProbabilisticScoringDecayParameters::default(), &network_graph, &logger);
	let source = source_node_id();
	let usage = ChannelUsage {
		amount_msat: 500,
		inflight_htlc_msat: 0,
		effective_capacity: EffectiveCapacity::Total { capacity_msat: 1_000, htlc_maximum_msat: 1_000 },
	};
	let failed_path = payment_path_for_amount(500);
	let successful_path = payment_path_for_amount(200);
	let channel = &network_graph.read_only().channel(42).unwrap().to_owned();
	let (info, _) = channel.as_directed_from(&source).unwrap();
	let candidate = CandidateRouteHop::PublicHop {
		info,
		short_channel_id: 41,
	};

	assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 301);

	// A failure on our own channel must not move the penalty.
	scorer.payment_path_failed(&failed_path, 41, Duration::ZERO);
	assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 301);

	// Nor must a success.
	scorer.payment_path_successful(&successful_path, Duration::ZERO);
	assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 301);
}
/// Verifies that when a payment fails downstream of a channel (scid 43 here), the channels the
/// payment *did* traverse (scid 42) have their liquidity lower bound raised to the payment
/// amount, reducing penalties at or below that amount.
#[test]
fn sets_liquidity_lower_bound_on_downstream_failure() {
	let logger = TestLogger::new();
	let network_graph = network_graph(&logger);
	let params = ProbabilisticScoringFeeParameters {
		liquidity_penalty_multiplier_msat: 1_000,
		..ProbabilisticScoringFeeParameters::zero_penalty()
	};
	let mut scorer = ProbabilisticScorer::new(ProbabilisticScoringDecayParameters::default(), &network_graph, &logger);
	let source = source_node_id();
	let path = payment_path_for_amount(500);

	let usage = ChannelUsage {
		amount_msat: 250,
		inflight_htlc_msat: 0,
		effective_capacity: EffectiveCapacity::Total { capacity_msat: 1_000, htlc_maximum_msat: 1_000 },
	};
	let channel = network_graph.read_only().channel(42).unwrap().to_owned();
	let (info, _) = channel.as_directed_from(&source).unwrap();
	let candidate = CandidateRouteHop::PublicHop {
		info,
		short_channel_id: 42,
	};
	assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 128);
	let usage = ChannelUsage { amount_msat: 500, ..usage };
	assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 301);
	let usage = ChannelUsage { amount_msat: 750, ..usage };
	assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 602);

	// Fail at channel 43; channel 42 forwarded 500 msat successfully.
	scorer.payment_path_failed(&path, 43, Duration::ZERO);

	let usage = ChannelUsage { amount_msat: 250, ..usage };
	assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 0);
	let usage = ChannelUsage { amount_msat: 500, ..usage };
	assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 0);
	let usage = ChannelUsage { amount_msat: 750, ..usage };
	assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 300);
}
/// Verifies that when a payment fails *at* a channel (scid 42), that channel's liquidity upper
/// bound drops to the payment amount, making amounts at or above it considered impossible.
#[test]
fn sets_liquidity_upper_bound_on_failure() {
	let logger = TestLogger::new();
	let network_graph = network_graph(&logger);
	let params = ProbabilisticScoringFeeParameters {
		liquidity_penalty_multiplier_msat: 1_000,
		considered_impossible_penalty_msat: u64::max_value(),
		..ProbabilisticScoringFeeParameters::zero_penalty()
	};
	let mut scorer = ProbabilisticScorer::new(ProbabilisticScoringDecayParameters::default(), &network_graph, &logger);
	let source = source_node_id();
	let path = payment_path_for_amount(500);

	let usage = ChannelUsage {
		amount_msat: 250,
		inflight_htlc_msat: 0,
		effective_capacity: EffectiveCapacity::Total { capacity_msat: 1_000, htlc_maximum_msat: 1_000 },
	};
	let channel = network_graph.read_only().channel(42).unwrap().to_owned();
	let (info, _) = channel.as_directed_from(&source).unwrap();
	let candidate = CandidateRouteHop::PublicHop {
		info,
		short_channel_id: 42,
	};
	assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 128);
	let usage = ChannelUsage { amount_msat: 500, ..usage };
	assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 301);
	let usage = ChannelUsage { amount_msat: 750, ..usage };
	assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 602);

	// Fail at channel 42 itself: it could not forward 500 msat.
	scorer.payment_path_failed(&path, 42, Duration::ZERO);

	let usage = ChannelUsage { amount_msat: 250, ..usage };
	assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 300);
	let usage = ChannelUsage { amount_msat: 500, ..usage };
	assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), u64::max_value());
	let usage = ChannelUsage { amount_msat: 750, ..usage };
	assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), u64::max_value());
}
#[test]
fn ignores_channels_after_removed_failed_channel() {
	// Previously, if we'd tried to send over a channel which was removed from the network
	// graph before we call `payment_path_failed` (which is the default if the we get a "no
	// such channel" error in the `InvoicePayer`), we would call `failed_downstream` on all
	// channels in the route, even ones which they payment never reached. This tests to ensure
	// we do not score such channels.
	let secp_ctx = Secp256k1::new();
	let logger = TestLogger::new();
	let mut network_graph = NetworkGraph::new(Network::Testnet, &logger);
	let secret_a = SecretKey::from_slice(&[42; 32]).unwrap();
	let secret_b = SecretKey::from_slice(&[43; 32]).unwrap();
	let secret_c = SecretKey::from_slice(&[44; 32]).unwrap();
	let secret_d = SecretKey::from_slice(&[45; 32]).unwrap();
	add_channel(&mut network_graph, 42, secret_a, secret_b);
	// Don't add the channel from B -> C.
	add_channel(&mut network_graph, 44, secret_c, secret_d);

	let pub_a = PublicKey::from_secret_key(&secp_ctx, &secret_a);
	let pub_b = PublicKey::from_secret_key(&secp_ctx, &secret_b);
	let pub_c = PublicKey::from_secret_key(&secp_ctx, &secret_c);
	let pub_d = PublicKey::from_secret_key(&secp_ctx, &secret_d);

	// Path A -> B -> C -> D, where the B -> C channel (scid 43) is missing from the graph.
	let path = vec![
		path_hop(pub_b, 42, 1),
		path_hop(pub_c, 43, 2),
		path_hop(pub_d, 44, 100),
	];

	let node_a = NodeId::from_pubkey(&pub_a);
	let node_b = NodeId::from_pubkey(&pub_b);
	let node_c = NodeId::from_pubkey(&pub_c);

	let params = ProbabilisticScoringFeeParameters {
		liquidity_penalty_multiplier_msat: 1_000,
		..ProbabilisticScoringFeeParameters::zero_penalty()
	};
	let mut scorer = ProbabilisticScorer::new(ProbabilisticScoringDecayParameters::default(), &network_graph, &logger);

	let usage = ChannelUsage {
		amount_msat: 250,
		inflight_htlc_msat: 0,
		effective_capacity: EffectiveCapacity::Total { capacity_msat: 1_000, htlc_maximum_msat: 1_000 },
	};
	let channel = network_graph.read_only().channel(42).unwrap().to_owned();
	let (info, _) = channel.as_directed_from(&node_a).unwrap();
	let candidate = CandidateRouteHop::PublicHop {
		info,
		short_channel_id: 42,
	};
	assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 128);
	// Note that a default liquidity bound is used for B -> C as no channel exists
	let channel = network_graph.read_only().channel(42).unwrap().to_owned();
	let (info, _) = channel.as_directed_from(&node_b).unwrap();
	let candidate = CandidateRouteHop::PublicHop {
		info,
		short_channel_id: 43,
	};
	assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 128);
	let channel = network_graph.read_only().channel(44).unwrap().to_owned();
	let (info, _) = channel.as_directed_from(&node_c).unwrap();
	let candidate = CandidateRouteHop::PublicHop {
		info,
		short_channel_id: 44,
	};
	assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 128);

	// Fail at the (graph-absent) channel 43.
	scorer.payment_path_failed(&Path { hops: path, blinded_tail: None }, 43, Duration::ZERO);

	// A -> B (scid 42) is upstream of the failure, so its lower bound moved.
	let channel = network_graph.read_only().channel(42).unwrap().to_owned();
	let (info, _) = channel.as_directed_from(&node_a).unwrap();
	let candidate = CandidateRouteHop::PublicHop {
		info,
		short_channel_id: 42,
	};
	assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 80);
	// Note that a default liquidity bound is used for B -> C as no channel exists
	let channel = network_graph.read_only().channel(42).unwrap().to_owned();
	let (info, _) = channel.as_directed_from(&node_b).unwrap();
	let candidate = CandidateRouteHop::PublicHop {
		info,
		short_channel_id: 43,
	};
	assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 128);
	// C -> D (scid 44) was never reached, so its penalty is unchanged.
	let channel = network_graph.read_only().channel(44).unwrap().to_owned();
	let (info, _) = channel.as_directed_from(&node_c).unwrap();
	let candidate = CandidateRouteHop::PublicHop {
		info,
		short_channel_id: 44,
	};
	assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 128);
}
/// Verifies that a successful payment lowers the liquidity upper bound of every non-first-hop
/// channel along the path (42 and 43), while our own first hop (41) stays unchanged.
#[test]
fn reduces_liquidity_upper_bound_along_path_on_success() {
	let logger = TestLogger::new();
	let network_graph = network_graph(&logger);
	let params = ProbabilisticScoringFeeParameters {
		liquidity_penalty_multiplier_msat: 1_000,
		..ProbabilisticScoringFeeParameters::zero_penalty()
	};
	let mut scorer = ProbabilisticScorer::new(ProbabilisticScoringDecayParameters::default(), &network_graph, &logger);
	let source = source_node_id();
	let usage = ChannelUsage {
		amount_msat: 250,
		inflight_htlc_msat: 0,
		effective_capacity: EffectiveCapacity::Total { capacity_msat: 1_000, htlc_maximum_msat: 1_000 },
	};
	let network_graph = network_graph.read_only().channels().clone();
	let channel_42 = network_graph.get(&42).unwrap();
	let channel_43 = network_graph.get(&43).unwrap();
	let (info, _) = channel_42.as_directed_from(&source).unwrap();
	let candidate_41 = CandidateRouteHop::PublicHop {
		info,
		short_channel_id: 41,
	};
	let (info, target) = channel_42.as_directed_from(&source).unwrap();
	let candidate_42 = CandidateRouteHop::PublicHop {
		info,
		short_channel_id: 42,
	};
	let (info, _) = channel_43.as_directed_from(&target).unwrap();
	let candidate_43 = CandidateRouteHop::PublicHop {
		info,
		short_channel_id: 43,
	};
	assert_eq!(scorer.channel_penalty_msat(&candidate_41, usage, &params), 128);
	assert_eq!(scorer.channel_penalty_msat(&candidate_42, usage, &params), 128);
	assert_eq!(scorer.channel_penalty_msat(&candidate_43, usage, &params), 128);

	scorer.payment_path_successful(&payment_path_for_amount(500), Duration::ZERO);

	assert_eq!(scorer.channel_penalty_msat(&candidate_41, usage, &params), 128);
	assert_eq!(scorer.channel_penalty_msat(&candidate_42, usage, &params), 300);
	assert_eq!(scorer.channel_penalty_msat(&candidate_43, usage, &params), 300);
}
/// Verifies that learned liquidity bounds decay back toward the default (0, capacity) range as
/// `decay_liquidity_certainty` is called with advancing time, using a 10s half-life.
#[test]
fn decays_liquidity_bounds_over_time() {
	let logger = TestLogger::new();
	let network_graph = network_graph(&logger);
	let params = ProbabilisticScoringFeeParameters {
		liquidity_penalty_multiplier_msat: 1_000,
		considered_impossible_penalty_msat: u64::max_value(),
		..ProbabilisticScoringFeeParameters::zero_penalty()
	};
	let decay_params = ProbabilisticScoringDecayParameters {
		liquidity_offset_half_life: Duration::from_secs(10),
		..ProbabilisticScoringDecayParameters::zero_penalty()
	};
	let mut scorer = ProbabilisticScorer::new(decay_params, &network_graph, &logger);
	let source = source_node_id();

	let usage = ChannelUsage {
		amount_msat: 0,
		inflight_htlc_msat: 0,
		effective_capacity: EffectiveCapacity::Total { capacity_msat: 1_024, htlc_maximum_msat: 1_024 },
	};
	let channel = network_graph.read_only().channel(42).unwrap().to_owned();
	let (info, _) = channel.as_directed_from(&source).unwrap();
	let candidate = CandidateRouteHop::PublicHop {
		info,
		short_channel_id: 42,
	};
	assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 0);
	let usage = ChannelUsage { amount_msat: 1_023, ..usage };
	assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 2_000);

	// Learn bounds: 768 failed at 42 (upper bound), 128 forwarded past 42 (lower bound).
	scorer.payment_path_failed(&payment_path_for_amount(768), 42, Duration::ZERO);
	scorer.payment_path_failed(&payment_path_for_amount(128), 43, Duration::ZERO);

	// Initial penalties
	let usage = ChannelUsage { amount_msat: 128, ..usage };
	assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 0);
	let usage = ChannelUsage { amount_msat: 256, ..usage };
	assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 93);
	let usage = ChannelUsage { amount_msat: 768, ..usage };
	assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 1_479);
	let usage = ChannelUsage { amount_msat: 896, ..usage };
	assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), u64::max_value());

	// Half decay (i.e., three-quarter life)
	scorer.decay_liquidity_certainty(Duration::from_secs(5));
	let usage = ChannelUsage { amount_msat: 128, ..usage };
	assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 22);
	let usage = ChannelUsage { amount_msat: 256, ..usage };
	assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 106);
	let usage = ChannelUsage { amount_msat: 768, ..usage };
	assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 921);
	let usage = ChannelUsage { amount_msat: 896, ..usage };
	assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), u64::max_value());

	// One decay (i.e., half life)
	scorer.decay_liquidity_certainty(Duration::from_secs(10));
	let usage = ChannelUsage { amount_msat: 64, ..usage };
	assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 0);
	let usage = ChannelUsage { amount_msat: 128, ..usage };
	assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 34);
	let usage = ChannelUsage { amount_msat: 896, ..usage };
	assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 1_970);
	let usage = ChannelUsage { amount_msat: 960, ..usage };
	assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), u64::max_value());

	// Fully decay liquidity lower bound.
	scorer.decay_liquidity_certainty(Duration::from_secs(10 * 8));
	let usage = ChannelUsage { amount_msat: 0, ..usage };
	assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 0);
	let usage = ChannelUsage { amount_msat: 1, ..usage };
	assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 0);
	let usage = ChannelUsage { amount_msat: 1_023, ..usage };
	assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 2_000);
	let usage = ChannelUsage { amount_msat: 1_024, ..usage };
	assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), u64::max_value());

	// Fully decay liquidity upper bound.
	scorer.decay_liquidity_certainty(Duration::from_secs(10 * 9));
	let usage = ChannelUsage { amount_msat: 0, ..usage };
	assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 0);
	let usage = ChannelUsage { amount_msat: 1_024, ..usage };
	assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), u64::max_value());

	scorer.decay_liquidity_certainty(Duration::from_secs(10 * 10));
	let usage = ChannelUsage { amount_msat: 0, ..usage };
	assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 0);
	let usage = ChannelUsage { amount_msat: 1_024, ..usage };
	assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), u64::max_value());
}
/// Verifies that fresh liquidity observations after a decay step tighten the (decayed) bounds
/// rather than resetting them, and that further decay widens them again.
#[test]
fn restricts_liquidity_bounds_after_decay() {
	let logger = TestLogger::new();
	let network_graph = network_graph(&logger);
	let params = ProbabilisticScoringFeeParameters {
		liquidity_penalty_multiplier_msat: 1_000,
		..ProbabilisticScoringFeeParameters::zero_penalty()
	};
	let decay_params = ProbabilisticScoringDecayParameters {
		liquidity_offset_half_life: Duration::from_secs(10),
		..ProbabilisticScoringDecayParameters::default()
	};
	let mut scorer = ProbabilisticScorer::new(decay_params, &network_graph, &logger);
	let source = source_node_id();
	let usage = ChannelUsage {
		amount_msat: 512,
		inflight_htlc_msat: 0,
		effective_capacity: EffectiveCapacity::Total { capacity_msat: 1_024, htlc_maximum_msat: 1_000 },
	};
	let channel = network_graph.read_only().channel(42).unwrap().to_owned();
	let (info, _) = channel.as_directed_from(&source).unwrap();
	let candidate = CandidateRouteHop::PublicHop {
		info,
		short_channel_id: 42,
	};

	assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 300);

	// More knowledge gives higher confidence (256, 768), meaning a lower penalty.
	scorer.payment_path_failed(&payment_path_for_amount(768), 42, Duration::ZERO);
	scorer.payment_path_failed(&payment_path_for_amount(256), 43, Duration::ZERO);
	assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 281);

	// Decaying knowledge gives less confidence (128, 896), meaning a higher penalty.
	scorer.decay_liquidity_certainty(Duration::from_secs(10));
	assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 291);

	// Reducing the upper bound gives more confidence (128, 832) that the payment amount (512)
	// is closer to the upper bound, meaning a higher penalty.
	scorer.payment_path_successful(&payment_path_for_amount(64), Duration::from_secs(10));
	assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 331);

	// Increasing the lower bound gives more confidence (256, 832) that the payment amount (512)
	// is closer to the lower bound, meaning a lower penalty.
	scorer.payment_path_failed(&payment_path_for_amount(256), 43, Duration::from_secs(10));
	assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 245);

	// Further decaying affects the lower bound more than the upper bound (128, 928).
	scorer.decay_liquidity_certainty(Duration::from_secs(20));
	assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 280);
}
/// Verifies that serializing and deserializing a scorer preserves its learned liquidity bounds:
/// the reloaded scorer must produce the same penalty as the original.
#[test]
fn restores_persisted_liquidity_bounds() {
	let logger = TestLogger::new();
	let network_graph = network_graph(&logger);
	let params = ProbabilisticScoringFeeParameters {
		liquidity_penalty_multiplier_msat: 1_000,
		considered_impossible_penalty_msat: u64::max_value(),
		..ProbabilisticScoringFeeParameters::zero_penalty()
	};
	let decay_params = ProbabilisticScoringDecayParameters {
		liquidity_offset_half_life: Duration::from_secs(10),
		..ProbabilisticScoringDecayParameters::default()
	};
	let mut scorer = ProbabilisticScorer::new(decay_params, &network_graph, &logger);
	let source = source_node_id();
	let usage = ChannelUsage {
		amount_msat: 500,
		inflight_htlc_msat: 0,
		effective_capacity: EffectiveCapacity::Total { capacity_msat: 1_000, htlc_maximum_msat: 1_000 },
	};

	scorer.payment_path_failed(&payment_path_for_amount(500), 42, Duration::ZERO);
	let channel = network_graph.read_only().channel(42).unwrap().to_owned();
	let (info, _) = channel.as_directed_from(&source).unwrap();
	let candidate = CandidateRouteHop::PublicHop {
		info,
		short_channel_id: 42,
	};
	assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), u64::max_value());

	scorer.decay_liquidity_certainty(Duration::from_secs(10));
	assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 473);

	scorer.payment_path_failed(&payment_path_for_amount(250), 43, Duration::from_secs(10));
	assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 300);

	let mut serialized_scorer = Vec::new();
	scorer.write(&mut serialized_scorer).unwrap();

	let mut serialized_scorer = io::Cursor::new(&serialized_scorer);
	let deserialized_scorer =
		<ProbabilisticScorer<_, _>>::read(&mut serialized_scorer, (decay_params, &network_graph, &logger)).unwrap();
	assert_eq!(deserialized_scorer.channel_penalty_msat(&candidate, usage, &params), 300);
}
2738 fn do_decays_persisted_liquidity_bounds(decay_before_reload: bool) {
2739 let logger = TestLogger::new();
2740 let network_graph = network_graph(&logger);
2741 let params = ProbabilisticScoringFeeParameters {
2742 liquidity_penalty_multiplier_msat: 1_000,
2743 considered_impossible_penalty_msat: u64::max_value(),
2744 ..ProbabilisticScoringFeeParameters::zero_penalty()
2746 let decay_params = ProbabilisticScoringDecayParameters {
2747 liquidity_offset_half_life: Duration::from_secs(10),
2748 ..ProbabilisticScoringDecayParameters::zero_penalty()
2750 let mut scorer = ProbabilisticScorer::new(decay_params, &network_graph, &logger);
2751 let source = source_node_id();
2752 let usage = ChannelUsage {
2754 inflight_htlc_msat: 0,
2755 effective_capacity: EffectiveCapacity::Total { capacity_msat: 1_000, htlc_maximum_msat: 1_000 },
2758 scorer.payment_path_failed(&payment_path_for_amount(500), 42, Duration::ZERO);
2759 let channel = network_graph.read_only().channel(42).unwrap().to_owned();
2760 let (info, _) = channel.as_directed_from(&source).unwrap();
2761 let candidate = CandidateRouteHop::PublicHop {
2763 short_channel_id: 42,
2765 assert_eq!(scorer.channel_penalty_msat(&candidate, usage, ¶ms), u64::max_value());
2767 if decay_before_reload {
2768 scorer.decay_liquidity_certainty(Duration::from_secs(10));
2771 let mut serialized_scorer = Vec::new();
2772 scorer.write(&mut serialized_scorer).unwrap();
2774 let mut serialized_scorer = io::Cursor::new(&serialized_scorer);
2775 let mut deserialized_scorer =
2776 <ProbabilisticScorer<_, _>>::read(&mut serialized_scorer, (decay_params, &network_graph, &logger)).unwrap();
2777 if !decay_before_reload {
2778 scorer.decay_liquidity_certainty(Duration::from_secs(10));
2779 deserialized_scorer.decay_liquidity_certainty(Duration::from_secs(10));
2781 assert_eq!(deserialized_scorer.channel_penalty_msat(&candidate, usage, ¶ms), 473);
2783 scorer.payment_path_failed(&payment_path_for_amount(250), 43, Duration::from_secs(10));
2784 assert_eq!(scorer.channel_penalty_msat(&candidate, usage, ¶ms), 300);
2786 deserialized_scorer.decay_liquidity_certainty(Duration::from_secs(20));
2787 assert_eq!(deserialized_scorer.channel_penalty_msat(&candidate, usage, ¶ms), 370);
/// Runs the persisted-decay check in both orderings (decay before and after reload).
#[test]
fn decays_persisted_liquidity_bounds() {
	do_decays_persisted_liquidity_bounds(false);
	do_decays_persisted_liquidity_bounds(true);
}
#[test]
fn scores_realistic_payments() {
	// Shows the scores of "realistic" sends of 100k sats over channels of 1-10m sats (with a
	// 50k sat reserve).
	let logger = TestLogger::new();
	let network_graph = network_graph(&logger);
	let params = ProbabilisticScoringFeeParameters::default();
	let scorer = ProbabilisticScorer::new(ProbabilisticScoringDecayParameters::default(), &network_graph, &logger);
	let source = source_node_id();

	let usage = ChannelUsage {
		amount_msat: 100_000_000,
		inflight_htlc_msat: 0,
		effective_capacity: EffectiveCapacity::Total { capacity_msat: 950_000_000, htlc_maximum_msat: 1_000 },
	};
	let channel = network_graph.read_only().channel(42).unwrap().to_owned();
	let (info, _) = channel.as_directed_from(&source).unwrap();
	let candidate = CandidateRouteHop::PublicHop {
		info,
		short_channel_id: 42,
	};
	// Larger capacities for the same amount yield monotonically smaller penalties.
	assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 11497);
	let usage = ChannelUsage {
		effective_capacity: EffectiveCapacity::Total { capacity_msat: 1_950_000_000, htlc_maximum_msat: 1_000 }, ..usage
	};
	assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 7408);
	let usage = ChannelUsage {
		effective_capacity: EffectiveCapacity::Total { capacity_msat: 2_950_000_000, htlc_maximum_msat: 1_000 }, ..usage
	};
	assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 6151);
	let usage = ChannelUsage {
		effective_capacity: EffectiveCapacity::Total { capacity_msat: 3_950_000_000, htlc_maximum_msat: 1_000 }, ..usage
	};
	assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 5427);
	let usage = ChannelUsage {
		effective_capacity: EffectiveCapacity::Total { capacity_msat: 4_950_000_000, htlc_maximum_msat: 1_000 }, ..usage
	};
	assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 4955);
	let usage = ChannelUsage {
		effective_capacity: EffectiveCapacity::Total { capacity_msat: 5_950_000_000, htlc_maximum_msat: 1_000 }, ..usage
	};
	assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 4736);
	let usage = ChannelUsage {
		effective_capacity: EffectiveCapacity::Total { capacity_msat: 6_950_000_000, htlc_maximum_msat: 1_000 }, ..usage
	};
	assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 4484);
	let usage = ChannelUsage {
		effective_capacity: EffectiveCapacity::Total { capacity_msat: 7_450_000_000, htlc_maximum_msat: 1_000 }, ..usage
	};
	assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 4484);
	let usage = ChannelUsage {
		effective_capacity: EffectiveCapacity::Total { capacity_msat: 7_950_000_000, htlc_maximum_msat: 1_000 }, ..usage
	};
	assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 4263);
	let usage = ChannelUsage {
		effective_capacity: EffectiveCapacity::Total { capacity_msat: 8_950_000_000, htlc_maximum_msat: 1_000 }, ..usage
	};
	assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 4263);
	let usage = ChannelUsage {
		effective_capacity: EffectiveCapacity::Total { capacity_msat: 9_950_000_000, htlc_maximum_msat: 1_000 }, ..usage
	};
	assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 4044);
}
/// Verifies that `base_penalty_msat` and `base_penalty_amount_multiplier_msat` are added on top
/// of the liquidity penalty.
#[test]
fn adds_base_penalty_to_liquidity_penalty() {
	let logger = TestLogger::new();
	let network_graph = network_graph(&logger);
	let source = source_node_id();
	let usage = ChannelUsage {
		amount_msat: 128,
		inflight_htlc_msat: 0,
		effective_capacity: EffectiveCapacity::Total { capacity_msat: 1_024, htlc_maximum_msat: 1_000 },
	};

	let params = ProbabilisticScoringFeeParameters {
		liquidity_penalty_multiplier_msat: 1_000,
		..ProbabilisticScoringFeeParameters::zero_penalty()
	};
	let scorer = ProbabilisticScorer::new(ProbabilisticScoringDecayParameters::default(), &network_graph, &logger);
	let channel = network_graph.read_only().channel(42).unwrap().to_owned();
	let (info, _) = channel.as_directed_from(&source).unwrap();
	let candidate = CandidateRouteHop::PublicHop {
		info,
		short_channel_id: 42,
	};
	// Liquidity penalty alone.
	assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 58);

	// Plus a flat base penalty.
	let params = ProbabilisticScoringFeeParameters {
		base_penalty_msat: 500, liquidity_penalty_multiplier_msat: 1_000,
		anti_probing_penalty_msat: 0, ..ProbabilisticScoringFeeParameters::zero_penalty()
	};
	let scorer = ProbabilisticScorer::new(ProbabilisticScoringDecayParameters::default(), &network_graph, &logger);
	assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 558);

	// Plus an amount-proportional base penalty: (1 << 30) * 128 / 2^30 = 128.
	let params = ProbabilisticScoringFeeParameters {
		base_penalty_msat: 500, liquidity_penalty_multiplier_msat: 1_000,
		base_penalty_amount_multiplier_msat: (1 << 30),
		anti_probing_penalty_msat: 0, ..ProbabilisticScoringFeeParameters::zero_penalty()
	};

	let scorer = ProbabilisticScorer::new(ProbabilisticScoringDecayParameters::default(), &network_graph, &logger);
	assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 558 + 128);
}
/// Verifies that `liquidity_penalty_amount_multiplier_msat` scales the liquidity penalty with
/// the payment amount.
#[test]
fn adds_amount_penalty_to_liquidity_penalty() {
	let logger = TestLogger::new();
	let network_graph = network_graph(&logger);
	let source = source_node_id();
	let usage = ChannelUsage {
		amount_msat: 512_000,
		inflight_htlc_msat: 0,
		effective_capacity: EffectiveCapacity::Total { capacity_msat: 1_024_000, htlc_maximum_msat: 1_000 },
	};

	let params = ProbabilisticScoringFeeParameters {
		liquidity_penalty_multiplier_msat: 1_000,
		liquidity_penalty_amount_multiplier_msat: 0,
		..ProbabilisticScoringFeeParameters::zero_penalty()
	};
	let scorer = ProbabilisticScorer::new(ProbabilisticScoringDecayParameters::default(), &network_graph, &logger);
	let channel = network_graph.read_only().channel(42).unwrap().to_owned();
	let (info, _) = channel.as_directed_from(&source).unwrap();
	let candidate = CandidateRouteHop::PublicHop {
		info,
		short_channel_id: 42,
	};
	assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 300);

	let params = ProbabilisticScoringFeeParameters {
		liquidity_penalty_multiplier_msat: 1_000,
		liquidity_penalty_amount_multiplier_msat: 256,
		..ProbabilisticScoringFeeParameters::zero_penalty()
	};
	let scorer = ProbabilisticScorer::new(ProbabilisticScoringDecayParameters::default(), &network_graph, &logger);
	assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 337);
}
/// Verifies that penalty computation does not overflow for the maximum possible amount over an
/// infinite-capacity channel.
#[test]
fn calculates_log10_without_overflowing_u64_max_value() {
	let logger = TestLogger::new();
	let network_graph = network_graph(&logger);
	let source = source_node_id();
	let usage = ChannelUsage {
		amount_msat: u64::max_value(),
		inflight_htlc_msat: 0,
		effective_capacity: EffectiveCapacity::Infinite,
	};
	let params = ProbabilisticScoringFeeParameters {
		liquidity_penalty_multiplier_msat: 40_000,
		..ProbabilisticScoringFeeParameters::zero_penalty()
	};
	let decay_params = ProbabilisticScoringDecayParameters::zero_penalty();
	let channel = network_graph.read_only().channel(42).unwrap().to_owned();
	let (info, _) = channel.as_directed_from(&source).unwrap();
	let candidate = CandidateRouteHop::PublicHop {
		info,
		short_channel_id: 42,
	};
	let scorer = ProbabilisticScorer::new(decay_params, &network_graph, &logger);
	assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 80_000);
}
/// Verifies that in-flight HTLC amounts count against available liquidity: amount plus inflight
/// exceeding capacity makes the send considered impossible.
#[test]
fn accounts_for_inflight_htlc_usage() {
	let logger = TestLogger::new();
	let network_graph = network_graph(&logger);
	let params = ProbabilisticScoringFeeParameters {
		considered_impossible_penalty_msat: u64::max_value(),
		..ProbabilisticScoringFeeParameters::zero_penalty()
	};
	let scorer = ProbabilisticScorer::new(ProbabilisticScoringDecayParameters::default(), &network_graph, &logger);
	let source = source_node_id();

	let usage = ChannelUsage {
		amount_msat: 750,
		inflight_htlc_msat: 0,
		effective_capacity: EffectiveCapacity::Total { capacity_msat: 1_000, htlc_maximum_msat: 1_000 },
	};
	let network_graph = network_graph.read_only();
	let channel = network_graph.channel(42).unwrap();
	let (info, _) = channel.as_directed_from(&source).unwrap();
	let candidate = CandidateRouteHop::PublicHop {
		info,
		short_channel_id: 42,
	};
	assert_ne!(scorer.channel_penalty_msat(&candidate, usage, &params), u64::max_value());

	// 750 + 251 > 1_000 capacity, so the send cannot possibly succeed.
	let usage = ChannelUsage { inflight_htlc_msat: 251, ..usage };
	assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), u64::max_value());
}
// Verifies that `EffectiveCapacity::ExactLiquidity` bypasses the probabilistic
// model entirely: any amount at or below the known liquidity is charged only
// the flat `base_penalty_msat`, while any amount above it gets u64::MAX
// (certain failure). Fn name keeps the upstream typo "uncertainity".
// NOTE(review): extraction dropped lines (e.g. `#[test]`, the `amount_msat`
// field of the first `ChannelUsage`, closing `};`, and the candidate's
// `info,` field); `¶ms` is mojibake for `&params`.
2990 fn removes_uncertainity_when_exact_liquidity_known() {
2991 let logger = TestLogger::new();
2992 let network_graph = network_graph(&logger);
2993 let params = ProbabilisticScoringFeeParameters::default();
2994 let scorer = ProbabilisticScorer::new(ProbabilisticScoringDecayParameters::default(), &network_graph, &logger);
2995 let source = source_node_id();
2997 let base_penalty_msat = params.base_penalty_msat;
2998 let usage = ChannelUsage {
3000 inflight_htlc_msat: 0,
// Exact liquidity of 1_000 msat is known — no uncertainty to penalize.
3001 effective_capacity: EffectiveCapacity::ExactLiquidity { liquidity_msat: 1_000 },
3003 let network_graph = network_graph.read_only();
3004 let channel = network_graph.channel(42).unwrap();
3005 let (info, _) = channel.as_directed_from(&source).unwrap();
3006 let candidate = CandidateRouteHop::PublicHop {
3008 short_channel_id: 42,
// Amount below known liquidity: only the flat base penalty applies.
3010 assert_eq!(scorer.channel_penalty_msat(&candidate, usage, ¶ms), base_penalty_msat);
// Amount exactly equal to known liquidity: still just the base penalty.
3012 let usage = ChannelUsage { amount_msat: 1_000, ..usage };
3013 assert_eq!(scorer.channel_penalty_msat(&candidate, usage, ¶ms), base_penalty_msat);
// One msat over the known liquidity: guaranteed to fail, max penalty.
3015 let usage = ChannelUsage { amount_msat: 1_001, ..usage };
3016 assert_eq!(scorer.channel_penalty_msat(&candidate, usage, ¶ms), u64::max_value());
// End-to-end test of the historical-bucket liquidity tracker: records payment
// failures/successes, checks the resulting 32x32 min/max bucket histograms and
// success-probability estimates, then advances time to verify the historical
// data decays (half-life 10s here) and is eventually removed entirely. Ends by
// scoring a zero-capacity reverse-direction path to exercise division-by-zero
// edge cases.
// NOTE(review): extraction dropped many lines throughout (struct `};` closers,
// `amount_msat` fields, `#[test]`, expected-value lines of several asserts,
// the `info,` candidate fields, and `let path = vec![` before line 3173);
// `¶ms` is mojibake for `&params`. The sequencing below is strictly
// order-dependent — do not reorder when restoring.
3020 fn remembers_historical_failures() {
3021 let logger = TestLogger::new();
3022 let network_graph = network_graph(&logger);
// Only historical penalties enabled, so all asserted penalties below come
// from the bucket-based estimator (plus the plain model when buckets are empty).
3023 let params = ProbabilisticScoringFeeParameters {
3024 historical_liquidity_penalty_multiplier_msat: 1024,
3025 historical_liquidity_penalty_amount_multiplier_msat: 1024,
3026 ..ProbabilisticScoringFeeParameters::zero_penalty()
3028 let decay_params = ProbabilisticScoringDecayParameters {
3029 liquidity_offset_half_life: Duration::from_secs(60 * 60),
// Short 10s half-life so the decay steps below are easy to express.
3030 historical_no_updates_half_life: Duration::from_secs(10),
3032 let mut scorer = ProbabilisticScorer::new(decay_params, &network_graph, &logger);
3033 let source = source_node_id();
3034 let target = target_node_id();
3036 let usage = ChannelUsage {
3038 inflight_htlc_msat: 0,
3039 effective_capacity: EffectiveCapacity::Total { capacity_msat: 1_024, htlc_maximum_msat: 1_024 },
3041 let usage_1 = ChannelUsage {
3043 inflight_htlc_msat: 0,
3044 effective_capacity: EffectiveCapacity::Total { capacity_msat: 1_024, htlc_maximum_msat: 1_024 },
3048 let network_graph = network_graph.read_only();
3049 let channel = network_graph.channel(42).unwrap();
3050 let (info, _) = channel.as_directed_from(&source).unwrap();
3051 let candidate = CandidateRouteHop::PublicHop {
3053 short_channel_id: 42,
3056 // With no historical data the normal liquidity penalty calculation is used.
3057 assert_eq!(scorer.channel_penalty_msat(&candidate, usage, ¶ms), 168);
3059 assert_eq!(scorer.historical_estimated_channel_liquidity_probabilities(42, &target),
3061 assert_eq!(scorer.historical_estimated_payment_success_probability(42, &target, 42, ¶ms),
// Record one failed payment of 1 msat over SCID 42.
3064 scorer.payment_path_failed(&payment_path_for_amount(1), 42, Duration::ZERO);
3066 let network_graph = network_graph.read_only();
3067 let channel = network_graph.channel(42).unwrap();
3068 let (info, _) = channel.as_directed_from(&source).unwrap();
3069 let candidate = CandidateRouteHop::PublicHop {
3071 short_channel_id: 42,
3074 assert_eq!(scorer.channel_penalty_msat(&candidate, usage, ¶ms), 2048);
3075 assert_eq!(scorer.channel_penalty_msat(&candidate, usage_1, ¶ms), 249);
3077 // The "it failed" increment is 32, where the probability should lie several buckets into
3078 // the first octile.
3079 assert_eq!(scorer.historical_estimated_channel_liquidity_probabilities(42, &target),
3080 Some(([32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
3081 [0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])));
3082 assert!(scorer.historical_estimated_payment_success_probability(42, &target, 1, ¶ms)
3084 assert_eq!(scorer.historical_estimated_payment_success_probability(42, &target, 500, ¶ms),
3087 // Even after we tell the scorer we definitely have enough available liquidity, it will
3088 // still remember that there was some failure in the past, and assign a non-0 penalty.
// Failure at hop 43 implies the 1000 msat successfully traversed SCID 42.
3089 scorer.payment_path_failed(&payment_path_for_amount(1000), 43, Duration::ZERO);
3091 let network_graph = network_graph.read_only();
3092 let channel = network_graph.channel(42).unwrap();
3093 let (info, _) = channel.as_directed_from(&source).unwrap();
3094 let candidate = CandidateRouteHop::PublicHop {
3096 short_channel_id: 42,
3099 assert_eq!(scorer.channel_penalty_msat(&candidate, usage, ¶ms), 105);
3101 // The first points should be decayed just slightly and the last bucket has a new point.
3102 assert_eq!(scorer.historical_estimated_channel_liquidity_probabilities(42, &target),
3103 Some(([31, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0],
3104 [0, 0, 0, 0, 0, 0, 31, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 32])));
3106 // The exact success probability is a bit complicated and involves integer rounding, so we
3107 // simply check bounds here.
3108 let five_hundred_prob =
3109 scorer.historical_estimated_payment_success_probability(42, &target, 500, ¶ms).unwrap();
3110 assert!(five_hundred_prob > 0.59);
3111 assert!(five_hundred_prob < 0.60);
3113 scorer.historical_estimated_payment_success_probability(42, &target, 1, ¶ms).unwrap();
3114 assert!(one_prob < 0.85);
3115 assert!(one_prob > 0.84);
3117 // Advance the time forward 16 half-lives (which the docs claim will ensure all data is
3118 // gone), and check that we're back to where we started.
3119 scorer.decay_liquidity_certainty(Duration::from_secs(10 * 16));
3121 let network_graph = network_graph.read_only();
3122 let channel = network_graph.channel(42).unwrap();
3123 let (info, _) = channel.as_directed_from(&source).unwrap();
3124 let candidate = CandidateRouteHop::PublicHop {
3126 short_channel_id: 42,
// Back to the plain (non-historical) penalty from the first assertion.
3129 assert_eq!(scorer.channel_penalty_msat(&candidate, usage, ¶ms), 168);
3131 // Once fully decayed we still have data, but its all-0s. In the future we may remove the
3132 // data entirely instead.
3133 assert_eq!(scorer.historical_estimated_channel_liquidity_probabilities(42, &target),
3134 Some(([0; 32], [0; 32])));
3135 assert_eq!(scorer.historical_estimated_payment_success_probability(42, &target, 1, ¶ms), None);
3137 let mut usage = ChannelUsage {
// In-flight equal to full capacity: exercises the fully-saturated case.
3139 inflight_htlc_msat: 1024,
3140 effective_capacity: EffectiveCapacity::Total { capacity_msat: 1_024, htlc_maximum_msat: 1_024 },
3142 scorer.payment_path_failed(&payment_path_for_amount(1), 42, Duration::from_secs(10 * 16));
3144 let network_graph = network_graph.read_only();
3145 let channel = network_graph.channel(42).unwrap();
3146 let (info, _) = channel.as_directed_from(&source).unwrap();
3147 let candidate = CandidateRouteHop::PublicHop {
3149 short_channel_id: 42,
3152 assert_eq!(scorer.channel_penalty_msat(&candidate, usage, ¶ms), 2050);
3154 let usage = ChannelUsage {
3156 inflight_htlc_msat: 0,
// Advertised max-HTLC of 0: another degenerate-capacity case.
3157 effective_capacity: EffectiveCapacity::AdvertisedMaxHTLC { amount_msat: 0 },
3159 assert_eq!(scorer.channel_penalty_msat(&candidate, usage, ¶ms), 2048);
3162 // Advance to decay all liquidity offsets to zero.
3163 scorer.decay_liquidity_certainty(Duration::from_secs(10 * (16 + 60 * 60)));
3165 // Once even the bounds have decayed information about the channel should be removed
3167 assert_eq!(scorer.historical_estimated_channel_liquidity_probabilities(42, &target),
3170 // Use a path in the opposite direction, which have zero for htlc_maximum_msat. This will
3171 // ensure that the effective capacity is zero to test division-by-zero edge cases.
3173 path_hop(target_pubkey(), 43, 2),
3174 path_hop(source_pubkey(), 42, 1),
3175 path_hop(sender_pubkey(), 41, 0),
3177 scorer.payment_path_failed(&Path { hops: path, blinded_tail: None }, 42, Duration::from_secs(10 * (16 + 60 * 60)));
// Verifies the anti-probing penalty: a flat 500 msat penalty is applied when a
// channel advertises htlc_maximum_msat >= capacity/2 (which makes it easy for
// attackers to probe exact balances), and no penalty when the advertised max
// is strictly below half the capacity. Boundary is checked at exactly
// capacity/2 (penalized) and capacity/2 - 1 (not penalized).
// NOTE(review): extraction dropped lines (`#[test]`, struct `};` closers, and
// the candidate's `info,` field); `¶ms` is mojibake for `&params`.
3181 fn adds_anti_probing_penalty() {
3182 let logger = TestLogger::new();
3183 let network_graph = network_graph(&logger);
3184 let source = source_node_id();
// Only the anti-probing penalty is enabled, so results are either 0 or 500.
3185 let params = ProbabilisticScoringFeeParameters {
3186 anti_probing_penalty_msat: 500,
3187 ..ProbabilisticScoringFeeParameters::zero_penalty()
3189 let scorer = ProbabilisticScorer::new(ProbabilisticScoringDecayParameters::default(), &network_graph, &logger);
3191 // Check we receive no penalty for a low htlc_maximum_msat.
3192 let usage = ChannelUsage {
3193 amount_msat: 512_000,
3194 inflight_htlc_msat: 0,
3195 effective_capacity: EffectiveCapacity::Total { capacity_msat: 1_024_000, htlc_maximum_msat: 1_000 },
3197 let network_graph = network_graph.read_only();
3198 let channel = network_graph.channel(42).unwrap();
3199 let (info, _) = channel.as_directed_from(&source).unwrap();
3200 let candidate = CandidateRouteHop::PublicHop {
3202 short_channel_id: 42,
3204 assert_eq!(scorer.channel_penalty_msat(&candidate, usage, ¶ms), 0);
3206 // Check we receive anti-probing penalty for htlc_maximum_msat == channel_capacity.
3207 let usage = ChannelUsage {
3208 amount_msat: 512_000,
3209 inflight_htlc_msat: 0,
3210 effective_capacity: EffectiveCapacity::Total { capacity_msat: 1_024_000, htlc_maximum_msat: 1_024_000 },
3212 assert_eq!(scorer.channel_penalty_msat(&candidate, usage, ¶ms), 500);
3214 // Check we receive anti-probing penalty for htlc_maximum_msat == channel_capacity/2.
3215 let usage = ChannelUsage {
3216 amount_msat: 512_000,
3217 inflight_htlc_msat: 0,
3218 effective_capacity: EffectiveCapacity::Total { capacity_msat: 1_024_000, htlc_maximum_msat: 512_000 },
3220 assert_eq!(scorer.channel_penalty_msat(&candidate, usage, ¶ms), 500);
3222 // Check we receive no anti-probing penalty for htlc_maximum_msat == channel_capacity/2 - 1.
3223 let usage = ChannelUsage {
3224 amount_msat: 512_000,
3225 inflight_htlc_msat: 0,
3226 effective_capacity: EffectiveCapacity::Total { capacity_msat: 1_024_000, htlc_maximum_msat: 511_999 },
3228 assert_eq!(scorer.channel_penalty_msat(&candidate, usage, ¶ms), 0);
// Verifies that a blinded tail's `final_value_msat` is included when updating
// liquidity bounds on failure: the recipient hop is popped off a normal path
// and replaced with a `BlindedTail`, then two failures (first with the full
// 768 msat final value, then with 256) are recorded and the resulting
// min/max liquidity bounds on channel 42 are checked directly.
// NOTE(review): extraction dropped lines here (`#[test]`, `amount_msat` of the
// usage, struct/vec closers, the candidate's `info,` field, and the
// `..Default::default()`-style tail of `BlindedTail`); `¶ms` is mojibake for
// `&params`.
3232 fn scores_with_blinded_path() {
3233 // Make sure we'll account for a blinded path's final_value_msat in scoring
3234 let logger = TestLogger::new();
3235 let network_graph = network_graph(&logger);
3236 let params = ProbabilisticScoringFeeParameters {
3237 liquidity_penalty_multiplier_msat: 1_000,
3238 ..ProbabilisticScoringFeeParameters::zero_penalty()
3240 let decay_params = ProbabilisticScoringDecayParameters::default();
3241 let mut scorer = ProbabilisticScorer::new(decay_params, &network_graph, &logger);
3242 let source = source_node_id();
3243 let usage = ChannelUsage {
3245 inflight_htlc_msat: 0,
3246 effective_capacity: EffectiveCapacity::Total { capacity_msat: 1_024, htlc_maximum_msat: 1_000 },
3248 let channel = network_graph.read_only().channel(42).unwrap().to_owned();
3249 let (info, target) = channel.as_directed_from(&source).unwrap();
3250 let candidate = CandidateRouteHop::PublicHop {
3252 short_channel_id: 42,
3254 assert_eq!(scorer.channel_penalty_msat(&candidate, usage, ¶ms), 300);
// Pop the recipient hop off a normal path and rebuild it as a blinded tail
// carrying the same fee as its final value.
3256 let mut path = payment_path_for_amount(768);
3257 let recipient_hop = path.hops.pop().unwrap();
3258 let blinded_path = BlindedPath {
3259 introduction_node_id: path.hops.last().as_ref().unwrap().pubkey,
3260 blinding_point: test_utils::pubkey(42),
3262 BlindedHop { blinded_node_id: test_utils::pubkey(44), encrypted_payload: Vec::new() }
3265 path.blinded_tail = Some(BlindedTail {
3266 hops: blinded_path.blinded_hops,
3267 blinding_point: blinded_path.blinding_point,
3268 excess_final_cltv_expiry_delta: recipient_hop.cltv_expiry_delta,
3269 final_value_msat: recipient_hop.fee_msat,
3272 // Check the liquidity before and after scoring payment failures to ensure the blinded path's
3273 // final value is taken into account.
3274 assert!(scorer.channel_liquidities.get(&42).is_none());
// Failure at SCID 42 with final value 768 caps the max liquidity at 768.
3276 scorer.payment_path_failed(&path, 42, Duration::ZERO);
// Failure downstream (SCID 43) with final value 256 raises the min to 256.
3277 path.blinded_tail.as_mut().unwrap().final_value_msat = 256;
3278 scorer.payment_path_failed(&path, 43, Duration::ZERO);
3280 let liquidity = scorer.channel_liquidities.get(&42).unwrap()
3281 .as_directed(&source, &target, 1_000);
3282 assert_eq!(liquidity.min_liquidity_msat(), 256);
3283 assert_eq!(liquidity.max_liquidity_msat(), 768);
// Motivation test for the unequal-sized historical buckets: paying ~10k sats
// over a 1 BTC (100_000_000_000 msat) channel. Records failures at that
// amount, checks that the min/max bucket histograms land in the expected
// small-amount buckets, and checks the success-probability estimates for both
// the failed amount and a much smaller amount.
// NOTE(review): extraction dropped lines here (`#[test]`, struct `};` closers,
// the candidate's `info,` field, several expected-value lines of asserts, and
// the reassignment of `amount_msat` before line 3348); `¶ms` is mojibake for
// `&params`. Assertions depend on the exact sequence of recorded failures.
3287 fn realistic_historical_failures() {
3288 // The motivation for the unequal sized buckets came largely from attempting to pay 10k
3289 // sats over a one bitcoin channel. This tests that case explicitly, ensuring that we score
3291 let logger = TestLogger::new();
3292 let mut network_graph = network_graph(&logger);
3293 let params = ProbabilisticScoringFeeParameters {
3294 historical_liquidity_penalty_multiplier_msat: 1024,
3295 historical_liquidity_penalty_amount_multiplier_msat: 1024,
3296 ..ProbabilisticScoringFeeParameters::zero_penalty()
3298 let decay_params = ProbabilisticScoringDecayParameters {
3299 liquidity_offset_half_life: Duration::from_secs(60 * 60),
3300 historical_no_updates_half_life: Duration::from_secs(10),
3301 ..ProbabilisticScoringDecayParameters::default()
// Re-announce channel 42 in both directions with 1 BTC capacity.
3304 let capacity_msat = 100_000_000_000;
3305 update_channel(&mut network_graph, 42, source_privkey(), 0, capacity_msat, 200);
3306 update_channel(&mut network_graph, 42, target_privkey(), 1, capacity_msat, 200);
3308 let mut scorer = ProbabilisticScorer::new(decay_params, &network_graph, &logger);
3309 let source = source_node_id();
// ~10k sats — tiny relative to the 1 BTC channel.
3311 let mut amount_msat = 10_000_000;
3312 let usage = ChannelUsage {
3314 inflight_htlc_msat: 0,
3315 effective_capacity: EffectiveCapacity::Total { capacity_msat, htlc_maximum_msat: capacity_msat },
3317 let channel = network_graph.read_only().channel(42).unwrap().to_owned();
3318 let (info, target) = channel.as_directed_from(&source).unwrap();
3319 let candidate = CandidateRouteHop::PublicHop {
3321 short_channel_id: 42,
3323 // With no historical data the normal liquidity penalty calculation is used, which results
3324 // in a success probability of ~75%.
3325 assert_eq!(scorer.channel_penalty_msat(&candidate, usage, ¶ms), 1269);
3326 assert_eq!(scorer.historical_estimated_channel_liquidity_probabilities(42, &target),
3328 assert_eq!(scorer.historical_estimated_payment_success_probability(42, &target, 42, ¶ms),
3331 // Fail to pay once, and then check the buckets and penalty.
3332 scorer.payment_path_failed(&payment_path_for_amount(amount_msat), 42, Duration::ZERO);
3333 // The penalty should be the maximum penalty, as the payment we're scoring is now in the
3334 // same bucket which is the only maximum datapoint.
3335 assert_eq!(scorer.channel_penalty_msat(&candidate, usage, ¶ms),
3336 2048 + 2048 * amount_msat / super::AMOUNT_PENALTY_DIVISOR);
3337 // The "it failed" increment is 32, which we should apply to the first upper-bound (between
3338 // 6k sats and 12k sats).
3339 assert_eq!(scorer.historical_estimated_channel_liquidity_probabilities(42, &target),
3340 Some(([32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
3341 [0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])));
3342 // The success probability estimate itself should be zero.
3343 assert_eq!(scorer.historical_estimated_payment_success_probability(42, &target, amount_msat, ¶ms),
3346 // Now test again with the amount in the bottom bucket.
3348 // The new amount is entirely within the only minimum bucket with score, so the probability
3349 // we assign is 1/2.
3350 assert_eq!(scorer.historical_estimated_payment_success_probability(42, &target, amount_msat, ¶ms),
3353 // ...but once we see a failure, we consider the payment to be substantially less likely,
3354 // even though not a probability of zero as we still look at the second max bucket which
3356 scorer.payment_path_failed(&payment_path_for_amount(amount_msat), 42, Duration::ZERO);
3357 assert_eq!(scorer.historical_estimated_channel_liquidity_probabilities(42, &target),
3358 Some(([63, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
3359 [32, 31, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])));
3360 assert_eq!(scorer.historical_estimated_payment_success_probability(42, &target, amount_msat, ¶ms),
3368 use criterion::Criterion;
3369 use crate::routing::router::{bench_utils, RouteHop};
3370 use crate::util::test_utils::TestLogger;
3371 use crate::ln::features::{ChannelFeatures, NodeFeatures};
3373 pub fn decay_100k_channel_bounds(bench: &mut Criterion) {
3374 let logger = TestLogger::new();
3375 let network_graph = bench_utils::read_network_graph(&logger).unwrap();
3376 let mut scorer = ProbabilisticScorer::new(Default::default(), &network_graph, &logger);
3377 // Score a number of random channels
3378 let mut seed: u64 = 0xdeadbeef;
3379 for _ in 0..100_000 {
3380 seed = seed.overflowing_mul(6364136223846793005).0.overflowing_add(1).0;
3381 let (victim, victim_dst, amt) = {
3382 let rong = network_graph.read_only();
3383 let channels = rong.channels();
3384 let chan = channels.unordered_iter()
3385 .skip((seed as usize) % channels.len())
3387 seed = seed.overflowing_mul(6364136223846793005).0.overflowing_add(1).0;
3388 let amt = seed % chan.1.capacity_sats.map(|c| c * 1000)
3389 .or(chan.1.one_to_two.as_ref().map(|info| info.htlc_maximum_msat))
3390 .or(chan.1.two_to_one.as_ref().map(|info| info.htlc_maximum_msat))
3391 .unwrap_or(1_000_000_000).saturating_add(1);
3392 (*chan.0, chan.1.node_two, amt)
3395 hops: vec![RouteHop {
3396 pubkey: victim_dst.as_pubkey().unwrap(),
3397 node_features: NodeFeatures::empty(),
3398 short_channel_id: victim,
3399 channel_features: ChannelFeatures::empty(),
3401 cltv_expiry_delta: 42,
3402 maybe_announced_channel: true,
3406 seed = seed.overflowing_mul(6364136223846793005).0.overflowing_add(1).0;
3408 scorer.probe_failed(&path, victim, Duration::ZERO);
3410 scorer.probe_successful(&path, Duration::ZERO);
3413 let mut cur_time = Duration::ZERO;
3414 cur_time += Duration::from_millis(1);
3415 scorer.decay_liquidity_certainty(cur_time);
3416 bench.bench_function("decay_100k_channel_bounds", |b| b.iter(|| {
3417 cur_time += Duration::from_millis(1);
3418 scorer.decay_liquidity_certainty(cur_time);