X-Git-Url: http://git.bitcoin.ninja/index.cgi?a=blobdiff_plain;f=lightning%2Fsrc%2Frouting%2Fscoring.rs;h=8b475ecc0d31fe43a35a17b9252448a843da753b;hb=6852ea974178c60211a37d4558c14678889a2932;hp=3ca4d13f2de7c55fc2e5b5b85142a20892870ffd;hpb=29e34c8a10cf813116c9251188a86978cc97e259;p=rust-lightning diff --git a/lightning/src/routing/scoring.rs b/lightning/src/routing/scoring.rs index 3ca4d13f..8b475ecc 100644 --- a/lightning/src/routing/scoring.rs +++ b/lightning/src/routing/scoring.rs @@ -64,6 +64,7 @@ use util::time::Time; use prelude::*; use core::fmt; use core::cell::{RefCell, RefMut}; +use core::convert::TryInto; use core::ops::{Deref, DerefMut}; use core::time::Duration; use io::{self, Read}; @@ -324,6 +325,9 @@ where L::Target: Logger { /// /// Used to configure base, liquidity, and amount penalties, the sum of which comprises the channel /// penalty (i.e., the amount in msats willing to be paid to avoid routing through the channel). +/// +/// The penalty applied to any channel by the [`ProbabilisticScorer`] is the sum of each of the +/// parameters here. #[derive(Clone)] pub struct ProbabilisticScoringParameters { /// A fixed penalty in msats to apply to each channel. @@ -331,8 +335,23 @@ pub struct ProbabilisticScoringParameters { /// Default value: 500 msat pub base_penalty_msat: u64, + /// A multiplier used with the payment amount to calculate a fixed penalty applied to each + /// channel, in excess of the [`base_penalty_msat`]. + /// + /// The purpose of the amount penalty is to avoid having fees dominate the channel cost (i.e., + /// fees plus penalty) for large payments. The penalty is computed as the product of this + /// multiplier and `2^30`ths of the payment amount. + /// + /// ie `base_penalty_amount_multiplier_msat * amount_msat / 2^30` + /// + /// Default value: 8,192 msat + /// + /// [`base_penalty_msat`]: Self::base_penalty_msat + pub base_penalty_amount_multiplier_msat: u64, + /// A multiplier used in conjunction with the negative `log10` of the channel's success - /// probability for a payment to determine the liquidity penalty. + /// probability for a payment, as determined by our latest estimates of the channel's + /// liquidity, to determine the liquidity penalty. /// /// The penalty is based in part on the knowledge learned from prior successful and unsuccessful /// payments. This knowledge is decayed over time based on [`liquidity_offset_half_life`]. The @@ -341,7 +360,7 @@ pub struct ProbabilisticScoringParameters { /// uncertainty bounds of the channel liquidity balance. Amounts above the upper bound will /// result in a `u64::max_value` penalty, however. /// - /// Default value: 40,000 msat + /// Default value: 30,000 msat /// /// [`liquidity_offset_half_life`]: Self::liquidity_offset_half_life pub liquidity_penalty_multiplier_msat: u64, @@ -362,14 +381,15 @@ pub struct ProbabilisticScoringParameters { pub liquidity_offset_half_life: Duration, /// A multiplier used in conjunction with a payment amount and the negative `log10` of the - /// channel's success probability for the payment to determine the amount penalty. + /// channel's success probability for the payment, as determined by our latest estimates of the + /// channel's liquidity, to determine the amount penalty. /// /// The purpose of the amount penalty is to avoid having fees dominate the channel cost (i.e., /// fees plus penalty) for large payments. 
The penalty is computed as the product of this /// multiplier and `2^20`ths of the payment amount, weighted by the negative `log10` of the /// success probability. /// - /// `-log10(success_probability) * amount_penalty_multiplier_msat * amount_msat / 2^20` + /// `-log10(success_probability) * liquidity_penalty_amount_multiplier_msat * amount_msat / 2^20` /// /// In practice, this means for 0.1 success probability (`-log10(0.1) == 1`) each `2^20`th of /// the amount will result in a penalty of the multiplier. And, as the success probability @@ -377,8 +397,44 @@ pub struct ProbabilisticScoringParameters { /// probabilities, the multiplier will have a decreasing effect as the negative `log10` will /// fall below `1`. /// - /// Default value: 256 msat - pub amount_penalty_multiplier_msat: u64, + /// Default value: 192 msat + pub liquidity_penalty_amount_multiplier_msat: u64, + + /// A multiplier used in conjunction with the negative `log10` of the channel's success + /// probability for the payment, as determined based on the history of our estimates of the + /// channel's available liquidity, to determine a penalty. + /// + /// This penalty is similar to [`liquidity_penalty_multiplier_msat`], however, instead of using + /// only our latest estimate for the current liquidity available in the channel, it estimates + /// success probability based on the estimated liquidity available in the channel through + /// history. Specifically, every time we update our liquidity bounds on a given channel, we + /// track which of several buckets those bounds fall into, exponentially decaying the + /// probability of each bucket as new samples are added. + /// + /// Default value: 10,000 msat + /// + /// [`liquidity_penalty_multiplier_msat`]: Self::liquidity_penalty_multiplier_msat + pub historical_liquidity_penalty_multiplier_msat: u64, + + /// A multiplier used in conjunction with the payment amount and the negative `log10` of the + /// channel's success probability for the payment, as determined based on the history of our + /// estimates of the channel's available liquidity, to determine a penalty. + /// + /// The purpose of the amount penalty is to avoid having fees dominate the channel cost for + /// large payments. The penalty is computed as the product of this multiplier and the `2^20`ths + /// of the payment amount, weighted by the negative `log10` of the success probability. + /// + /// This penalty is similar to [`liquidity_penalty_amount_multiplier_msat`], however, instead + /// of using only our latest estimate for the current liquidity available in the channel, it + /// estimates success probability based on the estimated liquidity available in the channel + /// through history. Specifically, every time we update our liquidity bounds on a given + /// channel, we track which of several buckets those bounds fall into, exponentially decaying + /// the probability of each bucket as new samples are added. + /// + /// Default value: 64 msat + /// + /// [`liquidity_penalty_amount_multiplier_msat`]: Self::liquidity_penalty_amount_multiplier_msat + pub historical_liquidity_penalty_amount_multiplier_msat: u64, /// Manual penalties used for the given nodes. Allows to set a particular penalty for a given /// node. 
Note that a manual penalty of `u64::max_value()` means the node would not ever be @@ -394,8 +450,69 @@ pub struct ProbabilisticScoringParameters { /// /// Default value: 250 msat pub anti_probing_penalty_msat: u64, + + /// This penalty is applied when the amount we're attempting to send over a channel exceeds our + /// current estimate of the channel's available liquidity. + /// + /// Note that in this case all other penalties, including the + /// [`liquidity_penalty_multiplier_msat`] and [`liquidity_penalty_amount_multiplier_msat`]-based + /// penalties, as well as the [`base_penalty_msat`] and the [`anti_probing_penalty_msat`], if + /// applicable, are still included in the overall penalty. + /// + /// If you wish to avoid creating paths with such channels entirely, setting this to a value of + /// `u64::max_value()` will guarantee that. + /// + /// Default value: 1_0000_0000_000 msat (1 Bitcoin) + /// + /// [`liquidity_penalty_multiplier_msat`]: Self::liquidity_penalty_multiplier_msat + /// [`liquidity_penalty_amount_multiplier_msat`]: Self::liquidity_penalty_amount_multiplier_msat + /// [`base_penalty_msat`]: Self::base_penalty_msat + /// [`anti_probing_penalty_msat`]: Self::anti_probing_penalty_msat + pub considered_impossible_penalty_msat: u64, +} + +/// Tracks the historical state of a distribution as a weighted average of how much time was spent +/// in each of 8 buckets. +#[derive(Clone, Copy)] +struct HistoricalBucketRangeTracker { + buckets: [u16; 8], } +impl HistoricalBucketRangeTracker { + fn new() -> Self { Self { buckets: [0; 8] } } + fn track_datapoint(&mut self, bucket_idx: u8) { + // We have 8 leaky buckets for min and max liquidity. Each bucket tracks the amount of time + // we spend in each bucket as a 16-bit fixed-point number with a 5 bit fractional part. + // + // Each time we update our liquidity estimate, we add 32 (1.0 in our fixed-point system) to + // the buckets for the current min and max liquidity offset positions. + // + // We then decay each bucket by multiplying by 2047/2048 (avoiding dividing by a + // non-power-of-two). This ensures we can't actually overflow the u16 - when we get to + // 63,457 adding 32 and decaying by 2047/2048 leaves us back at 63,457. + // + // In total, this allows us to track data for the last 8,000 or so payments across a given + // channel. + // + // These constants are a balance - we try to fit in 2 bytes per bucket to reduce overhead, + // and need to balance having more bits in the decimal part (to ensure decay isn't too + // non-linear) with having too few bits in the mantissa, causing us to not store very many + // datapoints. + // + // The constants were picked experimentally, selecting a decay amount that restricts us + // from overflowing buckets without having to cap them manually. + debug_assert!(bucket_idx < 8); + if bucket_idx < 8 { + for e in self.buckets.iter_mut() { + *e = ((*e as u32) * 2047 / 2048) as u16; + } + self.buckets[bucket_idx as usize] = self.buckets[bucket_idx as usize].saturating_add(32); + } + } +} + +impl_writeable_tlv_based!(HistoricalBucketRangeTracker, { (0, buckets, required) }); + /// Accounting for channel liquidity balance uncertainty. /// /// Direction is defined in terms of [`NodeId`] partial ordering, where the source node is the @@ -410,13 +527,18 @@ struct ChannelLiquidity { /// Time when the liquidity bounds were last modified. 
last_updated: T, + + min_liquidity_offset_history: HistoricalBucketRangeTracker, + max_liquidity_offset_history: HistoricalBucketRangeTracker, } /// A snapshot of [`ChannelLiquidity`] in one direction assuming a certain channel capacity and /// decayed with a given half life. -struct DirectedChannelLiquidity, T: Time, U: Deref> { +struct DirectedChannelLiquidity, BRT: Deref, T: Time, U: Deref> { min_liquidity_offset_msat: L, max_liquidity_offset_msat: L, + min_liquidity_offset_history: BRT, + max_liquidity_offset_history: BRT, capacity_msat: u64, last_updated: U, now: T, @@ -517,11 +639,15 @@ impl ProbabilisticScoringParameters { fn zero_penalty() -> Self { Self { base_penalty_msat: 0, + base_penalty_amount_multiplier_msat: 0, liquidity_penalty_multiplier_msat: 0, liquidity_offset_half_life: Duration::from_secs(3600), - amount_penalty_multiplier_msat: 0, + liquidity_penalty_amount_multiplier_msat: 0, + historical_liquidity_penalty_multiplier_msat: 0, + historical_liquidity_penalty_amount_multiplier_msat: 0, manual_node_penalties: HashMap::new(), anti_probing_penalty_msat: 0, + considered_impossible_penalty_msat: 0, } } @@ -538,11 +664,15 @@ impl Default for ProbabilisticScoringParameters { fn default() -> Self { Self { base_penalty_msat: 500, - liquidity_penalty_multiplier_msat: 40_000, + base_penalty_amount_multiplier_msat: 8192, + liquidity_penalty_multiplier_msat: 30_000, liquidity_offset_half_life: Duration::from_secs(3600), - amount_penalty_multiplier_msat: 256, + liquidity_penalty_amount_multiplier_msat: 192, + historical_liquidity_penalty_multiplier_msat: 10_000, + historical_liquidity_penalty_amount_multiplier_msat: 64, manual_node_penalties: HashMap::new(), anti_probing_penalty_msat: 250, + considered_impossible_penalty_msat: 1_0000_0000_000, } } } @@ -553,6 +683,8 @@ impl ChannelLiquidity { Self { min_liquidity_offset_msat: 0, max_liquidity_offset_msat: 0, + min_liquidity_offset_history: HistoricalBucketRangeTracker::new(), + max_liquidity_offset_history: HistoricalBucketRangeTracker::new(), last_updated: T::now(), } } @@ -561,16 +693,21 @@ impl ChannelLiquidity { /// `capacity_msat`. fn as_directed( &self, source: &NodeId, target: &NodeId, capacity_msat: u64, half_life: Duration - ) -> DirectedChannelLiquidity<&u64, T, &T> { - let (min_liquidity_offset_msat, max_liquidity_offset_msat) = if source < target { - (&self.min_liquidity_offset_msat, &self.max_liquidity_offset_msat) - } else { - (&self.max_liquidity_offset_msat, &self.min_liquidity_offset_msat) - }; + ) -> DirectedChannelLiquidity<&u64, &HistoricalBucketRangeTracker, T, &T> { + let (min_liquidity_offset_msat, max_liquidity_offset_msat, min_liquidity_offset_history, max_liquidity_offset_history) = + if source < target { + (&self.min_liquidity_offset_msat, &self.max_liquidity_offset_msat, + &self.min_liquidity_offset_history, &self.max_liquidity_offset_history) + } else { + (&self.max_liquidity_offset_msat, &self.min_liquidity_offset_msat, + &self.max_liquidity_offset_history, &self.min_liquidity_offset_history) + }; DirectedChannelLiquidity { min_liquidity_offset_msat, max_liquidity_offset_msat, + min_liquidity_offset_history, + max_liquidity_offset_history, capacity_msat, last_updated: &self.last_updated, now: T::now(), @@ -582,16 +719,21 @@ impl ChannelLiquidity { /// `capacity_msat`. 
fn as_directed_mut( &mut self, source: &NodeId, target: &NodeId, capacity_msat: u64, half_life: Duration - ) -> DirectedChannelLiquidity<&mut u64, T, &mut T> { - let (min_liquidity_offset_msat, max_liquidity_offset_msat) = if source < target { - (&mut self.min_liquidity_offset_msat, &mut self.max_liquidity_offset_msat) - } else { - (&mut self.max_liquidity_offset_msat, &mut self.min_liquidity_offset_msat) - }; + ) -> DirectedChannelLiquidity<&mut u64, &mut HistoricalBucketRangeTracker, T, &mut T> { + let (min_liquidity_offset_msat, max_liquidity_offset_msat, min_liquidity_offset_history, max_liquidity_offset_history) = + if source < target { + (&mut self.min_liquidity_offset_msat, &mut self.max_liquidity_offset_msat, + &mut self.min_liquidity_offset_history, &mut self.max_liquidity_offset_history) + } else { + (&mut self.max_liquidity_offset_msat, &mut self.min_liquidity_offset_msat, + &mut self.max_liquidity_offset_history, &mut self.min_liquidity_offset_history) + }; DirectedChannelLiquidity { min_liquidity_offset_msat, max_liquidity_offset_msat, + min_liquidity_offset_history, + max_liquidity_offset_history, capacity_msat, last_updated: &mut self.last_updated, now: T::now(), @@ -610,62 +752,130 @@ const PRECISION_LOWER_BOUND_DENOMINATOR: u64 = approx::LOWER_BITS_BOUND; /// The divisor used when computing the amount penalty. const AMOUNT_PENALTY_DIVISOR: u64 = 1 << 20; +const BASE_AMOUNT_PENALTY_DIVISOR: u64 = 1 << 30; -impl, T: Time, U: Deref> DirectedChannelLiquidity { - /// Returns a penalty for routing the given HTLC `amount_msat` through the channel in this - /// direction. +impl, BRT: Deref, T: Time, U: Deref> DirectedChannelLiquidity { + /// Returns a liquidity penalty for routing the given HTLC `amount_msat` through the channel in + /// this direction. fn penalty_msat(&self, amount_msat: u64, params: &ProbabilisticScoringParameters) -> u64 { let max_liquidity_msat = self.max_liquidity_msat(); let min_liquidity_msat = core::cmp::min(self.min_liquidity_msat(), max_liquidity_msat); - if amount_msat <= min_liquidity_msat { + + let mut res = if amount_msat <= min_liquidity_msat { 0 } else if amount_msat >= max_liquidity_msat { - if amount_msat > max_liquidity_msat { - u64::max_value() - } else if max_liquidity_msat != self.capacity_msat { - // Avoid using the failed channel on retry. - u64::max_value() - } else { - // Equivalent to hitting the else clause below with the amount equal to the - // effective capacity and without any certainty on the liquidity upper bound. - let negative_log10_times_2048 = NEGATIVE_LOG10_UPPER_BOUND * 2048; - self.combined_penalty_msat(amount_msat, negative_log10_times_2048, params) - } + // Equivalent to hitting the else clause below with the amount equal to the effective + // capacity and without any certainty on the liquidity upper bound, plus the + // impossibility penalty. 
+ let negative_log10_times_2048 = NEGATIVE_LOG10_UPPER_BOUND * 2048; + Self::combined_penalty_msat(amount_msat, negative_log10_times_2048, + params.liquidity_penalty_multiplier_msat, + params.liquidity_penalty_amount_multiplier_msat) + .saturating_add(params.considered_impossible_penalty_msat) } else { let numerator = (max_liquidity_msat - amount_msat).saturating_add(1); let denominator = (max_liquidity_msat - min_liquidity_msat).saturating_add(1); if amount_msat - min_liquidity_msat < denominator / PRECISION_LOWER_BOUND_DENOMINATOR { // If the failure probability is < 1.5625% (as 1 - numerator/denominator < 1/64), // don't bother trying to use the log approximation as it gets too noisy to be - // particularly helpful, instead just round down to 0 and return the base penalty. - params.base_penalty_msat + // particularly helpful, instead just round down to 0. + 0 } else { let negative_log10_times_2048 = approx::negative_log10_times_2048(numerator, denominator); - self.combined_penalty_msat(amount_msat, negative_log10_times_2048, params) + Self::combined_penalty_msat(amount_msat, negative_log10_times_2048, + params.liquidity_penalty_multiplier_msat, + params.liquidity_penalty_amount_multiplier_msat) + } + }; + + if params.historical_liquidity_penalty_multiplier_msat != 0 || + params.historical_liquidity_penalty_amount_multiplier_msat != 0 { + // If historical penalties are enabled, calculate the penalty by walking the set of + // historical liquidity bucket (min, max) combinations (where min_idx < max_idx) + // and, for each, calculate the probability of success given our payment amount, then + // total the weighted average probability of success. + // + // We use a sliding scale to decide which point within a given bucket will be compared + // to the amount being sent - for lower-bounds, the amount being sent is compared to + // the lower edge of the first bucket (i.e. zero), but compared to the upper 7/8ths of + // the last bucket (i.e. 9 times the index, or 63), with each bucket in between + // increasing the comparison point by 1/64th. For upper-bounds, the same applies, + // however with an offset of 1/64th (i.e. starting at one and ending at 64). This + // avoids failing to assign penalties to channels at the edges. + // + // If we used the bottom edge of buckets, we'd end up never assigning any penalty at + // all to such a channel when sending less than ~0.19% of the channel's capacity (e.g. + // ~200k sats for a 1 BTC channel!). + // + // If we used the middle of each bucket we'd never assign any penalty at all when + // sending less than 1/16th of a channel's capacity, or 1/8th if we used the top of the + // bucket. + let mut total_valid_points_tracked = 0; + for (min_idx, min_bucket) in self.min_liquidity_offset_history.buckets.iter().enumerate() { + for max_bucket in self.max_liquidity_offset_history.buckets.iter().take(8 - min_idx) { + total_valid_points_tracked += (*min_bucket as u64) * (*max_bucket as u64); + } + } + if total_valid_points_tracked == 0 { + // If we don't have any valid points, redo the non-historical calculation with no + // liquidity bounds tracked and the historical penalty multipliers. 
+ let max_capacity = self.capacity_msat.saturating_sub(amount_msat).saturating_add(1); + let negative_log10_times_2048 = + approx::negative_log10_times_2048(max_capacity, self.capacity_msat.saturating_add(1)); + res = res.saturating_add(Self::combined_penalty_msat(amount_msat, negative_log10_times_2048, + params.historical_liquidity_penalty_multiplier_msat, + params.historical_liquidity_penalty_amount_multiplier_msat)); + return res; } + + let payment_amt_64th_bucket = amount_msat * 64 / self.capacity_msat; + debug_assert!(payment_amt_64th_bucket <= 64); + if payment_amt_64th_bucket > 64 { return res; } + + let mut cumulative_success_prob_times_billion = 0; + for (min_idx, min_bucket) in self.min_liquidity_offset_history.buckets.iter().enumerate() { + for (max_idx, max_bucket) in self.max_liquidity_offset_history.buckets.iter().enumerate().take(8 - min_idx) { + let bucket_prob_times_million = (*min_bucket as u64) * (*max_bucket as u64) + * 1024 * 1024 / total_valid_points_tracked; + let min_64th_bucket = min_idx as u64 * 9; + let max_64th_bucket = (7 - max_idx as u64) * 9 + 1; + if payment_amt_64th_bucket > max_64th_bucket { + // Success probability 0, the payment amount is above the max liquidity + } else if payment_amt_64th_bucket <= min_64th_bucket { + cumulative_success_prob_times_billion += bucket_prob_times_million * 1024; + } else { + cumulative_success_prob_times_billion += bucket_prob_times_million * + (max_64th_bucket - payment_amt_64th_bucket) * 1024 / + (max_64th_bucket - min_64th_bucket); + } + } + } + let historical_negative_log10_times_2048 = approx::negative_log10_times_2048(cumulative_success_prob_times_billion + 1, 1024 * 1024 * 1024); + res = res.saturating_add(Self::combined_penalty_msat(amount_msat, + historical_negative_log10_times_2048, params.historical_liquidity_penalty_multiplier_msat, + params.historical_liquidity_penalty_amount_multiplier_msat)); } + + res } - /// Computes the liquidity and amount penalties and adds them to the base penalty. + /// Computes the liquidity penalty from the penalty multipliers. #[inline(always)] - fn combined_penalty_msat( - &self, amount_msat: u64, negative_log10_times_2048: u64, - params: &ProbabilisticScoringParameters + fn combined_penalty_msat(amount_msat: u64, negative_log10_times_2048: u64, + liquidity_penalty_multiplier_msat: u64, liquidity_penalty_amount_multiplier_msat: u64, ) -> u64 { let liquidity_penalty_msat = { // Upper bound the liquidity penalty to ensure some channel is selected. - let multiplier_msat = params.liquidity_penalty_multiplier_msat; + let multiplier_msat = liquidity_penalty_multiplier_msat; let max_penalty_msat = multiplier_msat.saturating_mul(NEGATIVE_LOG10_UPPER_BOUND); (negative_log10_times_2048.saturating_mul(multiplier_msat) / 2048).min(max_penalty_msat) }; let amount_penalty_msat = negative_log10_times_2048 - .saturating_mul(params.amount_penalty_multiplier_msat) + .saturating_mul(liquidity_penalty_amount_multiplier_msat) .saturating_mul(amount_msat) / 2048 / AMOUNT_PENALTY_DIVISOR; - params.base_penalty_msat - .saturating_add(liquidity_penalty_msat) - .saturating_add(amount_penalty_msat) + liquidity_penalty_msat.saturating_add(amount_penalty_msat) } /// Returns the lower bound of the channel liquidity balance in this direction. 
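
To make the historical bucket walk in the hunk above easier to follow, here is a minimal standalone sketch of the same weighted success-probability estimate. It is illustrative only: the function name, its signature, and the example history are invented for this sketch rather than taken from the scorer's API, but the arithmetic (skipping crossing bucket pairs, the 2^20 weighting, and the 1/64th sliding-scale comparison points) mirrors the code above.

/// Standalone sketch (hypothetical helper, not part of LDK): estimate the success probability,
/// in parts-per-billion, of sending an amount occupying `amount_64ths` (0..=64) of a channel's
/// capacity, given the two 8-bucket min/max liquidity-offset histories described above.
fn historical_success_prob_ppb(min_buckets: [u16; 8], max_buckets: [u16; 8], amount_64ths: u64) -> Option<u64> {
    // Total weight across all valid (min, max) bucket pairs; pairs whose bounds would cross
    // are skipped, mirroring the `take(8 - min_idx)` in the scorer.
    let mut total_points: u64 = 0;
    for (min_idx, min_bucket) in min_buckets.iter().enumerate() {
        for max_bucket in max_buckets.iter().take(8 - min_idx) {
            total_points += (*min_bucket as u64) * (*max_bucket as u64);
        }
    }
    if total_points == 0 { return None; } // no usable history yet

    let mut success_ppb: u64 = 0;
    for (min_idx, min_bucket) in min_buckets.iter().enumerate() {
        for (max_idx, max_bucket) in max_buckets.iter().enumerate().take(8 - min_idx) {
            // Probability mass of this (min, max) combination, scaled by 2^20.
            let pair_weight = (*min_bucket as u64) * (*max_bucket as u64) * 1024 * 1024 / total_points;
            // Sliding-scale comparison points, in 64ths of the capacity (max buckets count
            // down from the top of the channel, hence the `7 - max_idx`).
            let min_64th = min_idx as u64 * 9;
            let max_64th = (7 - max_idx as u64) * 9 + 1;
            if amount_64ths > max_64th {
                // Amount is above this pair's upper bound: contributes zero success probability.
            } else if amount_64ths <= min_64th {
                success_ppb += pair_weight * 1024; // certain success for this pair
            } else {
                // Linearly interpolate between the two bounds.
                success_ppb += pair_weight * (max_64th - amount_64ths) * 1024 / (max_64th - min_64th);
            }
        }
    }
    Some(success_ppb)
}

fn main() {
    // Hypothetical history: both bounds have mostly been observed in their bottom bucket,
    // i.e. the lower bound near zero and the upper bound near the full capacity.
    let min_buckets = [32u16, 0, 0, 0, 0, 0, 0, 0];
    let max_buckets = [32u16, 0, 0, 0, 0, 0, 0, 0];
    // Sending half the capacity yields roughly a 54% estimated success probability.
    println!("{:?}", historical_success_prob_ppb(min_buckets, max_buckets, 32));
}

Feeding the resulting probability through negative_log10_times_2048 and combined_penalty_msat, as the scorer does above, then turns this estimate into the historical liquidity penalty.
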
@@ -688,7 +898,7 @@ impl, T: Time, U: Deref> DirectedChannelLiqui } } -impl, T: Time, U: DerefMut> DirectedChannelLiquidity { +impl, BRT: DerefMut, T: Time, U: DerefMut> DirectedChannelLiquidity { /// Adjusts the channel liquidity balance bounds when failing to route `amount_msat`. fn failed_at_channel(&mut self, amount_msat: u64, chan_descr: fmt::Arguments, logger: &Log) where Log::Target: Logger { if amount_msat < self.max_liquidity_msat() { @@ -716,6 +926,21 @@ impl, T: Time, U: DerefMut> DirectedChanne self.set_max_liquidity_msat(max_liquidity_msat); } + fn update_history_buckets(&mut self) { + debug_assert!(*self.min_liquidity_offset_msat <= self.capacity_msat); + self.min_liquidity_offset_history.track_datapoint( + // Ensure the bucket index we pass is in the range [0, 7], even if the liquidity offset + // is zero or the channel's capacity, though the second should generally never happen. + (self.min_liquidity_offset_msat.saturating_sub(1) * 8 / self.capacity_msat) + .try_into().unwrap_or(32)); // 32 is bogus for 8 buckets, and will be ignored + debug_assert!(*self.max_liquidity_offset_msat <= self.capacity_msat); + self.max_liquidity_offset_history.track_datapoint( + // Ensure the bucket index we pass is in the range [0, 7], even if the liquidity offset + // is zero or the channel's capacity, though the second should generally never happen. + (self.max_liquidity_offset_msat.saturating_sub(1) * 8 / self.capacity_msat) + .try_into().unwrap_or(32)); // 32 is bogus for 8 buckets, and will be ignored + } + /// Adjusts the lower bound of the channel liquidity balance in this direction. fn set_min_liquidity_msat(&mut self, amount_msat: u64) { *self.min_liquidity_offset_msat = amount_msat; @@ -725,6 +950,7 @@ impl, T: Time, U: DerefMut> DirectedChanne self.decayed_offset_msat(*self.max_liquidity_offset_msat) }; *self.last_updated = self.now; + self.update_history_buckets(); } /// Adjusts the upper bound of the channel liquidity balance in this direction. 
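
The hunk above derives the history-bucket index from the current liquidity offset and the channel capacity; combined with the decaying counters introduced earlier, that is all the per-direction history the scorer keeps. The following standalone sketch (hypothetical names, not the library's API) shows the same index arithmetic and the 2047/2048 leaky decay in isolation.

/// Sketch (hypothetical, standalone): map a liquidity offset in msats to one of 8 equal-width
/// buckets, mirroring the arithmetic in update_history_buckets() above.
fn bucket_for_offset(offset_msat: u64, capacity_msat: u64) -> usize {
    // saturating_sub(1) keeps an offset exactly equal to the capacity from landing in a
    // non-existent ninth bucket (and leaves an offset of zero in bucket 0).
    (offset_msat.saturating_sub(1) * 8 / capacity_msat) as usize
}

/// Sketch of the leaky-bucket tracker: 16-bit fixed point with a 5-bit fractional part.
struct Tracker { buckets: [u16; 8] }

impl Tracker {
    fn new() -> Self { Tracker { buckets: [0; 8] } }
    fn track(&mut self, idx: usize) {
        // Decay every bucket by 2047/2048, then add 1.0 (i.e. 32 in fixed point) to the
        // observed bucket, as HistoricalBucketRangeTracker::track_datapoint does above.
        for e in self.buckets.iter_mut() {
            *e = ((*e as u32) * 2047 / 2048) as u16;
        }
        self.buckets[idx] = self.buckets[idx].saturating_add(32);
    }
}

fn main() {
    let capacity_msat = 1_000_000u64;
    let mut tracker = Tracker::new();
    // 100 observations with the offset in the bottom eighth of the channel...
    for _ in 0..100 { tracker.track(bucket_for_offset(100_000, capacity_msat)); }
    // ...followed by 10 near the top: the weight in bucket 0 has already started to leak away.
    for _ in 0..10 { tracker.track(bucket_for_offset(950_000, capacity_msat)); }
    println!("{:?}", tracker.buckets);
}

Because every update decays all buckets slightly, old observations fade out gradually over roughly the last eight thousand datapoints rather than being dropped at a hard cutoff.
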
@@ -736,6 +962,7 @@ impl, T: Time, U: DerefMut> DirectedChanne self.decayed_offset_msat(*self.min_liquidity_offset_msat) }; *self.last_updated = self.now; + self.update_history_buckets(); } } @@ -747,13 +974,17 @@ impl>, L: Deref, T: Time> Score for Probabilis return *penalty; } + let base_penalty_msat = self.params.base_penalty_msat.saturating_add( + self.params.base_penalty_amount_multiplier_msat + .saturating_mul(usage.amount_msat) / BASE_AMOUNT_PENALTY_DIVISOR); + let mut anti_probing_penalty_msat = 0; match usage.effective_capacity { EffectiveCapacity::ExactLiquidity { liquidity_msat } => { if usage.amount_msat > liquidity_msat { return u64::max_value(); } else { - return self.params.base_penalty_msat; + return base_penalty_msat; } }, EffectiveCapacity::Total { capacity_msat, htlc_maximum_msat: Some(htlc_maximum_msat) } => { @@ -774,6 +1005,7 @@ impl>, L: Deref, T: Time> Score for Probabilis .as_directed(source, target, capacity_msat, liquidity_offset_half_life) .penalty_msat(amount_msat, &self.params) .saturating_add(anti_probing_penalty_msat) + .saturating_add(base_penalty_msat) } fn payment_path_failed(&mut self, path: &[&RouteHop], short_channel_id: u64) { @@ -1197,7 +1429,9 @@ impl Writeable for ChannelLiquidity { let duration_since_epoch = T::duration_since_epoch() - self.last_updated.elapsed(); write_tlv_fields!(w, { (0, self.min_liquidity_offset_msat, required), + (1, Some(self.min_liquidity_offset_history), option), (2, self.max_liquidity_offset_msat, required), + (3, Some(self.max_liquidity_offset_history), option), (4, duration_since_epoch, required), }); Ok(()) @@ -1209,28 +1443,46 @@ impl Readable for ChannelLiquidity { fn read(r: &mut R) -> Result { let mut min_liquidity_offset_msat = 0; let mut max_liquidity_offset_msat = 0; + let mut min_liquidity_offset_history = Some(HistoricalBucketRangeTracker::new()); + let mut max_liquidity_offset_history = Some(HistoricalBucketRangeTracker::new()); let mut duration_since_epoch = Duration::from_secs(0); read_tlv_fields!(r, { (0, min_liquidity_offset_msat, required), + (1, min_liquidity_offset_history, option), (2, max_liquidity_offset_msat, required), + (3, max_liquidity_offset_history, option), (4, duration_since_epoch, required), }); + // On rust prior to 1.60 `Instant::duration_since` will panic if time goes backwards. + // We write `last_updated` as wallclock time even though its ultimately an `Instant` (which + // is a time from a monotonic clock usually represented as an offset against boot time). + // Thus, we have to construct an `Instant` by subtracting the difference in wallclock time + // from the one that was written. However, because `Instant` can panic if we construct one + // in the future, we must handle wallclock time jumping backwards, which we do by simply + // using `Instant::now()` in that case. 
+ let wall_clock_now = T::duration_since_epoch(); + let now = T::now(); + let last_updated = if wall_clock_now > duration_since_epoch { + now - (wall_clock_now - duration_since_epoch) + } else { now }; Ok(Self { min_liquidity_offset_msat, max_liquidity_offset_msat, - last_updated: T::now() - (T::duration_since_epoch() - duration_since_epoch), + min_liquidity_offset_history: min_liquidity_offset_history.unwrap(), + max_liquidity_offset_history: max_liquidity_offset_history.unwrap(), + last_updated, }) } } #[cfg(test)] mod tests { - use super::{ChannelLiquidity, ProbabilisticScoringParameters, ProbabilisticScorerUsingTime}; + use super::{ChannelLiquidity, HistoricalBucketRangeTracker, ProbabilisticScoringParameters, ProbabilisticScorerUsingTime}; use util::time::Time; use util::time::tests::SinceEpoch; use ln::features::{ChannelFeatures, NodeFeatures}; - use ln::msgs::{ChannelAnnouncement, ChannelUpdate, OptionalField, UnsignedChannelAnnouncement, UnsignedChannelUpdate}; + use ln::msgs::{ChannelAnnouncement, ChannelUpdate, UnsignedChannelAnnouncement, UnsignedChannelUpdate}; use routing::gossip::{EffectiveCapacity, NetworkGraph, NodeId}; use routing::router::RouteHop; use routing::scoring::{ChannelUsage, Score}; @@ -1357,7 +1609,7 @@ mod tests { flags, cltv_expiry_delta: 18, htlc_minimum_msat: 0, - htlc_maximum_msat: OptionalField::Present(1_000), + htlc_maximum_msat: 1_000, fee_base_msat: 1, fee_proportional_millionths: 0, excess_data: Vec::new(), @@ -1408,11 +1660,15 @@ mod tests { let mut scorer = ProbabilisticScorer::new(params, &network_graph, &logger) .with_channel(42, ChannelLiquidity { - min_liquidity_offset_msat: 700, max_liquidity_offset_msat: 100, last_updated + min_liquidity_offset_msat: 700, max_liquidity_offset_msat: 100, last_updated, + min_liquidity_offset_history: HistoricalBucketRangeTracker::new(), + max_liquidity_offset_history: HistoricalBucketRangeTracker::new(), }) .with_channel(43, ChannelLiquidity { - min_liquidity_offset_msat: 700, max_liquidity_offset_msat: 100, last_updated + min_liquidity_offset_msat: 700, max_liquidity_offset_msat: 100, last_updated, + min_liquidity_offset_history: HistoricalBucketRangeTracker::new(), + max_liquidity_offset_history: HistoricalBucketRangeTracker::new(), }); let source = source_node_id(); let target = target_node_id(); @@ -1483,7 +1739,9 @@ mod tests { let mut scorer = ProbabilisticScorer::new(params, &network_graph, &logger) .with_channel(42, ChannelLiquidity { - min_liquidity_offset_msat: 200, max_liquidity_offset_msat: 400, last_updated + min_liquidity_offset_msat: 200, max_liquidity_offset_msat: 400, last_updated, + min_liquidity_offset_history: HistoricalBucketRangeTracker::new(), + max_liquidity_offset_history: HistoricalBucketRangeTracker::new(), }); let source = source_node_id(); let target = target_node_id(); @@ -1541,7 +1799,9 @@ mod tests { let mut scorer = ProbabilisticScorer::new(params, &network_graph, &logger) .with_channel(42, ChannelLiquidity { - min_liquidity_offset_msat: 200, max_liquidity_offset_msat: 400, last_updated + min_liquidity_offset_msat: 200, max_liquidity_offset_msat: 400, last_updated, + min_liquidity_offset_history: HistoricalBucketRangeTracker::new(), + max_liquidity_offset_history: HistoricalBucketRangeTracker::new(), }); let source = source_node_id(); let target = target_node_id(); @@ -1612,7 +1872,7 @@ mod tests { assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 0); let usage = ChannelUsage { amount_msat: 102_400, ..usage }; assert_eq!(scorer.channel_penalty_msat(42, 
&source, &target, usage), 47); - let usage = ChannelUsage { amount_msat: 1_024_000, ..usage }; + let usage = ChannelUsage { amount_msat: 1_023_999, ..usage }; assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 2_000); let usage = ChannelUsage { @@ -1642,12 +1902,15 @@ mod tests { let network_graph = network_graph(&logger); let params = ProbabilisticScoringParameters { liquidity_penalty_multiplier_msat: 1_000, + considered_impossible_penalty_msat: u64::max_value(), ..ProbabilisticScoringParameters::zero_penalty() }; let scorer = ProbabilisticScorer::new(params, &network_graph, &logger) .with_channel(42, ChannelLiquidity { - min_liquidity_offset_msat: 40, max_liquidity_offset_msat: 40, last_updated + min_liquidity_offset_msat: 40, max_liquidity_offset_msat: 40, last_updated, + min_liquidity_offset_history: HistoricalBucketRangeTracker::new(), + max_liquidity_offset_history: HistoricalBucketRangeTracker::new(), }); let source = source_node_id(); let target = target_node_id(); @@ -1733,6 +1996,7 @@ mod tests { let network_graph = network_graph(&logger); let params = ProbabilisticScoringParameters { liquidity_penalty_multiplier_msat: 1_000, + considered_impossible_penalty_msat: u64::max_value(), ..ProbabilisticScoringParameters::zero_penalty() }; let mut scorer = ProbabilisticScorer::new(params, &network_graph, &logger); @@ -1799,6 +2063,7 @@ mod tests { let params = ProbabilisticScoringParameters { liquidity_penalty_multiplier_msat: 1_000, liquidity_offset_half_life: Duration::from_secs(10), + considered_impossible_penalty_msat: u64::max_value(), ..ProbabilisticScoringParameters::zero_penalty() }; let mut scorer = ProbabilisticScorer::new(params, &network_graph, &logger); @@ -1808,10 +2073,10 @@ mod tests { let usage = ChannelUsage { amount_msat: 0, inflight_htlc_msat: 0, - effective_capacity: EffectiveCapacity::Total { capacity_msat: 1_024, htlc_maximum_msat: Some(1_000) }, + effective_capacity: EffectiveCapacity::Total { capacity_msat: 1_024, htlc_maximum_msat: Some(1_024) }, }; assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 0); - let usage = ChannelUsage { amount_msat: 1_024, ..usage }; + let usage = ChannelUsage { amount_msat: 1_023, ..usage }; assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 2_000); scorer.payment_path_failed(&payment_path_for_amount(768).iter().collect::>(), 42); @@ -1855,20 +2120,20 @@ mod tests { let usage = ChannelUsage { amount_msat: 1_023, ..usage }; assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 2_000); let usage = ChannelUsage { amount_msat: 1_024, ..usage }; - assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 2_000); + assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), u64::max_value()); // Fully decay liquidity upper bound. 
SinceEpoch::advance(Duration::from_secs(10)); let usage = ChannelUsage { amount_msat: 0, ..usage }; assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 0); let usage = ChannelUsage { amount_msat: 1_024, ..usage }; - assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 2_000); + assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), u64::max_value()); SinceEpoch::advance(Duration::from_secs(10)); let usage = ChannelUsage { amount_msat: 0, ..usage }; assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 0); let usage = ChannelUsage { amount_msat: 1_024, ..usage }; - assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 2_000); + assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), u64::max_value()); } #[test] @@ -1953,6 +2218,7 @@ mod tests { let params = ProbabilisticScoringParameters { liquidity_penalty_multiplier_msat: 1_000, liquidity_offset_half_life: Duration::from_secs(10), + considered_impossible_penalty_msat: u64::max_value(), ..ProbabilisticScoringParameters::zero_penalty() }; let mut scorer = ProbabilisticScorer::new(params.clone(), &network_graph, &logger); @@ -1989,6 +2255,7 @@ mod tests { let params = ProbabilisticScoringParameters { liquidity_penalty_multiplier_msat: 1_000, liquidity_offset_half_life: Duration::from_secs(10), + considered_impossible_penalty_msat: u64::max_value(), ..ProbabilisticScoringParameters::zero_penalty() }; let mut scorer = ProbabilisticScorer::new(params.clone(), &network_graph, &logger); @@ -2036,47 +2303,47 @@ mod tests { inflight_htlc_msat: 0, effective_capacity: EffectiveCapacity::Total { capacity_msat: 950_000_000, htlc_maximum_msat: Some(1_000) }, }; - assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 3613); + assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 4375); let usage = ChannelUsage { effective_capacity: EffectiveCapacity::Total { capacity_msat: 1_950_000_000, htlc_maximum_msat: Some(1_000) }, ..usage }; - assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 1977); + assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 2739); let usage = ChannelUsage { effective_capacity: EffectiveCapacity::Total { capacity_msat: 2_950_000_000, htlc_maximum_msat: Some(1_000) }, ..usage }; - assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 1474); + assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 2236); let usage = ChannelUsage { effective_capacity: EffectiveCapacity::Total { capacity_msat: 3_950_000_000, htlc_maximum_msat: Some(1_000) }, ..usage }; - assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 1223); + assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 1983); let usage = ChannelUsage { effective_capacity: EffectiveCapacity::Total { capacity_msat: 4_950_000_000, htlc_maximum_msat: Some(1_000) }, ..usage }; - assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 877); + assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 1637); let usage = ChannelUsage { effective_capacity: EffectiveCapacity::Total { capacity_msat: 5_950_000_000, htlc_maximum_msat: Some(1_000) }, ..usage }; - assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 845); + assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 1606); let usage = ChannelUsage { effective_capacity: EffectiveCapacity::Total { capacity_msat: 6_950_000_000, htlc_maximum_msat: Some(1_000) }, ..usage }; - 
assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 500); + assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 1331); let usage = ChannelUsage { effective_capacity: EffectiveCapacity::Total { capacity_msat: 7_450_000_000, htlc_maximum_msat: Some(1_000) }, ..usage }; - assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 500); + assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 1387); let usage = ChannelUsage { effective_capacity: EffectiveCapacity::Total { capacity_msat: 7_950_000_000, htlc_maximum_msat: Some(1_000) }, ..usage }; - assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 500); + assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 1379); let usage = ChannelUsage { effective_capacity: EffectiveCapacity::Total { capacity_msat: 8_950_000_000, htlc_maximum_msat: Some(1_000) }, ..usage }; - assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 500); + assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 1363); let usage = ChannelUsage { effective_capacity: EffectiveCapacity::Total { capacity_msat: 9_950_000_000, htlc_maximum_msat: Some(1_000) }, ..usage }; - assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 500); + assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 1355); } #[test] @@ -2100,10 +2367,19 @@ mod tests { let params = ProbabilisticScoringParameters { base_penalty_msat: 500, liquidity_penalty_multiplier_msat: 1_000, - anti_probing_penalty_msat: 0, ..Default::default() + anti_probing_penalty_msat: 0, ..ProbabilisticScoringParameters::zero_penalty() }; let scorer = ProbabilisticScorer::new(params, &network_graph, &logger); assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 558); + + let params = ProbabilisticScoringParameters { + base_penalty_msat: 500, liquidity_penalty_multiplier_msat: 1_000, + base_penalty_amount_multiplier_msat: (1 << 30), + anti_probing_penalty_msat: 0, ..ProbabilisticScoringParameters::zero_penalty() + }; + + let scorer = ProbabilisticScorer::new(params, &network_graph, &logger); + assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 558 + 128); } #[test] @@ -2120,7 +2396,7 @@ mod tests { let params = ProbabilisticScoringParameters { liquidity_penalty_multiplier_msat: 1_000, - amount_penalty_multiplier_msat: 0, + liquidity_penalty_amount_multiplier_msat: 0, ..ProbabilisticScoringParameters::zero_penalty() }; let scorer = ProbabilisticScorer::new(params, &network_graph, &logger); @@ -2128,7 +2404,7 @@ mod tests { let params = ProbabilisticScoringParameters { liquidity_penalty_multiplier_msat: 1_000, - amount_penalty_multiplier_msat: 256, + liquidity_penalty_amount_multiplier_msat: 256, ..ProbabilisticScoringParameters::zero_penalty() }; let scorer = ProbabilisticScorer::new(params, &network_graph, &logger); @@ -2159,7 +2435,10 @@ mod tests { fn accounts_for_inflight_htlc_usage() { let logger = TestLogger::new(); let network_graph = network_graph(&logger); - let params = ProbabilisticScoringParameters::default(); + let params = ProbabilisticScoringParameters { + considered_impossible_penalty_msat: u64::max_value(), + ..ProbabilisticScoringParameters::zero_penalty() + }; let scorer = ProbabilisticScorer::new(params, &network_graph, &logger); let source = source_node_id(); let target = target_node_id(); @@ -2199,6 +2478,36 @@ mod tests { assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), u64::max_value()); } + #[test] + fn 
remembers_historical_failures() { + let logger = TestLogger::new(); + let network_graph = network_graph(&logger); + let params = ProbabilisticScoringParameters { + historical_liquidity_penalty_multiplier_msat: 1024, + historical_liquidity_penalty_amount_multiplier_msat: 1024, + ..ProbabilisticScoringParameters::zero_penalty() + }; + let mut scorer = ProbabilisticScorer::new(params, &network_graph, &logger); + let source = source_node_id(); + let target = target_node_id(); + + let usage = ChannelUsage { + amount_msat: 100, + inflight_htlc_msat: 0, + effective_capacity: EffectiveCapacity::Total { capacity_msat: 1_024, htlc_maximum_msat: Some(1_024) }, + }; + // With no historical data the normal liquidity penalty calculation is used. + assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 47); + + scorer.payment_path_failed(&payment_path_for_amount(1).iter().collect::>(), 42); + assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 2048); + + // Even after we tell the scorer we definitely have enough available liquidity, it will + // still remember that there was some failure in the past, and assign a non-0 penalty. + scorer.payment_path_failed(&payment_path_for_amount(1000).iter().collect::>(), 43); + assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 198); + } + #[test] fn adds_anti_probing_penalty() { let logger = TestLogger::new();