X-Git-Url: http://git.bitcoin.ninja/index.cgi?a=blobdiff_plain;f=lightning%2Fsrc%2Frouting%2Fscoring.rs;h=fd88f0b469d6b6d31dbc6efb800c032c5d281042;hb=b0278e279386c2adb4afe5fb1fe6f3135febcb31;hp=d1ed0f7cb6d55b5206ce791ebc6ec1d10ff1aebc;hpb=10cfe5c973e61251f6bf2180d1fc8d57d5e56ca5;p=rust-lightning

diff --git a/lightning/src/routing/scoring.rs b/lightning/src/routing/scoring.rs
index d1ed0f7c..fd88f0b4 100644
--- a/lightning/src/routing/scoring.rs
+++ b/lightning/src/routing/scoring.rs
@@ -317,12 +317,44 @@ impl ReadableArgs<u64> for FixedPenaltyScorer {
 }
 
 #[cfg(not(feature = "no-std"))]
-type ConfiguredTime = std::time::Instant;
-#[cfg(feature = "no-std")]
-use crate::util::time::Eternity;
+/// [`Score`] implementation using channel success probability distributions.
+///
+/// Channels are tracked with upper and lower liquidity bounds - when an HTLC fails at a channel,
+/// we learn that the upper-bound on the available liquidity is lower than the amount of the HTLC.
+/// When a payment is forwarded through a channel (but fails later in the route), we learn the
+/// lower-bound on the channel's available liquidity must be at least the value of the HTLC.
+///
+/// These bounds are then used to determine a success probability using the formula from
+/// *Optimally Reliable & Cheap Payment Flows on the Lightning Network* by Rene Pickhardt
+/// and Stefan Richter [[1]] (i.e. `(upper_bound - payment_amount) / (upper_bound - lower_bound)`).
+///
+/// This probability is combined with the [`liquidity_penalty_multiplier_msat`] and
+/// [`liquidity_penalty_amount_multiplier_msat`] parameters to calculate a concrete penalty in
+/// milli-satoshis. The penalties, when added across all hops, have the property of being linear in
+/// terms of the entire path's success probability. This allows the router to directly compare
+/// penalties for different paths. See the documentation of those parameters for the exact formulas.
+///
+/// The liquidity bounds are decayed by halving them every [`liquidity_offset_half_life`].
+///
+/// Further, we track the history of our upper and lower liquidity bounds for each channel,
+/// allowing us to assign a second penalty (using [`historical_liquidity_penalty_multiplier_msat`]
+/// and [`historical_liquidity_penalty_amount_multiplier_msat`]) based on the same probability
+/// formula, but using the history of a channel rather than our latest estimates for the liquidity
+/// bounds.
+///
+/// # Note
+///
+/// Mixing the `no-std` feature between serialization and deserialization results in undefined
+/// behavior.
+///
+/// [1]: https://arxiv.org/abs/2107.05322
+/// [`liquidity_penalty_multiplier_msat`]: ProbabilisticScoringParameters::liquidity_penalty_multiplier_msat
+/// [`liquidity_penalty_amount_multiplier_msat`]: ProbabilisticScoringParameters::liquidity_penalty_amount_multiplier_msat
+/// [`liquidity_offset_half_life`]: ProbabilisticScoringParameters::liquidity_offset_half_life
+/// [`historical_liquidity_penalty_multiplier_msat`]: ProbabilisticScoringParameters::historical_liquidity_penalty_multiplier_msat
+/// [`historical_liquidity_penalty_amount_multiplier_msat`]: ProbabilisticScoringParameters::historical_liquidity_penalty_amount_multiplier_msat
+pub type ProbabilisticScorer<G, L> = ProbabilisticScorerUsingTime::<G, L, std::time::Instant>;
 #[cfg(feature = "no-std")]
-type ConfiguredTime = Eternity;
-
 /// [`Score`] implementation using channel success probability distributions.
 ///
 /// Channels are tracked with upper and lower liquidity bounds - when an HTLC fails at a channel,
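The success-probability formula in the doc comment above maps directly to additive penalties. As a rough, floating-point sketch of that mapping (the helper names and the assumed 30_000 msat multiplier are illustrative only; the scorer itself works in integer milli-satoshi arithmetic with a fixed-point log10):

fn success_probability(lower_bound_msat: u64, upper_bound_msat: u64, amount_msat: u64) -> f64 {
	// With the channel's liquidity known to lie in [lower_bound, upper_bound), a
	// payment of `amount` succeeds with probability
	// (upper_bound - amount) / (upper_bound - lower_bound), clamped to [0, 1].
	if amount_msat >= upper_bound_msat { return 0.0; }
	if amount_msat <= lower_bound_msat { return 1.0; }
	(upper_bound_msat - amount_msat) as f64 / (upper_bound_msat - lower_bound_msat) as f64
}

fn liquidity_penalty_msat(success_probability: f64, multiplier_msat: u64) -> f64 {
	// Taking -log10 turns per-hop probabilities into per-hop penalties that add up.
	-success_probability.log10() * multiplier_msat as f64
}

fn main() {
	// With nothing known about a 1_000_000 msat channel, the bounds are [0, capacity],
	// so a 500_000 msat payment succeeds with probability 0.5 ...
	let p = success_probability(0, 1_000_000, 500_000);
	// ... and a 30_000 msat multiplier yields -log10(0.5) * 30_000, roughly 9_031 msat.
	println!("p = {}, penalty = {:.0} msat", p, liquidity_penalty_msat(p, 30_000));
}

Because -log10 of a product is the sum of the individual -log10 terms, summing these penalties across hops ranks paths by their end-to-end success probability, which is the linearity property the doc comment describes.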
@@ -359,7 +391,7 @@ type ConfiguredTime = Eternity;
 /// [`liquidity_offset_half_life`]: ProbabilisticScoringParameters::liquidity_offset_half_life
 /// [`historical_liquidity_penalty_multiplier_msat`]: ProbabilisticScoringParameters::historical_liquidity_penalty_multiplier_msat
 /// [`historical_liquidity_penalty_amount_multiplier_msat`]: ProbabilisticScoringParameters::historical_liquidity_penalty_amount_multiplier_msat
-pub type ProbabilisticScorer<G, L> = ProbabilisticScorerUsingTime::<G, L, ConfiguredTime>;
+pub type ProbabilisticScorer<G, L> = ProbabilisticScorerUsingTime::<G, L, crate::util::time::Eternity>;
 
 /// Probabilistic [`Score`] implementation.
 ///
@@ -550,7 +582,7 @@ struct HistoricalBucketRangeTracker {
 
 impl HistoricalBucketRangeTracker {
	fn new() -> Self { Self { buckets: [0; 8] } }
-	fn track_datapoint(&mut self, bucket_idx: u8) {
+	fn track_datapoint(&mut self, liquidity_offset_msat: u64, capacity_msat: u64) {
 		// We have 8 leaky buckets for min and max liquidity. Each bucket tracks the amount of time
 		// we spend in each bucket as a 16-bit fixed-point number with a 5 bit fractional part.
 		//
@@ -571,6 +603,12 @@ impl HistoricalBucketRangeTracker {
 		//
 		// The constants were picked experimentally, selecting a decay amount that restricts us
 		// from overflowing buckets without having to cap them manually.
+
+		// Ensure the bucket index is in the range [0, 7], even if the liquidity offset is zero or
+		// the channel's capacity, though the second should generally never happen.
+		debug_assert!(liquidity_offset_msat <= capacity_msat);
+		let bucket_idx: u8 = (liquidity_offset_msat * 8 / capacity_msat.saturating_add(1))
+			.try_into().unwrap_or(32); // 32 is bogus for 8 buckets, and will be ignored
 		debug_assert!(bucket_idx < 8);
 		if bucket_idx < 8 {
 			for e in self.buckets.iter_mut() {
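To make the leaky-bucket scheme in the two hunks above concrete, here is a self-contained sketch of the tracker. Only the bucket-index computation is taken verbatim from the diff; the 2047/2048 decay factor and the per-datapoint credit of 32 (1.0 in the 5-bit-fraction fixed point) are assumed stand-ins for the "picked experimentally" constants in the elided function body:

struct HistoricalBucketRangeTracker {
	buckets: [u16; 8],
}

impl HistoricalBucketRangeTracker {
	fn new() -> Self { Self { buckets: [0; 8] } }

	fn track_datapoint(&mut self, liquidity_offset_msat: u64, capacity_msat: u64) {
		// Bucket-index computation from the hunk above: saturating_add(1) keeps the
		// divisor nonzero for zero-capacity channels and keeps the index at 7 (not 8)
		// when liquidity_offset_msat == capacity_msat.
		let bucket_idx: u8 = (liquidity_offset_msat * 8 / capacity_msat.saturating_add(1))
			.try_into().unwrap_or(32); // 32 is bogus for 8 buckets, and will be ignored
		debug_assert!(bucket_idx < 8);
		if bucket_idx < 8 {
			// Leak a little out of every bucket, then credit the bucket we are now in
			// with one unit (32 == 1.0 given the 5-bit fractional part).
			for e in self.buckets.iter_mut() {
				*e = ((*e as u32) * 2047 / 2048) as u16;
			}
			self.buckets[bucket_idx as usize] =
				self.buckets[bucket_idx as usize].saturating_add(32);
		}
	}
}

fn main() {
	let mut tracker = HistoricalBucketRangeTracker::new();
	// A 1_000 msat channel whose liquidity offset is 600 msat lands in bucket
	// 600 * 8 / 1_001 = 4; the first datapoint credits it with exactly 1.0 (32).
	tracker.track_datapoint(600, 1_000);
	assert_eq!(tracker.buckets[4], 32);
	// Under repeated datapoints the bucket tends toward 32 / (1/2048) = 65_536;
	// integer truncation in the decay pins it just inside u16 range, with no
	// manual cap needed, matching the comment in the diff.
	for _ in 0..100_000 { tracker.track_datapoint(600, 1_000); }
	assert!(tracker.buckets[4] >= 65_000);
}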
@@ -1028,12 +1066,12 @@ impl<L: Deref<Target = u64>, BRT: Deref<Target = HistoricalBucketRangeTracker>, T: Time, U: Deref<Target = T>> DirectedChannelLiquidity<L, BRT, T, U> {
 		if params.historical_liquidity_penalty_multiplier_msat != 0 ||
 		   params.historical_liquidity_penalty_amount_multiplier_msat != 0 {
 			let payment_amt_64th_bucket = if amount_msat < u64::max_value() / 64 {
-				amount_msat * 64 / self.capacity_msat
+				amount_msat * 64 / self.capacity_msat.saturating_add(1)
 			} else {
 				// Only use 128-bit arithmetic when multiplication will overflow to avoid 128-bit
 				// division. This branch should only be hit in fuzz testing since the amount would
 				// need to be over 2.88 million BTC in practice.
-				((amount_msat as u128) * 64 / (self.capacity_msat as u128))
+				((amount_msat as u128) * 64 / (self.capacity_msat as u128).saturating_add(1))
 					.try_into().unwrap_or(65)
 			};
 			#[cfg(not(fuzzing))]
@@ -1123,6 +1161,7 @@ impl<L: DerefMut<Target = u64>, BRT: DerefMut<Target = HistoricalBucketRangeTracker>, T: Time, U: DerefMut<Target = T>> DirectedChannelLiquidity<L, BRT, T, U> {
[The body of this hunk, the headers and bodies of five further hunks against the same impl block, and the header of the following test-module hunk did not survive the HTML-to-text extraction; only repeated `impl<L: DerefMut<…>, BRT: DerefMut<…>>` header fragments remain. The recoverable remainder of the test-module hunk resumes here.]
 		let chain_source: Option<&crate::util::test_utils::TestChainSource> = None;
 		network_graph.update_channel_from_announcement(
 			&signed_announcement, &chain_source).unwrap();
-		update_channel(network_graph, short_channel_id, node_1_key, 0);
-		update_channel(network_graph, short_channel_id, node_2_key, 1);
+		update_channel(network_graph, short_channel_id, node_1_key, 0, 1_000);
+		update_channel(network_graph, short_channel_id, node_2_key, 1, 0);
 	}
 
 	fn update_channel(
 		network_graph: &mut NetworkGraph<&TestLogger>, short_channel_id: u64, node_key: SecretKey,
-		flags: u8
+		flags: u8, htlc_maximum_msat: u64
 	) {
 		let genesis_hash = genesis_block(Network::Testnet).header.block_hash();
 		let secp_ctx = Secp256k1::new();
@@ -1831,7 +1866,7 @@ mod tests {
 			flags,
 			cltv_expiry_delta: 18,
 			htlc_minimum_msat: 0,
-			htlc_maximum_msat: 1_000,
+			htlc_maximum_msat,
 			fee_base_msat: 1,
 			fee_proportional_millionths: 0,
 			excess_data: Vec::new(),
@@ -2751,6 +2786,7 @@ mod tests {
 		let logger = TestLogger::new();
 		let network_graph = network_graph(&logger);
 		let params = ProbabilisticScoringParameters {
+			liquidity_offset_half_life: Duration::from_secs(60 * 60),
 			historical_liquidity_penalty_multiplier_msat: 1024,
 			historical_liquidity_penalty_amount_multiplier_msat: 1024,
 			historical_no_updates_half_life: Duration::from_secs(10),
@@ -2800,7 +2836,26 @@ mod tests {
 			effective_capacity: EffectiveCapacity::Total { capacity_msat: 1_024, htlc_maximum_msat: 1_024 },
 		};
 		scorer.payment_path_failed(&payment_path_for_amount(1).iter().collect::<Vec<_>>(), 42);
-		assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 2048);
+		assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 409);
+
+		let usage = ChannelUsage {
+			amount_msat: 1,
+			inflight_htlc_msat: 0,
+			effective_capacity: EffectiveCapacity::MaximumHTLC { amount_msat: 0 },
+		};
+		assert_eq!(scorer.channel_penalty_msat(42, &target, &source, usage), 2048);
+
+		// Advance to decay all liquidity offsets to zero.
+		SinceEpoch::advance(Duration::from_secs(60 * 60 * 10));
+
+		// Use a path in the opposite direction, which have zero for htlc_maximum_msat. This will
+		// ensure that the effective capacity is zero to test division-by-zero edge cases.
+		let path = vec![
+			path_hop(target_pubkey(), 43, 2),
+			path_hop(source_pubkey(), 42, 1),
+			path_hop(sender_pubkey(), 41, 0),
+		];
+		scorer.payment_path_failed(&path.iter().collect::<Vec<_>>(), 42);
 	}
 
 	#[test]
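The new test path above deliberately produces an effective channel capacity of 0 msat (a channel update advertising htlc_maximum_msat of 0). Recasting the guarded arithmetic from the @@ -1028 hunk as a free function (a sketch; upstream computes this inline against self.capacity_msat) shows why the saturating_add(1) divisors make that safe:

fn payment_amt_64th_bucket(amount_msat: u64, capacity_msat: u64) -> u64 {
	if amount_msat < u64::max_value() / 64 {
		amount_msat * 64 / capacity_msat.saturating_add(1)
	} else {
		// Only use 128-bit arithmetic when the 64-bit multiplication would overflow.
		((amount_msat as u128) * 64 / (capacity_msat as u128).saturating_add(1))
			.try_into().unwrap_or(65)
	}
}

fn main() {
	// A zero-capacity channel no longer divides by zero: the +1 in the divisor
	// turns 1 * 64 / 0 into 1 * 64 / 1, so a 1 msat payment maps to bucket 64,
	// i.e. "the entire capacity or more".
	assert_eq!(payment_amt_64th_bucket(1, 0), 64);
	// For ordinary channels the +1 barely shifts the result: half the capacity
	// still maps to bucket 31 of 64 (500 * 64 / 1_001 = 31).
	assert_eq!(payment_amt_64th_bucket(500, 1_000), 31);
}

The out-of-range fallbacks in the diff (65 for 64 buckets here, 32 for 8 buckets in track_datapoint) are deliberately bogus so downstream checks can recognize and ignore impossible results.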