X-Git-Url: http://git.bitcoin.ninja/index.cgi?a=blobdiff_plain;f=lightning%2Fsrc%2Frouting%2Fscoring.rs;h=9c907c3f7fe4bd38d7526b9d10871769ea537d27;hb=fb670c8faae8c1e990496b869e62dfbde10a64f8;hp=04c405b036dd7c63f5a891815af108bcd885398c;hpb=df52da7b31494c7ec77a705cca4c44bc840f8a95;p=rust-lightning

diff --git a/lightning/src/routing/scoring.rs b/lightning/src/routing/scoring.rs
index 04c405b0..9c907c3f 100644
--- a/lightning/src/routing/scoring.rs
+++ b/lightning/src/routing/scoring.rs
@@ -89,7 +89,7 @@ macro_rules! define_score { ($($supertrait: path)*) => {
 /// `ScoreLookUp` is used to determine the penalty for a given channel.
 ///
 /// Scoring is in terms of fees willing to be paid in order to avoid routing through a channel.
-pub trait ScoreLookUp $(: $supertrait)* {
+pub trait ScoreLookUp {
 	/// A configurable type which should contain various passed-in parameters for configuring the scorer,
 	/// on a per-routefinding-call basis through to the scorer methods,
 	/// which are used to determine the parameters for the suitability of channels for use.
@@ -108,7 +108,7 @@ pub trait ScoreLookUp $(: $supertrait)* {
 }
 
 /// `ScoreUpdate` is used to update the scorer's internal state after a payment attempt.
-pub trait ScoreUpdate $(: $supertrait)* {
+pub trait ScoreUpdate {
 	/// Handles updating channel penalties after failing to route through a channel.
 	fn payment_path_failed(&mut self, path: &Path, short_channel_id: u64);
 
@@ -122,8 +122,20 @@ pub trait ScoreUpdate $(: $supertrait)* {
 	fn probe_successful(&mut self, path: &Path);
 }
 
-impl<SP: Sized, S: ScoreLookUp<ScoreParams = SP>, T: Deref<Target=S> $(+ $supertrait)*> ScoreLookUp for T {
-	type ScoreParams = SP;
+/// A trait which can both lookup and update routing channel penalty scores.
+///
+/// This is used in places where both bounds are required and implemented for all types which
+/// implement [`ScoreLookUp`] and [`ScoreUpdate`].
+///
+/// Bindings users may need to manually implement this for their custom scoring implementations.
+pub trait Score : ScoreLookUp + ScoreUpdate $(+ $supertrait)* {}
+
+#[cfg(not(c_bindings))]
+impl<T: ScoreLookUp + ScoreUpdate $(+ $supertrait)*> Score for T {}
+
+#[cfg(not(c_bindings))]
+impl<S: ScoreLookUp, T: Deref<Target=S>> ScoreLookUp for T {
+	type ScoreParams = S::ScoreParams;
 	fn channel_penalty_msat(
 		&self, short_channel_id: u64, source: &NodeId, target: &NodeId, usage: ChannelUsage, score_params: &Self::ScoreParams
 	) -> u64 {
@@ -131,7 +143,8 @@ impl<SP: Sized, S: ScoreLookUp<ScoreParams = SP>, T: Deref<Target=S> $(+ $supert
 	}
 }
 
-impl<S: ScoreUpdate, T: DerefMut<Target=S> $(+ $supertrait)*> ScoreUpdate for T {
+#[cfg(not(c_bindings))]
+impl<S: ScoreUpdate, T: DerefMut<Target=S>> ScoreUpdate for T {
 	fn payment_path_failed(&mut self, path: &Path, short_channel_id: u64) {
 		self.deref_mut().payment_path_failed(path, short_channel_id)
 	}
@@ -192,7 +205,7 @@ pub trait WriteableScore<'a>: LockableScore<'a> + Writeable {}
 #[cfg(not(c_bindings))]
 impl<'a, T> WriteableScore<'a> for T where T: LockableScore<'a> + Writeable {}
 #[cfg(not(c_bindings))]
-impl<'a, T: 'a + ScoreLookUp + ScoreUpdate> LockableScore<'a> for Mutex<T> {
+impl<'a, T: Score + 'a> LockableScore<'a> for Mutex<T> {
 	type ScoreUpdate = T;
 	type ScoreLookUp = T;
 
@@ -209,7 +222,7 @@ impl<'a, T: 'a + ScoreLookUp + ScoreUpdate> LockableScore<'a> for Mutex<T> {
 }
 
 #[cfg(not(c_bindings))]
-impl<'a, T: 'a + ScoreUpdate + ScoreLookUp> LockableScore<'a> for RefCell<T> {
+impl<'a, T: Score + 'a> LockableScore<'a> for RefCell<T> {
 	type ScoreUpdate = T;
 	type ScoreLookUp = T;
 
@@ -226,7 +239,7 @@ impl<'a, T: 'a + ScoreUpdate + ScoreLookUp> LockableScore<'a> for RefCell<T> {
 }
 
 #[cfg(not(c_bindings))]
-impl<'a, SP:Sized, T: 'a + ScoreUpdate + ScoreLookUp<ScoreParams = SP>> LockableScore<'a> for RwLock<T> {
+impl<'a, T: Score + 'a> LockableScore<'a> for RwLock<T> {
 	type ScoreUpdate = T;
 	type ScoreLookUp = T;
 
@@ -244,12 +257,12 @@ impl<'a, SP:Sized, T: 'a + ScoreUpdate + ScoreLookUp<ScoreParams = SP>> Lockabl
 
 #[cfg(c_bindings)]
 /// A concrete implementation of [`LockableScore`] which supports multi-threading.
-pub struct MultiThreadedLockableScore<T: ScoreLookUp + ScoreUpdate> {
+pub struct MultiThreadedLockableScore<T: Score> {
 	score: RwLock<T>,
 }
 
 #[cfg(c_bindings)]
-impl<'a, SP:Sized, T: 'a + ScoreLookUp<ScoreParams = SP> + ScoreUpdate> LockableScore<'a> for MultiThreadedLockableScore<T> {
+impl<'a, T: Score + 'a> LockableScore<'a> for MultiThreadedLockableScore<T> {
 	type ScoreUpdate = T;
 	type ScoreLookUp = T;
 	type WriteLocked = MultiThreadedScoreLockWrite<'a, Self::ScoreUpdate>;
@@ -265,17 +278,17 @@ impl<'a, SP:Sized, T: 'a + ScoreLookUp<ScoreParams = SP> + ScoreUpdate> Lockable
 }
 
 #[cfg(c_bindings)]
-impl<T: ScoreLookUp + ScoreUpdate> Writeable for MultiThreadedLockableScore<T> {
+impl<T: Score> Writeable for MultiThreadedLockableScore<T> {
 	fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
 		self.score.read().unwrap().write(writer)
 	}
 }
 
 #[cfg(c_bindings)]
-impl<'a, T: 'a + ScoreUpdate + ScoreLookUp> WriteableScore<'a> for MultiThreadedLockableScore<T> {}
+impl<'a, T: Score + 'a> WriteableScore<'a> for MultiThreadedLockableScore<T> {}
 
 #[cfg(c_bindings)]
-impl<T: ScoreLookUp + ScoreUpdate> MultiThreadedLockableScore<T> {
+impl<T: Score> MultiThreadedLockableScore<T> {
 	/// Creates a new [`MultiThreadedLockableScore`] given an underlying [`Score`].
 	pub fn new(score: T) -> Self {
 		MultiThreadedLockableScore { score: RwLock::new(score) }
@@ -284,14 +297,14 @@ impl<T: ScoreLookUp + ScoreUpdate> MultiThreadedLockableScore<T> {
 
 #[cfg(c_bindings)]
 /// A locked `MultiThreadedLockableScore`.
-pub struct MultiThreadedScoreLockRead<'a, T: ScoreLookUp>(RwLockReadGuard<'a, T>);
+pub struct MultiThreadedScoreLockRead<'a, T: Score>(RwLockReadGuard<'a, T>);
 
 #[cfg(c_bindings)]
 /// A locked `MultiThreadedLockableScore`.
-pub struct MultiThreadedScoreLockWrite<'a, T: ScoreUpdate>(RwLockWriteGuard<'a, T>);
+pub struct MultiThreadedScoreLockWrite<'a, T: Score>(RwLockWriteGuard<'a, T>);
 
 #[cfg(c_bindings)]
-impl<'a, T: 'a + ScoreLookUp> Deref for MultiThreadedScoreLockRead<'a, T> {
+impl<'a, T: 'a + Score> Deref for MultiThreadedScoreLockRead<'a, T> {
 	type Target = T;
 
 	fn deref(&self) -> &Self::Target {
@@ -300,14 +313,24 @@ impl<'a, T: 'a + ScoreLookUp> Deref for MultiThreadedScoreLockRead<'a, T> {
 }
 
 #[cfg(c_bindings)]
-impl<'a, T: 'a + ScoreUpdate> Writeable for MultiThreadedScoreLockWrite<'a, T> {
+impl<'a, T: Score> ScoreLookUp for MultiThreadedScoreLockRead<'a, T> {
+	type ScoreParams = T::ScoreParams;
+	fn channel_penalty_msat(&self, short_channel_id: u64, source: &NodeId,
+		target: &NodeId, usage: ChannelUsage, score_params: &Self::ScoreParams
+	) -> u64 {
+		self.0.channel_penalty_msat(short_channel_id, source, target, usage, score_params)
+	}
+}
+
+#[cfg(c_bindings)]
+impl<'a, T: Score> Writeable for MultiThreadedScoreLockWrite<'a, T> {
 	fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
 		self.0.write(writer)
 	}
 }
 
 #[cfg(c_bindings)]
-impl<'a, T: 'a + ScoreUpdate> Deref for MultiThreadedScoreLockWrite<'a, T> {
+impl<'a, T: 'a + Score> Deref for MultiThreadedScoreLockWrite<'a, T> {
 	type Target = T;
 
 	fn deref(&self) -> &Self::Target {
@@ -316,12 +339,31 @@ impl<'a, T: 'a + ScoreUpdate> Deref for MultiThreadedScoreLockWrite<'a, T> {
 }
 
 #[cfg(c_bindings)]
-impl<'a, T: 'a + ScoreUpdate> DerefMut for MultiThreadedScoreLockWrite<'a, T> {
+impl<'a, T: 'a + Score> DerefMut for MultiThreadedScoreLockWrite<'a, T> {
 	fn deref_mut(&mut self) -> &mut Self::Target {
 		self.0.deref_mut()
 	}
 }
 
+#[cfg(c_bindings)]
+impl<'a, T: Score> ScoreUpdate for MultiThreadedScoreLockWrite<'a, T> {
+	fn payment_path_failed(&mut self, path: &Path, short_channel_id: u64) {
+		self.0.payment_path_failed(path, short_channel_id)
+	}
+
+	fn payment_path_successful(&mut self, path: &Path) {
+		self.0.payment_path_successful(path)
+	}
+
+	fn probe_failed(&mut self, path: &Path, short_channel_id: u64) {
+		self.0.probe_failed(path, short_channel_id)
+	}
+
+	fn probe_successful(&mut self, path: &Path) {
+		self.0.probe_successful(path)
+	}
+}
+
 /// Proposed use of a channel passed as a parameter to [`ScoreLookUp::channel_penalty_msat`].
 #[derive(Clone, Copy, Debug, PartialEq)]
 pub struct ChannelUsage {
@@ -580,6 +622,28 @@ pub struct ProbabilisticScoringFeeParameters {
 	/// [`base_penalty_msat`]: Self::base_penalty_msat
 	/// [`anti_probing_penalty_msat`]: Self::anti_probing_penalty_msat
 	pub considered_impossible_penalty_msat: u64,
+
+	/// In order to calculate most of the scores above, we must first convert a lower and upper
+	/// bound on the available liquidity in a channel into the probability that we think a payment
+	/// will succeed. That probability is derived from a Probability Density Function for where we
+	/// think the liquidity in a channel likely lies, given such bounds.
+	///
+	/// If this flag is set, that PDF is simply a constant - we assume that the actual available
+	/// liquidity in a channel is just as likely to be at any point between our lower and upper
+	/// bounds.
+	///
+	/// If this flag is *not* set, that PDF is `(x - 0.5*capacity) ^ 2`. That is, we use an
+	/// exponential curve which expects the liquidity of a channel to lie "at the edges". This
+	/// matches experimental results - most routing nodes do not aggressively rebalance their
+	/// channels and flows in the network are often unbalanced, leaving liquidity usually
+	/// unavailable.
+ /// + /// Thus, for the "best" routes, leave this flag `false`. However, the flag does imply a number + /// of floating-point multiplications in the hottest routing code, which may lead to routing + /// performance degradation on some machines. + /// + /// Default value: false + pub linear_success_probability: bool, } impl Default for ProbabilisticScoringFeeParameters { @@ -594,6 +658,7 @@ impl Default for ProbabilisticScoringFeeParameters { considered_impossible_penalty_msat: 1_0000_0000_000, historical_liquidity_penalty_multiplier_msat: 10_000, historical_liquidity_penalty_amount_multiplier_msat: 64, + linear_success_probability: false, } } } @@ -647,6 +712,7 @@ impl ProbabilisticScoringFeeParameters { manual_node_penalties: HashMap::new(), anti_probing_penalty_msat: 0, considered_impossible_penalty_msat: 0, + linear_success_probability: true, } } } @@ -999,6 +1065,12 @@ const PRECISION_LOWER_BOUND_DENOMINATOR: u64 = approx::LOWER_BITS_BOUND; const AMOUNT_PENALTY_DIVISOR: u64 = 1 << 20; const BASE_AMOUNT_PENALTY_DIVISOR: u64 = 1 << 30; +/// Raises three `f64`s to the 3rd power, without `powi` because it requires `std` (dunno why). +#[inline(always)] +fn three_f64_pow_3(a: f64, b: f64, c: f64) -> (f64, f64, f64) { + (a * a * a, b * b * b, c * c * c) +} + /// Given liquidity bounds, calculates the success probability (in the form of a numerator and /// denominator) of an HTLC. This is a key assumption in our scoring models. /// @@ -1009,14 +1081,46 @@ const BASE_AMOUNT_PENALTY_DIVISOR: u64 = 1 << 30; #[inline(always)] fn success_probability( amount_msat: u64, min_liquidity_msat: u64, max_liquidity_msat: u64, capacity_msat: u64, - _params: &ProbabilisticScoringFeeParameters, min_zero_implies_no_successes: bool, + params: &ProbabilisticScoringFeeParameters, min_zero_implies_no_successes: bool, ) -> (u64, u64) { debug_assert!(min_liquidity_msat <= amount_msat); debug_assert!(amount_msat < max_liquidity_msat); debug_assert!(max_liquidity_msat <= capacity_msat); - let numerator = max_liquidity_msat - amount_msat; - let mut denominator = (max_liquidity_msat - min_liquidity_msat).saturating_add(1); + let (numerator, mut denominator) = + if params.linear_success_probability { + (max_liquidity_msat - amount_msat, + (max_liquidity_msat - min_liquidity_msat).saturating_add(1)) + } else { + let capacity = capacity_msat as f64; + let min = (min_liquidity_msat as f64) / capacity; + let max = (max_liquidity_msat as f64) / capacity; + let amount = (amount_msat as f64) / capacity; + + // Assume the channel has a probability density function of (x - 0.5)^2 for values from + // 0 to 1 (where 1 is the channel's full capacity). The success probability given some + // liquidity bounds is thus the integral under the curve from the amount to maximum + // estimated liquidity, divided by the same integral from the minimum to the maximum + // estimated liquidity bounds. + // + // Because the integral from x to y is simply (y - 0.5)^3 - (x - 0.5)^3, we can + // calculate the cumulative density function between the min/max bounds trivially. Note + // that we don't bother to normalize the CDF to total to 1, as it will come out in the + // division of num / den. + let (max_pow, amt_pow, min_pow) = three_f64_pow_3(max - 0.5, amount - 0.5, min - 0.5); + let num = max_pow - amt_pow; + let den = max_pow - min_pow; + + // Because our numerator and denominator max out at 0.5^3 we need to multiply them by + // quite a large factor to get something useful (ideally in the 2^30 range). 
+			const BILLIONISH: f64 = 1024.0 * 1024.0 * 1024.0;
+			let numerator = (num * BILLIONISH) as u64 + 1;
+			let denominator = (den * BILLIONISH) as u64 + 1;
+			debug_assert!(numerator <= 1 << 30, "Got large numerator ({}) from float {}.", numerator, num);
+			debug_assert!(denominator <= 1 << 30, "Got large denominator ({}) from float {}.", denominator, den);
+			(numerator, denominator)
+		};
+
 	if min_zero_implies_no_successes && min_liquidity_msat == 0 &&
 		denominator < u64::max_value() / 21
 	{
@@ -1355,6 +1459,10 @@ impl<G: Deref<Target = NetworkGraph<L>>, L: Deref, T: Time> ScoreUpdate for Prob
 	}
 }
 
+#[cfg(c_bindings)]
+impl<G: Deref<Target = NetworkGraph<L>>, L: Deref, T: Time> Score for ProbabilisticScorerUsingTime<G, L, T>
+where L::Target: Logger {}
+
 mod approx {
 	const BITS: u32 = 64;
 	const HIGHEST_BIT: u32 = BITS - 1;
@@ -2071,7 +2179,7 @@ mod tests {
 	use crate::util::ser::{ReadableArgs, Writeable};
 	use crate::util::test_utils::{self, TestLogger};
 
-	use bitcoin::blockdata::constants::genesis_block;
+	use bitcoin::blockdata::constants::ChainHash;
 	use bitcoin::hashes::Hash;
 	use bitcoin::hashes::sha256d::Hash as Sha256dHash;
 	use bitcoin::network::constants::Network;
@@ -2148,7 +2256,7 @@ mod tests {
 		network_graph: &mut NetworkGraph<&TestLogger>, short_channel_id: u64, node_1_key: SecretKey,
 		node_2_key: SecretKey
 	) {
-		let genesis_hash = genesis_block(Network::Testnet).header.block_hash();
+		let genesis_hash = ChainHash::using_genesis_block(Network::Testnet);
 		let node_1_secret = &SecretKey::from_slice(&[39; 32]).unwrap();
 		let node_2_secret = &SecretKey::from_slice(&[40; 32]).unwrap();
 		let secp_ctx = Secp256k1::new();
@@ -2181,7 +2289,7 @@ mod tests {
 		network_graph: &mut NetworkGraph<&TestLogger>, short_channel_id: u64, node_key: SecretKey,
 		flags: u8, htlc_maximum_msat: u64, timestamp: u32,
 	) {
-		let genesis_hash = genesis_block(Network::Testnet).header.block_hash();
+		let genesis_hash = ChainHash::using_genesis_block(Network::Testnet);
 		let secp_ctx = Secp256k1::new();
 		let unsigned_update = UnsignedChannelUpdate {
 			chain_hash: genesis_hash,
@@ -2212,6 +2320,7 @@ mod tests {
 			channel_features: channelmanager::provided_channel_features(&config),
 			fee_msat,
 			cltv_expiry_delta: 18,
+			maybe_announced_channel: true,
 		}
 	}
 
@@ -2964,47 +3073,47 @@ mod tests {
 			inflight_htlc_msat: 0,
 			effective_capacity: EffectiveCapacity::Total { capacity_msat: 950_000_000, htlc_maximum_msat: 1_000 },
 		};
-		assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), 6262);
+		assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), 11497);
 		let usage = ChannelUsage {
 			effective_capacity: EffectiveCapacity::Total { capacity_msat: 1_950_000_000, htlc_maximum_msat: 1_000 }, ..usage
 		};
-		assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), 4634);
+		assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), 7408);
 		let usage = ChannelUsage {
 			effective_capacity: EffectiveCapacity::Total { capacity_msat: 2_950_000_000, htlc_maximum_msat: 1_000 }, ..usage
 		};
-		assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), 4186);
+		assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), 6151);
 		let usage = ChannelUsage {
 			effective_capacity: EffectiveCapacity::Total { capacity_msat: 3_950_000_000, htlc_maximum_msat: 1_000 }, ..usage
 		};
-		assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), 3909);
+		assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), 5427);
 		let usage = ChannelUsage {
 			effective_capacity: EffectiveCapacity::Total { capacity_msat: 4_950_000_000, htlc_maximum_msat: 1_000 }, ..usage
 		};
-		assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), 3556);
+		assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), 4955);
 		let usage = ChannelUsage {
 			effective_capacity: EffectiveCapacity::Total { capacity_msat: 5_950_000_000, htlc_maximum_msat: 1_000 }, ..usage
 		};
-		assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), 3533);
+		assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), 4736);
 		let usage = ChannelUsage {
 			effective_capacity: EffectiveCapacity::Total { capacity_msat: 6_950_000_000, htlc_maximum_msat: 1_000 }, ..usage
 		};
-		assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), 3172);
+		assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), 4484);
 		let usage = ChannelUsage {
 			effective_capacity: EffectiveCapacity::Total { capacity_msat: 7_450_000_000, htlc_maximum_msat: 1_000 }, ..usage
 		};
-		assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), 3211);
+		assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), 4484);
 		let usage = ChannelUsage {
 			effective_capacity: EffectiveCapacity::Total { capacity_msat: 7_950_000_000, htlc_maximum_msat: 1_000 }, ..usage
 		};
-		assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), 3243);
+		assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), 4263);
 		let usage = ChannelUsage {
 			effective_capacity: EffectiveCapacity::Total { capacity_msat: 8_950_000_000, htlc_maximum_msat: 1_000 }, ..usage
 		};
-		assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), 3297);
+		assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), 4263);
 		let usage = ChannelUsage {
 			effective_capacity: EffectiveCapacity::Total { capacity_msat: 9_950_000_000, htlc_maximum_msat: 1_000 }, ..usage
 		};
-		assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), 3250);
+		assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), 4044);
 	}
 
 	#[test]
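
Editor's illustration (not part of the patch above): the arithmetic this diff adds to `success_probability` can be easier to follow as a standalone program. The sketch below assumes the model described in the new `linear_success_probability` documentation - a flat liquidity PDF when the flag is set, and the `(x - 0.5)^2` PDF with its cubic CDF otherwise. The names `sketch_success_probability`, `linear` and `SCALE` are invented stand-ins for `success_probability`, `linear_success_probability` and `BILLIONISH`; nothing here is upstream code.

// success_probability_sketch.rs -- illustrative only, not part of rust-lightning.
fn sketch_success_probability(
	amount_msat: u64, min_liquidity_msat: u64, max_liquidity_msat: u64, capacity_msat: u64,
	linear: bool,
) -> (u64, u64) {
	assert!(min_liquidity_msat <= amount_msat);
	assert!(amount_msat < max_liquidity_msat);
	assert!(max_liquidity_msat <= capacity_msat);

	if linear {
		// Flat PDF: the success probability is the share of the [min, max] range above `amount`.
		return (max_liquidity_msat - amount_msat,
			(max_liquidity_msat - min_liquidity_msat).saturating_add(1));
	}

	// (x - 0.5)^2 PDF on [0, 1]: the unnormalized CDF between a < b is
	// (b - 0.5)^3 - (a - 0.5)^3; normalization cancels in the numerator/denominator ratio.
	let capacity = capacity_msat as f64;
	let min = (min_liquidity_msat as f64) / capacity;
	let max = (max_liquidity_msat as f64) / capacity;
	let amount = (amount_msat as f64) / capacity;

	let cube = |x: f64| x * x * x;
	let num = cube(max - 0.5) - cube(amount - 0.5);
	let den = cube(max - 0.5) - cube(min - 0.5);

	// Scale the sub-unity floats into integer space, as the patch does with its ~2^30 factor.
	const SCALE: f64 = 1024.0 * 1024.0 * 1024.0;
	((num * SCALE) as u64 + 1, (den * SCALE) as u64 + 1)
}

fn main() {
	// With uninformative bounds [0, capacity], half the capacity scores 0.5 under both models,
	// but 90% of capacity scores roughly 0.24 under the edge-weighted PDF versus roughly 0.10
	// under the flat one, since the nonlinear model expects liquidity to sit near the edges.
	for (amt, linear) in [(500_000u64, false), (900_000, false), (900_000, true)] {
		let (num, den) = sketch_success_probability(amt, 0, 1_000_000, 1_000_000, linear);
		println!("amount {} linear {}: p(success) ~= {:.3}", amt, linear, num as f64 / den as f64);
	}
}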