X-Git-Url: http://git.bitcoin.ninja/index.cgi?a=blobdiff_plain;f=lightning%2Fsrc%2Frouting%2Fscoring.rs;h=31158d41bc37570b7f95a98b5fb48bb9b68ea52d;hb=8311581fe110a9ee561a6fda6b55c78a02068d43;hp=6b8502d334fe58c4147bf92ca3a9e8b73981ca6a;hpb=30060c18b375a277fc11c4f601cbdeab07723c39;p=rust-lightning

diff --git a/lightning/src/routing/scoring.rs b/lightning/src/routing/scoring.rs
index 6b8502d3..31158d41 100644
--- a/lightning/src/routing/scoring.rs
+++ b/lightning/src/routing/scoring.rs
@@ -260,7 +260,7 @@ impl<'a, S: Writeable> Writeable for MutexGuard<'a, S> {
 }
 
 /// Proposed use of a channel passed as a parameter to [`Score::channel_penalty_msat`].
-#[derive(Clone, Copy, Debug)]
+#[derive(Clone, Copy, Debug, PartialEq)]
 pub struct ChannelUsage {
 	/// The amount to send through the channel, denominated in millisatoshis.
 	pub amount_msat: u64,
@@ -731,6 +731,8 @@ impl<G: Deref<Target = NetworkGraph<L>>, L: Deref, T: Time> ProbabilisticScorerUsingTime<G, L, T>
 	/// Note that this writes roughly one line per channel for which we have a liquidity estimate,
 	/// which may be a substantial amount of log output.
 	pub fn debug_log_liquidity_stats(&self) {
+		let now = T::now();
+
 		let graph = self.network_graph.read_only();
 		for (scid, liq) in self.channel_liquidities.iter() {
 			if let Some(chan_debug) = graph.channels().get(scid) {
@@ -738,8 +740,25 @@ impl<G: Deref<Target = NetworkGraph<L>>, L: Deref, T: Time> ProbabilisticScorerUsingTime<G, L, T>
 				if let Some((directed_info, _)) = chan_debug.as_directed_to(target) {
 					let amt = directed_info.effective_capacity().as_msat();
 					let dir_liq = liq.as_directed(source, target, amt, &self.params);
-					log_debug!(self.logger, "Liquidity from {:?} to {:?} via {} is in the range ({}, {})",
-						source, target, scid, dir_liq.min_liquidity_msat(), dir_liq.max_liquidity_msat());
+
+					let buckets = HistoricalMinMaxBuckets {
+						min_liquidity_offset_history: &dir_liq.min_liquidity_offset_history,
+						max_liquidity_offset_history: &dir_liq.max_liquidity_offset_history,
+					};
+					let (min_buckets, max_buckets, _) = buckets.get_decayed_buckets(now,
+						*dir_liq.last_updated, self.params.historical_no_updates_half_life);
+
+					log_debug!(self.logger, core::concat!(
+						"Liquidity from {} to {} via {} is in the range ({}, {}).\n",
+						"\tHistorical min liquidity octile relative probabilities: {} {} {} {} {} {} {} {}\n",
+						"\tHistorical max liquidity octile relative probabilities: {} {} {} {} {} {} {} {}"),
+						source, target, scid, dir_liq.min_liquidity_msat(), dir_liq.max_liquidity_msat(),
+						min_buckets[0], min_buckets[1], min_buckets[2], min_buckets[3],
+						min_buckets[4], min_buckets[5], min_buckets[6], min_buckets[7],
+						// Note that the liquidity buckets are an offset from the edge, so we
+						// invert the max order to get the probabilities from zero.
+						max_buckets[7], max_buckets[6], max_buckets[5], max_buckets[4],
+						max_buckets[3], max_buckets[2], max_buckets[1], max_buckets[0]);
 				} else {
 					log_debug!(self.logger, "No amount known for SCID {} from {:?} to {:?}", scid, source, target);
 				}
@@ -770,6 +789,53 @@ impl<G: Deref<Target = NetworkGraph<L>>, L: Deref, T: Time> ProbabilisticScorerUsingTime<G, L, T>
 		None
 	}
 
+	/// Query the historical estimated minimum and maximum liquidity available for sending a
+	/// payment over the channel with `scid` towards the given `target` node.
+	///
+	/// Returns two sets of 8 buckets. The first set describes the octiles for lower-bound
+	/// liquidity estimates; the second set describes the octiles for upper-bound liquidity
+	/// estimates. Each bucket describes the relative frequency at which we've seen a liquidity
+	/// bound in the octile relative to the channel's total capacity, on an arbitrary scale.
+	/// Because the values are slowly decayed, more recent data points are weighted more heavily
+	/// than older data points.
+	///
+	/// When scoring, the estimated probability that an upper- or lower-bound lies in a given
+	/// octile relative to the channel's total capacity is calculated by dividing that bucket's
+	/// value by the total of all buckets for the given bound.
+	///
+	/// For example, a value of `[0, 0, 0, 0, 0, 0, 0, 32]` indicates that we believe the
+	/// probability of a bound being in the top octile to be 100%, and have never (recently) seen
+	/// it in any other octiles. A value of `[31, 0, 0, 0, 0, 0, 0, 32]` indicates we've seen the
+	/// bound being both in the top and bottom octile, and roughly with similar (recent) frequency.
+	///
+	/// Because the data points are decayed slowly over time, values will eventually return to
+	/// `Some(([0; 8], [0; 8]))`.
+	pub fn historical_estimated_channel_liquidity_probabilities(&self, scid: u64, target: &NodeId)
+	-> Option<([u16; 8], [u16; 8])> {
+		let graph = self.network_graph.read_only();
+
+		if let Some(chan) = graph.channels().get(&scid) {
+			if let Some(liq) = self.channel_liquidities.get(&scid) {
+				if let Some((directed_info, source)) = chan.as_directed_to(target) {
+					let amt = directed_info.effective_capacity().as_msat();
+					let dir_liq = liq.as_directed(source, target, amt, &self.params);
+
+					let buckets = HistoricalMinMaxBuckets {
+						min_liquidity_offset_history: &dir_liq.min_liquidity_offset_history,
+						max_liquidity_offset_history: &dir_liq.max_liquidity_offset_history,
+					};
+					let (min_buckets, mut max_buckets, _) = buckets.get_decayed_buckets(T::now(),
+						*dir_liq.last_updated, self.params.historical_no_updates_half_life);
+					// Note that the liquidity buckets are an offset from the edge, so we invert
+					// the max order to get the probabilities from zero.
+					max_buckets.reverse();
+					return Some((min_buckets, max_buckets));
+				}
+			}
+		}
+		None
+	}
+
 	/// Marks the node with the given `node_id` as banned, i.e.,
 	/// it will be avoided during path finding.
 	pub fn add_banned(&mut self, node_id: &NodeId) {
@@ -1693,8 +1759,7 @@ mod tests {
 	}
 
 	fn network_graph(logger: &TestLogger) -> NetworkGraph<&TestLogger> {
-		let genesis_hash = genesis_block(Network::Testnet).header.block_hash();
-		let mut network_graph = NetworkGraph::new(genesis_hash, logger);
+		let mut network_graph = NetworkGraph::new(Network::Testnet, logger);
 		add_channel(&mut network_graph, 42, source_privkey(), target_privkey());
 		add_channel(&mut network_graph, 43, target_privkey(), recipient_privkey());
 
@@ -1713,10 +1778,10 @@ mod tests {
 			features: channelmanager::provided_channel_features(&UserConfig::default()),
 			chain_hash: genesis_hash,
 			short_channel_id,
-			node_id_1: PublicKey::from_secret_key(&secp_ctx, &node_1_key),
-			node_id_2: PublicKey::from_secret_key(&secp_ctx, &node_2_key),
-			bitcoin_key_1: PublicKey::from_secret_key(&secp_ctx, &node_1_secret),
-			bitcoin_key_2: PublicKey::from_secret_key(&secp_ctx, &node_2_secret),
+			node_id_1: NodeId::from_pubkey(&PublicKey::from_secret_key(&secp_ctx, &node_1_key)),
+			node_id_2: NodeId::from_pubkey(&PublicKey::from_secret_key(&secp_ctx, &node_2_key)),
+			bitcoin_key_1: NodeId::from_pubkey(&PublicKey::from_secret_key(&secp_ctx, &node_1_secret)),
+			bitcoin_key_2: NodeId::from_pubkey(&PublicKey::from_secret_key(&secp_ctx, &node_2_secret)),
 			excess_data: Vec::new(),
 		};
 		let msghash = hash_to_message!(&Sha256dHash::hash(&unsigned_announcement.encode()[..])[..]);
@@ -2160,8 +2225,7 @@ mod tests {
 		// we do not score such channels.
 		let secp_ctx = Secp256k1::new();
 		let logger = TestLogger::new();
-		let genesis_hash = genesis_block(Network::Testnet).header.block_hash();
-		let mut network_graph = NetworkGraph::new(genesis_hash, &logger);
+		let mut network_graph = NetworkGraph::new(Network::Testnet, &logger);
 		let secret_a = SecretKey::from_slice(&[42; 32]).unwrap();
 		let secret_b = SecretKey::from_slice(&[43; 32]).unwrap();
 		let secret_c = SecretKey::from_slice(&[44; 32]).unwrap();
@@ -2684,19 +2748,32 @@ mod tests {
 		};
 		// With no historical data the normal liquidity penalty calculation is used.
 		assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 47);
+		assert_eq!(scorer.historical_estimated_channel_liquidity_probabilities(42, &target),
+			None);
 
 		scorer.payment_path_failed(&payment_path_for_amount(1).iter().collect::<Vec<_>>(), 42);
 		assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 2048);
+		// The "it failed" increment is 32, where the probability should lie fully in the first
+		// octile.
+		assert_eq!(scorer.historical_estimated_channel_liquidity_probabilities(42, &target),
+			Some(([32, 0, 0, 0, 0, 0, 0, 0], [32, 0, 0, 0, 0, 0, 0, 0])));
 
 		// Even after we tell the scorer we definitely have enough available liquidity, it will
 		// still remember that there was some failure in the past, and assign a non-0 penalty.
 		scorer.payment_path_failed(&payment_path_for_amount(1000).iter().collect::<Vec<_>>(), 43);
 		assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 198);
+		// The first octile should be decayed just slightly and the last octile should have a new
+		// point.
+		assert_eq!(scorer.historical_estimated_channel_liquidity_probabilities(42, &target),
+			Some(([31, 0, 0, 0, 0, 0, 0, 32], [31, 0, 0, 0, 0, 0, 0, 32])));
 
 		// Advance the time forward 16 half-lives (which the docs claim will ensure all data is
 		// gone), and check that we're back to where we started.
 		SinceEpoch::advance(Duration::from_secs(10 * 16));
 		assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 47);
+		// Once fully decayed we still have data, but it's all-0s. In the future we may remove the
+		// data entirely instead.
+		assert_eq!(scorer.historical_estimated_channel_liquidity_probabilities(42, &target),
+			Some(([0; 8], [0; 8])));
 	}
 
 	#[test]
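
A minimal caller-side sketch of the accessor this diff adds. It assumes a `ProbabilisticScorer` (bound to `scorer`) built over a synced `NetworkGraph`, a known `target` NodeId for the channel's counterparty, and a placeholder short channel id of 42; none of these bindings come from the diff itself. Per the new documentation, each raw bucket weight is divided by the per-bound total to recover an estimated probability:

// Hypothetical usage; `scorer` and `target` are assumed to exist in the caller's context.
let scid: u64 = 42; // placeholder short channel id
if let Some((min_buckets, max_buckets)) =
	scorer.historical_estimated_channel_liquidity_probabilities(scid, &target)
{
	// Buckets are relative frequencies on an arbitrary scale; normalizing by the
	// per-bound total yields the estimated probability that the bound lies in a
	// given octile of the channel's capacity.
	let min_total: u32 = min_buckets.iter().map(|b| *b as u32).sum();
	let max_total: u32 = max_buckets.iter().map(|b| *b as u32).sum();
	if min_total != 0 {
		// Probability the liquidity lower bound lies in the bottom octile.
		println!("P(min in bottom octile) = {}", min_buckets[0] as f64 / min_total as f64);
	}
	if max_total != 0 {
		// The accessor already reverses the max buckets into from-zero order, so
		// index 7 is the top octile here.
		println!("P(max in top octile) = {}", max_buckets[7] as f64 / max_total as f64);
	}
}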