X-Git-Url: http://git.bitcoin.ninja/index.cgi?a=blobdiff_plain;f=lightning%2Fsrc%2Frouting%2Fscoring.rs;h=adfc59c92aef9eb0c3194b697fd564d99721ca48;hb=ca9ca75f082dec8dfc70f3e263a7c3789e17a054;hp=860f3cb195418054322cd551f5e7487b84af8cad;hpb=00607a5286751edd10c86f9fe6ce0d656d1e3ece;p=rust-lightning diff --git a/lightning/src/routing/scoring.rs b/lightning/src/routing/scoring.rs index 860f3cb1..adfc59c9 100644 --- a/lightning/src/routing/scoring.rs +++ b/lightning/src/routing/scoring.rs @@ -20,7 +20,7 @@ //! # use lightning::routing::gossip::NetworkGraph; //! # use lightning::routing::router::{RouteParameters, find_route}; //! # use lightning::routing::scoring::{ProbabilisticScorer, ProbabilisticScoringParameters}; -//! # use lightning::chain::keysinterface::{KeysManager, KeysInterface}; +//! # use lightning::chain::keysinterface::KeysManager; //! # use lightning::util::logger::{Logger, Record}; //! # use bitcoin::secp256k1::PublicKey; //! # @@ -260,7 +260,7 @@ impl<'a, S: Writeable> Writeable for MutexGuard<'a, S> { } /// Proposed use of a channel passed as a parameter to [`Score::channel_penalty_msat`]. -#[derive(Clone, Copy, Debug)] +#[derive(Clone, Copy, Debug, PartialEq)] pub struct ChannelUsage { /// The amount to send through the channel, denominated in millisatoshis. pub amount_msat: u64, @@ -550,7 +550,7 @@ struct HistoricalBucketRangeTracker { impl HistoricalBucketRangeTracker { fn new() -> Self { Self { buckets: [0; 8] } } - fn track_datapoint(&mut self, bucket_idx: u8) { + fn track_datapoint(&mut self, liquidity_offset_msat: u64, capacity_msat: u64) { // We have 8 leaky buckets for min and max liquidity. Each bucket tracks the amount of time // we spend in each bucket as a 16-bit fixed-point number with a 5 bit fractional part. // @@ -571,6 +571,12 @@ impl HistoricalBucketRangeTracker { // // The constants were picked experimentally, selecting a decay amount that restricts us // from overflowing buckets without having to cap them manually. + + // Ensure the bucket index is in the range [0, 7], even if the liquidity offset is zero or + // the channel's capacity, though the second should generally never happen. 
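(For concreteness: with a capacity of 1_000 msat, an offset of 999 msat maps to 999 * 8 / 1_001 == 7, the top bucket, and an offset of 0 maps to bucket 0. The computation below can only produce an out-of-range index if the offset exceeds the capacity, which is exactly what the debug assertion guards.)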
+ debug_assert!(liquidity_offset_msat <= capacity_msat); + let bucket_idx: u8 = (liquidity_offset_msat * 8 / capacity_msat.saturating_add(1)) + .try_into().unwrap_or(32); // 32 is bogus for 8 buckets, and will be ignored debug_assert!(bucket_idx < 8); if bucket_idx < 8 { for e in self.buckets.iter_mut() { @@ -597,7 +603,22 @@ struct HistoricalMinMaxBuckets<'a> { impl HistoricalMinMaxBuckets<'_> { #[inline] - fn calculate_success_probability_times_billion(&self, required_decays: u32, payment_amt_64th_bucket: u8) -> Option { + fn get_decayed_buckets(&self, now: T, last_updated: T, half_life: Duration) + -> ([u16; 8], [u16; 8], u32) { + let required_decays = now.duration_since(last_updated).as_secs() + .checked_div(half_life.as_secs()) + .map_or(u32::max_value(), |decays| cmp::min(decays, u32::max_value() as u64) as u32); + let mut min_buckets = *self.min_liquidity_offset_history; + min_buckets.time_decay_data(required_decays); + let mut max_buckets = *self.max_liquidity_offset_history; + max_buckets.time_decay_data(required_decays); + (min_buckets.buckets, max_buckets.buckets, required_decays) + } + + #[inline] + fn calculate_success_probability_times_billion( + &self, now: T, last_updated: T, half_life: Duration, payment_amt_64th_bucket: u8) + -> Option { // If historical penalties are enabled, calculate the penalty by walking the set of // historical liquidity bucket (min, max) combinations (where min_idx < max_idx) and, for // each, calculate the probability of success given our payment amount, then total the @@ -619,23 +640,22 @@ impl HistoricalMinMaxBuckets<'_> { // less than 1/16th of a channel's capacity, or 1/8th if we used the top of the bucket. let mut total_valid_points_tracked = 0; - // Rather than actually decaying the individual buckets, which would lose precision, we - // simply track whether all buckets would be decayed to zero, in which case we treat it as - // if we had no data. - let mut is_fully_decayed = true; - let mut check_track_bucket_contains_undecayed_points = - |bucket_val: u16| if bucket_val.checked_shr(required_decays).unwrap_or(0) > 0 { is_fully_decayed = false; }; + // Check if all our buckets are zero, once decayed and treat it as if we had no data. We + // don't actually use the decayed buckets, though, as that would lose precision. + let (decayed_min_buckets, decayed_max_buckets, required_decays) = + self.get_decayed_buckets(now, last_updated, half_life); + if decayed_min_buckets.iter().all(|v| *v == 0) || decayed_max_buckets.iter().all(|v| *v == 0) { + return None; + } for (min_idx, min_bucket) in self.min_liquidity_offset_history.buckets.iter().enumerate() { - check_track_bucket_contains_undecayed_points(*min_bucket); for max_bucket in self.max_liquidity_offset_history.buckets.iter().take(8 - min_idx) { total_valid_points_tracked += (*min_bucket as u64) * (*max_bucket as u64); - check_track_bucket_contains_undecayed_points(*max_bucket); } } // If the total valid points is smaller than 1.0 (i.e. 32 in our fixed-point scheme), treat // it as if we were fully decayed. - if total_valid_points_tracked.checked_shr(required_decays).unwrap_or(0) < 32*32 || is_fully_decayed { + if total_valid_points_tracked.checked_shr(required_decays).unwrap_or(0) < 32*32 { return None; } @@ -688,6 +708,7 @@ struct DirectedChannelLiquidity<'a, L: Deref, BRT: Deref>, L: Deref, T: Time> ProbabilisticScorerU /// Note that this writes roughly one line per channel for which we have a liquidity estimate, /// which may be a substantial amount of log output. 
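To make the fixed-point arithmetic described in the comments above concrete, here is a minimal standalone sketch of the tracker and its two decay paths. The per-datapoint decay constant of 2047/2048 is an assumption for illustration (the comment only says the constants were picked experimentally), and `BucketTracker` is a hypothetical stand-in for `HistoricalBucketRangeTracker`:

    // Standalone model of the leaky-bucket tracker sketched above. Assumed
    // constants: 2047/2048 decay per datapoint, 32 == 1.0 in the 5-bit
    // fixed-point scheme, and one right shift per elapsed half-life.
    struct BucketTracker { buckets: [u16; 8] }

    impl BucketTracker {
        fn track_datapoint(&mut self, liquidity_offset_msat: u64, capacity_msat: u64) {
            // Map the offset into one of 8 octiles; the +1 keeps an offset equal
            // to the full capacity inside bucket 7 rather than (out-of-range) 8.
            let bucket_idx = (liquidity_offset_msat * 8 / capacity_msat.saturating_add(1)) as usize;
            if bucket_idx < 8 {
                // Leak a little out of every bucket, then add one full point.
                for e in self.buckets.iter_mut() { *e = ((*e as u32) * 2047 / 2048) as u16; }
                self.buckets[bucket_idx] = self.buckets[bucket_idx].saturating_add(32);
            }
        }
        fn time_decay_data(&mut self, half_lives: u32) {
            // Whole-half-life decay as in get_decayed_buckets above: halve
            // every bucket once per elapsed half-life via a right shift.
            for e in self.buckets.iter_mut() { *e = e.checked_shr(half_lives).unwrap_or(0); }
        }
    }

    fn main() {
        let mut t = BucketTracker { buckets: [0; 8] };
        t.track_datapoint(999, 1_000);   // 999 * 8 / 1_001 == 7 -> top bucket
        t.track_datapoint(0, 1_000);     // bottom bucket; bucket 7 decays 32 -> 31
        assert_eq!(t.buckets, [32, 0, 0, 0, 0, 0, 0, 31]);
        t.time_decay_data(2);            // two half-lives quarter everything
        assert_eq!(t.buckets, [8, 0, 0, 0, 0, 0, 0, 7]);
    }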
pub fn debug_log_liquidity_stats(&self) { + let now = T::now(); + let graph = self.network_graph.read_only(); for (scid, liq) in self.channel_liquidities.iter() { if let Some(chan_debug) = graph.channels().get(scid) { let log_direction = |source, target| { if let Some((directed_info, _)) = chan_debug.as_directed_to(target) { let amt = directed_info.effective_capacity().as_msat(); - let dir_liq = liq.as_directed(source, target, amt, &self.params); - log_debug!(self.logger, "Liquidity from {:?} to {:?} via {} is in the range ({}, {})", - source, target, scid, dir_liq.min_liquidity_msat(), dir_liq.max_liquidity_msat()); + let dir_liq = liq.as_directed(source, target, 0, amt, &self.params); + + let buckets = HistoricalMinMaxBuckets { + min_liquidity_offset_history: &dir_liq.min_liquidity_offset_history, + max_liquidity_offset_history: &dir_liq.max_liquidity_offset_history, + }; + let (min_buckets, max_buckets, _) = buckets.get_decayed_buckets(now, + *dir_liq.last_updated, self.params.historical_no_updates_half_life); + + log_debug!(self.logger, core::concat!( + "Liquidity from {} to {} via {} is in the range ({}, {}).\n", + "\tHistorical min liquidity octile relative probabilities: {} {} {} {} {} {} {} {}\n", + "\tHistorical max liquidity octile relative probabilities: {} {} {} {} {} {} {} {}"), + source, target, scid, dir_liq.min_liquidity_msat(), dir_liq.max_liquidity_msat(), + min_buckets[0], min_buckets[1], min_buckets[2], min_buckets[3], + min_buckets[4], min_buckets[5], min_buckets[6], min_buckets[7], + // Note that the liquidity buckets are an offset from the edge, so we + // inverse the max order to get the probabilities from zero. + max_buckets[7], max_buckets[6], max_buckets[5], max_buckets[4], + max_buckets[3], max_buckets[2], max_buckets[1], max_buckets[0]); } else { log_debug!(self.logger, "No amount known for SCID {} from {:?} to {:?}", scid, source, target); } @@ -748,7 +788,7 @@ impl>, L: Deref, T: Time> ProbabilisticScorerU if let Some(liq) = self.channel_liquidities.get(&scid) { if let Some((directed_info, source)) = chan.as_directed_to(target) { let amt = directed_info.effective_capacity().as_msat(); - let dir_liq = liq.as_directed(source, target, amt, &self.params); + let dir_liq = liq.as_directed(source, target, 0, amt, &self.params); return Some((dir_liq.min_liquidity_msat(), dir_liq.max_liquidity_msat())); } } @@ -756,6 +796,53 @@ impl>, L: Deref, T: Time> ProbabilisticScorerU None } + /// Query the historical estimated minimum and maximum liquidity available for sending a + /// payment over the channel with `scid` towards the given `target` node. + /// + /// Returns two sets of 8 buckets. The first set describes the octiles for lower-bound + /// liquidity estimates, the second set describes the octiles for upper-bound liquidity + /// estimates. Each bucket describes the relative frequency at which we've seen a liquidity + /// bound in the octile relative to the channel's total capacity, on an arbitrary scale. + /// Because the values are slowly decayed, more recent data points are weighted more heavily + /// than older datapoints. + /// + /// When scoring, the estimated probability that an upper-/lower-bound lies in a given octile + /// relative to the channel's total capacity is calculated by dividing that bucket's value with + /// the total of all buckets for the given bound. 
+	///
+	/// For example, a value of `[0, 0, 0, 0, 0, 0, 0, 32]` indicates that we believe the probability
+	/// of a bound being in the top octile to be 100%, and have never (recently) seen it in any
+	/// other octiles. A value of `[31, 0, 0, 0, 0, 0, 0, 32]` indicates we've seen the bound being
+	/// both in the top and bottom octile, and roughly with similar (recent) frequency.
+	///
+	/// Because the datapoints are decayed slowly over time, values will eventually return to
+	/// `Some(([0; 8], [0; 8]))`.
+	pub fn historical_estimated_channel_liquidity_probabilities(&self, scid: u64, target: &NodeId)
+	-> Option<([u16; 8], [u16; 8])> {
+		let graph = self.network_graph.read_only();
+
+		if let Some(chan) = graph.channels().get(&scid) {
+			if let Some(liq) = self.channel_liquidities.get(&scid) {
+				if let Some((directed_info, source)) = chan.as_directed_to(target) {
+					let amt = directed_info.effective_capacity().as_msat();
+					let dir_liq = liq.as_directed(source, target, 0, amt, &self.params);
+
+					let buckets = HistoricalMinMaxBuckets {
+						min_liquidity_offset_history: &dir_liq.min_liquidity_offset_history,
+						max_liquidity_offset_history: &dir_liq.max_liquidity_offset_history,
+					};
+					let (min_buckets, mut max_buckets, _) = buckets.get_decayed_buckets(T::now(),
+						*dir_liq.last_updated, self.params.historical_no_updates_half_life);
+					// Note that the liquidity buckets are an offset from the edge, so we reverse
+					// the max order to get the probabilities from zero.
+					max_buckets.reverse();
+					return Some((min_buckets, max_buckets));
+				}
+			}
+		}
+		None
+	}
+
 	/// Marks the node with the given `node_id` as banned, i.e.,
 	/// it will be avoided during path finding.
 	pub fn add_banned(&mut self, node_id: &NodeId) {
@@ -843,7 +930,8 @@ impl<T: Time> ChannelLiquidity<T> {
 	/// Returns a view of the channel liquidity directed from `source` to `target` assuming
 	/// `capacity_msat`.
 	fn as_directed<'a>(
-		&self, source: &NodeId, target: &NodeId, capacity_msat: u64, params: &'a ProbabilisticScoringParameters
+		&self, source: &NodeId, target: &NodeId, inflight_htlc_msat: u64, capacity_msat: u64,
+		params: &'a ProbabilisticScoringParameters
 	) -> DirectedChannelLiquidity<'a, &u64, &HistoricalBucketRangeTracker, T, &T> {
 		let (min_liquidity_offset_msat, max_liquidity_offset_msat, min_liquidity_offset_history,
 			max_liquidity_offset_history) = if source < target {
@@ -859,6 +947,7 @@ impl<T: Time> ChannelLiquidity<T> {
 			max_liquidity_offset_msat,
 			min_liquidity_offset_history,
 			max_liquidity_offset_history,
+			inflight_htlc_msat,
 			capacity_msat,
 			last_updated: &self.last_updated,
 			now: T::now(),
@@ -869,7 +958,8 @@ impl<T: Time> ChannelLiquidity<T> {
 	/// Returns a mutable view of the channel liquidity directed from `source` to `target` assuming
 	/// `capacity_msat`.
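As a usage sketch for the octile accessor documented above: the raw buckets it returns can be normalized into per-octile probabilities by dividing each bucket by the total of all buckets, mirroring how the scorer itself weighs them. `octile_probabilities` is a hypothetical helper, not part of the crate:

    // Hypothetical helper turning the returned raw buckets into probabilities.
    fn octile_probabilities(buckets: [u16; 8]) -> Option<[f64; 8]> {
        let total: u32 = buckets.iter().map(|b| *b as u32).sum();
        if total == 0 { return None; } // fully decayed: no usable data
        let mut probs = [0.0; 8];
        for (p, b) in probs.iter_mut().zip(buckets.iter()) {
            *p = *b as f64 / total as f64;
        }
        Some(probs)
    }

    fn main() {
        // The doc's second example: a bound seen in both the bottom and top
        // octile with roughly similar recent frequency.
        let probs = octile_probabilities([31, 0, 0, 0, 0, 0, 0, 32]).unwrap();
        assert!((probs[0] - 31.0 / 63.0).abs() < 1e-9); // ~49%
        assert!((probs[7] - 32.0 / 63.0).abs() < 1e-9); // ~51%
    }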
fn as_directed_mut<'a>( - &mut self, source: &NodeId, target: &NodeId, capacity_msat: u64, params: &'a ProbabilisticScoringParameters + &mut self, source: &NodeId, target: &NodeId, inflight_htlc_msat: u64, capacity_msat: u64, + params: &'a ProbabilisticScoringParameters ) -> DirectedChannelLiquidity<'a, &mut u64, &mut HistoricalBucketRangeTracker, T, &mut T> { let (min_liquidity_offset_msat, max_liquidity_offset_msat, min_liquidity_offset_history, max_liquidity_offset_history) = if source < target { @@ -885,6 +975,7 @@ impl ChannelLiquidity { max_liquidity_offset_msat, min_liquidity_offset_history, max_liquidity_offset_history, + inflight_htlc_msat, capacity_msat, last_updated: &mut self.last_updated, now: T::now(), @@ -942,10 +1033,16 @@ impl, BRT: Deref, if params.historical_liquidity_penalty_multiplier_msat != 0 || params.historical_liquidity_penalty_amount_multiplier_msat != 0 { - let required_decays = self.now.duration_since(*self.last_updated).as_secs() - .checked_div(params.historical_no_updates_half_life.as_secs()) - .map_or(u32::max_value(), |decays| cmp::min(decays, u32::max_value() as u64) as u32); - let payment_amt_64th_bucket = amount_msat * 64 / self.capacity_msat; + let payment_amt_64th_bucket = if amount_msat < u64::max_value() / 64 { + amount_msat * 64 / self.capacity_msat.saturating_add(1) + } else { + // Only use 128-bit arithmetic when multiplication will overflow to avoid 128-bit + // division. This branch should only be hit in fuzz testing since the amount would + // need to be over 2.88 million BTC in practice. + ((amount_msat as u128) * 64 / (self.capacity_msat as u128).saturating_add(1)) + .try_into().unwrap_or(65) + }; + #[cfg(not(fuzzing))] debug_assert!(payment_amt_64th_bucket <= 64); if payment_amt_64th_bucket > 64 { return res; } @@ -954,7 +1051,9 @@ impl, BRT: Deref, max_liquidity_offset_history: &self.max_liquidity_offset_history, }; if let Some(cumulative_success_prob_times_billion) = buckets - .calculate_success_probability_times_billion(required_decays, payment_amt_64th_bucket as u8) { + .calculate_success_probability_times_billion(self.now, *self.last_updated, + params.historical_no_updates_half_life, payment_amt_64th_bucket as u8) + { let historical_negative_log10_times_2048 = approx::negative_log10_times_2048(cumulative_success_prob_times_billion + 1, 1024 * 1024 * 1024); res = res.saturating_add(Self::combined_penalty_msat(amount_msat, historical_negative_log10_times_2048, params.historical_liquidity_penalty_multiplier_msat, @@ -963,13 +1062,14 @@ impl, BRT: Deref, // If we don't have any valid points (or, once decayed, we have less than a full // point), redo the non-historical calculation with no liquidity bounds tracked and // the historical penalty multipliers. 
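Stepping back to the `payment_amt_64th_bucket` computation above, a standalone sketch of the same two-branch arithmetic may help; the 65 sentinel is simply any value past the last valid bucket:

    use core::convert::TryInto;

    // Sketch of the payment-amount bucketing above: map amount_msat into one
    // of 64 capacity slices, falling back to u128 math only when the u64
    // multiply would overflow (amounts over ~2.88 million BTC).
    fn payment_amt_64th_bucket(amount_msat: u64, capacity_msat: u64) -> u64 {
        if amount_msat < u64::max_value() / 64 {
            amount_msat * 64 / capacity_msat.saturating_add(1)
        } else {
            ((amount_msat as u128) * 64 / (capacity_msat as u128).saturating_add(1))
                .try_into().unwrap_or(65) // anything > 64 is rejected by the caller
        }
    }

    fn main() {
        // Half the capacity lands in the middle: 500_000 * 64 / 1_000_001 == 31.
        assert_eq!(payment_amt_64th_bucket(500_000, 1_000_000), 31);
        // An amount twice the capacity maps past bucket 64 and is skipped.
        assert!(payment_amt_64th_bucket(2_000_000, 1_000_000) > 64);
    }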
- let max_capacity = self.capacity_msat.saturating_sub(amount_msat).saturating_add(1); + let available_capacity = self.available_capacity(); + let numerator = available_capacity.saturating_sub(amount_msat).saturating_add(1); + let denominator = available_capacity.saturating_add(1); let negative_log10_times_2048 = - approx::negative_log10_times_2048(max_capacity, self.capacity_msat.saturating_add(1)); + approx::negative_log10_times_2048(numerator, denominator); res = res.saturating_add(Self::combined_penalty_msat(amount_msat, negative_log10_times_2048, params.historical_liquidity_penalty_multiplier_msat, params.historical_liquidity_penalty_amount_multiplier_msat)); - return res; } } @@ -1001,9 +1101,13 @@ impl, BRT: Deref, /// Returns the upper bound of the channel liquidity balance in this direction. fn max_liquidity_msat(&self) -> u64 { - self.capacity_msat - .checked_sub(self.decayed_offset_msat(*self.max_liquidity_offset_msat)) - .unwrap_or(0) + self.available_capacity() + .saturating_sub(self.decayed_offset_msat(*self.max_liquidity_offset_msat)) + } + + /// Returns the capacity minus the in-flight HTLCs in this direction. + fn available_capacity(&self) -> u64 { + self.capacity_msat.saturating_sub(self.inflight_htlc_msat) } fn decayed_offset_msat(&self, offset_msat: u64) -> u64 { @@ -1025,6 +1129,7 @@ impl, BRT: DerefMut, BRT: DerefMut, BRT: DerefMut, BRT: DerefMut, BRT: DerefMut, BRT: DerefMut>, L: Deref, T: Time> Score for Probabilis } let amount_msat = usage.amount_msat; - let capacity_msat = usage.effective_capacity.as_msat() - .saturating_sub(usage.inflight_htlc_msat); + let capacity_msat = usage.effective_capacity.as_msat(); + let inflight_htlc_msat = usage.inflight_htlc_msat; self.channel_liquidities .get(&short_channel_id) .unwrap_or(&ChannelLiquidity::new()) - .as_directed(source, target, capacity_msat, &self.params) + .as_directed(source, target, inflight_htlc_msat, capacity_msat, &self.params) .penalty_msat(amount_msat, &self.params) .saturating_add(anti_probing_penalty_msat) .saturating_add(base_penalty_msat) @@ -1155,13 +1256,13 @@ impl>, L: Deref, T: Time> Score for Probabilis self.channel_liquidities .entry(hop.short_channel_id) .or_insert_with(ChannelLiquidity::new) - .as_directed_mut(source, &target, capacity_msat, &self.params) + .as_directed_mut(source, &target, 0, capacity_msat, &self.params) .failed_at_channel(amount_msat, format_args!("SCID {}, towards {:?}", hop.short_channel_id, target), &self.logger); } else { self.channel_liquidities .entry(hop.short_channel_id) .or_insert_with(ChannelLiquidity::new) - .as_directed_mut(source, &target, capacity_msat, &self.params) + .as_directed_mut(source, &target, 0, capacity_msat, &self.params) .failed_downstream(amount_msat, format_args!("SCID {}, towards {:?}", hop.short_channel_id, target), &self.logger); } } else { @@ -1189,7 +1290,7 @@ impl>, L: Deref, T: Time> Score for Probabilis self.channel_liquidities .entry(hop.short_channel_id) .or_insert_with(ChannelLiquidity::new) - .as_directed_mut(source, &target, capacity_msat, &self.params) + .as_directed_mut(source, &target, 0, capacity_msat, &self.params) .successful(amount_msat, format_args!("SCID {}, towards {:?}", hop.short_channel_id, target), &self.logger); } else { log_debug!(self.logger, "Not able to learn for channel with SCID {} as we do not have graph info for it (likely a route-hint last-hop).", @@ -1602,6 +1703,7 @@ impl Readable for ChannelLiquidity { #[cfg(test)] mod tests { use super::{ChannelLiquidity, HistoricalBucketRangeTracker, 
ProbabilisticScoringParameters, ProbabilisticScorerUsingTime}; + use crate::util::config::UserConfig; use crate::util::time::Time; use crate::util::time::tests::SinceEpoch; @@ -1679,8 +1781,7 @@ mod tests { } fn network_graph(logger: &TestLogger) -> NetworkGraph<&TestLogger> { - let genesis_hash = genesis_block(Network::Testnet).header.block_hash(); - let mut network_graph = NetworkGraph::new(genesis_hash, logger); + let mut network_graph = NetworkGraph::new(Network::Testnet, logger); add_channel(&mut network_graph, 42, source_privkey(), target_privkey()); add_channel(&mut network_graph, 43, target_privkey(), recipient_privkey()); @@ -1696,13 +1797,13 @@ mod tests { let node_2_secret = &SecretKey::from_slice(&[40; 32]).unwrap(); let secp_ctx = Secp256k1::new(); let unsigned_announcement = UnsignedChannelAnnouncement { - features: channelmanager::provided_channel_features(), + features: channelmanager::provided_channel_features(&UserConfig::default()), chain_hash: genesis_hash, short_channel_id, - node_id_1: PublicKey::from_secret_key(&secp_ctx, &node_1_key), - node_id_2: PublicKey::from_secret_key(&secp_ctx, &node_2_key), - bitcoin_key_1: PublicKey::from_secret_key(&secp_ctx, &node_1_secret), - bitcoin_key_2: PublicKey::from_secret_key(&secp_ctx, &node_2_secret), + node_id_1: NodeId::from_pubkey(&PublicKey::from_secret_key(&secp_ctx, &node_1_key)), + node_id_2: NodeId::from_pubkey(&PublicKey::from_secret_key(&secp_ctx, &node_2_key)), + bitcoin_key_1: NodeId::from_pubkey(&PublicKey::from_secret_key(&secp_ctx, &node_1_secret)), + bitcoin_key_2: NodeId::from_pubkey(&PublicKey::from_secret_key(&secp_ctx, &node_2_secret)), excess_data: Vec::new(), }; let msghash = hash_to_message!(&Sha256dHash::hash(&unsigned_announcement.encode()[..])[..]); @@ -1716,13 +1817,13 @@ mod tests { let chain_source: Option<&crate::util::test_utils::TestChainSource> = None; network_graph.update_channel_from_announcement( &signed_announcement, &chain_source).unwrap(); - update_channel(network_graph, short_channel_id, node_1_key, 0); - update_channel(network_graph, short_channel_id, node_2_key, 1); + update_channel(network_graph, short_channel_id, node_1_key, 0, 1_000); + update_channel(network_graph, short_channel_id, node_2_key, 1, 0); } fn update_channel( network_graph: &mut NetworkGraph<&TestLogger>, short_channel_id: u64, node_key: SecretKey, - flags: u8 + flags: u8, htlc_maximum_msat: u64 ) { let genesis_hash = genesis_block(Network::Testnet).header.block_hash(); let secp_ctx = Secp256k1::new(); @@ -1733,7 +1834,7 @@ mod tests { flags, cltv_expiry_delta: 18, htlc_minimum_msat: 0, - htlc_maximum_msat: 1_000, + htlc_maximum_msat, fee_base_msat: 1, fee_proportional_millionths: 0, excess_data: Vec::new(), @@ -1747,11 +1848,12 @@ mod tests { } fn path_hop(pubkey: PublicKey, short_channel_id: u64, fee_msat: u64) -> RouteHop { + let config = UserConfig::default(); RouteHop { pubkey, - node_features: channelmanager::provided_node_features(), + node_features: channelmanager::provided_node_features(&config), short_channel_id, - channel_features: channelmanager::provided_channel_features(), + channel_features: channelmanager::provided_channel_features(&config), fee_msat, cltv_expiry_delta: 18, } @@ -1793,52 +1895,52 @@ mod tests { // Update minimum liquidity. 
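The directed-view assertions that follow lean on one invariant: a single `ChannelLiquidity` stores a liquidity offset from each end of the channel, and `as_directed` interprets them for a direction, so for capacity `c` the two directions mirror each other (min in one direction is an offset from its end; max is `c` minus the opposite offset). A simplified, hypothetical model of that relationship, not the crate's actual types:

    // Hypothetical, simplified model of the directed liquidity view: one
    // offset is stored per channel end; a direction picks which offset acts
    // as the lower bound and which (subtracted from capacity) as the upper.
    struct Liq { offset_from_a_msat: u64, offset_from_b_msat: u64, capacity_msat: u64 }

    impl Liq {
        fn bounds_a_to_b(&self) -> (u64, u64) {
            (self.offset_from_a_msat, self.capacity_msat - self.offset_from_b_msat)
        }
        fn bounds_b_to_a(&self) -> (u64, u64) {
            (self.offset_from_b_msat, self.capacity_msat - self.offset_from_a_msat)
        }
    }

    fn main() {
        let liq = Liq { offset_from_a_msat: 100, offset_from_b_msat: 700, capacity_msat: 1_000 };
        assert_eq!(liq.bounds_a_to_b(), (100, 300)); // matches the (100, 300) case below
        assert_eq!(liq.bounds_b_to_a(), (700, 900)); // and its (700, 900) mirror
    }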
let liquidity = scorer.channel_liquidities.get(&42).unwrap() - .as_directed(&source, &target, 1_000, &scorer.params); + .as_directed(&source, &target, 0, 1_000, &scorer.params); assert_eq!(liquidity.min_liquidity_msat(), 100); assert_eq!(liquidity.max_liquidity_msat(), 300); let liquidity = scorer.channel_liquidities.get(&42).unwrap() - .as_directed(&target, &source, 1_000, &scorer.params); + .as_directed(&target, &source, 0, 1_000, &scorer.params); assert_eq!(liquidity.min_liquidity_msat(), 700); assert_eq!(liquidity.max_liquidity_msat(), 900); scorer.channel_liquidities.get_mut(&42).unwrap() - .as_directed_mut(&source, &target, 1_000, &scorer.params) + .as_directed_mut(&source, &target, 0, 1_000, &scorer.params) .set_min_liquidity_msat(200); let liquidity = scorer.channel_liquidities.get(&42).unwrap() - .as_directed(&source, &target, 1_000, &scorer.params); + .as_directed(&source, &target, 0, 1_000, &scorer.params); assert_eq!(liquidity.min_liquidity_msat(), 200); assert_eq!(liquidity.max_liquidity_msat(), 300); let liquidity = scorer.channel_liquidities.get(&42).unwrap() - .as_directed(&target, &source, 1_000, &scorer.params); + .as_directed(&target, &source, 0, 1_000, &scorer.params); assert_eq!(liquidity.min_liquidity_msat(), 700); assert_eq!(liquidity.max_liquidity_msat(), 800); // Update maximum liquidity. let liquidity = scorer.channel_liquidities.get(&43).unwrap() - .as_directed(&target, &recipient, 1_000, &scorer.params); + .as_directed(&target, &recipient, 0, 1_000, &scorer.params); assert_eq!(liquidity.min_liquidity_msat(), 700); assert_eq!(liquidity.max_liquidity_msat(), 900); let liquidity = scorer.channel_liquidities.get(&43).unwrap() - .as_directed(&recipient, &target, 1_000, &scorer.params); + .as_directed(&recipient, &target, 0, 1_000, &scorer.params); assert_eq!(liquidity.min_liquidity_msat(), 100); assert_eq!(liquidity.max_liquidity_msat(), 300); scorer.channel_liquidities.get_mut(&43).unwrap() - .as_directed_mut(&target, &recipient, 1_000, &scorer.params) + .as_directed_mut(&target, &recipient, 0, 1_000, &scorer.params) .set_max_liquidity_msat(200); let liquidity = scorer.channel_liquidities.get(&43).unwrap() - .as_directed(&target, &recipient, 1_000, &scorer.params); + .as_directed(&target, &recipient, 0, 1_000, &scorer.params); assert_eq!(liquidity.min_liquidity_msat(), 0); assert_eq!(liquidity.max_liquidity_msat(), 200); let liquidity = scorer.channel_liquidities.get(&43).unwrap() - .as_directed(&recipient, &target, 1_000, &scorer.params); + .as_directed(&recipient, &target, 0, 1_000, &scorer.params); assert_eq!(liquidity.min_liquidity_msat(), 800); assert_eq!(liquidity.max_liquidity_msat(), 1000); } @@ -1862,42 +1964,42 @@ mod tests { // Check initial bounds. let liquidity = scorer.channel_liquidities.get(&42).unwrap() - .as_directed(&source, &target, 1_000, &scorer.params); + .as_directed(&source, &target, 0, 1_000, &scorer.params); assert_eq!(liquidity.min_liquidity_msat(), 400); assert_eq!(liquidity.max_liquidity_msat(), 800); let liquidity = scorer.channel_liquidities.get(&42).unwrap() - .as_directed(&target, &source, 1_000, &scorer.params); + .as_directed(&target, &source, 0, 1_000, &scorer.params); assert_eq!(liquidity.min_liquidity_msat(), 200); assert_eq!(liquidity.max_liquidity_msat(), 600); // Reset from source to target. 
scorer.channel_liquidities.get_mut(&42).unwrap() - .as_directed_mut(&source, &target, 1_000, &scorer.params) + .as_directed_mut(&source, &target, 0, 1_000, &scorer.params) .set_min_liquidity_msat(900); let liquidity = scorer.channel_liquidities.get(&42).unwrap() - .as_directed(&source, &target, 1_000, &scorer.params); + .as_directed(&source, &target, 0, 1_000, &scorer.params); assert_eq!(liquidity.min_liquidity_msat(), 900); assert_eq!(liquidity.max_liquidity_msat(), 1_000); let liquidity = scorer.channel_liquidities.get(&42).unwrap() - .as_directed(&target, &source, 1_000, &scorer.params); + .as_directed(&target, &source, 0, 1_000, &scorer.params); assert_eq!(liquidity.min_liquidity_msat(), 0); assert_eq!(liquidity.max_liquidity_msat(), 100); // Reset from target to source. scorer.channel_liquidities.get_mut(&42).unwrap() - .as_directed_mut(&target, &source, 1_000, &scorer.params) + .as_directed_mut(&target, &source, 0, 1_000, &scorer.params) .set_min_liquidity_msat(400); let liquidity = scorer.channel_liquidities.get(&42).unwrap() - .as_directed(&source, &target, 1_000, &scorer.params); + .as_directed(&source, &target, 0, 1_000, &scorer.params); assert_eq!(liquidity.min_liquidity_msat(), 0); assert_eq!(liquidity.max_liquidity_msat(), 600); let liquidity = scorer.channel_liquidities.get(&42).unwrap() - .as_directed(&target, &source, 1_000, &scorer.params); + .as_directed(&target, &source, 0, 1_000, &scorer.params); assert_eq!(liquidity.min_liquidity_msat(), 400); assert_eq!(liquidity.max_liquidity_msat(), 1_000); } @@ -1921,42 +2023,42 @@ mod tests { // Check initial bounds. let liquidity = scorer.channel_liquidities.get(&42).unwrap() - .as_directed(&source, &target, 1_000, &scorer.params); + .as_directed(&source, &target, 0, 1_000, &scorer.params); assert_eq!(liquidity.min_liquidity_msat(), 400); assert_eq!(liquidity.max_liquidity_msat(), 800); let liquidity = scorer.channel_liquidities.get(&42).unwrap() - .as_directed(&target, &source, 1_000, &scorer.params); + .as_directed(&target, &source, 0, 1_000, &scorer.params); assert_eq!(liquidity.min_liquidity_msat(), 200); assert_eq!(liquidity.max_liquidity_msat(), 600); // Reset from source to target. scorer.channel_liquidities.get_mut(&42).unwrap() - .as_directed_mut(&source, &target, 1_000, &scorer.params) + .as_directed_mut(&source, &target, 0, 1_000, &scorer.params) .set_max_liquidity_msat(300); let liquidity = scorer.channel_liquidities.get(&42).unwrap() - .as_directed(&source, &target, 1_000, &scorer.params); + .as_directed(&source, &target, 0, 1_000, &scorer.params); assert_eq!(liquidity.min_liquidity_msat(), 0); assert_eq!(liquidity.max_liquidity_msat(), 300); let liquidity = scorer.channel_liquidities.get(&42).unwrap() - .as_directed(&target, &source, 1_000, &scorer.params); + .as_directed(&target, &source, 0, 1_000, &scorer.params); assert_eq!(liquidity.min_liquidity_msat(), 700); assert_eq!(liquidity.max_liquidity_msat(), 1_000); // Reset from target to source. 
scorer.channel_liquidities.get_mut(&42).unwrap() - .as_directed_mut(&target, &source, 1_000, &scorer.params) + .as_directed_mut(&target, &source, 0, 1_000, &scorer.params) .set_max_liquidity_msat(600); let liquidity = scorer.channel_liquidities.get(&42).unwrap() - .as_directed(&source, &target, 1_000, &scorer.params); + .as_directed(&source, &target, 0, 1_000, &scorer.params); assert_eq!(liquidity.min_liquidity_msat(), 400); assert_eq!(liquidity.max_liquidity_msat(), 1_000); let liquidity = scorer.channel_liquidities.get(&42).unwrap() - .as_directed(&target, &source, 1_000, &scorer.params); + .as_directed(&target, &source, 0, 1_000, &scorer.params); assert_eq!(liquidity.min_liquidity_msat(), 0); assert_eq!(liquidity.max_liquidity_msat(), 600); } @@ -2145,8 +2247,7 @@ mod tests { // we do not score such channels. let secp_ctx = Secp256k1::new(); let logger = TestLogger::new(); - let genesis_hash = genesis_block(Network::Testnet).header.block_hash(); - let mut network_graph = NetworkGraph::new(genesis_hash, &logger); + let mut network_graph = NetworkGraph::new(Network::Testnet, &logger); let secret_a = SecretKey::from_slice(&[42; 32]).unwrap(); let secret_b = SecretKey::from_slice(&[43; 32]).unwrap(); let secret_c = SecretKey::from_slice(&[44; 32]).unwrap(); @@ -2653,6 +2754,7 @@ mod tests { let logger = TestLogger::new(); let network_graph = network_graph(&logger); let params = ProbabilisticScoringParameters { + liquidity_offset_half_life: Duration::from_secs(60 * 60), historical_liquidity_penalty_multiplier_msat: 1024, historical_liquidity_penalty_amount_multiplier_msat: 1024, historical_no_updates_half_life: Duration::from_secs(10), @@ -2669,19 +2771,59 @@ mod tests { }; // With no historical data the normal liquidity penalty calculation is used. assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 47); + assert_eq!(scorer.historical_estimated_channel_liquidity_probabilities(42, &target), + None); scorer.payment_path_failed(&payment_path_for_amount(1).iter().collect::>(), 42); assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 2048); + // The "it failed" increment is 32, where the probability should lie fully in the first + // octile. + assert_eq!(scorer.historical_estimated_channel_liquidity_probabilities(42, &target), + Some(([32, 0, 0, 0, 0, 0, 0, 0], [32, 0, 0, 0, 0, 0, 0, 0]))); // Even after we tell the scorer we definitely have enough available liquidity, it will // still remember that there was some failure in the past, and assign a non-0 penalty. scorer.payment_path_failed(&payment_path_for_amount(1000).iter().collect::>(), 43); assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 198); + // The first octile should be decayed just slightly and the last octile has a new point. + assert_eq!(scorer.historical_estimated_channel_liquidity_probabilities(42, &target), + Some(([31, 0, 0, 0, 0, 0, 0, 32], [31, 0, 0, 0, 0, 0, 0, 32]))); // Advance the time forward 16 half-lives (which the docs claim will ensure all data is // gone), and check that we're back to where we started. SinceEpoch::advance(Duration::from_secs(10 * 16)); assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 47); + // Once fully decayed we still have data, but its all-0s. In the future we may remove the + // data entirely instead. 
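Why 16 half-lives are always enough to reach the all-zero state asserted next: each half-life right-shifts the u16 buckets once, and no u16 value survives 16 shifts. A quick check:

    fn main() {
        // A saturated u16 bucket survives 15 half-lives of decay, but not 16.
        assert_eq!(u16::max_value().checked_shr(15).unwrap_or(0), 1);
        assert_eq!(u16::max_value().checked_shr(16).unwrap_or(0), 0);
    }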
+		assert_eq!(scorer.historical_estimated_channel_liquidity_probabilities(42, &target),
+			Some(([0; 8], [0; 8])));
+
+		let usage = ChannelUsage {
+			amount_msat: 100,
+			inflight_htlc_msat: 1024,
+			effective_capacity: EffectiveCapacity::Total { capacity_msat: 1_024, htlc_maximum_msat: 1_024 },
+		};
+		scorer.payment_path_failed(&payment_path_for_amount(1).iter().collect::<Vec<_>>(), 42);
+		assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 409);
+
+		let usage = ChannelUsage {
+			amount_msat: 1,
+			inflight_htlc_msat: 0,
+			effective_capacity: EffectiveCapacity::MaximumHTLC { amount_msat: 0 },
+		};
+		assert_eq!(scorer.channel_penalty_msat(42, &target, &source, usage), 2048);
+
+		// Advance to decay all liquidity offsets to zero.
+		SinceEpoch::advance(Duration::from_secs(60 * 60 * 10));
+
+		// Use a path in the opposite direction, which has zero for htlc_maximum_msat. This will
+		// ensure that the effective capacity is zero to test division-by-zero edge cases.
+		let path = vec![
+			path_hop(target_pubkey(), 43, 2),
+			path_hop(source_pubkey(), 42, 1),
+			path_hop(sender_pubkey(), 41, 0),
+		];
+		scorer.payment_path_failed(&path.iter().collect::<Vec<_>>(), 42);
 	}

 	#[test]
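As a closing reference for the fallback penalty exercised in these tests: the scorer treats the probability of `amount` fitting through a channel with `available` liquidity as (available - amount + 1) / (available + 1), and charges roughly multiplier * -log10(p). The crate computes this in fixed point via `approx::negative_log10_times_2048`; the sketch below uses f64 for clarity, and its `saturating_add(1)` denominator is the same guard that keeps the zero-capacity direction above well-defined:

    // Floating-point model of the liquidity penalty fallback; the real code
    // uses an integer -log10 approximation (negative_log10_times_2048 / 2048).
    fn penalty_msat(amount_msat: u64, available_capacity_msat: u64,
                    liquidity_penalty_multiplier_msat: u64) -> u64 {
        let numerator = available_capacity_msat.saturating_sub(amount_msat).saturating_add(1);
        let denominator = available_capacity_msat.saturating_add(1);
        let success_prob = numerator as f64 / denominator as f64;
        // Less likely to succeed => larger -log10(p) => larger penalty.
        (liquidity_penalty_multiplier_msat as f64 * -success_prob.log10()) as u64
    }

    fn main() {
        // Sending half the available capacity: p ~= 0.5, so the penalty is
        // roughly 0.301 * multiplier.
        let p = penalty_msat(500, 1_000, 1_000);
        assert!((295..=305).contains(&p), "got {}", p);
        // Zero available capacity stays well-defined: numerator and
        // denominator both clamp to 1, and nothing divides by zero.
        assert_eq!(penalty_msat(0, 0, 1_000), 0);
    }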