+mod bucketed_history {
+ use super::*;
+
+ // Because liquidity is often skewed heavily in one direction, we store historical state
+ // distribution in buckets of different size. For backwards compatibility, buckets of size 1/8th
+ // must fit evenly into the buckets here.
+ //
+ // The smallest bucket is 2^-14th of the channel; for each of our 32 buckets here we define the
+ // width of the bucket in 2^14'ths of the channel. This increases exponentially until we reach
+ // a full 16th of the channel's capacity, which is repeated a few times for backwards
+ // compatibility. The four middle buckets represent full octiles of the channel's capacity.
+ //
+ // For a 1 BTC channel, this lets us differentiate between failures in the bottom 6k sats, or
+ // between the 12,000th sat and 24,000th sat, while only needing to store and operate on 32
+ // buckets in total.
+
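+ // The (inclusive) start position of each bucket, expressed in 2^14'ths of the channel. Entry
+ // `i + 1` is the (exclusive) end of bucket `i`; the extra 33rd entry marks the end of the last
+ // bucket at the channel's full capacity.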
+ const BUCKET_START_POS: [u16; 33] = [
+ 0, 1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024, 2048, 3072, 4096, 6144, 8192, 10240, 12288,
+ 13312, 14336, 15360, 15872, 16128, 16256, 16320, 16352, 16368, 16376, 16380, 16382, 16383, 16384,
+ ];
+
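+ // Maps each of the eight legacy octile buckets to the half-open range `[start, end)` of the
+ // 32 new buckets which covers the same portion of the channel.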
+ const LEGACY_TO_BUCKET_RANGE: [(u8, u8); 8] = [
+ (0, 12), (12, 14), (14, 15), (15, 16), (16, 17), (17, 18), (18, 20), (20, 32)
+ ];
+
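+ // The total number of positions an amount can be mapped to, i.e. 2^14.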
+ const POSITION_TICKS: u16 = 1 << 14;
+
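+ /// Returns the index of the bucket containing the given position. Callers should ensure
+ /// `pos < POSITION_TICKS`, making the fallthrough `return 32` unreachable.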
+ fn pos_to_bucket(pos: u16) -> usize {
+ for bucket in 0..32 {
+ if pos < BUCKET_START_POS[bucket + 1] {
+ return bucket;
+ }
+ }
+ debug_assert!(false);
+ return 32;
+ }
+
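+ // Sanity-check that the bucket tables above are consistent with each other and with
+ // `pos_to_bucket`.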
+ #[cfg(test)]
+ #[test]
+ fn check_bucket_maps() {
+ const BUCKET_WIDTH_IN_16384S: [u16; 32] = [
+ 1, 1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024, 1024, 1024, 2048, 2048,
+ 2048, 2048, 1024, 1024, 1024, 512, 256, 128, 64, 32, 16, 8, 4, 2, 1, 1];
+
+ let mut min_size_iter = 0;
+ let mut legacy_bucket_iter = 0;
+ for (bucket, width) in BUCKET_WIDTH_IN_16384S.iter().enumerate() {
+ assert_eq!(BUCKET_START_POS[bucket], min_size_iter);
+ for i in 0..*width {
+ assert_eq!(pos_to_bucket(min_size_iter + i), bucket);
+ }
+ min_size_iter += *width;
+ if min_size_iter % (POSITION_TICKS / 8) == 0 {
+ assert_eq!(LEGACY_TO_BUCKET_RANGE[legacy_bucket_iter].1 as usize, bucket + 1);
+ if legacy_bucket_iter + 1 < 8 {
+ assert_eq!(LEGACY_TO_BUCKET_RANGE[legacy_bucket_iter + 1].0 as usize, bucket + 1);
+ }
+ legacy_bucket_iter += 1;
+ }
+ }
+ assert_eq!(BUCKET_START_POS[32], POSITION_TICKS);
+ assert_eq!(min_size_iter, POSITION_TICKS);
+ }
+
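+ /// Maps an amount (relative to a channel's capacity) to a position in `[0, POSITION_TICKS)`.
+ /// May return a value at or above `POSITION_TICKS` if the amount exceeds the capacity;
+ /// callers check for this.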
+ #[inline]
+ fn amount_to_pos(amount_msat: u64, capacity_msat: u64) -> u16 {
+ let pos = if amount_msat < u64::max_value() / (POSITION_TICKS as u64) {
+ (amount_msat * (POSITION_TICKS as u64) / capacity_msat.saturating_add(1))
+ .try_into().unwrap_or(POSITION_TICKS)
+ } else {
+ // Only use 128-bit arithmetic when multiplication will overflow to avoid 128-bit
+ // division. This branch should only be hit in fuzz testing since the amount would
+ // need to be over 2.88 million BTC in practice.
+ ((amount_msat as u128) * (POSITION_TICKS as u128)
+ / (capacity_msat as u128).saturating_add(1))
+ .try_into().unwrap_or(POSITION_TICKS)
+ };
+ // If we are running in a client that doesn't validate gossip, it's possible for a channel's
+ // capacity to change due to a `channel_update` message which, if received while a payment
+ // is in-flight, could cause this to fail. Thus, we only assert in test.
+ #[cfg(test)]
+ debug_assert!(pos < POSITION_TICKS);
+ pos
+ }
+
+ /// Prior to LDK 0.0.117 we used eight buckets which were split evenly across the channel's
+ /// liquidity, one per octile. This was changed to use 32 buckets for accuracy reasons in
+ /// 0.0.117, however we support reading the legacy values here for backwards compatibility.
+ pub(super) struct LegacyHistoricalBucketRangeTracker {
+ buckets: [u16; 8],
+ }
+
+ impl LegacyHistoricalBucketRangeTracker {
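+ /// Converts the legacy eight-bucket state into the current 32-bucket form, splitting each
+ /// legacy bucket's value evenly across the new buckets it maps to.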
+ pub(crate) fn into_current(&self) -> HistoricalBucketRangeTracker {
+ let mut buckets = [0; 32];
+ for (idx, legacy_bucket) in self.buckets.iter().enumerate() {
+ let mut new_val = *legacy_bucket;
+ let (start, end) = LEGACY_TO_BUCKET_RANGE[idx];
+ new_val /= (end - start) as u16;
+ for i in start..end {
+ buckets[i as usize] = new_val;
+ }
+ }
+ HistoricalBucketRangeTracker { buckets }
+ }
+ }
+
+ /// Tracks the historical state of a distribution as a weighted average of how much time was spent
+ /// in each of 32 buckets.
+ #[derive(Clone, Copy)]
+ pub(super) struct HistoricalBucketRangeTracker {
+ buckets: [u16; 32],
+ }
+
+ /// Buckets are stored in fixed point numbers with a 5 bit fractional part. Thus, the value
+ /// "one" is 32, or this constant.
+ pub const BUCKET_FIXED_POINT_ONE: u16 = 32;
+
+ impl HistoricalBucketRangeTracker {
+ pub(super) fn new() -> Self { Self { buckets: [0; 32] } }
+ pub(super) fn track_datapoint(&mut self, liquidity_offset_msat: u64, capacity_msat: u64) {
+ // We have 32 leaky buckets for min and max liquidity. Each bucket tracks the amount of time
+ // we spend in each bucket as a 16-bit fixed-point number with a 5 bit fractional part.
+ //
+ // Each time we update our liquidity estimate, we add 32 (1.0 in our fixed-point system) to
+ // the buckets for the current min and max liquidity offset positions.
+ //
+ // We then decay each bucket by multiplying by 2047/2048 (avoiding dividing by a
+ // non-power-of-two). This ensures we can't actually overflow the u16 - when we get to
+ // 63,457 adding 32 and decaying by 2047/2048 leaves us back at 63,457.
+ //
+ // In total, this allows us to track data for the last 8,000 or so payments across a given
+ // channel.
+ //
+ // These constants are a balance - we try to fit in 2 bytes per bucket to reduce overhead,
+ // and need to balance having more bits in the decimal part (to ensure decay isn't too
+ // non-linear) with having too few bits in the mantissa, causing us to not store very many
+ // datapoints.
+ //
+ // The constants were picked experimentally, selecting a decay amount that restricts us
+ // from overflowing buckets without having to cap them manually.
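+ //
+ // For example, a bucket holding 1,000 decays to 1,000 * 2047 / 2048 = 999 (rounding down),
+ // and the bucket the new datapoint lands in then has 32 added to it.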
+
+ let pos: u16 = amount_to_pos(liquidity_offset_msat, capacity_msat);
+ if pos < POSITION_TICKS {
+ for e in self.buckets.iter_mut() {
+ *e = ((*e as u32) * 2047 / 2048) as u16;
+ }
+ let bucket = pos_to_bucket(pos);
+ self.buckets[bucket] = self.buckets[bucket].saturating_add(BUCKET_FIXED_POINT_ONE);
+ }
+ }
+ /// Decay all buckets by the given number of half-lives. Used to more aggressively remove old
+ /// datapoints as we receive newer information.
+ #[inline]
+ pub(super) fn time_decay_data(&mut self, half_lives: u32) {
+ for e in self.buckets.iter_mut() {
+ *e = e.checked_shr(half_lives).unwrap_or(0);
+ }
+ }
+ }
+
+ impl_writeable_tlv_based!(HistoricalBucketRangeTracker, { (0, buckets, required) });
+ impl_writeable_tlv_based!(LegacyHistoricalBucketRangeTracker, { (0, buckets, required) });
+
+ /// A set of buckets representing the history of where we've seen the minimum- and maximum-
+ /// liquidity bounds for a given channel.
+ pub(super) struct HistoricalMinMaxBuckets<D: Deref<Target = HistoricalBucketRangeTracker>> {
+ /// Buckets tracking where and how often we've seen the minimum liquidity bound for a
+ /// channel.
+ pub(super) min_liquidity_offset_history: D,
+ /// Buckets tracking where and how often we've seen the maximum liquidity bound for a
+ /// channel.
+ pub(super) max_liquidity_offset_history: D,
+ }
+
+ impl<D: Deref<Target = HistoricalBucketRangeTracker>> HistoricalMinMaxBuckets<D> {
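+ /// Returns the min- and max-liquidity bucket sets, decayed to the current time, or `None` if
+ /// the data has fully decayed (see [`Self::get_total_valid_points`]).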
+ pub(super) fn get_decayed_buckets<T: Time>(&self, now: T, last_updated: T, half_life: Duration)
+ -> Option<([u16; 32], [u16; 32])> {
+ let (_, required_decays) = self.get_total_valid_points(now, last_updated, half_life)?;
+
+ let mut min_buckets = *self.min_liquidity_offset_history;
+ min_buckets.time_decay_data(required_decays);
+ let mut max_buckets = *self.max_liquidity_offset_history;
+ max_buckets.time_decay_data(required_decays);
+ Some((min_buckets.buckets, max_buckets.buckets))
+ }
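+ /// Returns the total number of (min, max) bucket point combinations tracked, along with the
+ /// number of half-life decays needed to bring the data up to date, or `None` if the data has
+ /// decayed to less than a full point.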
+ #[inline]
+ pub(super) fn get_total_valid_points<T: Time>(&self, now: T, last_updated: T, half_life: Duration)
+ -> Option<(u64, u32)> {
+ let required_decays = now.duration_since(last_updated).as_secs()
+ .checked_div(half_life.as_secs())
+ .map_or(u32::max_value(), |decays| cmp::min(decays, u32::max_value() as u64) as u32);
+
+ let mut total_valid_points_tracked = 0;
+ for (min_idx, min_bucket) in self.min_liquidity_offset_history.buckets.iter().enumerate() {
+ for max_bucket in self.max_liquidity_offset_history.buckets.iter().take(32 - min_idx) {
+ total_valid_points_tracked += (*min_bucket as u64) * (*max_bucket as u64);
+ }
+ }
+
+ // If the total valid points is smaller than 1.0 (i.e. 32 * 32 in our fixed-point scheme,
+ // as each point is the product of two fixed-point bucket values), treat it as if we were
+ // fully decayed.
+ const FULLY_DECAYED: u16 = BUCKET_FIXED_POINT_ONE * BUCKET_FIXED_POINT_ONE;
+ if total_valid_points_tracked.checked_shr(required_decays).unwrap_or(0) < FULLY_DECAYED.into() {
+ return None;
+ }
+
+ Some((total_valid_points_tracked, required_decays))
+ }
+
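+ /// Estimates the probability of successfully sending `amount_msat` over the channel given
+ /// the historical liquidity bucket data, returned as the probability multiplied by 2^30
+ /// (i.e. approximately one billion), or `None` if we lack usable data.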
+ #[inline]
+ pub(super) fn calculate_success_probability_times_billion<T: Time>(
+ &self, now: T, last_updated: T, half_life: Duration,
+ params: &ProbabilisticScoringFeeParameters, amount_msat: u64, capacity_msat: u64
+ ) -> Option<u64> {
+ // If historical penalties are enabled, we try to calculate a probability of success
+ // given our historical distribution of min- and max-liquidity bounds in a channel.
+ // To do so, we walk the set of historical liquidity bucket (min, max) combinations
+ // (where min_idx + max_idx < 32, as max-buckets are indexed from the top of the channel
+ // and having a minimum above our maximum is an invalid state). For each pair, we
+ // calculate the probability as if the bucket's corresponding
+ // min- and max- liquidity bounds were our current liquidity bounds and then multiply
+ // that probability by the weight of the selected buckets.
+ let payment_pos = amount_to_pos(amount_msat, capacity_msat);
+ if payment_pos >= POSITION_TICKS { return None; }
+
+ // Check if all our buckets are zero once decayed, and treat it as if we had no data. We
+ // don't actually use the decayed buckets, though, as that would lose precision.
+ let (total_valid_points_tracked, _)
+ = self.get_total_valid_points(now, last_updated, half_life)?;
+
+ let mut cumulative_success_prob_times_billion = 0;
+ // Special-case the 0th min bucket - it generally means we failed a payment, so only
+ // consider the highest (i.e. largest-offset-from-max-capacity) max bucket for all
+ // points against the 0th min bucket. This avoids the case where we fail to route
+ // increasingly lower values over a channel, but treat each failure as a separate
+ // datapoint, many of which may have relatively high maximum-available-liquidity
+ // values, which will result in us thinking we have some nontrivial probability of
+ // routing up to that amount.
+ if self.min_liquidity_offset_history.buckets[0] != 0 {
+ let mut highest_max_bucket_with_points = 0; // The highest max-bucket with any data
+ let mut total_max_points = 0; // Total points in max-buckets to consider
+ for (max_idx, max_bucket) in self.max_liquidity_offset_history.buckets.iter().enumerate() {
+ if *max_bucket >= BUCKET_FIXED_POINT_ONE {
+ highest_max_bucket_with_points = cmp::max(highest_max_bucket_with_points, max_idx);
+ }
+ total_max_points += *max_bucket as u64;
+ }
+ let max_bucket_end_pos = BUCKET_START_POS[32 - highest_max_bucket_with_points] - 1;
+ if payment_pos < max_bucket_end_pos {
+ let (numerator, denominator) = success_probability(payment_pos as u64, 0,
+ max_bucket_end_pos as u64, POSITION_TICKS as u64 - 1, params, true);
+ let bucket_prob_times_billion =
+ (self.min_liquidity_offset_history.buckets[0] as u64) * total_max_points
+ * 1024 * 1024 * 1024 / total_valid_points_tracked;
+ cumulative_success_prob_times_billion += bucket_prob_times_billion *
+ numerator / denominator;
+ }
+ }
+
+ for (min_idx, min_bucket) in self.min_liquidity_offset_history.buckets.iter().enumerate().skip(1) {
+ let min_bucket_start_pos = BUCKET_START_POS[min_idx];
+ for (max_idx, max_bucket) in self.max_liquidity_offset_history.buckets.iter().enumerate().take(32 - min_idx) {
+ let max_bucket_end_pos = BUCKET_START_POS[32 - max_idx] - 1;
+ // Note that this multiply can only barely not overflow - two 16 bit ints plus
+ // 30 bits is 62 bits.
+ let bucket_prob_times_billion = (*min_bucket as u64) * (*max_bucket as u64)
+ * 1024 * 1024 * 1024 / total_valid_points_tracked;
+ if payment_pos >= max_bucket_end_pos {
+ // Success probability 0, the payment amount may be above the max liquidity
+ break;
+ } else if payment_pos < min_bucket_start_pos {
+ cumulative_success_prob_times_billion += bucket_prob_times_billion;
+ } else {
+ let (numerator, denominator) = success_probability(payment_pos as u64,
+ min_bucket_start_pos as u64, max_bucket_end_pos as u64,
+ POSITION_TICKS as u64 - 1, params, true);
+ cumulative_success_prob_times_billion += bucket_prob_times_billion *
+ numerator / denominator;
+ }
+ }
+ }
+
+ Some(cumulative_success_prob_times_billion)
+ }
+ }
+}
+use bucketed_history::{LegacyHistoricalBucketRangeTracker, HistoricalBucketRangeTracker, HistoricalMinMaxBuckets};
+