Move to a constant for "bucket one" in the scoring buckets
[rust-lightning] / lightning / src / routing / scoring.rs
index eca5ee6058b7841398b07d8137cd9842c9ec8e2d..5fdbf9ae3a9c8eff91ba0948ec86d8f9f3f7bdec 100644
@@ -157,8 +157,11 @@ define_score!();
 ///
 /// [`find_route`]: crate::routing::router::find_route
 pub trait LockableScore<'a> {
+       /// The [`Score`] type.
+       type Score: 'a + Score;
+
        /// The locked [`Score`] type.
-       type Locked: 'a + Score;
+       type Locked: DerefMut<Target = Self::Score> + Sized;
 
        /// Returns the locked scorer.
        fn lock(&'a self) -> Self::Locked;
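The key change in this hunk: the guard type no longer has to implement `Score` itself, it only has to deref to one. A minimal, self-contained sketch of the same pattern, with a toy `Score` trait standing in for LDK's:

    use std::ops::DerefMut;
    use std::sync::{Mutex, MutexGuard};

    // Toy stand-in for LDK's `Score` trait.
    trait Score {
        fn channel_penalty_msat(&self, scid: u64) -> u64;
    }

    // Mirrors the refactored `LockableScore`: implementors name the scorer type
    // and a guard type that merely derefs to it.
    trait LockableScore<'a> {
        type Score: 'a + Score;
        type Locked: DerefMut<Target = Self::Score> + Sized;
        fn lock(&'a self) -> Self::Locked;
    }

    impl<'a, T: 'a + Score> LockableScore<'a> for Mutex<T> {
        type Score = T;
        type Locked = MutexGuard<'a, T>;
        fn lock(&'a self) -> Self::Locked {
            Mutex::lock(self).unwrap()
        }
    }

    struct FixedPenalty(u64);
    impl Score for FixedPenalty {
        fn channel_penalty_msat(&self, _scid: u64) -> u64 { self.0 }
    }

    fn main() {
        let scorer = Mutex::new(FixedPenalty(500));
        let locked = LockableScore::lock(&scorer);
        // The guard derefs to the scorer, so `Score` methods are called directly.
        assert_eq!(locked.channel_penalty_msat(42), 500);
    }

This is what lets the patch delete the hand-written `Score` forwarding impl on `MultiThreadedScoreLock` below.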
@@ -172,62 +175,38 @@ pub trait WriteableScore<'a>: LockableScore<'a> + Writeable {}
 
 #[cfg(not(c_bindings))]
 impl<'a, T> WriteableScore<'a> for T where T: LockableScore<'a> + Writeable {}
-/// This is not exported to bindings users
+#[cfg(not(c_bindings))]
 impl<'a, T: 'a + Score> LockableScore<'a> for Mutex<T> {
+       type Score = T;
        type Locked = MutexGuard<'a, T>;
 
-       fn lock(&'a self) -> MutexGuard<'a, T> {
+       fn lock(&'a self) -> Self::Locked {
                Mutex::lock(self).unwrap()
        }
 }
 
+#[cfg(not(c_bindings))]
 impl<'a, T: 'a + Score> LockableScore<'a> for RefCell<T> {
+       type Score = T;
        type Locked = RefMut<'a, T>;
 
-       fn lock(&'a self) -> RefMut<'a, T> {
+       fn lock(&'a self) -> Self::Locked {
                self.borrow_mut()
        }
 }
 
 #[cfg(c_bindings)]
 /// A concrete implementation of [`LockableScore`] which supports multi-threading.
-pub struct MultiThreadedLockableScore<S: Score> {
-       score: Mutex<S>,
-}
-#[cfg(c_bindings)]
-/// A locked `MultiThreadedLockableScore`.
-pub struct MultiThreadedScoreLock<'a, S: Score>(MutexGuard<'a, S>);
-#[cfg(c_bindings)]
-impl<'a, T: Score + 'a> Score for MultiThreadedScoreLock<'a, T> {
-       type ScoreParams = <T as Score>::ScoreParams;
-       fn channel_penalty_msat(&self, scid: u64, source: &NodeId, target: &NodeId, usage: ChannelUsage, score_params: &Self::ScoreParams) -> u64 {
-               self.0.channel_penalty_msat(scid, source, target, usage, score_params)
-       }
-       fn payment_path_failed(&mut self, path: &Path, short_channel_id: u64) {
-               self.0.payment_path_failed(path, short_channel_id)
-       }
-       fn payment_path_successful(&mut self, path: &Path) {
-               self.0.payment_path_successful(path)
-       }
-       fn probe_failed(&mut self, path: &Path, short_channel_id: u64) {
-               self.0.probe_failed(path, short_channel_id)
-       }
-       fn probe_successful(&mut self, path: &Path) {
-               self.0.probe_successful(path)
-       }
-}
-#[cfg(c_bindings)]
-impl<'a, T: Score + 'a> Writeable for MultiThreadedScoreLock<'a, T> {
-       fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
-               self.0.write(writer)
-       }
+pub struct MultiThreadedLockableScore<T: Score> {
+       score: Mutex<T>,
 }
 
 #[cfg(c_bindings)]
-impl<'a, T: Score + 'a> LockableScore<'a> for MultiThreadedLockableScore<T> {
+impl<'a, T: 'a + Score> LockableScore<'a> for MultiThreadedLockableScore<T> {
+       type Score = T;
        type Locked = MultiThreadedScoreLock<'a, T>;
 
-       fn lock(&'a self) -> MultiThreadedScoreLock<'a, T> {
+       fn lock(&'a self) -> Self::Locked {
                MultiThreadedScoreLock(Mutex::lock(&self.score).unwrap())
        }
 }
@@ -240,7 +219,7 @@ impl<T: Score> Writeable for MultiThreadedLockableScore<T> {
 }
 
 #[cfg(c_bindings)]
-impl<'a, T: Score + 'a> WriteableScore<'a> for MultiThreadedLockableScore<T> {}
+impl<'a, T: 'a + Score> WriteableScore<'a> for MultiThreadedLockableScore<T> {}
 
 #[cfg(c_bindings)]
 impl<T: Score> MultiThreadedLockableScore<T> {
@@ -251,21 +230,34 @@ impl<T: Score> MultiThreadedLockableScore<T> {
 }
 
 #[cfg(c_bindings)]
-/// This is not exported to bindings users
-impl<'a, T: Writeable> Writeable for RefMut<'a, T> {
+/// A locked `MultiThreadedLockableScore`.
+pub struct MultiThreadedScoreLock<'a, T: Score>(MutexGuard<'a, T>);
+
+#[cfg(c_bindings)]
+impl<'a, T: 'a + Score> Writeable for MultiThreadedScoreLock<'a, T> {
        fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
-               T::write(&**self, writer)
+               self.0.write(writer)
        }
 }
 
 #[cfg(c_bindings)]
-/// This is not exported to bindings users
-impl<'a, S: Writeable> Writeable for MutexGuard<'a, S> {
-       fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
-               S::write(&**self, writer)
-       }
+impl<'a, T: 'a + Score> DerefMut for MultiThreadedScoreLock<'a, T> {
+       fn deref_mut(&mut self) -> &mut Self::Target {
+               self.0.deref_mut()
+       }
 }
 
+#[cfg(c_bindings)]
+impl<'a, T: 'a + Score> Deref for MultiThreadedScoreLock<'a, T> {
+       type Target = T;
+
+       fn deref(&self) -> &Self::Target {
+               self.0.deref()
+       }
+}
+
 /// Proposed use of a channel passed as a parameter to [`Score::channel_penalty_msat`].
 #[derive(Clone, Copy, Debug, PartialEq)]
 pub struct ChannelUsage {
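With the trait reshaped, `MultiThreadedScoreLock` above drops its forwarding impls and exposes the inner scorer through `Deref`/`DerefMut` instead. A sketch of that newtype-guard pattern in isolation:

    use std::ops::{Deref, DerefMut};
    use std::sync::{Mutex, MutexGuard};

    struct Scorer { penalty_msat: u64 }

    // Newtype guard, as `MultiThreadedScoreLock` is after this patch: rather
    // than forwarding every `Score` method by hand, it derefs to the scorer.
    struct ScoreLock<'a>(MutexGuard<'a, Scorer>);

    impl<'a> Deref for ScoreLock<'a> {
        type Target = Scorer;
        fn deref(&self) -> &Self::Target { self.0.deref() }
    }

    impl<'a> DerefMut for ScoreLock<'a> {
        fn deref_mut(&mut self) -> &mut Self::Target { self.0.deref_mut() }
    }

    fn main() {
        let score = Mutex::new(Scorer { penalty_msat: 0 });
        let mut locked = ScoreLock(score.lock().unwrap());
        // Mutation flows through `DerefMut` straight to the inner scorer.
        locked.penalty_msat = 1_000;
        assert_eq!(locked.penalty_msat, 1_000);
    }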
@@ -491,7 +483,7 @@ pub struct ProbabilisticScoringFeeParameters {
        pub manual_node_penalties: HashMap<NodeId, u64>,
 
        /// This penalty is applied when `htlc_maximum_msat` is equal to or larger than half of the
-       /// channel's capacity, (ie. htlc_maximum_msat  0.5 * channel_capacity) which makes us
+       /// channel's capacity, (i.e. htlc_maximum_msat >= 0.5 * channel_capacity) which makes us
        /// prefer nodes with a smaller `htlc_maximum_msat`. We treat such nodes preferentially
        /// as this makes balance discovery attacks harder to execute, thereby creating an incentive
        /// to restrict `htlc_maximum_msat` and improve privacy.
@@ -649,147 +641,6 @@ impl ProbabilisticScoringDecayParameters {
        }
 }
 
-/// Tracks the historical state of a distribution as a weighted average of how much time was spent
-/// in each of 8 buckets.
-#[derive(Clone, Copy)]
-struct HistoricalBucketRangeTracker {
-       buckets: [u16; 8],
-}
-
-impl HistoricalBucketRangeTracker {
-       fn new() -> Self { Self { buckets: [0; 8] } }
-       fn track_datapoint(&mut self, liquidity_offset_msat: u64, capacity_msat: u64) {
-               // We have 8 leaky buckets for min and max liquidity. Each bucket tracks the amount of time
-               // we spend in each bucket as a 16-bit fixed-point number with a 5 bit fractional part.
-               //
-               // Each time we update our liquidity estimate, we add 32 (1.0 in our fixed-point system) to
-               // the buckets for the current min and max liquidity offset positions.
-               //
-               // We then decay each bucket by multiplying by 2047/2048 (avoiding dividing by a
-               // non-power-of-two). This ensures we can't actually overflow the u16 - when we get to
-               // 63,457 adding 32 and decaying by 2047/2048 leaves us back at 63,457.
-               //
-               // In total, this allows us to track data for the last 8,000 or so payments across a given
-               // channel.
-               //
-               // These constants are a balance - we try to fit in 2 bytes per bucket to reduce overhead,
-               // and need to balance having more bits in the decimal part (to ensure decay isn't too
-               // non-linear) with having too few bits in the mantissa, causing us to not store very many
-               // datapoints.
-               //
-               // The constants were picked experimentally, selecting a decay amount that restricts us
-               // from overflowing buckets without having to cap them manually.
-
-               // Ensure the bucket index is in the range [0, 7], even if the liquidity offset is zero or
-               // the channel's capacity, though the second should generally never happen.
-               debug_assert!(liquidity_offset_msat <= capacity_msat);
-               let bucket_idx: u8 = (liquidity_offset_msat * 8 / capacity_msat.saturating_add(1))
-                       .try_into().unwrap_or(32); // 32 is bogus for 8 buckets, and will be ignored
-               debug_assert!(bucket_idx < 8);
-               if bucket_idx < 8 {
-                       for e in self.buckets.iter_mut() {
-                               *e = ((*e as u32) * 2047 / 2048) as u16;
-                       }
-                       self.buckets[bucket_idx as usize] = self.buckets[bucket_idx as usize].saturating_add(32);
-               }
-       }
-       /// Decay all buckets by the given number of half-lives. Used to more aggressively remove old
-       /// datapoints as we receive newer information.
-       fn time_decay_data(&mut self, half_lives: u32) {
-               for e in self.buckets.iter_mut() {
-                       *e = e.checked_shr(half_lives).unwrap_or(0);
-               }
-       }
-}
-
-impl_writeable_tlv_based!(HistoricalBucketRangeTracker, { (0, buckets, required) });
-
-struct HistoricalMinMaxBuckets<'a> {
-       min_liquidity_offset_history: &'a HistoricalBucketRangeTracker,
-       max_liquidity_offset_history: &'a HistoricalBucketRangeTracker,
-}
-
-impl HistoricalMinMaxBuckets<'_> {
-       #[inline]
-       fn get_decayed_buckets<T: Time>(&self, now: T, last_updated: T, half_life: Duration)
-       -> ([u16; 8], [u16; 8], u32) {
-               let required_decays = now.duration_since(last_updated).as_secs()
-                       .checked_div(half_life.as_secs())
-                       .map_or(u32::max_value(), |decays| cmp::min(decays, u32::max_value() as u64) as u32);
-               let mut min_buckets = *self.min_liquidity_offset_history;
-               min_buckets.time_decay_data(required_decays);
-               let mut max_buckets = *self.max_liquidity_offset_history;
-               max_buckets.time_decay_data(required_decays);
-               (min_buckets.buckets, max_buckets.buckets, required_decays)
-       }
-
-       #[inline]
-       fn calculate_success_probability_times_billion<T: Time>(
-               &self, now: T, last_updated: T, half_life: Duration, payment_amt_64th_bucket: u8)
-       -> Option<u64> {
-               // If historical penalties are enabled, calculate the penalty by walking the set of
-               // historical liquidity bucket (min, max) combinations (where min_idx < max_idx) and, for
-               // each, calculate the probability of success given our payment amount, then total the
-               // weighted average probability of success.
-               //
-               // We use a sliding scale to decide which point within a given bucket will be compared to
-               // the amount being sent - for lower-bounds, the amount being sent is compared to the lower
-               // edge of the first bucket (i.e. zero), but compared to the upper 7/8ths of the last
-               // bucket (i.e. 9 times the index, or 63), with each bucket in between increasing the
-               // comparison point by 1/64th. For upper-bounds, the same applies, however with an offset
-               // of 1/64th (i.e. starting at one and ending at 64). This avoids failing to assign
-               // penalties to channels at the edges.
-               //
-               // If we used the bottom edge of buckets, we'd end up never assigning any penalty at all to
-               // such a channel when sending less than ~0.19% of the channel's capacity (e.g. ~200k sats
-               // for a 1 BTC channel!).
-               //
-               // If we used the middle of each bucket we'd never assign any penalty at all when sending
-               // less than 1/16th of a channel's capacity, or 1/8th if we used the top of the bucket.
-               let mut total_valid_points_tracked = 0;
-
-               // Check if all our buckets are zero, once decayed and treat it as if we had no data. We
-               // don't actually use the decayed buckets, though, as that would lose precision.
-               let (decayed_min_buckets, decayed_max_buckets, required_decays) =
-                       self.get_decayed_buckets(now, last_updated, half_life);
-               if decayed_min_buckets.iter().all(|v| *v == 0) || decayed_max_buckets.iter().all(|v| *v == 0) {
-                       return None;
-               }
-
-               for (min_idx, min_bucket) in self.min_liquidity_offset_history.buckets.iter().enumerate() {
-                       for max_bucket in self.max_liquidity_offset_history.buckets.iter().take(8 - min_idx) {
-                               total_valid_points_tracked += (*min_bucket as u64) * (*max_bucket as u64);
-                       }
-               }
-               // If the total valid points is smaller than 1.0 (i.e. 32 in our fixed-point scheme), treat
-               // it as if we were fully decayed.
-               if total_valid_points_tracked.checked_shr(required_decays).unwrap_or(0) < 32*32 {
-                       return None;
-               }
-
-               let mut cumulative_success_prob_times_billion = 0;
-               for (min_idx, min_bucket) in self.min_liquidity_offset_history.buckets.iter().enumerate() {
-                       for (max_idx, max_bucket) in self.max_liquidity_offset_history.buckets.iter().enumerate().take(8 - min_idx) {
-                               let bucket_prob_times_million = (*min_bucket as u64) * (*max_bucket as u64)
-                                       * 1024 * 1024 / total_valid_points_tracked;
-                               let min_64th_bucket = min_idx as u8 * 9;
-                               let max_64th_bucket = (7 - max_idx as u8) * 9 + 1;
-                               if payment_amt_64th_bucket > max_64th_bucket {
-                                       // Success probability 0, the payment amount is above the max liquidity
-                               } else if payment_amt_64th_bucket <= min_64th_bucket {
-                                       cumulative_success_prob_times_billion += bucket_prob_times_million * 1024;
-                               } else {
-                                       cumulative_success_prob_times_billion += bucket_prob_times_million *
-                                               ((max_64th_bucket - payment_amt_64th_bucket) as u64) * 1024 /
-                                               ((max_64th_bucket - min_64th_bucket) as u64);
-                               }
-                       }
-               }
-
-               Some(cumulative_success_prob_times_billion)
-       }
-}
-
 /// Accounting for channel liquidity balance uncertainty.
 ///
 /// Direction is defined in terms of [`NodeId`] partial ordering, where the source node is the
@@ -814,8 +665,7 @@ struct ChannelLiquidity<T: Time> {
 struct DirectedChannelLiquidity<L: Deref<Target = u64>, BRT: Deref<Target = HistoricalBucketRangeTracker>, T: Time, U: Deref<Target = T>> {
        min_liquidity_offset_msat: L,
        max_liquidity_offset_msat: L,
-       min_liquidity_offset_history: BRT,
-       max_liquidity_offset_history: BRT,
+       liquidity_history: HistoricalMinMaxBuckets<BRT>,
        inflight_htlc_msat: u64,
        capacity_msat: u64,
        last_updated: U,
@@ -856,24 +706,36 @@ impl<G: Deref<Target = NetworkGraph<L>>, L: Deref, T: Time> ProbabilisticScorerU
                                                let amt = directed_info.effective_capacity().as_msat();
                                                let dir_liq = liq.as_directed(source, target, 0, amt, self.decay_params);
 
-                                               let buckets = HistoricalMinMaxBuckets {
-                                                       min_liquidity_offset_history: &dir_liq.min_liquidity_offset_history,
-                                                       max_liquidity_offset_history: &dir_liq.max_liquidity_offset_history,
-                                               };
-                                               let (min_buckets, max_buckets, _) = buckets.get_decayed_buckets(now,
-                                                       *dir_liq.last_updated, self.decay_params.historical_no_updates_half_life);
+                                               let (min_buckets, max_buckets) = dir_liq.liquidity_history
+                                                       .get_decayed_buckets(now, *dir_liq.last_updated,
+                                                               self.decay_params.historical_no_updates_half_life)
+                                                       .unwrap_or(([0; 32], [0; 32]));
 
                                                log_debug!(self.logger, core::concat!(
                                                        "Liquidity from {} to {} via {} is in the range ({}, {}).\n",
-                                                       "\tHistorical min liquidity octile relative probabilities: {} {} {} {} {} {} {} {}\n",
-                                                       "\tHistorical max liquidity octile relative probabilities: {} {} {} {} {} {} {} {}"),
+                                                       "\tHistorical min liquidity bucket relative probabilities:\n",
+                                                       "\t\t{} {} {} {} {} {} {} {} {} {} {} {} {} {} {} {} {} {} {} {} {} {} {} {} {} {} {} {} {} {} {} {}\n",
+                                                       "\tHistorical max liquidity bucket relative probabilities:\n",
+                                                       "\t\t{} {} {} {} {} {} {} {} {} {} {} {} {} {} {} {} {} {} {} {} {} {} {} {} {} {} {} {} {} {} {} {}"),
                                                        source, target, scid, dir_liq.min_liquidity_msat(), dir_liq.max_liquidity_msat(),
-                                                       min_buckets[0], min_buckets[1], min_buckets[2], min_buckets[3],
-                                                       min_buckets[4], min_buckets[5], min_buckets[6], min_buckets[7],
+                                                       min_buckets[ 0], min_buckets[ 1], min_buckets[ 2], min_buckets[ 3],
+                                                       min_buckets[ 4], min_buckets[ 5], min_buckets[ 6], min_buckets[ 7],
+                                                       min_buckets[ 8], min_buckets[ 9], min_buckets[10], min_buckets[11],
+                                                       min_buckets[12], min_buckets[13], min_buckets[14], min_buckets[15],
+                                                       min_buckets[16], min_buckets[17], min_buckets[18], min_buckets[19],
+                                                       min_buckets[20], min_buckets[21], min_buckets[22], min_buckets[23],
+                                                       min_buckets[24], min_buckets[25], min_buckets[26], min_buckets[27],
+                                                       min_buckets[28], min_buckets[29], min_buckets[30], min_buckets[31],
                                                        // Note that the liquidity buckets are an offset from the edge, so we
                                                        // inverse the max order to get the probabilities from zero.
-                                                       max_buckets[7], max_buckets[6], max_buckets[5], max_buckets[4],
-                                                       max_buckets[3], max_buckets[2], max_buckets[1], max_buckets[0]);
+                                                       max_buckets[31], max_buckets[30], max_buckets[29], max_buckets[28],
+                                                       max_buckets[27], max_buckets[26], max_buckets[25], max_buckets[24],
+                                                       max_buckets[23], max_buckets[22], max_buckets[21], max_buckets[20],
+                                                       max_buckets[19], max_buckets[18], max_buckets[17], max_buckets[16],
+                                                       max_buckets[15], max_buckets[14], max_buckets[13], max_buckets[12],
+                                                       max_buckets[11], max_buckets[10], max_buckets[ 9], max_buckets[ 8],
+                                                       max_buckets[ 7], max_buckets[ 6], max_buckets[ 5], max_buckets[ 4],
+                                                       max_buckets[ 3], max_buckets[ 2], max_buckets[ 1], max_buckets[ 0]);
                                        } else {
                                                log_debug!(self.logger, "No amount known for SCID {} from {:?} to {:?}", scid, source, target);
                                        }
@@ -907,26 +769,31 @@ impl<G: Deref<Target = NetworkGraph<L>>, L: Deref, T: Time> ProbabilisticScorerU
        /// Query the historical estimated minimum and maximum liquidity available for sending a
        /// payment over the channel with `scid` towards the given `target` node.
        ///
-       /// Returns two sets of 8 buckets. The first set describes the octiles for lower-bound
-       /// liquidity estimates, the second set describes the octiles for upper-bound liquidity
-       /// estimates. Each bucket describes the relative frequency at which we've seen a liquidity
-       /// bound in the octile relative to the channel's total capacity, on an arbitrary scale.
-       /// Because the values are slowly decayed, more recent data points are weighted more heavily
-       /// than older datapoints.
+       /// Returns two sets of 32 buckets. The first set describes the lower-bound liquidity history,
+       /// the second set describes the upper-bound liquidity history. Each bucket describes the
+       /// relative frequency at which we've seen a liquidity bound in the bucket's range relative to
+       /// the channel's total capacity, on an arbitrary scale. Because the values are slowly decayed,
+       /// more recent data points are weighted more heavily than older ones.
+       ///
+       /// Note that the range of each bucket varies by its location to provide more granular results
+       /// at the edges of a channel's capacity, where it is more likely to sit.
        ///
-       /// When scoring, the estimated probability that an upper-/lower-bound lies in a given octile
-       /// relative to the channel's total capacity is calculated by dividing that bucket's value with
-       /// the total of all buckets for the given bound.
+       /// When scoring, the estimated probability that an upper-/lower-bound lies in a given bucket
+       /// is calculated by dividing that bucket's value with the total value of all buckets.
        ///
-       /// For example, a value of `[0, 0, 0, 0, 0, 0, 32]` indicates that we believe the probability
-       /// of a bound being in the top octile to be 100%, and have never (recently) seen it in any
-       /// other octiles. A value of `[31, 0, 0, 0, 0, 0, 0, 32]` indicates we've seen the bound being
-       /// both in the top and bottom octile, and roughly with similar (recent) frequency.
+       /// For example, using a lower bucket count for illustrative purposes, a value of
+       /// `[0, 0, 0, ..., 0, 32]` indicates that we believe the probability of a bound being very
+       /// close to the channel's capacity to be 100%, and have never (recently) seen it in any other
+       /// bucket. A value of `[31, 0, 0, ..., 0, 0, 32]` indicates we've seen the bound being both
+       /// in the top and bottom bucket, and roughly with similar (recent) frequency.
        ///
        /// Because the datapoints are decayed slowly over time, values will eventually return to
-       /// `Some(([0; 8], [0; 8]))`.
+       /// `Some(([1; 32], [1; 32]))` and then to `None` once no datapoints remain.
+       ///
+       /// In order to fetch a single success probability from the buckets provided here, as used in
+       /// the scoring model, see [`Self::historical_estimated_payment_success_probability`].
        pub fn historical_estimated_channel_liquidity_probabilities(&self, scid: u64, target: &NodeId)
-       -> Option<([u16; 8], [u16; 8])> {
+       -> Option<([u16; 32], [u16; 32])> {
                let graph = self.network_graph.read_only();
 
                if let Some(chan) = graph.channels().get(&scid) {
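As the updated docs say, a single bucket's estimated probability is its value divided by the total across all buckets. A sketch of that normalization:

    // Each bucket's value over the total gives the estimated probability that
    // the liquidity bound lies in that bucket's range.
    fn bucket_probabilities(buckets: [u16; 32]) -> [f64; 32] {
        let total: u64 = buckets.iter().map(|v| *v as u64).sum();
        let mut probs = [0.0; 32];
        if total > 0 {
            for (p, v) in probs.iter_mut().zip(buckets.iter()) {
                *p = *v as f64 / total as f64;
            }
        }
        probs
    }

    fn main() {
        let mut buckets = [0u16; 32];
        buckets[31] = 32; // all datapoints near the channel's full capacity
        let probs = bucket_probabilities(buckets);
        assert_eq!(probs[31], 1.0);
    }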
@@ -935,12 +802,12 @@ impl<G: Deref<Target = NetworkGraph<L>>, L: Deref, T: Time> ProbabilisticScorerU
                                        let amt = directed_info.effective_capacity().as_msat();
                                        let dir_liq = liq.as_directed(source, target, 0, amt, self.decay_params);
 
-                                       let buckets = HistoricalMinMaxBuckets {
-                                               min_liquidity_offset_history: &dir_liq.min_liquidity_offset_history,
-                                               max_liquidity_offset_history: &dir_liq.max_liquidity_offset_history,
-                                       };
-                                       let (min_buckets, mut max_buckets, _) = buckets.get_decayed_buckets(T::now(),
-                                               *dir_liq.last_updated, self.decay_params.historical_no_updates_half_life);
+                                       let (min_buckets, mut max_buckets) =
+                                               dir_liq.liquidity_history.get_decayed_buckets(
+                                                       dir_liq.now, *dir_liq.last_updated,
+                                                       self.decay_params.historical_no_updates_half_life
+                                               )?;
+
                                        // Note that the liquidity buckets are an offset from the edge, so we inverse
                                        // the max order to get the probabilities from zero.
                                        max_buckets.reverse();
@@ -950,6 +817,34 @@ impl<G: Deref<Target = NetworkGraph<L>>, L: Deref, T: Time> ProbabilisticScorerU
                }
                None
        }
+
+       /// Query the probability of payment success sending the given `amount_msat` over the channel
+       /// with `scid` towards the given `target` node, based on the historical estimated liquidity
+       /// bounds.
+       ///
+       /// These are the same bounds as returned by
+       /// [`Self::historical_estimated_channel_liquidity_probabilities`] (but not those returned by
+       /// [`Self::estimated_channel_liquidity_range`]).
+       pub fn historical_estimated_payment_success_probability(
+               &self, scid: u64, target: &NodeId, amount_msat: u64)
+       -> Option<f64> {
+               let graph = self.network_graph.read_only();
+
+               if let Some(chan) = graph.channels().get(&scid) {
+                       if let Some(liq) = self.channel_liquidities.get(&scid) {
+                               if let Some((directed_info, source)) = chan.as_directed_to(target) {
+                                       let capacity_msat = directed_info.effective_capacity().as_msat();
+                                       let dir_liq = liq.as_directed(source, target, 0, capacity_msat, self.decay_params);
+
+                                       return dir_liq.liquidity_history.calculate_success_probability_times_billion(
+                                               dir_liq.now, *dir_liq.last_updated,
+                                               self.decay_params.historical_no_updates_half_life, amount_msat, capacity_msat
+                                       ).map(|p| p as f64 / (1024 * 1024 * 1024) as f64);
+                               }
+                       }
+               }
+               None
+       }
 }
 
 impl<T: Time> ChannelLiquidity<T> {
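One subtlety for callers of the new accessor: the "billion" in `calculate_success_probability_times_billion` is the binary billion, 2^30 (1024^3), which is why the conversion above divides by `1024 * 1024 * 1024` rather than 1e9. A tiny sketch of that conversion:

    // 2^30 "binary billionths", matching the scorer's fixed point.
    fn prob_from_billionths(times_billion: u64) -> f64 {
        times_billion as f64 / (1024 * 1024 * 1024) as f64
    }

    fn main() {
        // A 50% success estimate is 2^29 in fixed point.
        assert_eq!(prob_from_billionths(1 << 29), 0.5);
    }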
@@ -981,8 +876,10 @@ impl<T: Time> ChannelLiquidity<T> {
                DirectedChannelLiquidity {
                        min_liquidity_offset_msat,
                        max_liquidity_offset_msat,
-                       min_liquidity_offset_history,
-                       max_liquidity_offset_history,
+                       liquidity_history: HistoricalMinMaxBuckets {
+                               min_liquidity_offset_history,
+                               max_liquidity_offset_history,
+                       },
                        inflight_htlc_msat,
                        capacity_msat,
                        last_updated: &self.last_updated,
@@ -1008,8 +905,10 @@ impl<T: Time> ChannelLiquidity<T> {
                DirectedChannelLiquidity {
                        min_liquidity_offset_msat,
                        max_liquidity_offset_msat,
-                       min_liquidity_offset_history,
-                       max_liquidity_offset_history,
+                       liquidity_history: HistoricalMinMaxBuckets {
+                               min_liquidity_offset_history,
+                               max_liquidity_offset_history,
+                       },
                        inflight_htlc_msat,
                        capacity_msat,
                        last_updated: &mut self.last_updated,
@@ -1035,6 +934,7 @@ impl<L: Deref<Target = u64>, BRT: Deref<Target = HistoricalBucketRangeTracker>,
        /// Returns a liquidity penalty for routing the given HTLC `amount_msat` through the channel in
        /// this direction.
        fn penalty_msat(&self, amount_msat: u64, score_params: &ProbabilisticScoringFeeParameters) -> u64 {
+               let available_capacity = self.available_capacity();
                let max_liquidity_msat = self.max_liquidity_msat();
                let min_liquidity_msat = core::cmp::min(self.min_liquidity_msat(), max_liquidity_msat);
 
@@ -1066,28 +966,20 @@ impl<L: Deref<Target = u64>, BRT: Deref<Target = HistoricalBucketRangeTracker>,
                        }
                };
 
+               if amount_msat >= available_capacity {
+                       // We're trying to send more than the capacity, use a max penalty.
+                       res = res.saturating_add(Self::combined_penalty_msat(amount_msat,
+                               NEGATIVE_LOG10_UPPER_BOUND * 2048,
+                               score_params.historical_liquidity_penalty_multiplier_msat,
+                               score_params.historical_liquidity_penalty_amount_multiplier_msat));
+                       return res;
+               }
+
                if score_params.historical_liquidity_penalty_multiplier_msat != 0 ||
                   score_params.historical_liquidity_penalty_amount_multiplier_msat != 0 {
-                       let payment_amt_64th_bucket = if amount_msat < u64::max_value() / 64 {
-                               amount_msat * 64 / self.capacity_msat.saturating_add(1)
-                       } else {
-                               // Only use 128-bit arithmetic when multiplication will overflow to avoid 128-bit
-                               // division. This branch should only be hit in fuzz testing since the amount would
-                               // need to be over 2.88 million BTC in practice.
-                               ((amount_msat as u128) * 64 / (self.capacity_msat as u128).saturating_add(1))
-                                       .try_into().unwrap_or(65)
-                       };
-                       #[cfg(not(fuzzing))]
-                       debug_assert!(payment_amt_64th_bucket <= 64);
-                       if payment_amt_64th_bucket > 64 { return res; }
-
-                       let buckets = HistoricalMinMaxBuckets {
-                               min_liquidity_offset_history: &self.min_liquidity_offset_history,
-                               max_liquidity_offset_history: &self.max_liquidity_offset_history,
-                       };
-                       if let Some(cumulative_success_prob_times_billion) = buckets
+                       if let Some(cumulative_success_prob_times_billion) = self.liquidity_history
                                .calculate_success_probability_times_billion(self.now, *self.last_updated,
-                                       self.decay_params.historical_no_updates_half_life, payment_amt_64th_bucket as u8)
+                                       self.decay_params.historical_no_updates_half_life, amount_msat, self.capacity_msat)
                        {
                                let historical_negative_log10_times_2048 = approx::negative_log10_times_2048(cumulative_success_prob_times_billion + 1, 1024 * 1024 * 1024);
                                res = res.saturating_add(Self::combined_penalty_msat(amount_msat,
@@ -1113,15 +1005,15 @@ impl<L: Deref<Target = u64>, BRT: Deref<Target = HistoricalBucketRangeTracker>,
 
        /// Computes the liquidity penalty from the penalty multipliers.
        #[inline(always)]
-       fn combined_penalty_msat(amount_msat: u64, negative_log10_times_2048: u64,
+       fn combined_penalty_msat(amount_msat: u64, mut negative_log10_times_2048: u64,
                liquidity_penalty_multiplier_msat: u64, liquidity_penalty_amount_multiplier_msat: u64,
        ) -> u64 {
-               let liquidity_penalty_msat = {
-                       // Upper bound the liquidity penalty to ensure some channel is selected.
-                       let multiplier_msat = liquidity_penalty_multiplier_msat;
-                       let max_penalty_msat = multiplier_msat.saturating_mul(NEGATIVE_LOG10_UPPER_BOUND);
-                       (negative_log10_times_2048.saturating_mul(multiplier_msat) / 2048).min(max_penalty_msat)
-               };
+               negative_log10_times_2048 =
+                       negative_log10_times_2048.min(NEGATIVE_LOG10_UPPER_BOUND * 2048);
+
+               // Upper bound the liquidity penalty to ensure some channel is selected.
+               let liquidity_penalty_msat = negative_log10_times_2048
+                       .saturating_mul(liquidity_penalty_multiplier_msat) / 2048;
                let amount_penalty_msat = negative_log10_times_2048
                        .saturating_mul(liquidity_penalty_amount_multiplier_msat)
                        .saturating_mul(amount_msat) / 2048 / AMOUNT_PENALTY_DIVISOR;
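The rewrite clamps `negative_log10_times_2048` once at the top, which now bounds the amount-based term as well as the linear one (previously only the linear term was capped). A self-contained sketch of the resulting formula; the two constant values here are assumptions for illustration, not LDK's actual values:

    // Values assumed for this sketch; LDK defines its own constants.
    const NEGATIVE_LOG10_UPPER_BOUND: u64 = 2; // cap -log10(p) at 2, i.e. p >= 1%
    const AMOUNT_PENALTY_DIVISOR: u64 = 1 << 20;

    fn combined_penalty_msat(
        amount_msat: u64, mut negative_log10_times_2048: u64,
        liquidity_penalty_multiplier_msat: u64, liquidity_penalty_amount_multiplier_msat: u64,
    ) -> u64 {
        // A single clamp on the input replaces the old per-term cap, bounding
        // both penalty terms computed below.
        negative_log10_times_2048 =
            negative_log10_times_2048.min(NEGATIVE_LOG10_UPPER_BOUND * 2048);
        let liquidity_penalty_msat =
            negative_log10_times_2048.saturating_mul(liquidity_penalty_multiplier_msat) / 2048;
        let amount_penalty_msat = negative_log10_times_2048
            .saturating_mul(liquidity_penalty_amount_multiplier_msat)
            .saturating_mul(amount_msat) / 2048 / AMOUNT_PENALTY_DIVISOR;
        liquidity_penalty_msat.saturating_add(amount_penalty_msat)
    }

    fn main() {
        // -log10(0.5) ~= 0.3013, which is ~617 in 1/2048ths; with a 30 sat
        // multiplier per log10 this yields a ~9 sat penalty.
        assert_eq!(combined_penalty_msat(100_000_000, 617, 30_000, 0), 9_038);
    }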
@@ -1130,26 +1022,44 @@ impl<L: Deref<Target = u64>, BRT: Deref<Target = HistoricalBucketRangeTracker>,
        }
 
        /// Returns the lower bound of the channel liquidity balance in this direction.
+       #[inline(always)]
        fn min_liquidity_msat(&self) -> u64 {
                self.decayed_offset_msat(*self.min_liquidity_offset_msat)
        }
 
        /// Returns the upper bound of the channel liquidity balance in this direction.
+       #[inline(always)]
        fn max_liquidity_msat(&self) -> u64 {
                self.available_capacity()
                        .saturating_sub(self.decayed_offset_msat(*self.max_liquidity_offset_msat))
        }
 
        /// Returns the capacity minus the in-flight HTLCs in this direction.
+       #[inline(always)]
        fn available_capacity(&self) -> u64 {
                self.capacity_msat.saturating_sub(self.inflight_htlc_msat)
        }
 
        fn decayed_offset_msat(&self, offset_msat: u64) -> u64 {
-               self.now.duration_since(*self.last_updated).as_secs()
-                       .checked_div(self.decay_params.liquidity_offset_half_life.as_secs())
-                       .and_then(|decays| offset_msat.checked_shr(decays as u32))
-                       .unwrap_or(0)
+               let half_life = self.decay_params.liquidity_offset_half_life.as_secs();
+               if half_life != 0 {
+                       // Decay the offset by the appropriate number of half lives. If half of the next half
+                       // life has passed, approximate an additional three-quarter life to help smooth out the
+                       // decay.
+                       let elapsed_time = self.now.duration_since(*self.last_updated).as_secs();
+                       let half_decays = elapsed_time / (half_life / 2);
+                       let decays = half_decays / 2;
+                       let decayed_offset_msat = offset_msat.checked_shr(decays as u32).unwrap_or(0);
+                       if half_decays % 2 == 0 {
+                               decayed_offset_msat
+                       } else {
+                               // 11_585 / 16_384 ~= core::f64::consts::FRAC_1_SQRT_2
+                               // 16_384 == 2^14
+                               (decayed_offset_msat as u128 * 11_585 / 16_384) as u64
+                       }
+               } else {
+                       0
+               }
        }
 }
 
@@ -1164,7 +1074,7 @@ impl<L: DerefMut<Target = u64>, BRT: DerefMut<Target = HistoricalBucketRangeTrac
                        log_trace!(logger, "Max liquidity of {} is {} (already less than or equal to {})",
                                chan_descr, existing_max_msat, amount_msat);
                }
-               self.update_history_buckets();
+               self.update_history_buckets(0);
        }
 
        /// Adjusts the channel liquidity balance bounds when failing to route `amount_msat` downstream.
@@ -1177,7 +1087,7 @@ impl<L: DerefMut<Target = u64>, BRT: DerefMut<Target = HistoricalBucketRangeTrac
                        log_trace!(logger, "Min liquidity of {} is {} (already greater than or equal to {})",
                                chan_descr, existing_min_msat, amount_msat);
                }
-               self.update_history_buckets();
+               self.update_history_buckets(0);
        }
 
        /// Adjusts the channel liquidity balance bounds when successfully routing `amount_msat`.
@@ -1185,23 +1095,27 @@ impl<L: DerefMut<Target = u64>, BRT: DerefMut<Target = HistoricalBucketRangeTrac
                let max_liquidity_msat = self.max_liquidity_msat().checked_sub(amount_msat).unwrap_or(0);
                log_debug!(logger, "Subtracting {} from max liquidity of {} (setting it to {})", amount_msat, chan_descr, max_liquidity_msat);
                self.set_max_liquidity_msat(max_liquidity_msat);
-               self.update_history_buckets();
+               self.update_history_buckets(amount_msat);
        }
 
-       fn update_history_buckets(&mut self) {
+       /// Updates the history buckets for this channel. Because the history buckets track what we now
+       /// know about the channel's state *prior to our payment* (i.e. what we assume is "steady
+       /// state"), we allow the caller to set an offset applied to our liquidity bounds which
+       /// represents the amount of the successful payment we just made.
+       fn update_history_buckets(&mut self, bucket_offset_msat: u64) {
                let half_lives = self.now.duration_since(*self.last_updated).as_secs()
                        .checked_div(self.decay_params.historical_no_updates_half_life.as_secs())
                        .map(|v| v.try_into().unwrap_or(u32::max_value())).unwrap_or(u32::max_value());
-               self.min_liquidity_offset_history.time_decay_data(half_lives);
-               self.max_liquidity_offset_history.time_decay_data(half_lives);
+               self.liquidity_history.min_liquidity_offset_history.time_decay_data(half_lives);
+               self.liquidity_history.max_liquidity_offset_history.time_decay_data(half_lives);
 
                let min_liquidity_offset_msat = self.decayed_offset_msat(*self.min_liquidity_offset_msat);
-               self.min_liquidity_offset_history.track_datapoint(
-                       min_liquidity_offset_msat, self.capacity_msat
+               self.liquidity_history.min_liquidity_offset_history.track_datapoint(
+                       min_liquidity_offset_msat + bucket_offset_msat, self.capacity_msat
                );
                let max_liquidity_offset_msat = self.decayed_offset_msat(*self.max_liquidity_offset_msat);
-               self.max_liquidity_offset_history.track_datapoint(
-                       max_liquidity_offset_msat, self.capacity_msat
+               self.liquidity_history.max_liquidity_offset_history.track_datapoint(
+                       max_liquidity_offset_msat.saturating_sub(bucket_offset_msat), self.capacity_msat
                );
        }
 
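The `bucket_offset_msat` parameter threaded through above only matters on payment success: the history buckets are meant to capture the channel's steady state before our payment, so the just-sent amount is added back to the minimum-liquidity offset and subtracted from the maximum-liquidity offset before the datapoints are recorded. A sketch of just that adjustment:

    // On success, datapoints are recorded against the channel's inferred
    // pre-payment ("steady state") liquidity bounds, not the post-payment ones.
    fn steady_state_offsets(
        min_liquidity_offset_msat: u64, max_liquidity_offset_msat: u64, sent_msat: u64,
    ) -> (u64, u64) {
        (
            // The minimum-liquidity offset is bumped by what we just sent...
            min_liquidity_offset_msat + sent_msat,
            // ...and the maximum-liquidity offset shrinks correspondingly
            // (saturating, as in the patch).
            max_liquidity_offset_msat.saturating_sub(sent_msat),
        )
    }

    fn main() {
        assert_eq!(steady_state_offsets(0, 50_000, 100_000), (100_000, 0));
    }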
@@ -1656,6 +1570,294 @@ mod approx {
        }
 }
 
+mod bucketed_history {
+       use super::*;
+
+       // Because liquidity is often skewed heavily in one direction, we store historical state
+       // distribution in buckets of different sizes. For backwards compatibility, the legacy
+       // buckets of size 1/8th must each map onto a whole number of the buckets here.
+       //
+       // The smallest bucket is 2^-14th of the channel; for each of our 32 buckets here we define
+       // the width of the bucket in 2^14ths of the channel. This increases exponentially until we
+       // reach a full 16th of the channel's capacity, which is repeated a few times for backwards
+       // compatibility. The four middle buckets represent full octiles of the channel's capacity.
+       //
+       // For a 1 BTC channel, this lets us differentiate between failures in the bottom 6k sats, or
+       // between the 12,000th sat and 24,000th sat, while only needing to store and operate on 32
+       // buckets in total.
+
+       const BUCKET_START_POS: [u16; 33] = [
+               0, 1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024, 2048, 3072, 4096, 6144, 8192, 10240, 12288,
+               13312, 14336, 15360, 15872, 16128, 16256, 16320, 16352, 16368, 16376, 16380, 16382, 16383, 16384,
+       ];
+
+       const LEGACY_TO_BUCKET_RANGE: [(u8, u8); 8] = [
+               (0, 12), (12, 14), (14, 15), (15, 16), (16, 17), (17, 18), (18, 20), (20, 32)
+       ];
+
+       const POSITION_TICKS: u16 = 1 << 14;
+
+       fn pos_to_bucket(pos: u16) -> usize {
+               for bucket in 0..32 {
+                       if pos < BUCKET_START_POS[bucket + 1] {
+                               return bucket;
+                       }
+               }
+               debug_assert!(false);
+               return 32;
+       }
+
+       #[cfg(test)]
+       #[test]
+       fn check_bucket_maps() {
+               const BUCKET_WIDTH_IN_16384S: [u16; 32] = [
+                       1, 1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024, 1024, 1024, 2048, 2048,
+                       2048, 2048, 1024, 1024, 1024, 512, 256, 128, 64, 32, 16, 8, 4, 2, 1, 1];
+
+               let mut min_size_iter = 0;
+               let mut legacy_bucket_iter = 0;
+               for (bucket, width) in BUCKET_WIDTH_IN_16384S.iter().enumerate() {
+                       assert_eq!(BUCKET_START_POS[bucket], min_size_iter);
+                       for i in 0..*width {
+                               assert_eq!(pos_to_bucket(min_size_iter + i) as usize, bucket);
+                       }
+                       min_size_iter += *width;
+                       if min_size_iter % (POSITION_TICKS / 8) == 0 {
+                               assert_eq!(LEGACY_TO_BUCKET_RANGE[legacy_bucket_iter].1 as usize, bucket + 1);
+                               if legacy_bucket_iter + 1 < 8 {
+                                       assert_eq!(LEGACY_TO_BUCKET_RANGE[legacy_bucket_iter + 1].0 as usize, bucket + 1);
+                               }
+                               legacy_bucket_iter += 1;
+                       }
+               }
+               assert_eq!(BUCKET_START_POS[32], POSITION_TICKS);
+               assert_eq!(min_size_iter, POSITION_TICKS);
+       }
+
+       #[inline]
+       fn amount_to_pos(amount_msat: u64, capacity_msat: u64) -> u16 {
+               let pos = if amount_msat < u64::max_value() / (POSITION_TICKS as u64) {
+                       (amount_msat * (POSITION_TICKS as u64) / capacity_msat.saturating_add(1))
+                               .try_into().unwrap_or(POSITION_TICKS)
+               } else {
+                       // Only use 128-bit arithmetic when multiplication will overflow to avoid 128-bit
+                       // division. This branch should only be hit in fuzz testing since the amount would
+                       // need to be over 2.88 million BTC in practice.
+                       ((amount_msat as u128) * (POSITION_TICKS as u128)
+                                       / (capacity_msat as u128).saturating_add(1))
+                               .try_into().unwrap_or(POSITION_TICKS)
+               };
+               // If we are running in a client that doesn't validate gossip, it's possible for a channel's
+               // capacity to change due to a `channel_update` message which, if received while a payment
+               // is in-flight, could cause this to fail. Thus, we only assert in test.
+               #[cfg(test)]
+               debug_assert!(pos < POSITION_TICKS);
+               pos
+       }
+
+       /// Prior to LDK 0.0.117 we used eight buckets which were split evenly across the channel's
+       /// capacity, i.e. into octiles. This was changed to use 32 buckets for accuracy reasons in
+       /// 0.0.117, however we support reading the legacy values here for backwards compatibility.
+       pub(super) struct LegacyHistoricalBucketRangeTracker {
+               buckets: [u16; 8],
+       }
+
+       impl LegacyHistoricalBucketRangeTracker {
+               pub(crate) fn into_current(&self) -> HistoricalBucketRangeTracker {
+                       let mut buckets = [0; 32];
+                       for (idx, legacy_bucket) in self.buckets.iter().enumerate() {
+                               let mut new_val = *legacy_bucket;
+                               let (start, end) = LEGACY_TO_BUCKET_RANGE[idx];
+                               new_val /= (end - start) as u16;
+                               for i in start..end {
+                                       buckets[i as usize] = new_val;
+                               }
+                       }
+                       HistoricalBucketRangeTracker { buckets }
+               }
+       }
+
+       /// Tracks the historical state of a distribution as a weighted average of how much time was spent
+       /// in each of 32 buckets.
+       #[derive(Clone, Copy)]
+       pub(super) struct HistoricalBucketRangeTracker {
+               buckets: [u16; 32],
+       }
+
+       /// Buckets are stored in fixed point numbers with a 5 bit fractional part. Thus, the value
+       /// "one" is 32, or this constant.
+       pub const BUCKET_FIXED_POINT_ONE: u16 = 32;
+
+       impl HistoricalBucketRangeTracker {
+               pub(super) fn new() -> Self { Self { buckets: [0; 32] } }
+               pub(super) fn track_datapoint(&mut self, liquidity_offset_msat: u64, capacity_msat: u64) {
+                       // We have 32 leaky buckets for min and max liquidity. Each bucket tracks the amount of time
+                       // we spend in each bucket as a 16-bit fixed-point number with a 5 bit fractional part.
+                       //
+                       // Each time we update our liquidity estimate, we add 32 (1.0 in our fixed-point system) to
+                       // the buckets for the current min and max liquidity offset positions.
+                       //
+                       // We then decay each bucket by multiplying by 2047/2048 (avoiding dividing by a
+                       // non-power-of-two). This ensures we can't actually overflow the u16 - when we get to
+                       // 63,457 adding 32 and decaying by 2047/2048 leaves us back at 63,457.
+                       //
+                       // In total, this allows us to track data for the last 8,000 or so payments across a given
+                       // channel.
+                       //
+                       // These constants are a balance - we try to fit in 2 bytes per bucket to reduce overhead,
+                       // and need to balance having more bits in the decimal part (to ensure decay isn't too
+                       // non-linear) with having too few bits in the mantissa, causing us to not store very many
+                       // datapoints.
+                       //
+                       // The constants were picked experimentally, selecting a decay amount that restricts us
+                       // from overflowing buckets without having to cap them manually.
+
+                       let pos: u16 = amount_to_pos(liquidity_offset_msat, capacity_msat);
+                       if pos < POSITION_TICKS {
+                               for e in self.buckets.iter_mut() {
+                                       *e = ((*e as u32) * 2047 / 2048) as u16;
+                               }
+                               let bucket = pos_to_bucket(pos);
+                               self.buckets[bucket] = self.buckets[bucket].saturating_add(BUCKET_FIXED_POINT_ONE);
+                       }
+               }
+               /// Decay all buckets by the given number of half-lives. Used to more aggressively remove old
+               /// datapoints as we receive newer information.
+               #[inline]
+               pub(super) fn time_decay_data(&mut self, half_lives: u32) {
+                       for e in self.buckets.iter_mut() {
+                               *e = e.checked_shr(half_lives).unwrap_or(0);
+                       }
+               }
+       }
+
+       impl_writeable_tlv_based!(HistoricalBucketRangeTracker, { (0, buckets, required) });
+       impl_writeable_tlv_based!(LegacyHistoricalBucketRangeTracker, { (0, buckets, required) });
+
+       /// A set of buckets representing the history of where we've seen the minimum- and maximum-
+       /// liquidity bounds for a given channel.
+       pub(super) struct HistoricalMinMaxBuckets<D: Deref<Target = HistoricalBucketRangeTracker>> {
+               /// Buckets tracking where and how often we've seen the minimum liquidity bound for a
+               /// channel.
+               pub(super) min_liquidity_offset_history: D,
+               /// Buckets tracking where and how often we've seen the maximum liquidity bound for a
+               /// channel.
+               pub(super) max_liquidity_offset_history: D,
+       }
+
+       impl<D: Deref<Target = HistoricalBucketRangeTracker>> HistoricalMinMaxBuckets<D> {
+               pub(super) fn get_decayed_buckets<T: Time>(&self, now: T, last_updated: T, half_life: Duration)
+               -> Option<([u16; 32], [u16; 32])> {
+                       let (_, required_decays) = self.get_total_valid_points(now, last_updated, half_life)?;
+
+                       let mut min_buckets = *self.min_liquidity_offset_history;
+                       min_buckets.time_decay_data(required_decays);
+                       let mut max_buckets = *self.max_liquidity_offset_history;
+                       max_buckets.time_decay_data(required_decays);
+                       Some((min_buckets.buckets, max_buckets.buckets))
+               }
+               #[inline]
+               pub(super) fn get_total_valid_points<T: Time>(&self, now: T, last_updated: T, half_life: Duration)
+               -> Option<(u64, u32)> {
+                       let required_decays = now.duration_since(last_updated).as_secs()
+                               .checked_div(half_life.as_secs())
+                               .map_or(u32::max_value(), |decays| cmp::min(decays, u32::max_value() as u64) as u32);
+
+                       let mut total_valid_points_tracked = 0;
+                       for (min_idx, min_bucket) in self.min_liquidity_offset_history.buckets.iter().enumerate() {
+                               for max_bucket in self.max_liquidity_offset_history.buckets.iter().take(32 - min_idx) {
+                                       total_valid_points_tracked += (*min_bucket as u64) * (*max_bucket as u64);
+                               }
+                       }
+
+                       // If the total valid points is smaller than 1.0 (i.e. 32 in our fixed-point scheme),
+                       // treat it as if we were fully decayed.
+                       const FULLY_DECAYED: u16 = BUCKET_FIXED_POINT_ONE * BUCKET_FIXED_POINT_ONE;
+                       if total_valid_points_tracked.checked_shr(required_decays).unwrap_or(0) < FULLY_DECAYED.into() {
+                               return None;
+                       }
+
+                       Some((total_valid_points_tracked, required_decays))
+               }
+
+               #[inline]
+               pub(super) fn calculate_success_probability_times_billion<T: Time>(
+                       &self, now: T, last_updated: T, half_life: Duration, amount_msat: u64, capacity_msat: u64)
+               -> Option<u64> {
+                       // If historical penalties are enabled, we try to calculate a probability of success
+                       // given our historical distribution of min- and max-liquidity bounds in a channel.
+                       // To do so, we walk the set of historical liquidity bucket (min, max) combinations
+                       // (where min_idx < max_idx, as having a minimum above our maximum is an invalid
+                       // state). For each pair, we calculate the probability as if the bucket's corresponding
+                       // min- and max- liquidity bounds were our current liquidity bounds and then multiply
+                       // that probability by the weight of the selected buckets.
+                       let payment_pos = amount_to_pos(amount_msat, capacity_msat);
+                       if payment_pos >= POSITION_TICKS { return None; }
+
+                       // Check if all our buckets are zero once decayed, and treat it as if we had no data. We
+                       // don't actually use the decayed buckets, though, as that would lose precision.
+                       let (total_valid_points_tracked, _)
+                               = self.get_total_valid_points(now, last_updated, half_life)?;
+
+                       let mut cumulative_success_prob_times_billion = 0;
+                       // Special-case the 0th min bucket - it generally means we failed a payment, so only
+                       // consider the highest (i.e. largest-offset-from-max-capacity) max bucket for all
+                       // points against the 0th min bucket. This avoids the case where we fail to route
+                       // increasingly lower values over a channel, but treat each failure as a separate
+                       // datapoint, many of which may have relatively high maximum-available-liquidity
+                       // values, which will result in us thinking we have some nontrivial probability of
+                       // routing up to that amount.
+                       if self.min_liquidity_offset_history.buckets[0] != 0 {
+                               let mut highest_max_bucket_with_points = 0; // The highest max-bucket with any data
+                               let mut total_max_points = 0; // Total points in max-buckets to consider
+                               for (max_idx, max_bucket) in self.max_liquidity_offset_history.buckets.iter().enumerate() {
+                                       if *max_bucket >= BUCKET_FIXED_POINT_ONE {
+                                               highest_max_bucket_with_points = cmp::max(highest_max_bucket_with_points, max_idx);
+                                       }
+                                       total_max_points += *max_bucket as u64;
+                               }
+                               let max_bucket_end_pos = BUCKET_START_POS[32 - highest_max_bucket_with_points] - 1;
+                               if payment_pos < max_bucket_end_pos {
+                                       let bucket_prob_times_billion =
+                                               (self.min_liquidity_offset_history.buckets[0] as u64) * total_max_points
+                                                       * 1024 * 1024 * 1024 / total_valid_points_tracked;
+                                       cumulative_success_prob_times_billion += bucket_prob_times_billion *
+                                               ((max_bucket_end_pos - payment_pos) as u64) /
+                                               // Add an additional one in the divisor as the payment bucket has been
+                                               // rounded down.
+                                               (max_bucket_end_pos + 1) as u64;
+                               }
+                       }
+
+                       for (min_idx, min_bucket) in self.min_liquidity_offset_history.buckets.iter().enumerate().skip(1) {
+                               let min_bucket_start_pos = BUCKET_START_POS[min_idx];
+                               for (max_idx, max_bucket) in self.max_liquidity_offset_history.buckets.iter().enumerate().take(32 - min_idx) {
+                                       let max_bucket_end_pos = BUCKET_START_POS[32 - max_idx] - 1;
+                                       // Note that this multiply can only barely not overflow - the product of two
+                                       // 16-bit ints is 32 bits, and the 2^30 scaling adds 30 more, for 62 bits.
+                                       let bucket_prob_times_billion = (*min_bucket as u64) * (*max_bucket as u64)
+                                               * 1024 * 1024 * 1024 / total_valid_points_tracked;
+                                       if payment_pos >= max_bucket_end_pos {
+                                               // Success probability 0, the payment amount may be above the max liquidity
+                                               break;
+                                       } else if payment_pos < min_bucket_start_pos {
+                                               cumulative_success_prob_times_billion += bucket_prob_times_billion;
+                                       } else {
+                                               cumulative_success_prob_times_billion += bucket_prob_times_billion *
+                                                       ((max_bucket_end_pos - payment_pos) as u64) /
+                                                       // Add an additional one in the divisor as the payment bucket has been
+                                                       // rounded down.
+                                                       ((max_bucket_end_pos - min_bucket_start_pos + 1) as u64);
+                                       }
+                               }
+                       }
+
+                       Some(cumulative_success_prob_times_billion)
+               }
+       }
+}
+use bucketed_history::{LegacyHistoricalBucketRangeTracker, HistoricalBucketRangeTracker, HistoricalMinMaxBuckets};
+
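For intuition, the weighting above can be mirrored in a standalone sketch: take each (min, max) bucket pair, weight it by the points it holds, and interpolate linearly when the payment position falls inside the pair's range. The sketch below is simplified to four buckets, uses floating point instead of the billion-scaled fixed point, and omits the 0th-min-bucket special case; its `START_POS` boundaries are illustrative stand-ins, not LDK's actual `BUCKET_START_POS` values.

```rust
const TICKS: u64 = 16_384;
// Illustrative unequal bucket boundaries (not LDK's real BUCKET_START_POS).
const START_POS: [u64; 5] = [0, 16, 1_024, 15_360, TICKS];

fn success_probability(min_buckets: &[u16; 4], max_buckets: &[u16; 4], payment_pos: u64) -> f64 {
	// Total weight across every valid (min, max) bucket pair.
	let mut total_points = 0u64;
	for (min_idx, min_bucket) in min_buckets.iter().enumerate() {
		for max_bucket in max_buckets.iter().take(4 - min_idx) {
			total_points += (*min_bucket as u64) * (*max_bucket as u64);
		}
	}
	if total_points == 0 { return 0.0; }

	let mut cumulative = 0.0;
	for (min_idx, min_bucket) in min_buckets.iter().enumerate() {
		let min_start = START_POS[min_idx];
		// A higher max_idx means a lower upper bound on available liquidity.
		for (max_idx, max_bucket) in max_buckets.iter().enumerate().take(4 - min_idx) {
			let max_end = START_POS[4 - max_idx] - 1;
			let pair_weight =
				(*min_bucket as u64 * *max_bucket as u64) as f64 / total_points as f64;
			if payment_pos >= max_end {
				// At or above this pair's upper bound: contributes nothing.
				continue;
			}
			if payment_pos < min_start {
				// Below this pair's lower bound: contributes its full weight.
				cumulative += pair_weight;
			} else {
				// Inside the pair's range: interpolate linearly, with an extra one in
				// the divisor because the payment position was rounded down.
				cumulative += pair_weight * (max_end - payment_pos) as f64
					/ (max_end - min_start + 1) as f64;
			}
		}
	}
	cumulative
}

fn main() {
	// One datapoint: lower bound in the bottom bucket, upper bound near capacity.
	let (mins, maxs) = ([32u16, 0, 0, 0], [32u16, 0, 0, 0]);
	println!("{:.3}", success_probability(&mins, &maxs, 8)); // ~0.999
}
```

The tests further down exercise this same shape through the real 32-bucket code.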
 impl<G: Deref<Target = NetworkGraph<L>>, L: Deref, T: Time> Writeable for ProbabilisticScorerUsingTime<G, L, T> where L::Target: Logger {
        #[inline]
        fn write<W: Writer>(&self, w: &mut W) -> Result<(), io::Error> {
@@ -1692,10 +1894,12 @@ impl<T: Time> Writeable for ChannelLiquidity<T> {
                let duration_since_epoch = T::duration_since_epoch() - self.last_updated.elapsed();
                write_tlv_fields!(w, {
                        (0, self.min_liquidity_offset_msat, required),
-                       (1, Some(self.min_liquidity_offset_history), option),
+                       // 1 was the min_liquidity_offset_history in octile form
                        (2, self.max_liquidity_offset_msat, required),
-                       (3, Some(self.max_liquidity_offset_history), option),
+                       // 3 was the max_liquidity_offset_history in octile form
                        (4, duration_since_epoch, required),
+                       (5, Some(self.min_liquidity_offset_history), option),
+                       (7, Some(self.max_liquidity_offset_history), option),
                });
                Ok(())
        }
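The type renumbering above leans on the even/odd TLV convention: readers must reject unknown even types but silently skip unknown odd ones, so pre-upgrade readers ignore the new 32-bucket histograms at types 5 and 7, while upgraded readers still understand the legacy octile form at types 1 and 3. A minimal sketch of that convention, using a hypothetical `TlvRecord` type rather than LDK's actual TLV macros:

```rust
// Hypothetical TLV record; LDK's real (de)serialization lives in its TLV macros.
struct TlvRecord {
	typ: u64,
	value: Vec<u8>,
}

/// Collects the records this reader understands, failing only on unknown *even*
/// types. Unknown odd types - like 5 and 7 to a pre-upgrade reader - are skipped.
fn read_known_tlvs(stream: &[TlvRecord], known: &[u64]) -> Result<Vec<(u64, Vec<u8>)>, ()> {
	let mut out = Vec::new();
	for rec in stream {
		if known.contains(&rec.typ) {
			out.push((rec.typ, rec.value.clone()));
		} else if rec.typ % 2 == 0 {
			// Unknown even type: a required field we don't understand.
			return Err(());
		}
		// Unknown odd type: optional, safely ignored.
	}
	Ok(out)
}
```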
@@ -1706,15 +1910,19 @@ impl<T: Time> Readable for ChannelLiquidity<T> {
        fn read<R: Read>(r: &mut R) -> Result<Self, DecodeError> {
                let mut min_liquidity_offset_msat = 0;
                let mut max_liquidity_offset_msat = 0;
-               let mut min_liquidity_offset_history = Some(HistoricalBucketRangeTracker::new());
-               let mut max_liquidity_offset_history = Some(HistoricalBucketRangeTracker::new());
+               let mut legacy_min_liq_offset_history: Option<LegacyHistoricalBucketRangeTracker> = None;
+               let mut legacy_max_liq_offset_history: Option<LegacyHistoricalBucketRangeTracker> = None;
+               let mut min_liquidity_offset_history: Option<HistoricalBucketRangeTracker> = None;
+               let mut max_liquidity_offset_history: Option<HistoricalBucketRangeTracker> = None;
                let mut duration_since_epoch = Duration::from_secs(0);
                read_tlv_fields!(r, {
                        (0, min_liquidity_offset_msat, required),
-                       (1, min_liquidity_offset_history, option),
+                       (1, legacy_min_liq_offset_history, option),
                        (2, max_liquidity_offset_msat, required),
-                       (3, max_liquidity_offset_history, option),
+                       (3, legacy_max_liq_offset_history, option),
                        (4, duration_since_epoch, required),
+                       (5, min_liquidity_offset_history, option),
+                       (7, max_liquidity_offset_history, option),
                });
                // On Rust prior to 1.60 `Instant::duration_since` will panic if time goes backwards.
                // We write `last_updated` as wallclock time even though it's ultimately an `Instant` (which
@@ -1728,6 +1936,20 @@ impl<T: Time> Readable for ChannelLiquidity<T> {
                let last_updated = if wall_clock_now > duration_since_epoch {
                        now - (wall_clock_now - duration_since_epoch)
                } else { now };
+               if min_liquidity_offset_history.is_none() {
+                       if let Some(legacy_buckets) = legacy_min_liq_offset_history {
+                               min_liquidity_offset_history = Some(legacy_buckets.into_current());
+                       } else {
+                               min_liquidity_offset_history = Some(HistoricalBucketRangeTracker::new());
+                       }
+               }
+               if max_liquidity_offset_history.is_none() {
+                       if let Some(legacy_buckets) = legacy_max_liq_offset_history {
+                               max_liquidity_offset_history = Some(legacy_buckets.into_current());
+                       } else {
+                               max_liquidity_offset_history = Some(HistoricalBucketRangeTracker::new());
+                       }
+               }
                Ok(Self {
                        min_liquidity_offset_msat,
                        max_liquidity_offset_msat,
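On the read side above, data found only under the legacy types is upgraded via `into_current`. One plausible shape for that conversion - hypothetical, since the real mapping has to account for the new buckets' unequal widths - is to spread each octile's points across the four new buckets it spans:

```rust
// Hypothetical sketch of the legacy upgrade; the real `into_current` may
// weight the new buckets differently given their unequal widths.
fn octiles_to_32_buckets(octiles: [u16; 8]) -> [u16; 32] {
	let mut buckets = [0u16; 32];
	for (idx, points) in octiles.iter().enumerate() {
		// Spread each legacy octile's points evenly over four consecutive buckets.
		for sub in 0..4 {
			buckets[idx * 4 + sub] = points / 4;
		}
	}
	buckets
}
```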
@@ -1856,20 +2078,20 @@ mod tests {
                let chain_source: Option<&crate::util::test_utils::TestChainSource> = None;
                network_graph.update_channel_from_announcement(
                        &signed_announcement, &chain_source).unwrap();
-               update_channel(network_graph, short_channel_id, node_1_key, 0, 1_000);
-               update_channel(network_graph, short_channel_id, node_2_key, 1, 0);
+               update_channel(network_graph, short_channel_id, node_1_key, 0, 1_000, 100);
+               update_channel(network_graph, short_channel_id, node_2_key, 1, 0, 100);
        }
 
        fn update_channel(
                network_graph: &mut NetworkGraph<&TestLogger>, short_channel_id: u64, node_key: SecretKey,
-               flags: u8, htlc_maximum_msat: u64
+               flags: u8, htlc_maximum_msat: u64, timestamp: u32,
        ) {
                let genesis_hash = genesis_block(Network::Testnet).header.block_hash();
                let secp_ctx = Secp256k1::new();
                let unsigned_update = UnsignedChannelUpdate {
                        chain_hash: genesis_hash,
                        short_channel_id,
-                       timestamp: 100,
+                       timestamp,
                        flags,
                        cltv_expiry_delta: 18,
                        htlc_minimum_msat: 0,
@@ -2400,6 +2622,7 @@ mod tests {
                scorer.payment_path_failed(&payment_path_for_amount(768), 42);
                scorer.payment_path_failed(&payment_path_for_amount(128), 43);
 
+               // Initial penalties
                let usage = ChannelUsage { amount_msat: 128, ..usage };
                assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), 0);
                let usage = ChannelUsage { amount_msat: 256, ..usage };
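These initial penalties follow the scorer's documented shape: given learned liquidity bounds `[min, max]`, a payment of `amount` succeeds with probability roughly `(max - amount) / (max - min)`, and the penalty scales with the negative log10 of that probability. A floating-point sketch of the shape (LDK itself uses fixed-point log approximations, so its exact msat values differ slightly):

```rust
/// Floating-point sketch of the liquidity penalty shape; LDK uses fixed-point
/// log approximations internally, so its exact penalties differ slightly.
fn liquidity_penalty(amount: f64, min: f64, max: f64, multiplier_msat: f64) -> f64 {
	if amount <= min {
		return 0.0; // At or below the known minimum: success is certain.
	}
	if amount >= max {
		return f64::INFINITY; // At or above the known maximum: failure is certain.
	}
	let success_prob = (max - amount) / (max - min);
	-success_prob.log10() * multiplier_msat
}

// With the bounds [128, 768] learned above, an amount of 256 succeeds with
// probability 512/640 = 0.8, giving a penalty proportional to -log10(0.8).
```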
@@ -2409,7 +2632,8 @@ mod tests {
                let usage = ChannelUsage { amount_msat: 896, ..usage };
                assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), u64::max_value());
 
-               SinceEpoch::advance(Duration::from_secs(9));
+               // No decay
+               SinceEpoch::advance(Duration::from_secs(4));
                let usage = ChannelUsage { amount_msat: 128, ..usage };
                assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), 0);
                let usage = ChannelUsage { amount_msat: 256, ..usage };
@@ -2419,7 +2643,19 @@ mod tests {
                let usage = ChannelUsage { amount_msat: 896, ..usage };
                assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), u64::max_value());
 
+               // Half decay (i.e., three-quarter life)
                SinceEpoch::advance(Duration::from_secs(1));
+               let usage = ChannelUsage { amount_msat: 128, ..usage };
+               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), 22);
+               let usage = ChannelUsage { amount_msat: 256, ..usage };
+               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), 106);
+               let usage = ChannelUsage { amount_msat: 768, ..usage };
+               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), 916);
+               let usage = ChannelUsage { amount_msat: 896, ..usage };
+               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), u64::max_value());
+
+               // One decay (i.e., half life)
+               SinceEpoch::advance(Duration::from_secs(5));
                let usage = ChannelUsage { amount_msat: 64, ..usage };
                assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), 0);
                let usage = ChannelUsage { amount_msat: 128, ..usage };
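The three checkpoints above walk the half-life decay of the learned offsets: with a 10-second half-life, nothing has decayed at 4 seconds, a sqrt(1/2) factor applies at 5 seconds (the "three-quarter life" point), and the offsets halve at 10 seconds. A simplified sketch that only handles whole half-lives:

```rust
use std::time::Duration;

/// Decays a liquidity offset by whole half-lives only; the real scorer also
/// applies a sqrt(1/2) step at half of a half-life, which is what the
/// "half decay" checkpoint above exercises.
fn decayed_offset_msat(offset_msat: u64, elapsed: Duration, half_life: Duration) -> u64 {
	let whole_half_lives = (elapsed.as_secs() / half_life.as_secs().max(1)) as u32;
	offset_msat.checked_shr(whole_half_lives).unwrap_or(0)
}

fn main() {
	let half_life = Duration::from_secs(10);
	assert_eq!(decayed_offset_msat(512, Duration::from_secs(4), half_life), 512);
	assert_eq!(decayed_offset_msat(512, Duration::from_secs(10), half_life), 256);
	assert_eq!(decayed_offset_msat(512, Duration::from_secs(45), half_life), 32);
}
```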
@@ -2831,25 +3067,51 @@ mod tests {
                        inflight_htlc_msat: 0,
                        effective_capacity: EffectiveCapacity::Total { capacity_msat: 1_024, htlc_maximum_msat: 1_024 },
                };
+               let usage_1 = ChannelUsage {
+                       amount_msat: 1,
+                       inflight_htlc_msat: 0,
+                       effective_capacity: EffectiveCapacity::Total { capacity_msat: 1_024, htlc_maximum_msat: 1_024 },
+               };
+
                // With no historical data the normal liquidity penalty calculation is used.
                assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), 47);
                assert_eq!(scorer.historical_estimated_channel_liquidity_probabilities(42, &target),
                        None);
+               assert_eq!(scorer.historical_estimated_payment_success_probability(42, &target, 42),
+                       None);
 
                scorer.payment_path_failed(&payment_path_for_amount(1), 42);
                assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), 2048);
-               // The "it failed" increment is 32, where the probability should lie fully in the first
-               // octile.
+               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage_1, &params), 128);
+               // The "it failed" increment is 32, where the probability should lie several buckets into
+               // the first octile.
                assert_eq!(scorer.historical_estimated_channel_liquidity_probabilities(42, &target),
-                       Some(([32, 0, 0, 0, 0, 0, 0, 0], [32, 0, 0, 0, 0, 0, 0, 0])));
+                       Some(([32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
+                               [0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])));
+               assert!(scorer.historical_estimated_payment_success_probability(42, &target, 1)
+                       .unwrap() > 0.35);
+               assert_eq!(scorer.historical_estimated_payment_success_probability(42, &target, 500),
+                       Some(0.0));
 
                // Even after we tell the scorer we definitely have enough available liquidity, it will
                // still remember that there was some failure in the past, and assign a non-0 penalty.
                scorer.payment_path_failed(&payment_path_for_amount(1000), 43);
-               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), 198);
-               // The first octile should be decayed just slightly and the last octile has a new point.
+               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), 32);
+               // The first points should be decayed just slightly and the last bucket gains a new point.
                assert_eq!(scorer.historical_estimated_channel_liquidity_probabilities(42, &target),
-                       Some(([31, 0, 0, 0, 0, 0, 0, 32], [31, 0, 0, 0, 0, 0, 0, 32])));
+                       Some(([31, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0],
+                               [0, 0, 0, 0, 0, 0, 31, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 32])));
+
+               // The exact success probability is a bit complicated and involves integer rounding, so we
+               // simply check bounds here.
+               let five_hundred_prob =
+                       scorer.historical_estimated_payment_success_probability(42, &target, 500).unwrap();
+               assert!(five_hundred_prob > 0.66);
+               assert!(five_hundred_prob < 0.68);
+               let one_prob =
+                       scorer.historical_estimated_payment_success_probability(42, &target, 1).unwrap();
+               assert!(one_prob < 1.0);
+               assert!(one_prob > 0.95);
 
                // Advance the time forward 16 half-lives (which the docs claim will ensure all data is
                // gone), and check that we're back to where we started.
@@ -2858,15 +3120,18 @@ mod tests {
                // Once fully decayed we still hold the data internally, but it's all-0s and is now
                // reported as `None`. In the future we may remove the data entirely instead.
                assert_eq!(scorer.historical_estimated_channel_liquidity_probabilities(42, &target),
-                       Some(([0; 8], [0; 8])));
+                       None);
+               assert_eq!(scorer.historical_estimated_payment_success_probability(42, &target, 1), None);
 
-               let usage = ChannelUsage {
+               let mut usage = ChannelUsage {
                        amount_msat: 100,
                        inflight_htlc_msat: 1024,
                        effective_capacity: EffectiveCapacity::Total { capacity_msat: 1_024, htlc_maximum_msat: 1_024 },
                };
                scorer.payment_path_failed(&payment_path_for_amount(1), 42);
-               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), 409);
+               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), 2048);
+               usage.inflight_htlc_msat = 0;
+               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), 866);
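The pair of assertions above isolates the effect of in-flight HTLCs: the scorer counts `inflight_htlc_msat` against the channel along with the amount being scored, so 100 msat with 1,024 msat already in flight saturates the 1,024 msat channel and draws the maximum 2,048 msat penalty, while the same amount with nothing in flight costs only 866 msat. A one-line sketch of that adjustment (a simplification of the real usage handling):

```rust
// Simplified sketch: existing HTLCs shrink the room a new payment has, so the
// scorer effectively evaluates `amount + inflight` against the capacity.
fn effective_amount_msat(amount_msat: u64, inflight_htlc_msat: u64) -> u64 {
	amount_msat.saturating_add(inflight_htlc_msat)
}
```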
 
                let usage = ChannelUsage {
                        amount_msat: 1,
@@ -2982,4 +3247,77 @@ mod tests {
                assert_eq!(liquidity.min_liquidity_msat(), 256);
                assert_eq!(liquidity.max_liquidity_msat(), 768);
        }
+
+       #[test]
+       fn realistic_historical_failures() {
+               // The motivation for the unequal sized buckets came largely from attempting to pay 10k
+               // sats over a one bitcoin channel. This tests that case explicitly, ensuring that we score
+               // properly.
+               let logger = TestLogger::new();
+               let mut network_graph = network_graph(&logger);
+               let params = ProbabilisticScoringFeeParameters {
+                       historical_liquidity_penalty_multiplier_msat: 1024,
+                       historical_liquidity_penalty_amount_multiplier_msat: 1024,
+                       ..ProbabilisticScoringFeeParameters::zero_penalty()
+               };
+               let decay_params = ProbabilisticScoringDecayParameters {
+                       liquidity_offset_half_life: Duration::from_secs(60 * 60),
+                       historical_no_updates_half_life: Duration::from_secs(10),
+                       ..ProbabilisticScoringDecayParameters::default()
+               };
+
+               let capacity_msat = 100_000_000_000;
+               update_channel(&mut network_graph, 42, source_privkey(), 0, capacity_msat, 200);
+               update_channel(&mut network_graph, 42, target_privkey(), 1, capacity_msat, 200);
+
+               let mut scorer = ProbabilisticScorer::new(decay_params, &network_graph, &logger);
+               let source = source_node_id();
+               let target = target_node_id();
+
+               let mut amount_msat = 10_000_000;
+               let usage = ChannelUsage {
+                       amount_msat,
+                       inflight_htlc_msat: 0,
+                       effective_capacity: EffectiveCapacity::Total { capacity_msat, htlc_maximum_msat: capacity_msat },
+               };
+               // With no historical data the normal liquidity penalty calculation is used, which in this
+               // case is negligibly low.
+               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), 0);
+               assert_eq!(scorer.historical_estimated_channel_liquidity_probabilities(42, &target),
+                       None);
+               assert_eq!(scorer.historical_estimated_payment_success_probability(42, &target, 42),
+                       None);
+
+               // Fail to pay once, and then check the buckets and penalty.
+               scorer.payment_path_failed(&payment_path_for_amount(amount_msat), 42);
+               // The penalty should be the maximum penalty, as the amount we're scoring now falls in the
+               // same bucket as the channel's only max-liquidity datapoint.
+               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params),
+                       2048 + 2048 * amount_msat / super::AMOUNT_PENALTY_DIVISOR);
+               // The "it failed" increment is 32, which lands in the upper-bound bucket covering
+               // roughly 6k sats to 12k sats.
+               assert_eq!(scorer.historical_estimated_channel_liquidity_probabilities(42, &target),
+                       Some(([32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
+                               [0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])));
+               // The success probability estimate itself should be zero.
+               assert_eq!(scorer.historical_estimated_payment_success_probability(42, &target, amount_msat),
+                       Some(0.0));
+
+               // Now test again with the amount in the bottom bucket.
+               amount_msat /= 2;
+               // The new amount lies entirely within the only minimum bucket holding any points, so the
+               // probability we assign is 1/2.
+               assert_eq!(scorer.historical_estimated_payment_success_probability(42, &target, amount_msat),
+                       Some(0.5));
+
+               // ...but once we see a failure, we consider the payment to be substantially less likely.
+               // The second max bucket still holds 31 points, but as that is below
+               // BUCKET_FIXED_POINT_ONE it no longer extends the range we consider, and the success
+               // estimate drops to zero.
+               scorer.payment_path_failed(&payment_path_for_amount(amount_msat), 42);
+               assert_eq!(scorer.historical_estimated_channel_liquidity_probabilities(42, &target),
+                       Some(([63, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
+                               [32, 31, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])));
+               assert_eq!(scorer.historical_estimated_payment_success_probability(42, &target, amount_msat),
+                       Some(0.0));
+       }
 }
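This last test is the motivating case for the unequal bucket widths: 10k sats is a rounding error against a 1 BTC channel, so equal-width octiles would smear every datapoint about it across an eighth of the channel's range. Mapping amounts to fine-grained position ticks first, then into narrow buckets near the channel's edges, keeps such payments distinguishable. A sketch of that two-step mapping with illustrative boundaries (not LDK's actual `BUCKET_START_POS`):

```rust
// Illustrative only: unequal bucket boundaries, fine near the edges and coarse
// in the middle; LDK's real BUCKET_START_POS has 33 entries over 32 buckets.
const POSITION_TICKS: u64 = 1 << 14;
const BOUNDS: [u64; 9] = [0, 1, 4, 64, 1_024, 8_192, 15_360, 16_320, POSITION_TICKS];

fn amount_to_bucket(amount_msat: u64, capacity_msat: u64) -> usize {
	// Linear position in [0, POSITION_TICKS), then a lookup into unequal buckets.
	let pos = amount_msat * POSITION_TICKS / capacity_msat.saturating_add(1);
	BOUNDS.windows(2).position(|w| pos < w[1]).unwrap_or(BOUNDS.len() - 2)
}

fn main() {
	// 10k sats over a 1 BTC channel lands in a narrow bottom bucket instead of
	// sharing an octile with everything below 12.5M sats.
	assert_eq!(amount_to_bucket(10_000_000, 100_000_000_000), 1);
}
```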