Make `as_directed_to` non-public
diff --git a/lightning/src/routing/scoring.rs b/lightning/src/routing/scoring.rs
index c8cf3583cd4b6533fd3aae281f37c8fc624fd818..860f3cb195418054322cd551f5e7487b84af8cad 100644
 //!
 //! [`find_route`]: crate::routing::router::find_route
 
-use ln::msgs::DecodeError;
-use routing::gossip::{EffectiveCapacity, NetworkGraph, NodeId};
-use routing::router::RouteHop;
-use util::ser::{Readable, ReadableArgs, Writeable, Writer};
-use util::logger::Logger;
-use util::time::Time;
-
-use prelude::*;
-use core::fmt;
+use crate::ln::msgs::DecodeError;
+use crate::routing::gossip::{EffectiveCapacity, NetworkGraph, NodeId};
+use crate::routing::router::RouteHop;
+use crate::util::ser::{Readable, ReadableArgs, Writeable, Writer};
+use crate::util::logger::Logger;
+use crate::util::time::Time;
+
+use crate::prelude::*;
+use core::{cmp, fmt};
 use core::cell::{RefCell, RefMut};
 use core::convert::TryInto;
 use core::ops::{Deref, DerefMut};
 use core::time::Duration;
-use io::{self, Read};
-use sync::{Mutex, MutexGuard};
+use crate::io::{self, Read};
+use crate::sync::{Mutex, MutexGuard};
 
 /// We define Score ever-so-slightly differently based on whether we are being built for C bindings
 /// or not. For users, `LockableScore` must somehow be writeable to disk. For Rust users, this is
@@ -163,6 +163,7 @@ pub trait LockableScore<'a> {
 /// use the Persister to persist it.
 pub trait WriteableScore<'a>: LockableScore<'a> + Writeable {}
 
+#[cfg(not(c_bindings))]
 impl<'a, T> WriteableScore<'a> for T where T: LockableScore<'a> + Writeable {}
 
 /// (C-not exported)
@@ -188,15 +189,52 @@ pub struct MultiThreadedLockableScore<S: Score> {
        score: Mutex<S>,
 }
 #[cfg(c_bindings)]
-/// (C-not exported)
+/// A locked `MultiThreadedLockableScore`.
+pub struct MultiThreadedScoreLock<'a, S: Score>(MutexGuard<'a, S>);
+#[cfg(c_bindings)]
+impl<'a, T: Score + 'a> Score for MultiThreadedScoreLock<'a, T> {
+       fn channel_penalty_msat(&self, scid: u64, source: &NodeId, target: &NodeId, usage: ChannelUsage) -> u64 {
+               self.0.channel_penalty_msat(scid, source, target, usage)
+       }
+       fn payment_path_failed(&mut self, path: &[&RouteHop], short_channel_id: u64) {
+               self.0.payment_path_failed(path, short_channel_id)
+       }
+       fn payment_path_successful(&mut self, path: &[&RouteHop]) {
+               self.0.payment_path_successful(path)
+       }
+       fn probe_failed(&mut self, path: &[&RouteHop], short_channel_id: u64) {
+               self.0.probe_failed(path, short_channel_id)
+       }
+       fn probe_successful(&mut self, path: &[&RouteHop]) {
+               self.0.probe_successful(path)
+       }
+}
+#[cfg(c_bindings)]
+impl<'a, T: Score + 'a> Writeable for MultiThreadedScoreLock<'a, T> {
+       fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
+               self.0.write(writer)
+       }
+}
+
+#[cfg(c_bindings)]
 impl<'a, T: Score + 'a> LockableScore<'a> for MultiThreadedLockableScore<T> {
-       type Locked = MutexGuard<'a, T>;
+       type Locked = MultiThreadedScoreLock<'a, T>;
 
-       fn lock(&'a self) -> MutexGuard<'a, T> {
-               Mutex::lock(&self.score).unwrap()
+       fn lock(&'a self) -> MultiThreadedScoreLock<'a, T> {
+               MultiThreadedScoreLock(Mutex::lock(&self.score).unwrap())
        }
 }
 
+#[cfg(c_bindings)]
+impl<T: Score> Writeable for MultiThreadedLockableScore<T> {
+       fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
+               self.lock().write(writer)
+       }
+}
+
+#[cfg(c_bindings)]
+impl<'a, T: Score + 'a> WriteableScore<'a> for MultiThreadedLockableScore<T> {}
+
 #[cfg(c_bindings)]
 impl<T: Score> MultiThreadedLockableScore<T> {
        /// Creates a new [`MultiThreadedLockableScore`] given an underlying [`Score`].
@@ -222,7 +260,7 @@ impl<'a, S: Writeable> Writeable for MutexGuard<'a, S> {
 }
 
 /// Proposed use of a channel passed as a parameter to [`Score::channel_penalty_msat`].
-#[derive(Clone, Copy)]
+#[derive(Clone, Copy, Debug)]
 pub struct ChannelUsage {
        /// The amount to send through the channel, denominated in millisatoshis.
        pub amount_msat: u64,
@@ -281,25 +319,34 @@ impl ReadableArgs<u64> for FixedPenaltyScorer {
 #[cfg(not(feature = "no-std"))]
 type ConfiguredTime = std::time::Instant;
 #[cfg(feature = "no-std")]
-use util::time::Eternity;
+use crate::util::time::Eternity;
 #[cfg(feature = "no-std")]
 type ConfiguredTime = Eternity;
 
 /// [`Score`] implementation using channel success probability distributions.
 ///
-/// Based on *Optimally Reliable & Cheap Payment Flows on the Lightning Network* by Rene Pickhardt
-/// and Stefan Richter [[1]]. Given the uncertainty of channel liquidity balances, probability
-/// distributions are defined based on knowledge learned from successful and unsuccessful attempts.
-/// Then the negative `log10` of the success probability is used to determine the cost of routing a
-/// specific HTLC amount through a channel.
+/// Channels are tracked with upper and lower liquidity bounds - when an HTLC fails at a channel,
+/// we learn that the upper-bound on the available liquidity is lower than the amount of the HTLC.
+/// When a payment is forwarded through a channel (but fails later in the route), we learn the
+/// lower-bound on the channel's available liquidity must be at least the value of the HTLC.
+///
+/// These bounds are then used to determine a success probability using the formula from
+/// *Optimally Reliable & Cheap Payment Flows on the Lightning Network* by Rene Pickhardt
+/// and Stefan Richter [[1]] (i.e. `(upper_bound - payment_amount) / (upper_bound - lower_bound)`).
 ///
-/// Knowledge about channel liquidity balances takes the form of upper and lower bounds on the
-/// possible liquidity. Certainty of the bounds is decreased over time using a decay function. See
-/// [`ProbabilisticScoringParameters`] for details.
+/// This probability is combined with the [`liquidity_penalty_multiplier_msat`] and
+/// [`liquidity_penalty_amount_multiplier_msat`] parameters to calculate a concrete penalty in
+/// milli-satoshis. The penalties, when added across all hops, have the property of being linear in
+/// terms of the entire path's success probability. This allows the router to directly compare
+/// penalties for different paths. See the documentation of those parameters for the exact formulas.
 ///
-/// Since the scorer aims to learn the current channel liquidity balances, it works best for nodes
-/// with high payment volume or that actively probe the [`NetworkGraph`]. Nodes with low payment
-/// volume are more likely to experience failed payment paths, which would need to be retried.
+/// The liquidity bounds are decayed by halving them every [`liquidity_offset_half_life`].
+///
+/// Further, we track the history of our upper and lower liquidity bounds for each channel,
+/// allowing us to assign a second penalty (using [`historical_liquidity_penalty_multiplier_msat`]
+/// and [`historical_liquidity_penalty_amount_multiplier_msat`]) based on the same probability
+/// formula, but using the history of a channel rather than our latest estimates for the liquidity
+/// bounds.
 ///
 /// # Note
 ///
@@ -307,6 +354,11 @@ type ConfiguredTime = Eternity;
 /// behavior.
 ///
 /// [1]: https://arxiv.org/abs/2107.05322
+/// [`liquidity_penalty_multiplier_msat`]: ProbabilisticScoringParameters::liquidity_penalty_multiplier_msat
+/// [`liquidity_penalty_amount_multiplier_msat`]: ProbabilisticScoringParameters::liquidity_penalty_amount_multiplier_msat
+/// [`liquidity_offset_half_life`]: ProbabilisticScoringParameters::liquidity_offset_half_life
+/// [`historical_liquidity_penalty_multiplier_msat`]: ProbabilisticScoringParameters::historical_liquidity_penalty_multiplier_msat
+/// [`historical_liquidity_penalty_amount_multiplier_msat`]: ProbabilisticScoringParameters::historical_liquidity_penalty_amount_multiplier_msat
 pub type ProbabilisticScorer<G, L> = ProbabilisticScorerUsingTime::<G, L, ConfiguredTime>;
 
 /// Probabilistic [`Score`] implementation.
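As an aside, the formula documented above maps onto a small calculation. The following is an illustrative sketch only (it is not part of this patch, and it uses floating point where the actual scorer uses the fixed-point `approx::negative_log10_times_2048` helper):

    // Hypothetical helper, for illustration: the documented liquidity penalty for one channel.
    fn liquidity_penalty_msat_sketch(
        amount_msat: u64, lower_bound_msat: u64, upper_bound_msat: u64,
        liquidity_penalty_multiplier_msat: u64,
    ) -> u64 {
        // success_probability = (upper_bound - payment_amount) / (upper_bound - lower_bound)
        if amount_msat >= upper_bound_msat { return u64::max_value(); }
        if amount_msat <= lower_bound_msat { return 0; }
        let success_probability = (upper_bound_msat - amount_msat) as f64
            / (upper_bound_msat - lower_bound_msat) as f64;
        // penalty = -log10(success_probability) * liquidity_penalty_multiplier_msat
        (-success_probability.log10() * liquidity_penalty_multiplier_msat as f64) as u64
    }

For example, with bounds of 0 and 1,000,000 msat, a 500,000 msat payment has a 50% success probability and, with the default 30,000 msat multiplier, incurs a penalty of roughly 9,030 msat.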
@@ -360,19 +412,27 @@ pub struct ProbabilisticScoringParameters {
        /// uncertainty bounds of the channel liquidity balance. Amounts above the upper bound will
        /// result in a `u64::max_value` penalty, however.
        ///
+       /// `-log10(success_probability) * liquidity_penalty_multiplier_msat`
+       ///
        /// Default value: 30,000 msat
        ///
        /// [`liquidity_offset_half_life`]: Self::liquidity_offset_half_life
        pub liquidity_penalty_multiplier_msat: u64,
 
-       /// The time required to elapse before any knowledge learned about channel liquidity balances is
-       /// cut in half.
+       /// Whenever this amount of time elapses since the last update to a channel's liquidity bounds,
+       /// the distance from the bounds to "zero" is cut in half. In other words, the lower-bound on
+       /// the available liquidity is halved and the upper-bound moves half-way to the channel's total
+       /// capacity.
        ///
-       /// The bounds are defined in terms of offsets and are initially zero. Increasing the offsets
-       /// gives tighter bounds on the channel liquidity balance. Thus, halving the offsets decreases
-       /// the certainty of the channel liquidity balance.
+       /// Because halving the liquidity bounds grows the uncertainty on the channel's liquidity,
+       /// the penalty for an amount within the new bounds may change. See the [`ProbabilisticScorer`]
+       /// struct documentation for more info on the way the liquidity bounds are used.
        ///
-       /// Default value: 1 hour
+       /// For example, if the channel's capacity is 1 million sats, and the current lower and upper
+       /// liquidity bounds are 200,000 sats and 600,000 sats, after this amount of time the lower
+       /// and upper liquidity bounds will be decayed to 100,000 and 800,000 sats.
+       ///
+       /// Default value: 6 hours
        ///
        /// # Note
        ///
@@ -436,6 +496,16 @@ pub struct ProbabilisticScoringParameters {
        /// [`liquidity_penalty_amount_multiplier_msat`]: Self::liquidity_penalty_amount_multiplier_msat
        pub historical_liquidity_penalty_amount_multiplier_msat: u64,
 
+       /// If we aren't learning any new datapoints for a channel, the historical liquidity bounds
+       /// tracking can simply live on with increasingly stale data. Instead, when a channel has not
+       /// seen a liquidity estimate update for this amount of time, the historical datapoints are
+       /// decayed by half.
+       ///
+       /// Note that after 16 or more half lives all historical data will be completely gone.
+       ///
+       /// Default value: 14 days
+       pub historical_no_updates_half_life: Duration,
+
        /// Manual penalties used for the given nodes. Allows to set a particular penalty for a given
        /// node. Note that a manual penalty of `u64::max_value()` means the node would not ever be
        /// considered during path finding.
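The "16 or more half lives" note on `historical_no_updates_half_life` above follows from the bucket representation: each historical bucket is a `u16` counter, and decaying by N half-lives is a right shift by N, so sixteen or more shifts always clear it. A minimal sketch (not part of this patch) mirroring the `checked_shr` call added in `time_decay_data` below:

    // Hypothetical helper, for illustration only.
    fn decayed_bucket(bucket: u16, half_lives: u32) -> u16 {
        // checked_shr returns None once the shift reaches the bit width (16 for u16).
        bucket.checked_shr(half_lives).unwrap_or(0)
    }
    // decayed_bucket(u16::MAX, 16) == 0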
@@ -509,10 +579,89 @@ impl HistoricalBucketRangeTracker {
                        self.buckets[bucket_idx as usize] = self.buckets[bucket_idx as usize].saturating_add(32);
                }
        }
+       /// Decay all buckets by the given number of half-lives. Used to more aggressively remove old
+       /// datapoints as we receive newer information.
+       fn time_decay_data(&mut self, half_lives: u32) {
+               for e in self.buckets.iter_mut() {
+                       *e = e.checked_shr(half_lives).unwrap_or(0);
+               }
+       }
 }
 
 impl_writeable_tlv_based!(HistoricalBucketRangeTracker, { (0, buckets, required) });
 
+struct HistoricalMinMaxBuckets<'a> {
+       min_liquidity_offset_history: &'a HistoricalBucketRangeTracker,
+       max_liquidity_offset_history: &'a HistoricalBucketRangeTracker,
+}
+
+impl HistoricalMinMaxBuckets<'_> {
+       #[inline]
+       fn calculate_success_probability_times_billion(&self, required_decays: u32, payment_amt_64th_bucket: u8) -> Option<u64> {
+               // If historical penalties are enabled, calculate the penalty by walking the set of
+               // historical liquidity bucket (min, max) combinations (where min_idx < max_idx) and, for
+               // each, calculate the probability of success given our payment amount, then total the
+               // weighted average probability of success.
+               //
+               // We use a sliding scale to decide which point within a given bucket will be compared to
+               // the amount being sent - for lower-bounds, the amount being sent is compared to the lower
+               // edge of the first bucket (i.e. zero), but compared to the upper 7/8ths of the last
+               // bucket (i.e. 9 times the index, or 63), with each bucket in between increasing the
+               // comparison point by 1/64th. For upper-bounds, the same applies, however with an offset
+               // of 1/64th (i.e. starting at one and ending at 64). This avoids failing to assign
+               // penalties to channels at the edges.
+               //
+               // If we used the bottom edge of buckets, we'd end up never assigning any penalty at all to
+               // such a channel when sending less than ~0.19% of the channel's capacity (e.g. ~200k sats
+               // for a 1 BTC channel!).
+               //
+               // If we used the middle of each bucket we'd never assign any penalty at all when sending
+               // less than 1/16th of a channel's capacity, or 1/8th if we used the top of the bucket.
+               let mut total_valid_points_tracked = 0;
+
+               // Rather than actually decaying the individual buckets, which would lose precision, we
+               // simply track whether all buckets would be decayed to zero, in which case we treat it as
+               // if we had no data.
+               let mut is_fully_decayed = true;
+               let mut check_track_bucket_contains_undecayed_points =
+                       |bucket_val: u16| if bucket_val.checked_shr(required_decays).unwrap_or(0) > 0 { is_fully_decayed = false; };
+
+               for (min_idx, min_bucket) in self.min_liquidity_offset_history.buckets.iter().enumerate() {
+                       check_track_bucket_contains_undecayed_points(*min_bucket);
+                       for max_bucket in self.max_liquidity_offset_history.buckets.iter().take(8 - min_idx) {
+                               total_valid_points_tracked += (*min_bucket as u64) * (*max_bucket as u64);
+                               check_track_bucket_contains_undecayed_points(*max_bucket);
+                       }
+               }
+               // If the total valid points is smaller than 1.0 (i.e. 32 * 32 in our fixed-point scheme), treat
+               // it as if we were fully decayed.
+               if total_valid_points_tracked.checked_shr(required_decays).unwrap_or(0) < 32*32 || is_fully_decayed {
+                       return None;
+               }
+
+               let mut cumulative_success_prob_times_billion = 0;
+               for (min_idx, min_bucket) in self.min_liquidity_offset_history.buckets.iter().enumerate() {
+                       for (max_idx, max_bucket) in self.max_liquidity_offset_history.buckets.iter().enumerate().take(8 - min_idx) {
+                               let bucket_prob_times_million = (*min_bucket as u64) * (*max_bucket as u64)
+                                       * 1024 * 1024 / total_valid_points_tracked;
+                               let min_64th_bucket = min_idx as u8 * 9;
+                               let max_64th_bucket = (7 - max_idx as u8) * 9 + 1;
+                               if payment_amt_64th_bucket > max_64th_bucket {
+                                       // Success probability 0, the payment amount is above the max liquidity
+                               } else if payment_amt_64th_bucket <= min_64th_bucket {
+                                       cumulative_success_prob_times_billion += bucket_prob_times_million * 1024;
+                               } else {
+                                       cumulative_success_prob_times_billion += bucket_prob_times_million *
+                                               ((max_64th_bucket - payment_amt_64th_bucket) as u64) * 1024 /
+                                               ((max_64th_bucket - min_64th_bucket) as u64);
+                               }
+                       }
+               }
+
+               Some(cumulative_success_prob_times_billion)
+       }
+}
+
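To illustrate the sliding-scale comparison described in the comment above, consider the single (min_idx = 0, max_idx = 0) bucket pair, i.e. liquidity believed to lie anywhere between zero and the full capacity: its comparison points are 0 and 7 * 9 + 1 = 64, so a payment of half the capacity (payment_amt_64th_bucket = 32) is assigned a success probability of (64 - 32) / 64 = 50%. A floating-point sketch of that one pair (not part of this patch, which uses fixed-point arithmetic throughout):

    // Hypothetical helper, for illustration: success probability contributed by the
    // single (min_idx = 0, max_idx = 0) bucket pair.
    fn single_pair_success_prob(payment_amt_64th_bucket: u8) -> f64 {
        let (min_64th_bucket, max_64th_bucket) = (0u8, 7 * 9 + 1);
        if payment_amt_64th_bucket > max_64th_bucket {
            0.0
        } else if payment_amt_64th_bucket <= min_64th_bucket {
            1.0
        } else {
            (max_64th_bucket - payment_amt_64th_bucket) as f64
                / (max_64th_bucket - min_64th_bucket) as f64
        }
    }
    // single_pair_success_prob(32) == 0.5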
 /// Accounting for channel liquidity balance uncertainty.
 ///
 /// Direction is defined in terms of [`NodeId`] partial ordering, where the source node is the
@@ -641,10 +790,11 @@ impl ProbabilisticScoringParameters {
                        base_penalty_msat: 0,
                        base_penalty_amount_multiplier_msat: 0,
                        liquidity_penalty_multiplier_msat: 0,
-                       liquidity_offset_half_life: Duration::from_secs(3600),
+                       liquidity_offset_half_life: Duration::from_secs(6 * 60 * 60),
                        liquidity_penalty_amount_multiplier_msat: 0,
                        historical_liquidity_penalty_multiplier_msat: 0,
                        historical_liquidity_penalty_amount_multiplier_msat: 0,
+                       historical_no_updates_half_life: Duration::from_secs(60 * 60 * 24 * 14),
                        manual_node_penalties: HashMap::new(),
                        anti_probing_penalty_msat: 0,
                        considered_impossible_penalty_msat: 0,
@@ -666,10 +816,11 @@ impl Default for ProbabilisticScoringParameters {
                        base_penalty_msat: 500,
                        base_penalty_amount_multiplier_msat: 8192,
                        liquidity_penalty_multiplier_msat: 30_000,
-                       liquidity_offset_half_life: Duration::from_secs(3600),
+                       liquidity_offset_half_life: Duration::from_secs(6 * 60 * 60),
                        liquidity_penalty_amount_multiplier_msat: 192,
                        historical_liquidity_penalty_multiplier_msat: 10_000,
                        historical_liquidity_penalty_amount_multiplier_msat: 64,
+                       historical_no_updates_half_life: Duration::from_secs(60 * 60 * 24 * 14),
                        manual_node_penalties: HashMap::new(),
                        anti_probing_penalty_msat: 250,
                        considered_impossible_penalty_msat: 1_0000_0000_000,
@@ -791,35 +942,27 @@ impl<L: Deref<Target = u64>, BRT: Deref<Target = HistoricalBucketRangeTracker>,
 
                if params.historical_liquidity_penalty_multiplier_msat != 0 ||
                   params.historical_liquidity_penalty_amount_multiplier_msat != 0 {
-                       // If historical penalties are enabled, calculate the penalty by walking the set of
-                       // historical liquidity bucket (min, max) combinations (where min_idx < max_idx)
-                       // and, for each, calculate the probability of success given our payment amount, then
-                       // total the weighted average probability of success.
-                       //
-                       // We use a sliding scale to decide which point within a given bucket will be compared
-                       // to the amount being sent - for lower-bounds, the amount being sent is compared to
-                       // the lower edge of the first bucket (i.e. zero), but compared to the upper 7/8ths of
-                       // the last bucket (i.e. 9 times the index, or 63), with each bucket in between
-                       // increasing the comparison point by 1/64th. For upper-bounds, the same applies,
-                       // however with an offset of 1/64th (i.e. starting at one and ending at 64). This
-                       // avoids failing to assign penalties to channels at the edges.
-                       //
-                       // If we used the bottom edge of buckets, we'd end up never assigning any penalty at
-                       // all to such a channel when sending less than ~0.19% of the channel's capacity (e.g.
-                       // ~200k sats for a 1 BTC channel!).
-                       //
-                       // If we used the middle of each bucket we'd never assign any penalty at all when
-                       // sending less than 1/16th of a channel's capacity, or 1/8th if we used the top of the
-                       // bucket.
-                       let mut total_valid_points_tracked = 0;
-                       for (min_idx, min_bucket) in self.min_liquidity_offset_history.buckets.iter().enumerate() {
-                               for max_bucket in self.max_liquidity_offset_history.buckets.iter().take(8 - min_idx) {
-                                       total_valid_points_tracked += (*min_bucket as u64) * (*max_bucket as u64);
-                               }
-                       }
-                       if total_valid_points_tracked == 0 {
-                               // If we don't have any valid points, redo the non-historical calculation with no
-                               // liquidity bounds tracked and the historical penalty multipliers.
+                       let required_decays = self.now.duration_since(*self.last_updated).as_secs()
+                               .checked_div(params.historical_no_updates_half_life.as_secs())
+                               .map_or(u32::max_value(), |decays| cmp::min(decays, u32::max_value() as u64) as u32);
+                       let payment_amt_64th_bucket = amount_msat * 64 / self.capacity_msat;
+                       debug_assert!(payment_amt_64th_bucket <= 64);
+                       if payment_amt_64th_bucket > 64 { return res; }
+
+                       let buckets = HistoricalMinMaxBuckets {
+                               min_liquidity_offset_history: &self.min_liquidity_offset_history,
+                               max_liquidity_offset_history: &self.max_liquidity_offset_history,
+                       };
+                       if let Some(cumulative_success_prob_times_billion) = buckets
+                                       .calculate_success_probability_times_billion(required_decays, payment_amt_64th_bucket as u8) {
+                               let historical_negative_log10_times_2048 = approx::negative_log10_times_2048(cumulative_success_prob_times_billion + 1, 1024 * 1024 * 1024);
+                               res = res.saturating_add(Self::combined_penalty_msat(amount_msat,
+                                       historical_negative_log10_times_2048, params.historical_liquidity_penalty_multiplier_msat,
+                                       params.historical_liquidity_penalty_amount_multiplier_msat));
+                       } else {
+                               // If we don't have any valid points (or, once decayed, we have less than a full
+                               // point), redo the non-historical calculation with no liquidity bounds tracked and
+                               // the historical penalty multipliers.
                                let max_capacity = self.capacity_msat.saturating_sub(amount_msat).saturating_add(1);
                                let negative_log10_times_2048 =
                                        approx::negative_log10_times_2048(max_capacity, self.capacity_msat.saturating_add(1));
@@ -828,33 +971,6 @@ impl<L: Deref<Target = u64>, BRT: Deref<Target = HistoricalBucketRangeTracker>,
                                        params.historical_liquidity_penalty_amount_multiplier_msat));
                                return res;
                        }
-
-                       let payment_amt_64th_bucket = amount_msat * 64 / self.capacity_msat;
-                       debug_assert!(payment_amt_64th_bucket <= 64);
-                       if payment_amt_64th_bucket > 64 { return res; }
-
-                       let mut cumulative_success_prob_times_billion = 0;
-                       for (min_idx, min_bucket) in self.min_liquidity_offset_history.buckets.iter().enumerate() {
-                               for (max_idx, max_bucket) in self.max_liquidity_offset_history.buckets.iter().enumerate().take(8 - min_idx) {
-                                       let bucket_prob_times_million = (*min_bucket as u64) * (*max_bucket as u64)
-                                               * 1024 * 1024 / total_valid_points_tracked;
-                                       let min_64th_bucket = min_idx as u64 * 9;
-                                       let max_64th_bucket = (7 - max_idx as u64) * 9 + 1;
-                                       if payment_amt_64th_bucket > max_64th_bucket {
-                                               // Success probability 0, the payment amount is above the max liquidity
-                                       } else if payment_amt_64th_bucket <= min_64th_bucket {
-                                               cumulative_success_prob_times_billion += bucket_prob_times_million * 1024;
-                                       } else {
-                                               cumulative_success_prob_times_billion += bucket_prob_times_million *
-                                                       (max_64th_bucket - payment_amt_64th_bucket) * 1024 /
-                                                       (max_64th_bucket - min_64th_bucket);
-                                       }
-                               }
-                       }
-                       let historical_negative_log10_times_2048 = approx::negative_log10_times_2048(cumulative_success_prob_times_billion + 1, 1024 * 1024 * 1024);
-                       res = res.saturating_add(Self::combined_penalty_msat(amount_msat,
-                               historical_negative_log10_times_2048, params.historical_liquidity_penalty_multiplier_msat,
-                               params.historical_liquidity_penalty_amount_multiplier_msat));
                }
 
                res
@@ -901,21 +1017,25 @@ impl<L: Deref<Target = u64>, BRT: Deref<Target = HistoricalBucketRangeTracker>,
 impl<L: DerefMut<Target = u64>, BRT: DerefMut<Target = HistoricalBucketRangeTracker>, T: Time, U: DerefMut<Target = T>> DirectedChannelLiquidity<'_, L, BRT, T, U> {
        /// Adjusts the channel liquidity balance bounds when failing to route `amount_msat`.
        fn failed_at_channel<Log: Deref>(&mut self, amount_msat: u64, chan_descr: fmt::Arguments, logger: &Log) where Log::Target: Logger {
-               if amount_msat < self.max_liquidity_msat() {
-                       log_debug!(logger, "Setting max liquidity of {} to {}", chan_descr, amount_msat);
+               let existing_max_msat = self.max_liquidity_msat();
+               if amount_msat < existing_max_msat {
+                       log_debug!(logger, "Setting max liquidity of {} from {} to {}", chan_descr, existing_max_msat, amount_msat);
                        self.set_max_liquidity_msat(amount_msat);
                } else {
-                       log_trace!(logger, "Max liquidity of {} already more than {}", chan_descr, amount_msat);
+                       log_trace!(logger, "Max liquidity of {} is {} (already less than or equal to {})",
+                               chan_descr, existing_max_msat, amount_msat);
                }
        }
 
        /// Adjusts the channel liquidity balance bounds when failing to route `amount_msat` downstream.
        fn failed_downstream<Log: Deref>(&mut self, amount_msat: u64, chan_descr: fmt::Arguments, logger: &Log) where Log::Target: Logger {
-               if amount_msat > self.min_liquidity_msat() {
-                       log_debug!(logger, "Setting min liquidity of {} to {}", chan_descr, amount_msat);
+               let existing_min_msat = self.min_liquidity_msat();
+               if amount_msat > existing_min_msat {
+                       log_debug!(logger, "Setting min liquidity of {} from {} to {}", chan_descr, existing_min_msat, amount_msat);
                        self.set_min_liquidity_msat(amount_msat);
                } else {
-                       log_trace!(logger, "Min liquidity of {} already less than {}", chan_descr, amount_msat);
+                       log_trace!(logger, "Min liquidity of {} is {} (already greater than or equal to {})",
+                               chan_descr, existing_min_msat, amount_msat);
                }
        }
 
@@ -927,6 +1047,12 @@ impl<L: DerefMut<Target = u64>, BRT: DerefMut<Target = HistoricalBucketRangeTrac
        }
 
        fn update_history_buckets(&mut self) {
+               let half_lives = self.now.duration_since(*self.last_updated).as_secs()
+                       .checked_div(self.params.historical_no_updates_half_life.as_secs())
+                       .map(|v| v.try_into().unwrap_or(u32::max_value())).unwrap_or(u32::max_value());
+               self.min_liquidity_offset_history.time_decay_data(half_lives);
+               self.max_liquidity_offset_history.time_decay_data(half_lives);
+
                debug_assert!(*self.min_liquidity_offset_msat <= self.capacity_msat);
                self.min_liquidity_offset_history.track_datapoint(
                        // Ensure the bucket index we pass is in the range [0, 7], even if the liquidity offset
@@ -949,8 +1075,8 @@ impl<L: DerefMut<Target = u64>, BRT: DerefMut<Target = HistoricalBucketRangeTrac
                } else {
                        self.decayed_offset_msat(*self.max_liquidity_offset_msat)
                };
-               *self.last_updated = self.now;
                self.update_history_buckets();
+               *self.last_updated = self.now;
        }
 
        /// Adjusts the upper bound of the channel liquidity balance in this direction.
@@ -961,8 +1087,8 @@ impl<L: DerefMut<Target = u64>, BRT: DerefMut<Target = HistoricalBucketRangeTrac
                } else {
                        self.decayed_offset_msat(*self.min_liquidity_offset_msat)
                };
-               *self.last_updated = self.now;
                self.update_history_buckets();
+               *self.last_updated = self.now;
        }
 }
 
@@ -987,7 +1113,7 @@ impl<G: Deref<Target = NetworkGraph<L>>, L: Deref, T: Time> Score for Probabilis
                                        return base_penalty_msat;
                                }
                        },
-                       EffectiveCapacity::Total { capacity_msat, htlc_maximum_msat: Some(htlc_maximum_msat) } => {
+                       EffectiveCapacity::Total { capacity_msat, htlc_maximum_msat } => {
                                if htlc_maximum_msat >= capacity_msat/2 {
                                        anti_probing_penalty_msat = self.params.anti_probing_penalty_msat;
                                }
@@ -1017,31 +1143,32 @@ impl<G: Deref<Target = NetworkGraph<L>>, L: Deref, T: Time> Score for Probabilis
                                .get(&hop.short_channel_id)
                                .and_then(|channel| channel.as_directed_to(&target));
 
-                       if hop.short_channel_id == short_channel_id && hop_idx == 0 {
+                       let at_failed_channel = hop.short_channel_id == short_channel_id;
+                       if at_failed_channel && hop_idx == 0 {
                                log_warn!(self.logger, "Payment failed at the first hop - we do not attempt to learn channel info in such cases as we can directly observe local state.\n\tBecause we know the local state, we should generally not see failures here - this may be an indication that your channel peer on channel {} is broken and you may wish to close the channel.", hop.short_channel_id);
                        }
 
                        // Only score announced channels.
                        if let Some((channel, source)) = channel_directed_from_source {
                                let capacity_msat = channel.effective_capacity().as_msat();
-                               if hop.short_channel_id == short_channel_id {
+                               if at_failed_channel {
                                        self.channel_liquidities
                                                .entry(hop.short_channel_id)
                                                .or_insert_with(ChannelLiquidity::new)
                                                .as_directed_mut(source, &target, capacity_msat, &self.params)
                                                .failed_at_channel(amount_msat, format_args!("SCID {}, towards {:?}", hop.short_channel_id, target), &self.logger);
-                                       break;
+                               } else {
+                                       self.channel_liquidities
+                                               .entry(hop.short_channel_id)
+                                               .or_insert_with(ChannelLiquidity::new)
+                                               .as_directed_mut(source, &target, capacity_msat, &self.params)
+                                               .failed_downstream(amount_msat, format_args!("SCID {}, towards {:?}", hop.short_channel_id, target), &self.logger);
                                }
-
-                               self.channel_liquidities
-                                       .entry(hop.short_channel_id)
-                                       .or_insert_with(ChannelLiquidity::new)
-                                       .as_directed_mut(source, &target, capacity_msat, &self.params)
-                                       .failed_downstream(amount_msat, format_args!("SCID {}, towards {:?}", hop.short_channel_id, target), &self.logger);
                        } else {
                                log_debug!(self.logger, "Not able to penalize channel with SCID {} as we do not have graph info for it (likely a route-hint last-hop).",
                                        hop.short_channel_id);
                        }
+                       if at_failed_channel { break; }
                }
        }
 
@@ -1475,16 +1602,16 @@ impl<T: Time> Readable for ChannelLiquidity<T> {
 #[cfg(test)]
 mod tests {
        use super::{ChannelLiquidity, HistoricalBucketRangeTracker, ProbabilisticScoringParameters, ProbabilisticScorerUsingTime};
-       use util::time::Time;
-       use util::time::tests::SinceEpoch;
+       use crate::util::time::Time;
+       use crate::util::time::tests::SinceEpoch;
 
-       use ln::features::{ChannelFeatures, NodeFeatures};
-       use ln::msgs::{ChannelAnnouncement, ChannelUpdate, UnsignedChannelAnnouncement, UnsignedChannelUpdate};
-       use routing::gossip::{EffectiveCapacity, NetworkGraph, NodeId};
-       use routing::router::RouteHop;
-       use routing::scoring::{ChannelUsage, Score};
-       use util::ser::{ReadableArgs, Writeable};
-       use util::test_utils::TestLogger;
+       use crate::ln::channelmanager;
+       use crate::ln::msgs::{ChannelAnnouncement, ChannelUpdate, UnsignedChannelAnnouncement, UnsignedChannelUpdate};
+       use crate::routing::gossip::{EffectiveCapacity, NetworkGraph, NodeId};
+       use crate::routing::router::RouteHop;
+       use crate::routing::scoring::{ChannelUsage, Score};
+       use crate::util::ser::{ReadableArgs, Writeable};
+       use crate::util::test_utils::TestLogger;
 
        use bitcoin::blockdata::constants::genesis_block;
        use bitcoin::hashes::Hash;
@@ -1492,7 +1619,7 @@ mod tests {
        use bitcoin::network::constants::Network;
        use bitcoin::secp256k1::{PublicKey, Secp256k1, SecretKey};
        use core::time::Duration;
-       use io;
+       use crate::io;
 
        fn source_privkey() -> SecretKey {
                SecretKey::from_slice(&[42; 32]).unwrap()
@@ -1569,7 +1696,7 @@ mod tests {
                let node_2_secret = &SecretKey::from_slice(&[40; 32]).unwrap();
                let secp_ctx = Secp256k1::new();
                let unsigned_announcement = UnsignedChannelAnnouncement {
-                       features: ChannelFeatures::known(),
+                       features: channelmanager::provided_channel_features(),
                        chain_hash: genesis_hash,
                        short_channel_id,
                        node_id_1: PublicKey::from_secret_key(&secp_ctx, &node_1_key),
@@ -1586,7 +1713,7 @@ mod tests {
                        bitcoin_signature_2: secp_ctx.sign_ecdsa(&msghash, &node_2_secret),
                        contents: unsigned_announcement,
                };
-               let chain_source: Option<&::util::test_utils::TestChainSource> = None;
+               let chain_source: Option<&crate::util::test_utils::TestChainSource> = None;
                network_graph.update_channel_from_announcement(
                        &signed_announcement, &chain_source).unwrap();
                update_channel(network_graph, short_channel_id, node_1_key, 0);
@@ -1619,32 +1746,22 @@ mod tests {
                network_graph.update_channel(&signed_update).unwrap();
        }
 
+       fn path_hop(pubkey: PublicKey, short_channel_id: u64, fee_msat: u64) -> RouteHop {
+               RouteHop {
+                       pubkey,
+                       node_features: channelmanager::provided_node_features(),
+                       short_channel_id,
+                       channel_features: channelmanager::provided_channel_features(),
+                       fee_msat,
+                       cltv_expiry_delta: 18,
+               }
+       }
+
        fn payment_path_for_amount(amount_msat: u64) -> Vec<RouteHop> {
                vec![
-                       RouteHop {
-                               pubkey: source_pubkey(),
-                               node_features: NodeFeatures::known(),
-                               short_channel_id: 41,
-                               channel_features: ChannelFeatures::known(),
-                               fee_msat: 1,
-                               cltv_expiry_delta: 18,
-                       },
-                       RouteHop {
-                               pubkey: target_pubkey(),
-                               node_features: NodeFeatures::known(),
-                               short_channel_id: 42,
-                               channel_features: ChannelFeatures::known(),
-                               fee_msat: 2,
-                               cltv_expiry_delta: 18,
-                       },
-                       RouteHop {
-                               pubkey: recipient_pubkey(),
-                               node_features: NodeFeatures::known(),
-                               short_channel_id: 43,
-                               channel_features: ChannelFeatures::known(),
-                               fee_msat: amount_msat,
-                               cltv_expiry_delta: 18,
-                       },
+                       path_hop(source_pubkey(), 41, 1),
+                       path_hop(target_pubkey(), 42, 2),
+                       path_hop(recipient_pubkey(), 43, amount_msat),
                ]
        }
 
@@ -1859,7 +1976,7 @@ mod tests {
                let usage = ChannelUsage {
                        amount_msat: 1_024,
                        inflight_htlc_msat: 0,
-                       effective_capacity: EffectiveCapacity::Total { capacity_msat: 1_024_000, htlc_maximum_msat: Some(1_000) },
+                       effective_capacity: EffectiveCapacity::Total { capacity_msat: 1_024_000, htlc_maximum_msat: 1_000 },
                };
                assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 0);
                let usage = ChannelUsage { amount_msat: 10_240, ..usage };
@@ -1872,7 +1989,7 @@ mod tests {
                let usage = ChannelUsage {
                        amount_msat: 128,
                        inflight_htlc_msat: 0,
-                       effective_capacity: EffectiveCapacity::Total { capacity_msat: 1_024, htlc_maximum_msat: Some(1_000) },
+                       effective_capacity: EffectiveCapacity::Total { capacity_msat: 1_024, htlc_maximum_msat: 1_000 },
                };
                assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 58);
                let usage = ChannelUsage { amount_msat: 256, ..usage };
@@ -1912,7 +2029,7 @@ mod tests {
                let usage = ChannelUsage {
                        amount_msat: 39,
                        inflight_htlc_msat: 0,
-                       effective_capacity: EffectiveCapacity::Total { capacity_msat: 100, htlc_maximum_msat: Some(1_000) },
+                       effective_capacity: EffectiveCapacity::Total { capacity_msat: 100, htlc_maximum_msat: 1_000 },
                };
                assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 0);
                let usage = ChannelUsage { amount_msat: 50, ..usage };
@@ -1936,7 +2053,7 @@ mod tests {
                let usage = ChannelUsage {
                        amount_msat: 500,
                        inflight_htlc_msat: 0,
-                       effective_capacity: EffectiveCapacity::Total { capacity_msat: 1_000, htlc_maximum_msat: Some(1_000) },
+                       effective_capacity: EffectiveCapacity::Total { capacity_msat: 1_000, htlc_maximum_msat: 1_000 },
                };
                let failed_path = payment_path_for_amount(500);
                let successful_path = payment_path_for_amount(200);
@@ -1966,7 +2083,7 @@ mod tests {
                let usage = ChannelUsage {
                        amount_msat: 250,
                        inflight_htlc_msat: 0,
-                       effective_capacity: EffectiveCapacity::Total { capacity_msat: 1_000, htlc_maximum_msat: Some(1_000) },
+                       effective_capacity: EffectiveCapacity::Total { capacity_msat: 1_000, htlc_maximum_msat: 1_000 },
                };
                assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 128);
                let usage = ChannelUsage { amount_msat: 500, ..usage };
@@ -2001,7 +2118,7 @@ mod tests {
                let usage = ChannelUsage {
                        amount_msat: 250,
                        inflight_htlc_msat: 0,
-                       effective_capacity: EffectiveCapacity::Total { capacity_msat: 1_000, htlc_maximum_msat: Some(1_000) },
+                       effective_capacity: EffectiveCapacity::Total { capacity_msat: 1_000, htlc_maximum_msat: 1_000 },
                };
                assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 128);
                let usage = ChannelUsage { amount_msat: 500, ..usage };
@@ -2019,6 +2136,65 @@ mod tests {
                assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), u64::max_value());
        }
 
+       #[test]
+       fn ignores_channels_after_removed_failed_channel() {
+               // Previously, if we'd tried to send over a channel which was removed from the network
+               // graph before we call `payment_path_failed` (which is the default if we get a "no
+               // such channel" error in the `InvoicePayer`), we would call `failed_downstream` on all
+               // channels in the route, even ones which the payment never reached. This test ensures
+               // we do not score such channels.
+               let secp_ctx = Secp256k1::new();
+               let logger = TestLogger::new();
+               let genesis_hash = genesis_block(Network::Testnet).header.block_hash();
+               let mut network_graph = NetworkGraph::new(genesis_hash, &logger);
+               let secret_a = SecretKey::from_slice(&[42; 32]).unwrap();
+               let secret_b = SecretKey::from_slice(&[43; 32]).unwrap();
+               let secret_c = SecretKey::from_slice(&[44; 32]).unwrap();
+               let secret_d = SecretKey::from_slice(&[45; 32]).unwrap();
+               add_channel(&mut network_graph, 42, secret_a, secret_b);
+               // Don't add the channel from B -> C.
+               add_channel(&mut network_graph, 44, secret_c, secret_d);
+
+               let pub_a = PublicKey::from_secret_key(&secp_ctx, &secret_a);
+               let pub_b = PublicKey::from_secret_key(&secp_ctx, &secret_b);
+               let pub_c = PublicKey::from_secret_key(&secp_ctx, &secret_c);
+               let pub_d = PublicKey::from_secret_key(&secp_ctx, &secret_d);
+
+               let path = vec![
+                       path_hop(pub_b, 42, 1),
+                       path_hop(pub_c, 43, 2),
+                       path_hop(pub_d, 44, 100),
+               ];
+
+               let node_a = NodeId::from_pubkey(&pub_a);
+               let node_b = NodeId::from_pubkey(&pub_b);
+               let node_c = NodeId::from_pubkey(&pub_c);
+               let node_d = NodeId::from_pubkey(&pub_d);
+
+               let params = ProbabilisticScoringParameters {
+                       liquidity_penalty_multiplier_msat: 1_000,
+                       ..ProbabilisticScoringParameters::zero_penalty()
+               };
+               let mut scorer = ProbabilisticScorer::new(params, &network_graph, &logger);
+
+               let usage = ChannelUsage {
+                       amount_msat: 250,
+                       inflight_htlc_msat: 0,
+                       effective_capacity: EffectiveCapacity::Total { capacity_msat: 1_000, htlc_maximum_msat: 1_000 },
+               };
+               assert_eq!(scorer.channel_penalty_msat(42, &node_a, &node_b, usage), 128);
+               // Note that a default liquidity bound is used for B -> C as no channel exists
+               assert_eq!(scorer.channel_penalty_msat(43, &node_b, &node_c, usage), 128);
+               assert_eq!(scorer.channel_penalty_msat(44, &node_c, &node_d, usage), 128);
+
+               scorer.payment_path_failed(&path.iter().collect::<Vec<_>>(), 43);
+
+               assert_eq!(scorer.channel_penalty_msat(42, &node_a, &node_b, usage), 80);
+               // Note that a default liquidity bound is used for B -> C as no channel exists
+               assert_eq!(scorer.channel_penalty_msat(43, &node_b, &node_c, usage), 128);
+               assert_eq!(scorer.channel_penalty_msat(44, &node_c, &node_d, usage), 128);
+       }
+
        #[test]
        fn reduces_liquidity_upper_bound_along_path_on_success() {
                let logger = TestLogger::new();
@@ -2035,7 +2211,7 @@ mod tests {
                let usage = ChannelUsage {
                        amount_msat: 250,
                        inflight_htlc_msat: 0,
-                       effective_capacity: EffectiveCapacity::Total { capacity_msat: 1_000, htlc_maximum_msat: Some(1_000) },
+                       effective_capacity: EffectiveCapacity::Total { capacity_msat: 1_000, htlc_maximum_msat: 1_000 },
                };
                let path = payment_path_for_amount(500);
 
@@ -2067,7 +2243,7 @@ mod tests {
                let usage = ChannelUsage {
                        amount_msat: 0,
                        inflight_htlc_msat: 0,
-                       effective_capacity: EffectiveCapacity::Total { capacity_msat: 1_024, htlc_maximum_msat: Some(1_024) },
+                       effective_capacity: EffectiveCapacity::Total { capacity_msat: 1_024, htlc_maximum_msat: 1_024 },
                };
                assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 0);
                let usage = ChannelUsage { amount_msat: 1_023, ..usage };
@@ -2145,7 +2321,7 @@ mod tests {
                let usage = ChannelUsage {
                        amount_msat: 256,
                        inflight_htlc_msat: 0,
-                       effective_capacity: EffectiveCapacity::Total { capacity_msat: 1_024, htlc_maximum_msat: Some(1_000) },
+                       effective_capacity: EffectiveCapacity::Total { capacity_msat: 1_024, htlc_maximum_msat: 1_000 },
                };
                assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 125);
 
@@ -2176,7 +2352,7 @@ mod tests {
                let usage = ChannelUsage {
                        amount_msat: 512,
                        inflight_htlc_msat: 0,
-                       effective_capacity: EffectiveCapacity::Total { capacity_msat: 1_024, htlc_maximum_msat: Some(1_000) },
+                       effective_capacity: EffectiveCapacity::Total { capacity_msat: 1_024, htlc_maximum_msat: 1_000 },
                };
 
                assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 300);
@@ -2221,7 +2397,7 @@ mod tests {
                let usage = ChannelUsage {
                        amount_msat: 500,
                        inflight_htlc_msat: 0,
-                       effective_capacity: EffectiveCapacity::Total { capacity_msat: 1_000, htlc_maximum_msat: Some(1_000) },
+                       effective_capacity: EffectiveCapacity::Total { capacity_msat: 1_000, htlc_maximum_msat: 1_000 },
                };
 
                scorer.payment_path_failed(&payment_path_for_amount(500).iter().collect::<Vec<_>>(), 42);
@@ -2258,7 +2434,7 @@ mod tests {
                let usage = ChannelUsage {
                        amount_msat: 500,
                        inflight_htlc_msat: 0,
-                       effective_capacity: EffectiveCapacity::Total { capacity_msat: 1_000, htlc_maximum_msat: Some(1_000) },
+                       effective_capacity: EffectiveCapacity::Total { capacity_msat: 1_000, htlc_maximum_msat: 1_000 },
                };
 
                scorer.payment_path_failed(&payment_path_for_amount(500).iter().collect::<Vec<_>>(), 42);
@@ -2295,47 +2471,47 @@ mod tests {
                let usage = ChannelUsage {
                        amount_msat: 100_000_000,
                        inflight_htlc_msat: 0,
-                       effective_capacity: EffectiveCapacity::Total { capacity_msat: 950_000_000, htlc_maximum_msat: Some(1_000) },
+                       effective_capacity: EffectiveCapacity::Total { capacity_msat: 950_000_000, htlc_maximum_msat: 1_000 },
                };
                assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 4375);
                let usage = ChannelUsage {
-                       effective_capacity: EffectiveCapacity::Total { capacity_msat: 1_950_000_000, htlc_maximum_msat: Some(1_000) }, ..usage
+                       effective_capacity: EffectiveCapacity::Total { capacity_msat: 1_950_000_000, htlc_maximum_msat: 1_000 }, ..usage
                };
                assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 2739);
                let usage = ChannelUsage {
-                       effective_capacity: EffectiveCapacity::Total { capacity_msat: 2_950_000_000, htlc_maximum_msat: Some(1_000) }, ..usage
+                       effective_capacity: EffectiveCapacity::Total { capacity_msat: 2_950_000_000, htlc_maximum_msat: 1_000 }, ..usage
                };
                assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 2236);
                let usage = ChannelUsage {
-                       effective_capacity: EffectiveCapacity::Total { capacity_msat: 3_950_000_000, htlc_maximum_msat: Some(1_000) }, ..usage
+                       effective_capacity: EffectiveCapacity::Total { capacity_msat: 3_950_000_000, htlc_maximum_msat: 1_000 }, ..usage
                };
                assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 1983);
                let usage = ChannelUsage {
-                       effective_capacity: EffectiveCapacity::Total { capacity_msat: 4_950_000_000, htlc_maximum_msat: Some(1_000) }, ..usage
+                       effective_capacity: EffectiveCapacity::Total { capacity_msat: 4_950_000_000, htlc_maximum_msat: 1_000 }, ..usage
                };
                assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 1637);
                let usage = ChannelUsage {
-                       effective_capacity: EffectiveCapacity::Total { capacity_msat: 5_950_000_000, htlc_maximum_msat: Some(1_000) }, ..usage
+                       effective_capacity: EffectiveCapacity::Total { capacity_msat: 5_950_000_000, htlc_maximum_msat: 1_000 }, ..usage
                };
                assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 1606);
                let usage = ChannelUsage {
-                       effective_capacity: EffectiveCapacity::Total { capacity_msat: 6_950_000_000, htlc_maximum_msat: Some(1_000) }, ..usage
+                       effective_capacity: EffectiveCapacity::Total { capacity_msat: 6_950_000_000, htlc_maximum_msat: 1_000 }, ..usage
                };
                assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 1331);
                let usage = ChannelUsage {
-                       effective_capacity: EffectiveCapacity::Total { capacity_msat: 7_450_000_000, htlc_maximum_msat: Some(1_000) }, ..usage
+                       effective_capacity: EffectiveCapacity::Total { capacity_msat: 7_450_000_000, htlc_maximum_msat: 1_000 }, ..usage
                };
                assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 1387);
                let usage = ChannelUsage {
-                       effective_capacity: EffectiveCapacity::Total { capacity_msat: 7_950_000_000, htlc_maximum_msat: Some(1_000) }, ..usage
+                       effective_capacity: EffectiveCapacity::Total { capacity_msat: 7_950_000_000, htlc_maximum_msat: 1_000 }, ..usage
                };
                assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 1379);
                let usage = ChannelUsage {
-                       effective_capacity: EffectiveCapacity::Total { capacity_msat: 8_950_000_000, htlc_maximum_msat: Some(1_000) }, ..usage
+                       effective_capacity: EffectiveCapacity::Total { capacity_msat: 8_950_000_000, htlc_maximum_msat: 1_000 }, ..usage
                };
                assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 1363);
                let usage = ChannelUsage {
-                       effective_capacity: EffectiveCapacity::Total { capacity_msat: 9_950_000_000, htlc_maximum_msat: Some(1_000) }, ..usage
+                       effective_capacity: EffectiveCapacity::Total { capacity_msat: 9_950_000_000, htlc_maximum_msat: 1_000 }, ..usage
                };
                assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 1355);
        }
@@ -2349,7 +2525,7 @@ mod tests {
                let usage = ChannelUsage {
                        amount_msat: 128,
                        inflight_htlc_msat: 0,
-                       effective_capacity: EffectiveCapacity::Total { capacity_msat: 1_024, htlc_maximum_msat: Some(1_000) },
+                       effective_capacity: EffectiveCapacity::Total { capacity_msat: 1_024, htlc_maximum_msat: 1_000 },
                };
 
                let params = ProbabilisticScoringParameters {
@@ -2385,7 +2561,7 @@ mod tests {
                let usage = ChannelUsage {
                        amount_msat: 512_000,
                        inflight_htlc_msat: 0,
-                       effective_capacity: EffectiveCapacity::Total { capacity_msat: 1_024_000, htlc_maximum_msat: Some(1_000) },
+                       effective_capacity: EffectiveCapacity::Total { capacity_msat: 1_024_000, htlc_maximum_msat: 1_000 },
                };
 
                let params = ProbabilisticScoringParameters {
@@ -2440,7 +2616,7 @@ mod tests {
                let usage = ChannelUsage {
                        amount_msat: 750,
                        inflight_htlc_msat: 0,
-                       effective_capacity: EffectiveCapacity::Total { capacity_msat: 1_000, htlc_maximum_msat: Some(1_000) },
+                       effective_capacity: EffectiveCapacity::Total { capacity_msat: 1_000, htlc_maximum_msat: 1_000 },
                };
                assert_ne!(scorer.channel_penalty_msat(42, &source, &target, usage), u64::max_value());
 
@@ -2479,6 +2655,7 @@ mod tests {
                let params = ProbabilisticScoringParameters {
                        historical_liquidity_penalty_multiplier_msat: 1024,
                        historical_liquidity_penalty_amount_multiplier_msat: 1024,
+                       historical_no_updates_half_life: Duration::from_secs(10),
                        ..ProbabilisticScoringParameters::zero_penalty()
                };
                let mut scorer = ProbabilisticScorer::new(params, &network_graph, &logger);
@@ -2488,7 +2665,7 @@ mod tests {
                let usage = ChannelUsage {
                        amount_msat: 100,
                        inflight_htlc_msat: 0,
-                       effective_capacity: EffectiveCapacity::Total { capacity_msat: 1_024, htlc_maximum_msat: Some(1_024) },
+                       effective_capacity: EffectiveCapacity::Total { capacity_msat: 1_024, htlc_maximum_msat: 1_024 },
                };
                // With no historical data the normal liquidity penalty calculation is used.
                assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 47);
@@ -2500,6 +2677,11 @@ mod tests {
                // still remember that there was some failure in the past, and assign a non-0 penalty.
                scorer.payment_path_failed(&payment_path_for_amount(1000).iter().collect::<Vec<_>>(), 43);
                assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 198);
+
+               // Advance the time forward 16 half-lives (which the docs claim will ensure all data is
+               // gone), and check that we're back to where we started.
+               SinceEpoch::advance(Duration::from_secs(10 * 16));
+               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 47);
        }
 
        #[test]
@@ -2518,7 +2700,7 @@ mod tests {
                let usage = ChannelUsage {
                        amount_msat: 512_000,
                        inflight_htlc_msat: 0,
-                       effective_capacity: EffectiveCapacity::Total { capacity_msat: 1_024_000, htlc_maximum_msat: Some(1_000) },
+                       effective_capacity: EffectiveCapacity::Total { capacity_msat: 1_024_000, htlc_maximum_msat: 1_000 },
                };
                assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 0);
 
@@ -2526,7 +2708,7 @@ mod tests {
                let usage = ChannelUsage {
                        amount_msat: 512_000,
                        inflight_htlc_msat: 0,
-                       effective_capacity: EffectiveCapacity::Total { capacity_msat: 1_024_000, htlc_maximum_msat: Some(1_024_000) },
+                       effective_capacity: EffectiveCapacity::Total { capacity_msat: 1_024_000, htlc_maximum_msat: 1_024_000 },
                };
                assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 500);
 
@@ -2534,7 +2716,7 @@ mod tests {
                let usage = ChannelUsage {
                        amount_msat: 512_000,
                        inflight_htlc_msat: 0,
-                       effective_capacity: EffectiveCapacity::Total { capacity_msat: 1_024_000, htlc_maximum_msat: Some(512_000) },
+                       effective_capacity: EffectiveCapacity::Total { capacity_msat: 1_024_000, htlc_maximum_msat: 512_000 },
                };
                assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 500);
 
@@ -2542,7 +2724,7 @@ mod tests {
                let usage = ChannelUsage {
                        amount_msat: 512_000,
                        inflight_htlc_msat: 0,
-                       effective_capacity: EffectiveCapacity::Total { capacity_msat: 1_024_000, htlc_maximum_msat: Some(511_999) },
+                       effective_capacity: EffectiveCapacity::Total { capacity_msat: 1_024_000, htlc_maximum_msat: 511_999 },
                };
                assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 0);
        }