projects
/
rust-lightning
/ commitdiff
commit
grep
author
committer
pickaxe
?
search:
re
summary
|
shortlog
|
log
|
commit
| commitdiff |
tree
raw
|
patch
|
inline
| side by side (parent:
10cfe5c
)
DRY up historical bucket_idx calculation
author
Jeffrey Czyz
<jkczyz@gmail.com>
Fri, 6 Jan 2023 02:13:12 +0000
(20:13 -0600)
committer
Jeffrey Czyz
<jkczyz@gmail.com>
Fri, 3 Mar 2023 15:41:52 +0000
(09:41 -0600)
lightning/src/routing/scoring.rs
patch
|
blob
|
history
diff --git
a/lightning/src/routing/scoring.rs
b/lightning/src/routing/scoring.rs
index d1ed0f7cb6d55b5206ce791ebc6ec1d10ff1aebc..e2d1107566e55e577d16a11a54dd0a9f8a777c17 100644
(file)
--- a/
lightning/src/routing/scoring.rs
+++ b/
lightning/src/routing/scoring.rs
@@
-550,7
+550,7
@@
struct HistoricalBucketRangeTracker {
impl HistoricalBucketRangeTracker {
fn new() -> Self { Self { buckets: [0; 8] } }
- fn track_datapoint(&mut self,
bucket_idx: u8
) {
+ fn track_datapoint(&mut self,
liquidity_offset_msat: u64, capacity_msat: u64
) {
// We have 8 leaky buckets for min and max liquidity. Each bucket tracks the amount of time
// we spend in each bucket as a 16-bit fixed-point number with a 5 bit fractional part.
//
@@
-571,6
+571,12
@@
impl HistoricalBucketRangeTracker {
//
// The constants were picked experimentally, selecting a decay amount that restricts us
// from overflowing buckets without having to cap them manually.
+
+ // Ensure the bucket index is in the range [0, 7], even if the liquidity offset is zero or
+ // the channel's capacity, though the second should generally never happen.
+ debug_assert!(liquidity_offset_msat <= capacity_msat);
+ let bucket_idx: u8 = (liquidity_offset_msat.saturating_sub(1) * 8 / capacity_msat)
+ .try_into().unwrap_or(32); // 32 is bogus for 8 buckets, and will be ignored
debug_assert!(bucket_idx < 8);
if bucket_idx < 8 {
for e in self.buckets.iter_mut() {
@@
-1151,18
+1157,12
@@
impl<L: DerefMut<Target = u64>, BRT: DerefMut<Target = HistoricalBucketRangeTrac
self.min_liquidity_offset_history.time_decay_data(half_lives);
self.max_liquidity_offset_history.time_decay_data(half_lives);
- debug_assert!(*self.min_liquidity_offset_msat <= self.capacity_msat);
self.min_liquidity_offset_history.track_datapoint(
- // Ensure the bucket index we pass is in the range [0, 7], even if the liquidity offset
- // is zero or the channel's capacity, though the second should generally never happen.
- (self.min_liquidity_offset_msat.saturating_sub(1) * 8 / self.capacity_msat)
- .try_into().unwrap_or(32)); // 32 is bogus for 8 buckets, and will be ignored
- debug_assert!(*self.max_liquidity_offset_msat <= self.capacity_msat);
+ *self.min_liquidity_offset_msat, self.capacity_msat
+ );
self.max_liquidity_offset_history.track_datapoint(
- // Ensure the bucket index we pass is in the range [0, 7], even if the liquidity offset
- // is zero or the channel's capacity, though the second should generally never happen.
- (self.max_liquidity_offset_msat.saturating_sub(1) * 8 / self.capacity_msat)
- .try_into().unwrap_or(32)); // 32 is bogus for 8 buckets, and will be ignored
+ *self.max_liquidity_offset_msat, self.capacity_msat
+ );
}
/// Adjusts the lower bound of the channel liquidity balance in this direction.