git.bitcoin.ninja Git - rust-lightning/commitdiff
Avoid excess divides in the amount < min bucket scoring loop
authorMatt Corallo <git@bluematt.me>
Sat, 16 Dec 2023 02:12:42 +0000 (02:12 +0000)
committerMatt Corallo <git@bluematt.me>
Wed, 17 Jul 2024 14:38:03 +0000 (14:38 +0000)
When we iterate over buckets in the "the amount we're sending is
smaller than the minimum bucket we're looking at" case, there is no need
to divide by the total points tracked until we've summed all the
buckets. Here we pull that divide out to the end, removing one of
the hottest single instructions in our scoring logic.

lightning/src/routing/scoring.rs

index 3460ce655e1c696b827321489897bd18c2bf8e85..e64c9826ddb37a5aa083f2621ff97c140ecef509 100644 (file)
@@ -1807,6 +1807,7 @@ mod bucketed_history {
                        }
 
                        let mut cumulative_success_prob_times_billion = 0;
+                       let mut cumulative_success_points = 0;
                        // Special-case the 0th min bucket - it generally means we failed a payment, so only
                        // consider the highest (i.e. largest-offset-from-max-capacity) max bucket for all
                        // points against the 0th min bucket. This avoids the case where we fail to route
@@ -1840,15 +1841,11 @@ mod bucketed_history {
                                if payment_pos < min_bucket_start_pos {
                                        for (max_idx, max_bucket) in max_liquidity_offset_history_buckets.iter().enumerate().take(32 - min_idx) {
                                                let max_bucket_end_pos = BUCKET_START_POS[32 - max_idx] - 1;
-                                               // Note that this multiply can only barely not overflow - two 16 bit ints plus
-                                               // 30 bits is 62 bits.
-                                               let bucket_prob_times_billion = (*min_bucket as u64) * (*max_bucket as u64)
-                                                       * 1024 * 1024 * 1024 / total_valid_points_tracked;
                                                if payment_pos >= max_bucket_end_pos {
                                                        // Success probability 0, the payment amount may be above the max liquidity
                                                        break;
                                                }
-                                               cumulative_success_prob_times_billion += bucket_prob_times_billion;
+                                               cumulative_success_points += ((*min_bucket as u32) * (*max_bucket as u32)) as u64;
                                        }
                                } else {
                                        for (max_idx, max_bucket) in max_liquidity_offset_history_buckets.iter().enumerate().take(32 - min_idx) {
@@ -1870,6 +1867,13 @@ mod bucketed_history {
                                }
                        }
 
+                       // Once we've added all 32*32/2 32-bit success points together, we may have up to 42
+                       // bits. Thus, we still have > 20 bits left, which we multiply before dividing by
+                       // total_valid_points_tracked. We finally normalize back to billions.
+                       debug_assert!(cumulative_success_points < u64::max_value() / 1024 / 1024);
+                       cumulative_success_prob_times_billion +=
+                               cumulative_success_points * 1024 * 1024 / total_valid_points_tracked * 1024;
+
                        Some(cumulative_success_prob_times_billion)
                }
        }