Avoid excess divides in the amount < min bucket scoring loop
author: Matt Corallo <git@bluematt.me>
Sat, 16 Dec 2023 02:12:42 +0000 (02:12 +0000)
committer: Matt Corallo <git@bluematt.me>
Sat, 16 Dec 2023 04:01:01 +0000 (04:01 +0000)
When we iterate over buckets in the "the amount we're sending is
smaller than the minimum bucket we're looking at", there is no need
to divide by the total points tracked until we've summed all the
buckets. Here we pull that divide out to the end, removing one of
the hottest single instructions in our scoring logic.

lightning/src/routing/scoring.rs

index 93cd71ec6a7c368201124a2217af34790b7d16fa..0c68aceb6b8cc85e7628c580a517113922e9fb95 100644 (file)
@@ -1804,6 +1804,7 @@ mod bucketed_history {
                        }
 
                        let mut cumulative_success_prob_times_billion = 0;
+                       let mut cumulative_success_points = 0;
                        // Special-case the 0th min bucket - it generally means we failed a payment, so only
                        // consider the highest (i.e. largest-offset-from-max-capacity) max bucket for all
                        // points against the 0th min bucket. This avoids the case where we fail to route
@@ -1837,15 +1838,11 @@ mod bucketed_history {
                                if payment_pos < min_bucket_start_pos {
                                        for (max_idx, max_bucket) in max_liquidity_offset_history_buckets.iter().enumerate().take(32 - min_idx) {
                                                let max_bucket_end_pos = BUCKET_START_POS[32 - max_idx] - 1;
-                                               // Note that this multiply can only barely not overflow - two 16 bit ints plus
-                                               // 30 bits is 62 bits.
-                                               let bucket_prob_times_billion = (*min_bucket as u64) * (*max_bucket as u64)
-                                                       * 1024 * 1024 * 1024 / total_valid_points_tracked;
                                                if payment_pos >= max_bucket_end_pos {
                                                        // Success probability 0, the payment amount may be above the max liquidity
                                                        break;
                                                }
-                                               cumulative_success_prob_times_billion += bucket_prob_times_billion;
+                                               cumulative_success_points += ((*min_bucket as u32) * (*max_bucket as u32)) as u64;
                                        }
                                } else {
                                        for (max_idx, max_bucket) in max_liquidity_offset_history_buckets.iter().enumerate().take(32 - min_idx) {
@@ -1867,6 +1864,13 @@ mod bucketed_history {
                                }
                        }
 
+                       // Once we've added all 32*32/2 32-bit success points together, we may have up to 42
+                       // bits. Thus, we still have > 20 bits left, which we multiply before dividing by
+                       // total_valid_points_tracked. We finally normalize back to billions.
+                       debug_assert!(cumulative_success_points < u64::max_value() / 1024 / 1024);
+                       cumulative_success_prob_times_billion +=
+                               cumulative_success_points * 1024 * 1024 / total_valid_points_tracked * 1024;
+
                        Some(cumulative_success_prob_times_billion)
                }
        }