From: Matt Corallo Date: Sun, 10 Dec 2023 04:30:09 +0000 (+0000) Subject: Prefetch per-direction channel info before looking at the channel X-Git-Url: http://git.bitcoin.ninja/?a=commitdiff_plain;h=1a1d528b88cc1d4f6d23e7a34a3f96a43310e165;p=rust-lightning Prefetch per-direction channel info before looking at the channel In the previous commit, we laid out `ChannelInfo` to ensure most of the data we cared about was sitting on two adjacent cache lines. However, this left off the per-direction `ChannelUpdateInfo`, which is sitting elsewhere. Here, we try to reduce the cost we pay for accessing those when we call `ChannelInfo::as_directed` by prefetching them as soon as we fetch the `ChannelInfo` from the per-channel `HashMap`. We then check the features for unknown flags, giving the CPU a handful of instructions to chew on before we actually need the `ChannelUpdateInfo`. Sadly, this currently requires unsafe Rust (and is currently only available on stable for x86), even though the x86 ISA is explicit that the instruction "does not affect program behavior". Still, this optimization reduces time taken waiting for the `ChannelUpdateInfo` to load from ~5% of our routefinding time to ~2.5%, for a net reduction of ~2.5% in routefinding time. --- diff --git a/lightning/src/lib.rs b/lightning/src/lib.rs index 5274ea0bf..167659de1 100644 --- a/lightning/src/lib.rs +++ b/lightning/src/lib.rs @@ -38,7 +38,6 @@ //! 
* `max_level_trace` #![cfg_attr(not(any(test, fuzzing, feature = "_test_utils")), deny(missing_docs))] -#![cfg_attr(not(any(test, feature = "_test_utils")), forbid(unsafe_code))] #![deny(rustdoc::broken_intra_doc_links)] #![deny(rustdoc::private_intra_doc_links)] diff --git a/lightning/src/routing/router.rs b/lightning/src/routing/router.rs index 28a991a32..c03fbfaf6 100644 --- a/lightning/src/routing/router.rs +++ b/lightning/src/routing/router.rs @@ -1758,6 +1758,34 @@ fn iter_equal<I1: Iterator, I2: Iterator>(mut iter_a: I1, mut iter_b: I2) } } +#[cfg(target_feature = "sse")] +#[inline(always)] +unsafe fn do_prefetch<T>(ptr: *const T) { + #[cfg(target_arch = "x86_64")] + use core::arch::x86_64::*; + #[cfg(target_arch = "x86")] + use core::arch::x86::*; + _mm_prefetch(ptr as *const i8, _MM_HINT_T0); +} + +#[cfg(not(target_feature = "sse"))] +#[inline(always)] +unsafe fn do_prefetch<T>(_: *const T) {} + +#[inline(always)] +fn prefetch_first_byte<T>(t: &T) { + // While X86's prefetch should be safe even on an invalid memory address (the ISA says + // "PREFETCHh instruction is merely a hint and does not affect program behavior"), we take + // an extra step towards safety here by requiring the pointer be valid (as Rust references + // are always valid when accessed). + // + // Note that a pointer in Rust could be to a zero sized type, in which case the pointer could + // be NULL (or some other bogus value), so we explicitly check for that here. + if ::core::mem::size_of::<T>() != 0 { + unsafe { do_prefetch(t) } + } +} + /// It's useful to keep track of the hops associated with the fees required to use them, /// so that we can choose cheaper paths (as per Dijkstra's algorithm). /// Fee values should be updated only in the context of the whole path, see update_value_and_recompute_fees. 
@@ -2741,6 +2769,13 @@ where L::Target: Logger { if !features.requires_unknown_bits() { for chan_id in $node.channels.iter() { let chan = network_channels.get(chan_id).unwrap(); + // Calling chan.as_directed_to, below, will require access to memory two + // cache lines away from chan.features (in the form of `one_to_two` or + // `two_to_one`, depending on our direction). Thus, while we're looking at + // feature flags, go ahead and prefetch that memory, reducing the price we + // pay for it later. + prefetch_first_byte(&chan.one_to_two); + prefetch_first_byte(&chan.two_to_one); if !chan.features.requires_unknown_bits() { if let Some((directed_channel, source)) = chan.as_directed_to(&$node_id) { if first_hops.is_none() || *source != our_node_id {