// While RouteGraphNode can be laid out with fewer bytes, performance appears to be improved
// substantially when it is laid out at exactly 64 bytes.
-//
-// Thus, we use `#[repr(C)]` on the struct to force a suboptimal layout and check that it stays 64
-// bytes here.
-#[cfg(any(ldk_bench, not(any(test, fuzzing))))]
const _GRAPH_NODE_SMALL: usize = 64 - core::mem::size_of::<RouteGraphNode>();
-#[cfg(any(ldk_bench, not(any(test, fuzzing))))]
const _GRAPH_NODE_FIXED_SIZE: usize = core::mem::size_of::<RouteGraphNode>() - 64;
/// A wrapper around the various hop representations.
}
}
- #[inline]
+ #[inline(always)]
fn src_node_counter(&self) -> u32 {
match self {
CandidateRouteHop::FirstHop { payer_node_counter, .. } => *payer_node_counter,
/// Fee values should be updated only in the context of the whole path, see update_value_and_recompute_fees.
/// These fee values are useful to choose hops as we traverse the graph "payee-to-payer".
#[derive(Clone)]
-#[repr(C)] // Force fields to appear in the order we define them.
+#[repr(align(128))]
struct PathBuildingHop<'a> {
candidate: CandidateRouteHop<'a>,
target_node_counter: Option<u32>,
/// decrease as well. Thus, we have to explicitly track which nodes have been processed and
/// avoid processing them again.
was_processed: bool,
+	/// Whether this node is the target of one of the payer's first-hop (direct) channels —
+	/// presumably set from `first_hop_targets`; TODO confirm against the pre-seeding loop below.
+ is_first_hop_target: bool,
/// Used to compare channels when choosing one for routing.
/// Includes paying for the use of a hop and the following hops, as well as
/// an estimated cost of reaching this hop.
/// channel scoring.
path_penalty_msat: u64,
- // The last 16 bytes are on the next cache line by default in glibc's malloc. Thus, we should
- // only place fields which are not hot there. Luckily, the next three fields are only read if
- // we end up on the selected path, and only in the final path layout phase, so we don't care
- // too much if reading them is slow.
-
fee_msat: u64,
/// All the fees paid *after* this channel on the way to the destination
value_contribution_msat: u64,
}
-// Checks that the entries in the `find_route` `dist` map fit in (exactly) two standard x86-64
-// cache lines. Sadly, they're not guaranteed to actually lie on a cache line (and in fact,
-// generally won't, because at least glibc's malloc will align to a nice, big, round
-// boundary...plus 16), but at least it will reduce the amount of data we'll need to load.
-//
-// Note that these assertions only pass on somewhat recent rustc, and thus are gated on the
-// ldk_bench flag.
-#[cfg(ldk_bench)]
-const _NODE_MAP_SIZE_TWO_CACHE_LINES: usize = 128 - core::mem::size_of::<(NodeId, PathBuildingHop)>();
-#[cfg(ldk_bench)]
-const _NODE_MAP_SIZE_EXACTLY_CACHE_LINES: usize = core::mem::size_of::<(NodeId, PathBuildingHop)>() - 128;
+const _NODE_MAP_SIZE_TWO_CACHE_LINES: usize = 128 - core::mem::size_of::<Option<PathBuildingHop>>();
+const _NODE_MAP_SIZE_EXACTLY_TWO_CACHE_LINES: usize = core::mem::size_of::<Option<PathBuildingHop>>() - 128;
impl<'a> core::fmt::Debug for PathBuildingHop<'a> {
fn fmt(&self, f: &mut core::fmt::Formatter) -> Result<(), core::fmt::Error> {
// Can't overflow due to how the values were computed right above.
None => unreachable!(),
};
+ let htlc_minimum_msat = $candidate.htlc_minimum_msat();
#[allow(unused_comparisons)] // $next_hops_path_htlc_minimum_msat is 0 in some calls so rustc complains
- let over_path_minimum_msat = amount_to_transfer_over_msat >= $candidate.htlc_minimum_msat() &&
+ let over_path_minimum_msat = amount_to_transfer_over_msat >= htlc_minimum_msat &&
amount_to_transfer_over_msat >= $next_hops_path_htlc_minimum_msat;
#[allow(unused_comparisons)] // $next_hops_path_htlc_minimum_msat is 0 in some calls so rustc complains
let may_overpay_to_meet_path_minimum_msat =
- ((amount_to_transfer_over_msat < $candidate.htlc_minimum_msat() &&
- recommended_value_msat >= $candidate.htlc_minimum_msat()) ||
+ ((amount_to_transfer_over_msat < htlc_minimum_msat &&
+ recommended_value_msat >= htlc_minimum_msat) ||
(amount_to_transfer_over_msat < $next_hops_path_htlc_minimum_msat &&
recommended_value_msat >= $next_hops_path_htlc_minimum_msat));
.saturating_add(curr_min);
let dist_entry = &mut dist[$candidate.src_node_counter() as usize];
- let mut old_entry = if let Some(hop) = dist_entry {
+ let old_entry = if let Some(hop) = dist_entry {
hop
} else {
// If there was previously no known way to access the source node
path_htlc_minimum_msat,
path_penalty_msat: u64::max_value(),
was_processed: false,
+ is_first_hop_target: false,
#[cfg(all(not(ldk_bench), any(test, fuzzing)))]
value_contribution_msat,
});
let fee_to_target_msat;
let next_hops_path_htlc_minimum_msat;
let next_hops_path_penalty_msat;
+ let is_first_hop_target;
let skip_node = if let Some(elem) = &mut dist[$node.node_counter as usize] {
let was_processed = elem.was_processed;
elem.was_processed = true;
fee_to_target_msat = elem.total_fee_msat;
next_hops_path_htlc_minimum_msat = elem.path_htlc_minimum_msat;
next_hops_path_penalty_msat = elem.path_penalty_msat;
+ is_first_hop_target = elem.is_first_hop_target;
was_processed
} else {
// Entries are added to dist in add_entry!() when there is a channel from a node.
fee_to_target_msat = 0;
next_hops_path_htlc_minimum_msat = 0;
next_hops_path_penalty_msat = 0;
+ is_first_hop_target = false;
false
};
if !skip_node {
- if let Some((first_channels, peer_node_counter)) = first_hop_targets.get(&$node_id) {
- for details in first_channels {
- debug_assert_eq!(*peer_node_counter, $node.node_counter);
- let candidate = CandidateRouteHop::FirstHop {
- details, payer_node_id: &our_node_id, payer_node_counter,
- target_node_counter: $node.node_counter,
- };
- add_entry!(&candidate, fee_to_target_msat,
- $next_hops_value_contribution,
- next_hops_path_htlc_minimum_msat, next_hops_path_penalty_msat,
- $next_hops_cltv_delta, $next_hops_path_length);
+ if is_first_hop_target {
+ if let Some((first_channels, peer_node_counter)) = first_hop_targets.get(&$node_id) {
+ for details in first_channels {
+ debug_assert_eq!(*peer_node_counter, $node.node_counter);
+ let candidate = CandidateRouteHop::FirstHop {
+ details, payer_node_id: &our_node_id, payer_node_counter,
+ target_node_counter: $node.node_counter,
+ };
+ add_entry!(&candidate, fee_to_target_msat,
+ $next_hops_value_contribution,
+ next_hops_path_htlc_minimum_msat, next_hops_path_penalty_msat,
+ $next_hops_cltv_delta, $next_hops_path_length);
+ }
}
}
for e in dist.iter_mut() {
*e = None;
}
+ for (node_id, (chans, peer_node_counter)) in first_hop_targets.iter() {
+ // In order to avoid looking up whether each node is a first-hop target, we store a
+ // dummy entry in dist for each first-hop target, allowing us to do this lookup for
+ // free since we're already looking at the `was_processed` flag.
+ //
+ // Note that all the fields (except `is_first_hop_target`) will be overwritten whenever
+ // we find a path to the target, so are left as dummies here.
+ dist[*peer_node_counter as usize] = Some(PathBuildingHop {
+ candidate: CandidateRouteHop::FirstHop {
+ details: &chans[0],
+ payer_node_id: &our_node_id,
+ target_node_counter: u32::max_value(),
+ payer_node_counter: u32::max_value(),
+ },
+ target_node_counter: None,
+ fee_msat: 0,
+ next_hops_fee_msat: u64::max_value(),
+ hop_use_fee_msat: u64::max_value(),
+ total_fee_msat: u64::max_value(),
+ path_htlc_minimum_msat: u64::max_value(),
+ path_penalty_msat: u64::max_value(),
+ was_processed: false,
+ is_first_hop_target: true,
+ #[cfg(all(not(ldk_bench), any(test, fuzzing)))]
+ value_contribution_msat: 0,
+ });
+ }
hit_minimum_limit = false;
// If first hop is a private channel and the only way to reach the payee, this is the only