From dfb02f1878f7f80cd9eab345db5a4589d803fe21 Mon Sep 17 00:00:00 2001
From: Matt Corallo
Date: Sat, 2 Dec 2023 19:13:02 +0000
Subject: [PATCH] Marginally optimize test logging

973636bd2ab2ba35fb8b9703f1d5d0e72f069cdc introduced a new `HashMap`
in the `TestLogger` but then did lookups by iterating the entire
map. This fixes that, and also takes the opportunity to stop
allocating a new `String` for the module name when storing each log
entry in the `TestLogger`.
---
 lightning-background-processor/src/lib.rs | 14 +++++++-------
 lightning/src/ln/functional_tests.rs      | 20 ++++++++++----------
 lightning/src/util/test_utils.rs          | 21 +++++++++------------
 3 files changed, 26 insertions(+), 29 deletions(-)

diff --git a/lightning-background-processor/src/lib.rs b/lightning-background-processor/src/lib.rs
index e4f2ebfb6..95796e860 100644
--- a/lightning-background-processor/src/lib.rs
+++ b/lightning-background-processor/src/lib.rs
@@ -1375,9 +1375,9 @@ mod tests {
 			let desired_log_1 = "Calling ChannelManager's timer_tick_occurred".to_string();
 			let desired_log_2 = "Calling PeerManager's timer_tick_occurred".to_string();
 			let desired_log_3 = "Rebroadcasting monitor's pending claims".to_string();
-			if log_entries.get(&("lightning_background_processor".to_string(), desired_log_1)).is_some() &&
-				log_entries.get(&("lightning_background_processor".to_string(), desired_log_2)).is_some() &&
-				log_entries.get(&("lightning_background_processor".to_string(), desired_log_3)).is_some() {
+			if log_entries.get(&("lightning_background_processor", desired_log_1)).is_some() &&
+				log_entries.get(&("lightning_background_processor", desired_log_2)).is_some() &&
+				log_entries.get(&("lightning_background_processor", desired_log_3)).is_some() {
 				break
 			}
 		}
@@ -1556,7 +1556,7 @@ mod tests {
 		loop {
 			let log_entries = nodes[0].logger.lines.lock().unwrap();
 			let expected_log = "Persisting scorer".to_string();
-			if log_entries.get(&("lightning_background_processor".to_string(), expected_log)).is_some() {
+			if log_entries.get(&("lightning_background_processor", expected_log)).is_some() {
 				break
 			}
 		}
@@ -1580,7 +1580,7 @@ mod tests {
 				$sleep;
 				let log_entries = $nodes[0].logger.lines.lock().unwrap();
 				let loop_counter = "Calling ChannelManager's timer_tick_occurred".to_string();
-				if *log_entries.get(&("lightning_background_processor".to_string(), loop_counter))
+				if *log_entries.get(&("lightning_background_processor", loop_counter))
 					.unwrap_or(&0) > 1
 				{
 					// Wait until the loop has gone around at least twice.
@@ -1792,7 +1792,7 @@ mod tests {

 		let log_entries = nodes[0].logger.lines.lock().unwrap();
 		let expected_log = "Persisting scorer after update".to_string();
-		assert_eq!(*log_entries.get(&("lightning_background_processor".to_string(), expected_log)).unwrap(), 5);
+		assert_eq!(*log_entries.get(&("lightning_background_processor", expected_log)).unwrap(), 5);
 	}

 	#[tokio::test]
@@ -1838,7 +1838,7 @@ mod tests {

 			let log_entries = nodes[0].logger.lines.lock().unwrap();
 			let expected_log = "Persisting scorer after update".to_string();
-			assert_eq!(*log_entries.get(&("lightning_background_processor".to_string(), expected_log)).unwrap(), 5);
+			assert_eq!(*log_entries.get(&("lightning_background_processor", expected_log)).unwrap(), 5);
 		});

 		let (r1, r2) = tokio::join!(t1, t2);
diff --git a/lightning/src/ln/functional_tests.rs b/lightning/src/ln/functional_tests.rs
index 8df252586..a1d136427 100644
--- a/lightning/src/ln/functional_tests.rs
+++ b/lightning/src/ln/functional_tests.rs
@@ -693,7 +693,7 @@ fn test_update_fee_that_funder_cannot_afford() {
 		*feerate_lock += 4;
 	}
 	nodes[0].node.timer_tick_occurred();
-	nodes[0].logger.assert_log("lightning::ln::channel".to_string(), format!("Cannot afford to send new feerate at {}", feerate + 4), 1);
+	nodes[0].logger.assert_log("lightning::ln::channel", format!("Cannot afford to send new feerate at {}", feerate + 4), 1);
 	check_added_monitors!(nodes[0], 0);

 	const INITIAL_COMMITMENT_NUMBER: u64 = 281474976710654;
@@ -768,7 +768,7 @@ fn test_update_fee_that_funder_cannot_afford() {
 	//check to see if the funder, who sent the update_fee request, can afford the new fee (funder_balance >= fee+channel_reserve)
 	//Should produce and error.
 	nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &commit_signed_msg);
-	nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Funding remote cannot afford proposed new fee".to_string(), 1);
+	nodes[1].logger.assert_log("lightning::ln::channelmanager", "Funding remote cannot afford proposed new fee".to_string(), 1);
 	check_added_monitors!(nodes[1], 1);
 	check_closed_broadcast!(nodes[1], true);
 	check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: String::from("Funding remote cannot afford proposed new fee") },
@@ -1529,7 +1529,7 @@ fn test_fee_spike_violation_fails_htlc() {
 		},
 		_ => panic!("Unexpected event"),
 	};
-	nodes[1].logger.assert_log("lightning::ln::channel".to_string(),
+	nodes[1].logger.assert_log("lightning::ln::channel",
 		format!("Attempting to fail HTLC due to fee spike buffer violation in channel {}. Rebalancing is required.", raa_msg.channel_id), 1);

 	check_added_monitors!(nodes[1], 2);
@@ -1617,7 +1617,7 @@ fn test_chan_reserve_violation_inbound_htlc_outbound_channel() {
 	nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &msg);

 	// Check that the payment failed and the channel is closed in response to the malicious UpdateAdd.
-	nodes[0].logger.assert_log("lightning::ln::channelmanager".to_string(), "Cannot accept HTLC that would put our balance under counterparty-announced channel reserve value".to_string(), 1);
+	nodes[0].logger.assert_log("lightning::ln::channelmanager", "Cannot accept HTLC that would put our balance under counterparty-announced channel reserve value".to_string(), 1);
 	assert_eq!(nodes[0].node.list_channels().len(), 0);
 	let err_msg = check_closed_broadcast!(nodes[0], true).unwrap();
 	assert_eq!(err_msg.data, "Cannot accept HTLC that would put our balance under counterparty-announced channel reserve value");
@@ -1796,7 +1796,7 @@ fn test_chan_reserve_violation_inbound_htlc_inbound_chan() {
 	nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &msg);

 	// Check that the payment failed and the channel is closed in response to the malicious UpdateAdd.
-	nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Remote HTLC add would put them under remote reserve value".to_string(), 1);
+	nodes[1].logger.assert_log("lightning::ln::channelmanager", "Remote HTLC add would put them under remote reserve value".to_string(), 1);
 	assert_eq!(nodes[1].node.list_channels().len(), 1);
 	let err_msg = check_closed_broadcast!(nodes[1], true).unwrap();
 	assert_eq!(err_msg.data, "Remote HTLC add would put them under remote reserve value");
@@ -5930,7 +5930,7 @@ fn test_fail_holding_cell_htlc_upon_free() {
 	// us to surface its failure to the user.
 	chan_stat = get_channel_value_stat!(nodes[0], nodes[1], chan.2);
 	assert_eq!(chan_stat.holding_cell_outbound_amount_msat, 0);
-	nodes[0].logger.assert_log("lightning::ln::channel".to_string(), format!("Freeing holding cell with 1 HTLC updates in channel {}", chan.2), 1);
+	nodes[0].logger.assert_log("lightning::ln::channel", format!("Freeing holding cell with 1 HTLC updates in channel {}", chan.2), 1);

 	// Check that the payment failed to be sent out.
 	let events = nodes[0].node.get_and_clear_pending_events();
@@ -6018,7 +6018,7 @@ fn test_free_and_fail_holding_cell_htlcs() {
 	// to surface its failure to the user. The first payment should succeed.
 	chan_stat = get_channel_value_stat!(nodes[0], nodes[1], chan.2);
 	assert_eq!(chan_stat.holding_cell_outbound_amount_msat, 0);
-	nodes[0].logger.assert_log("lightning::ln::channel".to_string(), format!("Freeing holding cell with 2 HTLC updates in channel {}", chan.2), 1);
+	nodes[0].logger.assert_log("lightning::ln::channel", format!("Freeing holding cell with 2 HTLC updates in channel {}", chan.2), 1);

 	// Check that the second payment failed to be sent out.
 	let events = nodes[0].node.get_and_clear_pending_events();
@@ -6292,7 +6292,7 @@ fn test_update_add_htlc_bolt2_receiver_zero_value_msat() {
 	updates.update_add_htlcs[0].amount_msat = 0;

 	nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);
-	nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Remote side tried to send a 0-msat HTLC".to_string(), 1);
+	nodes[1].logger.assert_log("lightning::ln::channelmanager", "Remote side tried to send a 0-msat HTLC".to_string(), 1);
 	check_closed_broadcast!(nodes[1], true).unwrap();
 	check_added_monitors!(nodes[1], 1);
 	check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: "Remote side tried to send a 0-msat HTLC".to_string() },
@@ -9843,10 +9843,10 @@ fn do_test_max_dust_htlc_exposure(dust_outbound_balance: bool, exposure_breach_e
 			// Outbound dust balance: 6399 sats
 			let dust_inbound_overflow = dust_inbound_htlc_on_holder_tx_msat * (dust_inbound_htlc_on_holder_tx + 1);
 			let dust_outbound_overflow = dust_outbound_htlc_on_holder_tx_msat * dust_outbound_htlc_on_holder_tx + dust_inbound_htlc_on_holder_tx_msat;
-			nodes[0].logger.assert_log("lightning::ln::channel".to_string(), format!("Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on holder commitment tx", if dust_outbound_balance { dust_outbound_overflow } else { dust_inbound_overflow }, max_dust_htlc_exposure_msat), 1);
+			nodes[0].logger.assert_log("lightning::ln::channel", format!("Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on holder commitment tx", if dust_outbound_balance { dust_outbound_overflow } else { dust_inbound_overflow }, max_dust_htlc_exposure_msat), 1);
 		} else {
 			// Outbound dust balance: 5200 sats
-			nodes[0].logger.assert_log("lightning::ln::channel".to_string(),
+			nodes[0].logger.assert_log("lightning::ln::channel",
 				format!("Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on counterparty commitment tx",
 					dust_htlc_on_counterparty_tx_msat * (dust_htlc_on_counterparty_tx - 1) + dust_htlc_on_counterparty_tx_msat + 4,
 					max_dust_htlc_exposure_msat), 1);
diff --git a/lightning/src/util/test_utils.rs b/lightning/src/util/test_utils.rs
index 0606e36e5..a6c2d7745 100644
--- a/lightning/src/util/test_utils.rs
+++ b/lightning/src/util/test_utils.rs
@@ -930,8 +930,8 @@ impl events::MessageSendEventsProvider for TestRoutingMessageHandler {
 pub struct TestLogger {
 	level: Level,
 	pub(crate) id: String,
-	pub lines: Mutex<HashMap<(String, String), usize>>,
-	pub context: Mutex<HashMap<(String, Option<PublicKey>, Option<ChannelId>), usize>>,
+	pub lines: Mutex<HashMap<(&'static str, String), usize>>,
+	pub context: Mutex<HashMap<(&'static str, Option<PublicKey>, Option<ChannelId>), usize>>,
 }

 impl TestLogger {
@@ -949,7 +949,7 @@ impl TestLogger {
 	pub fn enable(&mut self, level: Level) {
 		self.level = level;
 	}
-	pub fn assert_log(&self, module: String, line: String, count: usize) {
+	pub fn assert_log(&self, module: &str, line: String, count: usize) {
 		let log_entries = self.lines.lock().unwrap();
 		assert_eq!(log_entries.get(&(module, line)), Some(&count));
 	}
@@ -961,7 +961,7 @@ impl TestLogger {
 	pub fn assert_log_contains(&self, module: &str, line: &str, count: usize) {
 		let log_entries = self.lines.lock().unwrap();
 		let l: usize = log_entries.iter().filter(|&(&(ref m, ref l), _c)| {
-			m == module && l.contains(line)
+			*m == module && l.contains(line)
 		}).map(|(_, c) | { c }).sum();
 		assert_eq!(l, count)
 	}
@@ -974,7 +974,7 @@ impl TestLogger {
 	pub fn assert_log_regex(&self, module: &str, pattern: regex::Regex, count: usize) {
 		let log_entries = self.lines.lock().unwrap();
 		let l: usize = log_entries.iter().filter(|&(&(ref m, ref l), _c)| {
-			m == module && pattern.is_match(&l)
+			*m == module && pattern.is_match(&l)
 		}).map(|(_, c) | { c }).sum();
 		assert_eq!(l, count)
 	}
@@ -983,18 +983,15 @@ impl TestLogger {
 		&self, module: &str, peer_id: Option<PublicKey>, channel_id: Option<ChannelId>, count: usize
 	) {
 		let context_entries = self.context.lock().unwrap();
-		let l: usize = context_entries.iter()
-			.filter(|&(&(ref m, ref p, ref c), _)| m == module && *p == peer_id && *c == channel_id)
-			.map(|(_, c) | c)
-			.sum();
-		assert_eq!(l, count)
+		let l = context_entries.get(&(module, peer_id, channel_id)).unwrap();
+		assert_eq!(*l, count)
 	}
 }

 impl Logger for TestLogger {
 	fn log(&self, record: Record) {
-		*self.lines.lock().unwrap().entry((record.module_path.to_string(), format!("{}", record.args))).or_insert(0) += 1;
-		*self.context.lock().unwrap().entry((record.module_path.to_string(), record.peer_id, record.channel_id)).or_insert(0) += 1;
+		*self.lines.lock().unwrap().entry((record.module_path, format!("{}", record.args))).or_insert(0) += 1;
+		*self.context.lock().unwrap().entry((record.module_path, record.peer_id, record.channel_id)).or_insert(0) += 1;
 		if record.level >= self.level {
 			#[cfg(all(not(ldk_bench), feature = "std"))] {
 				let pfx = format!("{} {} [{}:{}]", self.id, record.level.to_string(), record.module_path, record.line);
-- 
2.39.5
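
Note (reviewer annotation, not part of the patch): the change works because LDK's `Record::module_path` is a `&'static str` produced by `module_path!()`, so the `TestLogger` maps can store the borrowed module path directly instead of an owned `String`. That removes one allocation per log entry, and once the `context` key tuple is `(&'static str, Option<PublicKey>, Option<ChannelId>)`, `assert_log_context_contains` can do a single `HashMap::get` rather than filter-and-sum over every entry. Below is a minimal, self-contained Rust sketch of the pattern; `MiniLogger` is a hypothetical stand-in for `TestLogger`, reduced to just the `lines` map.

use std::collections::HashMap;
use std::sync::Mutex;

/// A stripped-down, hypothetical stand-in for `TestLogger`
/// demonstrating the `&'static str` keying trick from this patch.
struct MiniLogger {
	// Key: (module path, formatted message) -> occurrence count.
	// The module half is `&'static str` (from `module_path!()`),
	// so no `String` is allocated for it per entry.
	lines: Mutex<HashMap<(&'static str, String), usize>>,
}

impl MiniLogger {
	fn new() -> Self {
		MiniLogger { lines: Mutex::new(HashMap::new()) }
	}

	fn log(&self, module: &'static str, msg: &str) {
		// Only the message needs an owned `String`; the module key is
		// stored as-is, with no per-entry allocation.
		*self.lines.lock().unwrap().entry((module, msg.to_string())).or_insert(0) += 1;
	}

	fn assert_log(&self, module: &'static str, line: String, count: usize) {
		// Direct O(1) lookup instead of iterating the whole map.
		let log_entries = self.lines.lock().unwrap();
		assert_eq!(log_entries.get(&(module, line)), Some(&count));
	}
}

fn main() {
	let logger = MiniLogger::new();
	logger.log(module_path!(), "Persisting scorer");
	logger.log(module_path!(), "Persisting scorer");
	logger.assert_log(module_path!(), "Persisting scorer".to_string(), 2);
}

The only assertion style that still has to iterate is substring/regex matching (`assert_log_contains`, `assert_log_regex`), since those cannot be expressed as a key lookup; exact-match assertions all become direct `get`s.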