From: Valentine Wallace
Date: Wed, 21 Dec 2022 20:45:57 +0000 (-0500)
Subject: Test utils: allow queueing >2 persistence update results
X-Git-Tag: v0.0.114-beta~73^2
X-Git-Url: http://git.bitcoin.ninja/index.cgi?a=commitdiff_plain;h=19516c041e4527f6475e7bdcb046f463a374d468;p=rust-lightning

Test utils: allow queueing >2 persistence update results
---

diff --git a/lightning/src/chain/chainmonitor.rs b/lightning/src/chain/chainmonitor.rs
index 17fe69182..8dba235f2 100644
--- a/lightning/src/chain/chainmonitor.rs
+++ b/lightning/src/chain/chainmonitor.rs
@@ -815,6 +815,7 @@ mod tests {
 
 		chanmon_cfgs[1].persister.offchain_monitor_updates.lock().unwrap().clear();
 		chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
+		chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
 
 		nodes[1].node.claim_funds(payment_preimage_1);
 		check_added_monitors!(nodes[1], 1);
@@ -823,8 +824,6 @@ mod tests {
 		check_added_monitors!(nodes[1], 1);
 		expect_payment_claimed!(nodes[1], payment_hash_2, 1_000_000);
 
-		chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
-
 		let persistences = chanmon_cfgs[1].persister.offchain_monitor_updates.lock().unwrap().clone();
 		assert_eq!(persistences.len(), 1);
 		let (funding_txo, updates) = persistences.iter().next().unwrap();
diff --git a/lightning/src/ln/chanmon_update_fail_tests.rs b/lightning/src/ln/chanmon_update_fail_tests.rs
index 1d1eab482..914e5d9a2 100644
--- a/lightning/src/ln/chanmon_update_fail_tests.rs
+++ b/lightning/src/ln/chanmon_update_fail_tests.rs
@@ -782,6 +782,7 @@ fn test_monitor_update_raa_while_paused() {
 	check_added_monitors!(nodes[1], 1);
 	let bs_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
 
+	chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
 	chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
 	nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &send_event_2.msgs[0]);
 	nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &send_event_2.commitment_msg);
@@ -793,7 +794,6 @@ fn test_monitor_update_raa_while_paused() {
 	assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
 	check_added_monitors!(nodes[0], 1);
 
-	chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
 	let (outpoint, latest_update, _) = nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
 	nodes[0].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update);
 	check_added_monitors!(nodes[0], 0);
@@ -1223,6 +1223,7 @@ fn raa_no_response_awaiting_raa_state() {
 	// nodes[1]) followed by an RAA. Fail the monitor updating prior to the CS, deliver the RAA,
 	// then restore channel monitor updates.
 	chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
+	chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
 	nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
 	nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event.commitment_msg);
 	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
@@ -1233,7 +1234,6 @@ fn raa_no_response_awaiting_raa_state() {
 	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
 	check_added_monitors!(nodes[1], 1);
 
-	chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
 	let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
 	nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update);
 	// nodes[1] should be AwaitingRAA here!
@@ -1959,7 +1959,7 @@ fn test_path_paused_mpp() {
 	// Set it so that the first monitor update (for the path 0 -> 1 -> 3) succeeds, but the second
 	// (for the path 0 -> 2 -> 3) fails.
 	chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
-	chanmon_cfgs[0].persister.set_next_update_ret(Some(ChannelMonitorUpdateStatus::InProgress));
+	chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
 
 	// Now check that we get the right return value, indicating that the first path succeeded but
 	// the second got a MonitorUpdateInProgress err. This implies
@@ -2268,6 +2268,7 @@ fn do_channel_holding_cell_serialize(disconnect: bool, reload_a: bool) {
 	nodes[0].node.send_payment(&route, payment_hash_2, &Some(payment_secret_2), PaymentId(payment_hash_2.0)).unwrap();
 	check_added_monitors!(nodes[0], 0);
 
+	chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
 	chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
 	nodes[0].node.claim_funds(payment_preimage_0);
 	check_added_monitors!(nodes[0], 1);
diff --git a/lightning/src/util/test_utils.rs b/lightning/src/util/test_utils.rs
index 9ca517964..5b101e345 100644
--- a/lightning/src/util/test_utils.rs
+++ b/lightning/src/util/test_utils.rs
@@ -224,10 +224,9 @@ impl<'a> chain::Watch<EnforcingSigner> for TestChainMonitor<'a> {
 }
 
 pub struct TestPersister {
-	pub update_ret: Mutex<chain::ChannelMonitorUpdateStatus>,
-	/// If this is set to Some(), after the next return, we'll always return this until update_ret
-	/// is changed:
-	pub next_update_ret: Mutex<Option<chain::ChannelMonitorUpdateStatus>>,
+	/// The queue of update statuses we'll return. If none are queued, ::Completed will always be
+	/// returned.
+	pub update_rets: Mutex<VecDeque<chain::ChannelMonitorUpdateStatus>>,
 	/// When we get an update_persisted_channel call with no ChannelMonitorUpdate, we insert the
 	/// MonitorUpdateId here.
 	pub chain_sync_monitor_persistences: Mutex<HashMap<OutPoint, HashSet<MonitorUpdateId>>>,
@@ -238,34 +237,29 @@ pub struct TestPersister {
 impl TestPersister {
 	pub fn new() -> Self {
 		Self {
-			update_ret: Mutex::new(chain::ChannelMonitorUpdateStatus::Completed),
-			next_update_ret: Mutex::new(None),
+			update_rets: Mutex::new(VecDeque::new()),
 			chain_sync_monitor_persistences: Mutex::new(HashMap::new()),
 			offchain_monitor_updates: Mutex::new(HashMap::new()),
 		}
 	}
 
-	pub fn set_update_ret(&self, ret: chain::ChannelMonitorUpdateStatus) {
-		*self.update_ret.lock().unwrap() = ret;
-	}
-
-	pub fn set_next_update_ret(&self, next_ret: Option<chain::ChannelMonitorUpdateStatus>) {
-		*self.next_update_ret.lock().unwrap() = next_ret;
+	/// Queue an update status to return.
+	pub fn set_update_ret(&self, next_ret: chain::ChannelMonitorUpdateStatus) {
+		self.update_rets.lock().unwrap().push_back(next_ret);
 	}
 }
 
 impl<Signer: keysinterface::Sign> chainmonitor::Persist<Signer> for TestPersister {
 	fn persist_new_channel(&self, _funding_txo: OutPoint, _data: &channelmonitor::ChannelMonitor<Signer>, _id: MonitorUpdateId) -> chain::ChannelMonitorUpdateStatus {
-		let ret = self.update_ret.lock().unwrap().clone();
-		if let Some(next_ret) = self.next_update_ret.lock().unwrap().take() {
-			*self.update_ret.lock().unwrap() = next_ret;
+		if let Some(update_ret) = self.update_rets.lock().unwrap().pop_front() {
+			return update_ret
 		}
-		ret
+		chain::ChannelMonitorUpdateStatus::Completed
 	}
 
 	fn update_persisted_channel(&self, funding_txo: OutPoint, update: &Option<channelmonitor::ChannelMonitorUpdate>, _data: &channelmonitor::ChannelMonitor<Signer>, update_id: MonitorUpdateId) -> chain::ChannelMonitorUpdateStatus {
-		let ret = self.update_ret.lock().unwrap().clone();
-		if let Some(next_ret) = self.next_update_ret.lock().unwrap().take() {
-			*self.update_ret.lock().unwrap() = next_ret;
+		let mut ret = chain::ChannelMonitorUpdateStatus::Completed;
+		if let Some(update_ret) = self.update_rets.lock().unwrap().pop_front() {
+			ret = update_ret;
 		}
 		if update.is_none() {
 			self.chain_sync_monitor_persistences.lock().unwrap().entry(funding_txo).or_insert(HashSet::new()).insert(update_id);
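
For reference, a minimal standalone sketch of the queue semantics this patch gives TestPersister: set_update_ret now pushes onto a VecDeque, each persist call pops the front, and an empty queue falls back to Completed. The LDK types are replaced by a local Status enum so the sketch compiles on its own; Status, MockPersister, and persist are illustrative names, not LDK API.

use std::collections::VecDeque;
use std::sync::Mutex;

#[derive(Clone, Copy, Debug, PartialEq)]
enum Status { Completed, InProgress }

struct MockPersister {
	// Queued return values; when empty, persist() falls back to Completed.
	update_rets: Mutex<VecDeque<Status>>,
}

impl MockPersister {
	fn new() -> Self {
		Self { update_rets: Mutex::new(VecDeque::new()) }
	}
	// Mirrors the new TestPersister::set_update_ret: each call queues one more result.
	fn set_update_ret(&self, next_ret: Status) {
		self.update_rets.lock().unwrap().push_back(next_ret);
	}
	// Mirrors the return logic of persist_new_channel/update_persisted_channel.
	fn persist(&self) -> Status {
		if let Some(ret) = self.update_rets.lock().unwrap().pop_front() {
			return ret;
		}
		Status::Completed
	}
}

fn main() {
	let persister = MockPersister::new();
	// Queue more than two results, which the old single-slot
	// update_ret/next_update_ret pair could not express.
	persister.set_update_ret(Status::InProgress);
	persister.set_update_ret(Status::InProgress);
	persister.set_update_ret(Status::InProgress);
	assert_eq!(persister.persist(), Status::InProgress);
	assert_eq!(persister.persist(), Status::InProgress);
	assert_eq!(persister.persist(), Status::InProgress);
	// Queue drained: subsequent calls default to Completed.
	assert_eq!(persister.persist(), Status::Completed);
}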