X-Git-Url: http://git.bitcoin.ninja/index.cgi?a=blobdiff_plain;f=lightning%2Fsrc%2Futil%2Ftest_utils.rs;h=a0ccf4f816ea991e31a019351018d43a95633324;hb=0ac839d8cd6362cc034a5e431d525dea28a75c2c;hp=d29a55f12010cfb3a057c959248e31de841a7482;hpb=4894d52d30399c21b7994952a8de0d1d7848c58d;p=rust-lightning

diff --git a/lightning/src/util/test_utils.rs b/lightning/src/util/test_utils.rs
index d29a55f1..a0ccf4f8 100644
--- a/lightning/src/util/test_utils.rs
+++ b/lightning/src/util/test_utils.rs
@@ -82,9 +82,13 @@ pub struct TestChainMonitor<'a> {
 	pub chain_monitor: chainmonitor::ChainMonitor<EnforcingSigner, &'a TestChainSource, &'a chaininterface::BroadcasterInterface, &'a TestFeeEstimator, &'a TestLogger, &'a channelmonitor::Persist<EnforcingSigner>>,
 	pub keys_manager: &'a TestKeysInterface,
 	pub update_ret: Mutex<Option<Result<(), channelmonitor::ChannelMonitorUpdateErr>>>,
-	// If this is set to Some(), after the next return, we'll always return this until update_ret
-	// is changed:
+	/// If this is set to Some(), after the next return, we'll always return this until update_ret
+	/// is changed:
 	pub next_update_ret: Mutex<Option<Result<(), channelmonitor::ChannelMonitorUpdateErr>>>,
+	/// If this is set to Some(), the next update_channel call (not watch_channel) must be a
+	/// ChannelForceClosed event for the given channel_id with should_broadcast set to the given
+	/// boolean.
+	pub expect_channel_force_closed: Mutex<Option<([u8; 32], bool)>>,
 }
 impl<'a> TestChainMonitor<'a> {
 	pub fn new(chain_source: Option<&'a TestChainSource>, broadcaster: &'a chaininterface::BroadcasterInterface, logger: &'a TestLogger, fee_estimator: &'a TestFeeEstimator, persister: &'a channelmonitor::Persist<EnforcingSigner>, keys_manager: &'a TestKeysInterface) -> Self {
@@ -95,6 +99,7 @@ impl<'a> TestChainMonitor<'a> {
 			keys_manager,
 			update_ret: Mutex::new(None),
 			next_update_ret: Mutex::new(None),
+			expect_channel_force_closed: Mutex::new(None),
 		}
 	}
 }
@@ -104,7 +109,7 @@ impl<'a> chain::Watch<EnforcingSigner> for TestChainMonitor<'a> {
 		// to a watchtower and disk...
 		let mut w = TestVecWriter(Vec::new());
 		monitor.write(&mut w).unwrap();
-		let new_monitor = <(Option<BlockHash>, channelmonitor::ChannelMonitor<EnforcingSigner>)>::read(
+		let new_monitor = <(BlockHash, channelmonitor::ChannelMonitor<EnforcingSigner>)>::read(
 			&mut ::std::io::Cursor::new(&w.0), self.keys_manager).unwrap().1;
 		assert!(new_monitor == monitor);
 		self.latest_monitor_update_id.lock().unwrap().insert(funding_txo.to_channel_id(), (funding_txo, monitor.get_latest_update_id()));
@@ -129,6 +134,14 @@ impl<'a> chain::Watch<EnforcingSigner> for TestChainMonitor<'a> {
 		assert!(channelmonitor::ChannelMonitorUpdate::read(
 				&mut ::std::io::Cursor::new(&w.0)).unwrap() == update);
 
+		if let Some(exp) = self.expect_channel_force_closed.lock().unwrap().take() {
+			assert_eq!(funding_txo.to_channel_id(), exp.0);
+			assert_eq!(update.updates.len(), 1);
+			if let channelmonitor::ChannelMonitorUpdateStep::ChannelForceClosed { should_broadcast } = update.updates[0] {
+				assert_eq!(should_broadcast, exp.1);
+			} else { panic!(); }
+		}
+
 		self.latest_monitor_update_id.lock().unwrap().insert(funding_txo.to_channel_id(), (funding_txo, update.update_id));
 		let update_res = self.chain_monitor.update_channel(funding_txo, update);
 		// At every point where we get a monitor update, we should be able to send a useful monitor
@@ -137,7 +150,7 @@ impl<'a> chain::Watch<EnforcingSigner> for TestChainMonitor<'a> {
 		let monitor = monitors.get(&funding_txo).unwrap();
 		w.0.clear();
 		monitor.write(&mut w).unwrap();
-		let new_monitor = <(Option<BlockHash>, channelmonitor::ChannelMonitor<EnforcingSigner>)>::read(
+		let new_monitor = <(BlockHash, channelmonitor::ChannelMonitor<EnforcingSigner>)>::read(
 			&mut ::std::io::Cursor::new(&w.0), self.keys_manager).unwrap().1;
 		assert!(new_monitor == *monitor);
 		self.added_monitors.lock().unwrap().push((funding_txo, new_monitor));
@@ -218,6 +231,7 @@ impl msgs::ChannelMessageHandler for TestChannelMessageHandler {
 	fn handle_commitment_signed(&self, _their_node_id: &PublicKey, _msg: &msgs::CommitmentSigned) {}
 	fn handle_revoke_and_ack(&self, _their_node_id: &PublicKey, _msg: &msgs::RevokeAndACK) {}
 	fn handle_update_fee(&self, _their_node_id: &PublicKey, _msg: &msgs::UpdateFee) {}
+	fn handle_channel_update(&self, _their_node_id: &PublicKey, _msg: &msgs::ChannelUpdate) {}
 	fn handle_announcement_signatures(&self, _their_node_id: &PublicKey, _msg: &msgs::AnnouncementSignatures) {}
 	fn handle_channel_reestablish(&self, _their_node_id: &PublicKey, _msg: &msgs::ChannelReestablish) {}
 	fn peer_disconnected(&self, _their_node_id: &PublicKey, _no_connection_possible: bool) {}
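
Note on usage: a minimal sketch of how a functional test might drive the new expect_channel_force_closed hook. The nodes/chan bindings follow the usual functional_test_utils setup and are assumptions here, not taken from this diff, as is the exact force_close_channel signature of this era:

	// Arm the expectation: the *next* update_channel call seen by nodes[1]'s
	// TestChainMonitor must carry exactly one ChannelMonitorUpdateStep, a
	// ChannelForceClosed with should_broadcast == true, for this channel.
	// (chan.2 is assumed to be the [u8; 32] channel id from the open-channel helpers.)
	*nodes[1].chain_monitor.expect_channel_force_closed.lock().unwrap() = Some((chan.2, true));
	// Trigger the force-close; update_channel panics if the resulting
	// ChannelMonitorUpdate does not match, and take() consumes the
	// expectation, so later monitor updates are unaffected.
	nodes[1].node.force_close_channel(&chan.2);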
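Similarly, the pre-existing update_ret/next_update_ret pair can simulate a transient persistence failure. A sketch of the intended pattern, inferred from the doc comment above and assuming a chain_monitor: &TestChainMonitor binding:

	// Fail the next monitor-update call, then succeed afterwards. While set,
	// update_ret is returned from watch_channel/update_channel; because
	// next_update_ret is also set, it replaces update_ret right after that
	// first (failing) return, per the doc comment on next_update_ret.
	*chain_monitor.update_ret.lock().unwrap() =
		Some(Err(channelmonitor::ChannelMonitorUpdateErr::TemporaryFailure));
	*chain_monitor.next_update_ret.lock().unwrap() = Some(Ok(()));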