diff --git a/lightning/src/chain/chainmonitor.rs b/lightning/src/chain/chainmonitor.rs
index fc7e50a1..19095fa2 100644
--- a/lightning/src/chain/chainmonitor.rs
+++ b/lightning/src/chain/chainmonitor.rs
@@ -474,7 +474,7 @@ where C::Target: chain::Filter,
 	/// This wrapper avoids having to update some of our tests for now as they assume the direct
 	/// chain::Watch API wherein we mark a monitor fully-updated by just calling
 	/// channel_monitor_updated once with the highest ID.
-	#[cfg(any(test, feature = "fuzztarget"))]
+	#[cfg(any(test, fuzzing))]
 	pub fn force_channel_monitor_updated(&self, funding_txo: OutPoint, monitor_update_id: u64) {
 		self.pending_monitor_events.lock().unwrap().push(MonitorEvent::UpdateCompleted {
 			funding_txo,
@@ -482,7 +482,7 @@ where C::Target: chain::Filter,
 		});
 	}

-	#[cfg(any(test, feature = "fuzztarget", feature = "_test_utils"))]
+	#[cfg(any(test, fuzzing, feature = "_test_utils"))]
 	pub fn get_and_clear_pending_events(&self) -> Vec<events::Event> {
 		use util::events::EventsProvider;
 		let events = core::cell::RefCell::new(Vec::new());
@@ -630,9 +630,9 @@ where C::Target: chain::Filter,
 				// We should never ever trigger this from within ChannelManager. Technically a
 				// user could use this object with some proxying in between which makes this
 				// possible, but in tests and fuzzing, this should be a panic.
-				#[cfg(any(test, feature = "fuzztarget"))]
+				#[cfg(any(test, fuzzing))]
 				panic!("ChannelManager generated a channel update for a channel that was not yet registered!");
-				#[cfg(not(any(test, feature = "fuzztarget")))]
+				#[cfg(not(any(test, fuzzing)))]
 				Err(ChannelMonitorUpdateErr::PermanentFailure)
 			},
 			Some(monitor_state) => {
@@ -920,9 +920,10 @@ mod tests {
 				merkle_root: Default::default() };
 			nodes[0].chain_monitor.chain_monitor.best_block_updated(&latest_header, nodes[0].best_block_info().1 + LATENCY_GRACE_PERIOD_BLOCKS);
 		} else {
-			for (funding_outpoint, update_ids) in chanmon_cfgs[0].persister.chain_sync_monitor_persistences.lock().unwrap().iter() {
+			let persistences = chanmon_cfgs[0].persister.chain_sync_monitor_persistences.lock().unwrap().clone();
+			for (funding_outpoint, update_ids) in persistences {
 				for update_id in update_ids {
-					nodes[0].chain_monitor.chain_monitor.channel_monitor_updated(*funding_outpoint, *update_id).unwrap();
+					nodes[0].chain_monitor.chain_monitor.channel_monitor_updated(funding_outpoint, update_id).unwrap();
 				}
 			}
 		}
@@ -935,4 +936,27 @@ mod tests {
 		do_chainsync_pauses_events(false);
 		do_chainsync_pauses_events(true);
 	}
+
+	#[test]
+	fn update_during_chainsync_fails_channel() {
+		let chanmon_cfgs = create_chanmon_cfgs(2);
+		let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+		let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+		let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+		create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
+
+		chanmon_cfgs[0].persister.chain_sync_monitor_persistences.lock().unwrap().clear();
+		chanmon_cfgs[0].persister.set_update_ret(Err(ChannelMonitorUpdateErr::PermanentFailure));
+
+		connect_blocks(&nodes[0], 1);
+		// Before processing events, the ChannelManager will still think the Channel is open and
+		// there won't be any ChannelMonitorUpdates
+		assert_eq!(nodes[0].node.list_channels().len(), 1);
+		check_added_monitors!(nodes[0], 0);
+		// ... however once we process events, the channel will close, creating a channel-closed
+		// ChannelMonitorUpdate.
+		check_closed_broadcast!(nodes[0], true);
+		check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: "Failed to persist ChannelMonitor update during chain sync".to_string() });
+		check_added_monitors!(nodes[0], 1);
+	}
 }
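
Note on the cfg changes in the first three hunks: `fuzzing` is a rustc `--cfg` flag (typically passed through `RUSTFLAGS="--cfg=fuzzing"`) rather than a Cargo feature like the old `fuzztarget`. A minimal standalone sketch of the gate-on-cfg pattern the diff uses; `debug_assert_invariant` is a hypothetical function, not rust-lightning API:

// Build with `RUSTFLAGS="--cfg=fuzzing" cargo run` to compile the first branch.
#[cfg(any(test, fuzzing))]
fn debug_assert_invariant(ok: bool) {
	// In test and fuzzing builds, surface bugs immediately, as the
	// `panic!` in the third hunk above does.
	if !ok { panic!("invariant violated"); }
}

#[cfg(not(any(test, fuzzing)))]
fn debug_assert_invariant(ok: bool) {
	// In release builds, tolerate the condition and let the caller recover.
	if !ok { eprintln!("invariant violated"); }
}

fn main() {
	debug_assert_invariant(true);
}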
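
On the fourth hunk: cloning `chain_sync_monitor_persistences` out of its `Mutex` drops the lock guard before `channel_monitor_updated` is called, presumably because that call can re-enter the persister, and `std::sync::Mutex` is not reentrant; iterating the clone by value also removes the `*funding_outpoint`/`*update_id` derefs. A standalone sketch of that pattern under this assumption, with a hypothetical `Tracker` type that is not part of the codebase:

use std::collections::HashMap;
use std::sync::Mutex;

// Hypothetical stand-in for the test persister's update-id tracking.
struct Tracker {
	// Funding outpoints are simplified to u64 keys here.
	persistences: Mutex<HashMap<u64, Vec<u64>>>,
}

impl Tracker {
	// Calls `complete` for every pending (outpoint, update_id) pair without
	// holding the lock across the callback.
	fn complete_all(&self, mut complete: impl FnMut(u64, u64)) {
		// The MutexGuard is a temporary, so it is dropped at the end of this
		// statement, immediately after the map is cloned.
		let snapshot = self.persistences.lock().unwrap().clone();
		for (funding_outpoint, update_ids) in snapshot {
			for update_id in update_ids {
				// Lock released: `complete` may safely re-acquire it.
				complete(funding_outpoint, update_id);
			}
		}
	}
}

fn main() {
	let tracker = Tracker { persistences: Mutex::new(HashMap::from([(1, vec![7, 8])])) };
	tracker.complete_all(|outpoint, update_id| {
		println!("completed update {} for outpoint {}", update_id, outpoint);
	});
}

The new test in the last hunk then exercises the failure side of this path: with `set_update_ret(Err(ChannelMonitorUpdateErr::PermanentFailure))`, the persist attempt triggered by `connect_blocks` fails, and the assertions verify the channel is only force-closed, with `ClosureReason::ProcessingError`, once events are actually processed.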