X-Git-Url: http://git.bitcoin.ninja/index.cgi?a=blobdiff_plain;f=lightning%2Fsrc%2Fchain%2Fchainmonitor.rs;h=503e6bdee0669551d1853932447a5db08fc92c17;hb=75ca50f5c0834c01d84826b880f048ca031f08ce;hp=9db666bb03d0ff4e706069d3eba5576258eabb1d;hpb=cd9cd47f686c0ac6543e05fd23fe67d74407c409;p=rust-lightning

diff --git a/lightning/src/chain/chainmonitor.rs b/lightning/src/chain/chainmonitor.rs
index 9db666bb..503e6bde 100644
--- a/lightning/src/chain/chainmonitor.rs
+++ b/lightning/src/chain/chainmonitor.rs
@@ -23,7 +23,7 @@
 //! events. The remote server would make use of [`ChainMonitor`] for block processing and for
 //! servicing [`ChannelMonitor`] updates from the client.
 
-use bitcoin::blockdata::block::{Block, BlockHeader};
+use bitcoin::blockdata::block::BlockHeader;
 use bitcoin::hash_types::Txid;
 
 use chain;
@@ -235,7 +235,7 @@ pub struct ChainMonitor<ChannelSigner: Sign, C: Deref, T: Deref, F: Deref, L: D
 	persister: P,
 	/// "User-provided" (ie persistence-completion/-failed) [`MonitorEvent`]s. These came directly
 	/// from the user and not from a [`ChannelMonitor`].
-	pending_monitor_events: Mutex<Vec<MonitorEvent>>,
+	pending_monitor_events: Mutex<Vec<(OutPoint, Vec<MonitorEvent>)>>,
 	/// The best block height seen, used as a proxy for the passage of time.
 	highest_chain_height: AtomicUsize,
 }
@@ -299,7 +299,7 @@ where C::Target: chain::Filter,
 				log_trace!(self.logger, "Finished syncing Channel Monitor for channel {}", log_funding_info!(monitor)),
 			Err(ChannelMonitorUpdateErr::PermanentFailure) => {
 				monitor_state.channel_perm_failed.store(true, Ordering::Release);
-				self.pending_monitor_events.lock().unwrap().push(MonitorEvent::UpdateFailed(*funding_outpoint));
+				self.pending_monitor_events.lock().unwrap().push((*funding_outpoint, vec![MonitorEvent::UpdateFailed(*funding_outpoint)]));
 			},
 			Err(ChannelMonitorUpdateErr::TemporaryFailure) => {
 				log_debug!(self.logger, "Channel Monitor sync for channel {} in progress, holding events until completion!", log_funding_info!(monitor));
@@ -455,10 +455,10 @@ where C::Target: chain::Filter,
 					// UpdateCompleted event.
 					return Ok(());
 				}
-				self.pending_monitor_events.lock().unwrap().push(MonitorEvent::UpdateCompleted {
+				self.pending_monitor_events.lock().unwrap().push((funding_txo, vec![MonitorEvent::UpdateCompleted {
 					funding_txo,
 					monitor_update_id: monitor_data.monitor.get_latest_update_id(),
-				});
+				}]));
 			},
 			MonitorUpdateId { contents: UpdateOrigin::ChainSync(_) } => {
 				if !monitor_data.has_pending_chainsync_updates(&pending_monitor_updates) {
@@ -474,15 +474,15 @@ where C::Target: chain::Filter,
 	/// This wrapper avoids having to update some of our tests for now as they assume the direct
 	/// chain::Watch API wherein we mark a monitor fully-updated by just calling
 	/// channel_monitor_updated once with the highest ID.
-	#[cfg(any(test, feature = "fuzztarget"))]
+	#[cfg(any(test, fuzzing))]
 	pub fn force_channel_monitor_updated(&self, funding_txo: OutPoint, monitor_update_id: u64) {
-		self.pending_monitor_events.lock().unwrap().push(MonitorEvent::UpdateCompleted {
+		self.pending_monitor_events.lock().unwrap().push((funding_txo, vec![MonitorEvent::UpdateCompleted {
 			funding_txo,
 			monitor_update_id,
-		});
+		}]));
 	}
 
-	#[cfg(any(test, feature = "fuzztarget", feature = "_test_utils"))]
+	#[cfg(any(test, fuzzing, feature = "_test_utils"))]
 	pub fn get_and_clear_pending_events(&self) -> Vec<events::Event> {
 		use util::events::EventsProvider;
 		let events = core::cell::RefCell::new(Vec::new());
@@ -501,9 +501,7 @@ where
 	L::Target: Logger,
 	P::Target: Persist<ChannelSigner>,
 {
-	fn block_connected(&self, block: &Block, height: u32) {
-		let header = &block.header;
-		let txdata: Vec<_> = block.txdata.iter().enumerate().collect();
+	fn filtered_block_connected(&self, header: &BlockHeader, txdata: &TransactionData, height: u32) {
 		log_debug!(self.logger, "New best block {} at height {} provided via block_connected", header.block_hash(), height);
 		self.process_chain_data(header, Some(height), &txdata, |monitor, txdata| {
 			monitor.block_connected(
@@ -630,9 +628,9 @@ where C::Target: chain::Filter,
 				// We should never ever trigger this from within ChannelManager. Technically a
 				// user could use this object with some proxying in between which makes this
 				// possible, but in tests and fuzzing, this should be a panic.
-				#[cfg(any(test, feature = "fuzztarget"))]
+				#[cfg(any(test, fuzzing))]
 				panic!("ChannelManager generated a channel update for a channel that was not yet registered!");
-				#[cfg(not(any(test, feature = "fuzztarget")))]
+				#[cfg(not(any(test, fuzzing)))]
 				Err(ChannelMonitorUpdateErr::PermanentFailure)
 			},
 			Some(monitor_state) => {
@@ -668,7 +666,7 @@ where C::Target: chain::Filter,
 		}
 	}
 
-	fn release_pending_monitor_events(&self) -> Vec<MonitorEvent> {
+	fn release_pending_monitor_events(&self) -> Vec<(OutPoint, Vec<MonitorEvent>)> {
 		let mut pending_monitor_events = self.pending_monitor_events.lock().unwrap().split_off(0);
 		for monitor_state in self.monitors.read().unwrap().values() {
 			let is_pending_monitor_update = monitor_state.has_pending_chainsync_updates(&monitor_state.pending_monitor_updates.lock().unwrap());
@@ -694,7 +692,11 @@ where C::Target: chain::Filter,
 				log_error!(self.logger, "   To avoid funds-loss, we are allowing monitor updates to be released.");
 				log_error!(self.logger, "   This may cause duplicate payment events to be generated.");
 			}
-			pending_monitor_events.append(&mut monitor_state.monitor.get_and_clear_pending_monitor_events());
+			let monitor_events = monitor_state.monitor.get_and_clear_pending_monitor_events();
+			if monitor_events.len() > 0 {
+				let monitor_outpoint = monitor_state.monitor.get_funding_txo().0;
+				pending_monitor_events.push((monitor_outpoint, monitor_events));
+			}
 		}
 		pending_monitor_events
 	}
@@ -920,9 +922,10 @@ mod tests {
 			merkle_root: Default::default() };
 		nodes[0].chain_monitor.chain_monitor.best_block_updated(&latest_header, nodes[0].best_block_info().1 + LATENCY_GRACE_PERIOD_BLOCKS);
 	} else {
-		for (funding_outpoint, update_ids) in chanmon_cfgs[0].persister.chain_sync_monitor_persistences.lock().unwrap().iter() {
+		let persistences = chanmon_cfgs[0].persister.chain_sync_monitor_persistences.lock().unwrap().clone();
+		for (funding_outpoint, update_ids) in persistences {
 			for update_id in update_ids {
-				nodes[0].chain_monitor.chain_monitor.channel_monitor_updated(*funding_outpoint, *update_id).unwrap();
+				nodes[0].chain_monitor.chain_monitor.channel_monitor_updated(funding_outpoint, update_id).unwrap();
 			}
 		}
 	}
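
Taken together, the chain::Watch changes above make release_pending_monitor_events return events grouped by the funding outpoint of the ChannelMonitor that produced them, rather than a flat Vec<MonitorEvent>. Below is a minimal sketch of draining the new shape; drain_monitor_events and handle_event are illustrative names, not part of this diff, and module paths assume the rust-lightning revision shown here (in-tree, the ChannelManager is the actual consumer of these events).

use lightning::chain::Watch;
use lightning::chain::channelmonitor::MonitorEvent;
use lightning::chain::keysinterface::Sign;
use lightning::chain::transaction::OutPoint;

// Hypothetical helper: drain the per-channel monitor event queue of any
// `chain::Watch` implementation (e.g. a `ChainMonitor`) and hand each event to
// a caller-supplied callback together with the funding outpoint of the
// `ChannelMonitor` that generated it.
fn drain_monitor_events<CS: Sign, W: Watch<CS>, F: FnMut(OutPoint, MonitorEvent)>(
	monitor_source: &W, mut handle_event: F,
) {
	// After this diff, `release_pending_monitor_events` returns
	// `Vec<(OutPoint, Vec<MonitorEvent>)>`, so the channel a given event
	// belongs to is carried alongside the events instead of having to be
	// recovered from the event contents.
	for (funding_outpoint, monitor_events) in monitor_source.release_pending_monitor_events() {
		for event in monitor_events {
			handle_event(funding_outpoint, event);
		}
	}
}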