X-Git-Url: http://git.bitcoin.ninja/index.cgi?a=blobdiff_plain;f=lightning%2Fsrc%2Fchain%2Fchainmonitor.rs;h=5c4ede0b16161819a8c7b54f4b8696cb5274912a;hb=28c9b56113ff1ebb1b505a2c979c55c1626aa06b;hp=9db666bb03d0ff4e706069d3eba5576258eabb1d;hpb=cd9cd47f686c0ac6543e05fd23fe67d74407c409;p=rust-lightning

diff --git a/lightning/src/chain/chainmonitor.rs b/lightning/src/chain/chainmonitor.rs
index 9db666bb..5c4ede0b 100644
--- a/lightning/src/chain/chainmonitor.rs
+++ b/lightning/src/chain/chainmonitor.rs
@@ -23,7 +23,7 @@
 //! events. The remote server would make use of [`ChainMonitor`] for block processing and for
 //! servicing [`ChannelMonitor`] updates from the client.
 
-use bitcoin::blockdata::block::{Block, BlockHeader};
+use bitcoin::blockdata::block::BlockHeader;
 use bitcoin::hash_types::Txid;
 
 use chain;
@@ -43,6 +43,7 @@ use prelude::*;
 use sync::{RwLock, RwLockReadGuard, Mutex, MutexGuard};
 use core::ops::Deref;
 use core::sync::atomic::{AtomicBool, AtomicUsize, Ordering};
+use bitcoin::secp256k1::PublicKey;
 
 #[derive(Clone, Copy, Hash, PartialEq, Eq)]
 /// A specific update's ID stored in a `MonitorUpdateId`, separated out to make the contents
@@ -235,7 +236,7 @@ pub struct ChainMonitor<ChannelSigner: Sign, C: Deref, T: Deref, F: Deref, L: D
-	pending_monitor_events: Mutex<Vec<MonitorEvent>>,
+	pending_monitor_events: Mutex<Vec<(OutPoint, Vec<MonitorEvent>, Option<PublicKey>)>>,
 	/// The best block height seen, used as a proxy for the passage of time.
 	highest_chain_height: AtomicUsize,
 }
@@ -299,7 +300,7 @@ where C::Target: chain::Filter,
 				log_trace!(self.logger, "Finished syncing Channel Monitor for channel {}", log_funding_info!(monitor)),
 			Err(ChannelMonitorUpdateErr::PermanentFailure) => {
 				monitor_state.channel_perm_failed.store(true, Ordering::Release);
-				self.pending_monitor_events.lock().unwrap().push(MonitorEvent::UpdateFailed(*funding_outpoint));
+				self.pending_monitor_events.lock().unwrap().push((*funding_outpoint, vec![MonitorEvent::UpdateFailed(*funding_outpoint)], monitor.get_counterparty_node_id()));
 			},
 			Err(ChannelMonitorUpdateErr::TemporaryFailure) => {
 				log_debug!(self.logger, "Channel Monitor sync for channel {} in progress, holding events until completion!", log_funding_info!(monitor));
@@ -455,10 +456,10 @@ where C::Target: chain::Filter,
 				// UpdateCompleted event.
 				return Ok(());
 			}
-			self.pending_monitor_events.lock().unwrap().push(MonitorEvent::UpdateCompleted {
+			self.pending_monitor_events.lock().unwrap().push((funding_txo, vec![MonitorEvent::UpdateCompleted {
 				funding_txo,
 				monitor_update_id: monitor_data.monitor.get_latest_update_id(),
-			});
+			}], monitor_data.monitor.get_counterparty_node_id()));
 		},
 		MonitorUpdateId { contents: UpdateOrigin::ChainSync(_) } => {
 			if !monitor_data.has_pending_chainsync_updates(&pending_monitor_updates) {
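The core of this diff is the struct hunk above: `pending_monitor_events` changes from a flat `Vec<MonitorEvent>` to entries that carry the originating channel's funding outpoint and, when known, the counterparty's node id. Below is a minimal self-contained sketch of that new shape; `OutPoint`, `PublicKey`, and `MonitorEvent` here are simplified stand-ins, not rust-lightning's actual definitions.

```rust
use std::sync::Mutex;

// Stand-ins for the real types; only the queue shape is the point here.
#[derive(Clone, Copy, Debug)]
struct OutPoint { txid: [u8; 32], index: u16 }
#[derive(Clone, Copy, Debug)]
struct PublicKey([u8; 33]);
#[derive(Debug)]
enum MonitorEvent {
	UpdateCompleted { monitor_update_id: u64 },
}

struct PendingEvents {
	// Before: Mutex<Vec<MonitorEvent>>. After: each entry records which
	// channel produced the events and who the counterparty is (if known),
	// so the consumer needs no reverse lookup from event to channel.
	queue: Mutex<Vec<(OutPoint, Vec<MonitorEvent>, Option<PublicKey>)>>,
}

impl PendingEvents {
	fn push(&self, funding_txo: OutPoint, events: Vec<MonitorEvent>, counterparty: Option<PublicKey>) {
		self.queue.lock().unwrap().push((funding_txo, events, counterparty));
	}

	// Mirrors release_pending_monitor_events(): drain the queue in one go.
	fn release(&self) -> Vec<(OutPoint, Vec<MonitorEvent>, Option<PublicKey>)> {
		self.queue.lock().unwrap().split_off(0)
	}
}

fn main() {
	let pending = PendingEvents { queue: Mutex::new(Vec::new()) };
	let txo = OutPoint { txid: [0; 32], index: 0 };
	pending.push(txo, vec![MonitorEvent::UpdateCompleted { monitor_update_id: 1 }], None);
	for (funding_txo, events, counterparty) in pending.release() {
		println!("{:?}: {} event(s), counterparty known: {}",
			funding_txo, events.len(), counterparty.is_some());
	}
}
```

The node id is threaded as an `Option`, presumably because a `ChannelMonitor` serialized before the counterparty field existed may not know it, which is why every push site in the diff goes through `get_counterparty_node_id()` rather than assuming a key is present.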
@@ -474,15 +475,17 @@ where C::Target: chain::Filter,
 	/// This wrapper avoids having to update some of our tests for now as they assume the direct
 	/// chain::Watch API wherein we mark a monitor fully-updated by just calling
 	/// channel_monitor_updated once with the highest ID.
-	#[cfg(any(test, feature = "fuzztarget"))]
+	#[cfg(any(test, fuzzing))]
 	pub fn force_channel_monitor_updated(&self, funding_txo: OutPoint, monitor_update_id: u64) {
-		self.pending_monitor_events.lock().unwrap().push(MonitorEvent::UpdateCompleted {
+		let monitors = self.monitors.read().unwrap();
+		let counterparty_node_id = monitors.get(&funding_txo).and_then(|m| m.monitor.get_counterparty_node_id());
+		self.pending_monitor_events.lock().unwrap().push((funding_txo, vec![MonitorEvent::UpdateCompleted {
 			funding_txo,
 			monitor_update_id,
-		});
+		}], counterparty_node_id));
 	}
 
-	#[cfg(any(test, feature = "fuzztarget", feature = "_test_utils"))]
+	#[cfg(any(test, fuzzing, feature = "_test_utils"))]
 	pub fn get_and_clear_pending_events(&self) -> Vec<events::Event> {
 		use util::events::EventsProvider;
 		let events = core::cell::RefCell::new(Vec::new());
@@ -501,9 +504,7 @@ where
 	L::Target: Logger,
 	P::Target: Persist<ChannelSigner>,
 {
-	fn block_connected(&self, block: &Block, height: u32) {
-		let header = &block.header;
-		let txdata: Vec<_> = block.txdata.iter().enumerate().collect();
+	fn filtered_block_connected(&self, header: &BlockHeader, txdata: &TransactionData, height: u32) {
 		log_debug!(self.logger, "New best block {} at height {} provided via block_connected", header.block_hash(), height);
 		self.process_chain_data(header, Some(height), &txdata, |monitor, txdata| {
 			monitor.block_connected(
@@ -630,15 +631,15 @@ where C::Target: chain::Filter,
 				// We should never ever trigger this from within ChannelManager. Technically a
 				// user could use this object with some proxying in between which makes this
 				// possible, but in tests and fuzzing, this should be a panic.
-				#[cfg(any(test, feature = "fuzztarget"))]
+				#[cfg(any(test, fuzzing))]
 				panic!("ChannelManager generated a channel update for a channel that was not yet registered!");
-				#[cfg(not(any(test, feature = "fuzztarget")))]
+				#[cfg(not(any(test, fuzzing)))]
 				Err(ChannelMonitorUpdateErr::PermanentFailure)
 			},
 			Some(monitor_state) => {
 				let monitor = &monitor_state.monitor;
 				log_trace!(self.logger, "Updating ChannelMonitor for channel {}", log_funding_info!(monitor));
-				let update_res = monitor.update_monitor(&update, &self.broadcaster, &self.fee_estimator, &self.logger);
+				let update_res = monitor.update_monitor(&update, &self.broadcaster, &*self.fee_estimator, &self.logger);
 				if update_res.is_err() {
 					log_error!(self.logger, "Failed to update ChannelMonitor for channel {}.", log_funding_info!(monitor));
 				}
@@ -668,7 +669,7 @@ where C::Target: chain::Filter,
 		}
 	}
 
-	fn release_pending_monitor_events(&self) -> Vec<MonitorEvent> {
+	fn release_pending_monitor_events(&self) -> Vec<(OutPoint, Vec<MonitorEvent>, Option<PublicKey>)> {
 		let mut pending_monitor_events = self.pending_monitor_events.lock().unwrap().split_off(0);
 		for monitor_state in self.monitors.read().unwrap().values() {
 			let is_pending_monitor_update = monitor_state.has_pending_chainsync_updates(&monitor_state.pending_monitor_updates.lock().unwrap());
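The `block_connected` to `filtered_block_connected` hunk above removes the enumerate-and-collect boilerplate from this `chain::Listen` implementation. The sketch below shows the general pattern under simplified stand-in types (`Block`, `BlockHeader`, `Transaction`, and the trait itself are not the bitcoin/rust-lightning definitions): the full-block method can be provided in terms of the filtered one, performing exactly the `enumerate()`/`collect()` the deleted lines did inline.

```rust
// Simplified stand-ins; rust-lightning's real alias is
// pub type TransactionData<'a> = [(usize, &'a Transaction)].
struct BlockHeader;
struct Transaction;
struct Block { header: BlockHeader, txdata: Vec<Transaction> }
type TransactionData<'a> = [(usize, &'a Transaction)];

trait Listen {
	// Required: handle only the (index, transaction) pairs that survived
	// filtering, e.g. those matching a chain::Filter-style tx filter.
	fn filtered_block_connected(&self, header: &BlockHeader, txdata: &TransactionData, height: u32);

	// Provided: a full block is just the filtered case with every
	// transaction present, paired with its index within the block.
	fn block_connected(&self, block: &Block, height: u32) {
		let txdata: Vec<_> = block.txdata.iter().enumerate().collect();
		self.filtered_block_connected(&block.header, &txdata, height);
	}
}

struct CountingListener;
impl Listen for CountingListener {
	fn filtered_block_connected(&self, _header: &BlockHeader, txdata: &TransactionData, height: u32) {
		println!("height {}: {} relevant transaction(s)", height, txdata.len());
	}
}

fn main() {
	let block = Block { header: BlockHeader, txdata: vec![Transaction, Transaction] };
	CountingListener.block_connected(&block, 100);
}
```

Making the filtered variant the required method lets clients that already pre-filter transactions (for example, via compact block filters) hand over only the relevant `(index, transaction)` pairs without materializing a full `Block`.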
@@ -694,7 +695,12 @@ where C::Target: chain::Filter,
 					log_error!(self.logger, "   To avoid funds-loss, we are allowing monitor updates to be released.");
 					log_error!(self.logger, "   This may cause duplicate payment events to be generated.");
 				}
-				pending_monitor_events.append(&mut monitor_state.monitor.get_and_clear_pending_monitor_events());
+				let monitor_events = monitor_state.monitor.get_and_clear_pending_monitor_events();
+				if monitor_events.len() > 0 {
+					let monitor_outpoint = monitor_state.monitor.get_funding_txo().0;
+					let counterparty_node_id = monitor_state.monitor.get_counterparty_node_id();
+					pending_monitor_events.push((monitor_outpoint, monitor_events, counterparty_node_id));
+				}
 			}
 		}
 		pending_monitor_events
@@ -729,7 +735,7 @@ impl<ChannelSigner: Sign, C: Deref, T: Deref, F: Deref, L: Deref, P: Deref> even
 mod tests {
 	use bitcoin::BlockHeader;
 	use ::{check_added_monitors, check_closed_broadcast, check_closed_event};
-	use ::{expect_payment_sent, expect_payment_sent_without_paths, expect_payment_path_successful, get_event_msg};
+	use ::{expect_payment_sent, expect_payment_claimed, expect_payment_sent_without_paths, expect_payment_path_successful, get_event_msg};
 	use ::{get_htlc_update_msgs, get_local_commitment_txn, get_revoke_commit_msgs, get_route_and_payment_hash, unwrap_send_err};
 	use chain::{ChannelMonitorUpdateErr, Confirm, Watch};
 	use chain::channelmonitor::LATENCY_GRACE_PERIOD_BLOCKS;
@@ -796,16 +802,18 @@ mod tests {
 		create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
 
 		// Route two payments to be claimed at the same time.
-		let payment_preimage_1 = route_payment(&nodes[0], &[&nodes[1]], 1_000_000).0;
-		let payment_preimage_2 = route_payment(&nodes[0], &[&nodes[1]], 1_000_000).0;
+		let (payment_preimage_1, payment_hash_1, _) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000);
+		let (payment_preimage_2, payment_hash_2, _) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000);
 		chanmon_cfgs[1].persister.offchain_monitor_updates.lock().unwrap().clear();
 		chanmon_cfgs[1].persister.set_update_ret(Err(ChannelMonitorUpdateErr::TemporaryFailure));
 
 		nodes[1].node.claim_funds(payment_preimage_1);
 		check_added_monitors!(nodes[1], 1);
+		expect_payment_claimed!(nodes[1], payment_hash_1, 1_000_000);
 		nodes[1].node.claim_funds(payment_preimage_2);
 		check_added_monitors!(nodes[1], 1);
+		expect_payment_claimed!(nodes[1], payment_hash_2, 1_000_000);
 
 		chanmon_cfgs[1].persister.set_update_ret(Ok(()));
 
@@ -875,8 +883,9 @@ mod tests {
 		let (route, second_payment_hash, _, second_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 100_000);
 
 		// First route a payment that we will claim on chain and give the recipient the preimage.
-		let payment_preimage = route_payment(&nodes[0], &[&nodes[1]], 1_000_000).0;
+		let (payment_preimage, payment_hash, _) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000);
 		nodes[1].node.claim_funds(payment_preimage);
+		expect_payment_claimed!(nodes[1], payment_hash, 1_000_000);
 		nodes[1].node.get_and_clear_pending_msg_events();
 		check_added_monitors!(nodes[1], 1);
 		let remote_txn = get_local_commitment_txn!(nodes[1], channel.2);
@@ -920,9 +929,10 @@ mod tests {
 				merkle_root: Default::default() };
 			nodes[0].chain_monitor.chain_monitor.best_block_updated(&latest_header, nodes[0].best_block_info().1 + LATENCY_GRACE_PERIOD_BLOCKS);
 		} else {
-			for (funding_outpoint, update_ids) in chanmon_cfgs[0].persister.chain_sync_monitor_persistences.lock().unwrap().iter() {
+			let persistences = chanmon_cfgs[0].persister.chain_sync_monitor_persistences.lock().unwrap().clone();
+			for (funding_outpoint, update_ids) in persistences {
 				for update_id in update_ids {
-					nodes[0].chain_monitor.chain_monitor.channel_monitor_updated(*funding_outpoint, *update_id).unwrap();
+					nodes[0].chain_monitor.chain_monitor.channel_monitor_updated(funding_outpoint, update_id).unwrap();
 				}
 			}
 		}
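Finally, the last hunk snapshots `chain_sync_monitor_persistences` with `.clone()` instead of iterating while the lock is held. A sketch of that snapshot-then-iterate pattern follows; the types and the `channel_monitor_updated` callback are plain-`std` stand-ins, not the `TestPersister` or `ChainMonitor` API.

```rust
use std::collections::HashMap;
use std::sync::Mutex;

// Stand-in for the test persister's update-id map; keys and ids are
// simplified (the real map is keyed by funding OutPoint).
fn drain_persistences(
	persistences: &Mutex<HashMap<u32, Vec<u64>>>,
	mut channel_monitor_updated: impl FnMut(u32, u64),
) {
	// Clone under the lock and drop the guard immediately, so the callback
	// is free to take its own locks (or touch this same state) without
	// contending with a guard we would otherwise still be holding.
	let snapshot = persistences.lock().unwrap().clone();
	for (funding_outpoint, update_ids) in snapshot {
		for update_id in update_ids {
			// Iterating the owned snapshot yields values, not references,
			// which is why the *funding_outpoint / *update_id derefs from
			// the old loop disappear in the diff.
			channel_monitor_updated(funding_outpoint, update_id);
		}
	}
}

fn main() {
	let persistences = Mutex::new(HashMap::from([(1u32, vec![7u64, 8])]));
	drain_persistences(&persistences, |outpoint, update_id| {
		println!("channel_monitor_updated({}, {})", outpoint, update_id);
	});
}
```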