From: Matt Corallo <649246+TheBlueMatt@users.noreply.github.com>
Date: Wed, 17 Mar 2021 20:35:03 +0000 (+0000)
Subject: Merge pull request #828 from bmancini55/reply_channel_range
X-Git-Tag: v0.0.14~48
X-Git-Url: http://git.bitcoin.ninja/index.cgi?a=commitdiff_plain;h=8799a2a0440603bb10b7cf121f60e41f0ef1a6fa;hp=-c;p=rust-lightning

Merge pull request #828 from bmancini55/reply_channel_range

Handle query_channel_range gossip queries
---

8799a2a0440603bb10b7cf121f60e41f0ef1a6fa
diff --combined lightning/src/ln/channel.rs
index 9e4bb081,7a992f64..13b2dbb1
--- a/lightning/src/ln/channel.rs
+++ b/lightning/src/ln/channel.rs
@@@ -37,8 -37,10 +37,9 @@@ use util::ser::{Readable, ReadableArgs
  use util::logger::Logger;
  use util::errors::APIError;
  use util::config::{UserConfig,ChannelConfig};
+ use util::scid_utils::scid_from_parts;

  use std;
- use std::default::Default;
  use std::{cmp,mem,fmt};
  use std::ops::Deref;
  #[cfg(any(test, feature = "fuzztarget"))]
@@@ -367,6 -369,9 +368,6 @@@ pub(super) struct Channel<Signer: Sign
  	funding_tx_confirmed_in: Option<BlockHash>,
  	short_channel_id: Option<u64>,
- 	/// Used to deduplicate block_connected callbacks, also used to verify consistency during
- 	/// ChannelManager deserialization (hence pub(super))
- 	pub(super) last_block_connected: BlockHash,
  	funding_tx_confirmations: u64,

  	counterparty_dust_limit_satoshis: u64,
@@@ -565,6 -570,7 +566,6 @@@ impl<Signer: Sign> Channel<Signer
  			funding_tx_confirmed_in: None,
  			short_channel_id: None,
- 			last_block_connected: Default::default(),
  			funding_tx_confirmations: 0,

  			feerate_per_kw: feerate,
@@@ -800,6 -806,7 +801,6 @@@
  			funding_tx_confirmed_in: None,
  			short_channel_id: None,
- 			last_block_connected: Default::default(),
  			funding_tx_confirmations: 0,

  			feerate_per_kw: msg.feerate_per_kw,
@@@ -1516,7 -1523,7 +1517,7 @@@
  		&self.get_counterparty_pubkeys().funding_pubkey
  	}

- 	pub fn funding_created<L: Deref>(&mut self, msg: &msgs::FundingCreated, logger: &L) -> Result<(msgs::FundingSigned, ChannelMonitor<Signer>), ChannelError> where L::Target: Logger {
+ 	pub fn funding_created<L: Deref>(&mut self, msg: &msgs::FundingCreated, last_block_hash: BlockHash, logger: &L) -> Result<(msgs::FundingSigned, ChannelMonitor<Signer>), ChannelError> where L::Target: Logger {
  		if self.is_outbound() {
  			return Err(ChannelError::Close("Received funding_created for an outbound channel?".to_owned()));
  		}
@@@ -1570,7 -1577,7 +1571,7 @@@
  		                                  &self.channel_transaction_parameters, funding_redeemscript.clone(), self.channel_value_satoshis,
  		                                  obscure_factor,
- 		                                  holder_commitment_tx);
+ 		                                  holder_commitment_tx, last_block_hash);

  		channel_monitor.provide_latest_counterparty_commitment_tx(counterparty_initial_commitment_txid, Vec::new(), self.cur_counterparty_commitment_transaction_number, self.counterparty_cur_commitment_point.unwrap(), logger);

@@@ -1587,7 -1594,7 +1588,7 @@@
  	/// Handles a funding_signed message from the remote end.
  	/// If this call is successful, broadcast the funding transaction (and not before!)
- 	pub fn funding_signed<L: Deref>(&mut self, msg: &msgs::FundingSigned, logger: &L) -> Result<ChannelMonitor<Signer>, ChannelError> where L::Target: Logger {
+ 	pub fn funding_signed<L: Deref>(&mut self, msg: &msgs::FundingSigned, last_block_hash: BlockHash, logger: &L) -> Result<ChannelMonitor<Signer>, ChannelError> where L::Target: Logger {
  		if !self.is_outbound() {
  			return Err(ChannelError::Close("Received funding_signed for an inbound channel?".to_owned()));
  		}
@@@ -1640,7 -1647,7 +1641,7 @@@
  		                                  &self.channel_transaction_parameters, funding_redeemscript.clone(), self.channel_value_satoshis,
  		                                  obscure_factor,
- 		                                  holder_commitment_tx);
+ 		                                  holder_commitment_tx, last_block_hash);

  		channel_monitor.provide_latest_counterparty_commitment_tx(counterparty_initial_bitcoin_tx.txid, Vec::new(), self.cur_counterparty_commitment_transaction_number, self.counterparty_cur_commitment_point.unwrap(), logger);

@@@ -3511,12 -3518,12 +3512,12 @@@
  				_ => true
  			}
  		});
- 		let non_shutdown_state = self.channel_state & (!MULTI_STATE_FLAGS);
- 		if header.block_hash() != self.last_block_connected {
- 			if self.funding_tx_confirmations > 0 {
- 				self.funding_tx_confirmations += 1;
- 			}
+
+ 		if self.funding_tx_confirmations > 0 {
+ 			self.funding_tx_confirmations += 1;
  		}
+
+ 		let non_shutdown_state = self.channel_state & (!MULTI_STATE_FLAGS);
  		if non_shutdown_state & !(ChannelState::TheirFundingLocked as u32) == ChannelState::FundingSent as u32 {
  			for &(index_in_block, tx) in txdata.iter() {
  				let funding_txo = self.get_funding_txo().unwrap();
@@@ -3550,56 -3557,55 +3551,53 @@@
  						}
  					}
  				}
- 				if height > 0xff_ff_ff || (index_in_block) > 0xff_ff_ff {
- 					panic!("Block was bogus - either height 16 million or had > 16 million transactions");
- 				}
- 				assert!(txo_idx <= 0xffff); // txo_idx is a (u16 as usize), so this is just listed here for completeness
  				self.funding_tx_confirmations = 1;
- 				self.short_channel_id = Some(((height as u64) << (5*8)) |
- 				                             ((index_in_block as u64) << (2*8)) |
- 				                             ((txo_idx as u64) << (0*8)));
+ 				self.short_channel_id = match scid_from_parts(height as u64, index_in_block as u64, txo_idx as u64) {
+ 					Ok(scid) => Some(scid),
+ 					Err(_) => panic!("Block was bogus - either height was > 16 million, had > 16 million transactions, or had > 65k outputs"),
+ 				}
  			}
  		}
  	}
- 	if header.block_hash() != self.last_block_connected {
- 		self.last_block_connected = header.block_hash();
- 		self.update_time_counter = cmp::max(self.update_time_counter, header.time);
- 		if self.funding_tx_confirmations > 0 {
- 			if self.funding_tx_confirmations == self.minimum_depth as u64 {
- 				let need_commitment_update = if non_shutdown_state == ChannelState::FundingSent as u32 {
- 					self.channel_state |= ChannelState::OurFundingLocked as u32;
- 					true
- 				} else if non_shutdown_state == (ChannelState::FundingSent as u32 | ChannelState::TheirFundingLocked as u32) {
- 					self.channel_state = ChannelState::ChannelFunded as u32 | (self.channel_state & MULTI_STATE_FLAGS);
- 					self.update_time_counter += 1;
- 					true
- 				} else if non_shutdown_state == (ChannelState::FundingSent as u32 | ChannelState::OurFundingLocked as u32) {
- 					// We got a reorg but not enough to trigger a force close, just update
- 					// funding_tx_confirmed_in and return.
- 					false
- 				} else if self.channel_state < ChannelState::ChannelFunded as u32 {
- 					panic!("Started confirming a channel in a state pre-FundingSent?: {}", self.channel_state);
+
+ 	self.update_time_counter = cmp::max(self.update_time_counter, header.time);
+ 	if self.funding_tx_confirmations > 0 {
+ 		if self.funding_tx_confirmations == self.minimum_depth as u64 {
+ 			let need_commitment_update = if non_shutdown_state == ChannelState::FundingSent as u32 {
+ 				self.channel_state |= ChannelState::OurFundingLocked as u32;
+ 				true
+ 			} else if non_shutdown_state == (ChannelState::FundingSent as u32 | ChannelState::TheirFundingLocked as u32) {
+ 				self.channel_state = ChannelState::ChannelFunded as u32 | (self.channel_state & MULTI_STATE_FLAGS);
+ 				self.update_time_counter += 1;
+ 				true
+ 			} else if non_shutdown_state == (ChannelState::FundingSent as u32 | ChannelState::OurFundingLocked as u32) {
+ 				// We got a reorg but not enough to trigger a force close, just update
+ 				// funding_tx_confirmed_in and return.
+ 				false
+ 			} else if self.channel_state < ChannelState::ChannelFunded as u32 {
+ 				panic!("Started confirming a channel in a state pre-FundingSent?: {}", self.channel_state);
+ 			} else {
+ 				// We got a reorg but not enough to trigger a force close, just update
+ 				// funding_tx_confirmed_in and return.
+ 				false
+ 			};
+ 			self.funding_tx_confirmed_in = Some(header.block_hash());
+
+ 			//TODO: Note that this must be a duplicate of the previous commitment point they sent us,
+ 			//as otherwise we will have a commitment transaction that they can't revoke (well, kinda,
+ 			//they can by sending two revoke_and_acks back-to-back, but not really). This appears to be
+ 			//a protocol oversight, but I assume I'm just missing something.
+ 			if need_commitment_update {
+ 				if self.channel_state & (ChannelState::MonitorUpdateFailed as u32) == 0 {
+ 					let next_per_commitment_point = self.holder_signer.get_per_commitment_point(self.cur_holder_commitment_transaction_number, &self.secp_ctx);
+ 					return Ok((Some(msgs::FundingLocked {
+ 						channel_id: self.channel_id,
+ 						next_per_commitment_point,
+ 					}), timed_out_htlcs));
+ 				} else {
- 				// We got a reorg but not enough to trigger a force close, just update
- 				// funding_tx_confirmed_in and return.
- 				false
- 			};
- 			self.funding_tx_confirmed_in = Some(self.last_block_connected);
-
- 			//TODO: Note that this must be a duplicate of the previous commitment point they sent us,
- 			//as otherwise we will have a commitment transaction that they can't revoke (well, kinda,
- 			//they can by sending two revoke_and_acks back-to-back, but not really). This appears to be
- 			//a protocol oversight, but I assume I'm just missing something.
- 			if need_commitment_update {
- 				if self.channel_state & (ChannelState::MonitorUpdateFailed as u32) == 0 {
- 					let next_per_commitment_point = self.holder_signer.get_per_commitment_point(self.cur_holder_commitment_transaction_number, &self.secp_ctx);
- 					return Ok((Some(msgs::FundingLocked {
- 						channel_id: self.channel_id,
- 						next_per_commitment_point,
- 					}), timed_out_htlcs));
- 				} else {
- 					self.monitor_pending_funding_locked = true;
- 					return Ok((None, timed_out_htlcs));
- 				}
+ 					self.monitor_pending_funding_locked = true;
+ 					return Ok((None, timed_out_htlcs));
  				}
  			}
  		}
@@@ -3617,7 -3623,8 +3615,7 @@@
  				return true;
  			}
  		}
- 		self.last_block_connected = header.block_hash();
- 		if Some(self.last_block_connected) == self.funding_tx_confirmed_in {
+ 		if Some(header.block_hash()) == self.funding_tx_confirmed_in {
  			self.funding_tx_confirmations = self.minimum_depth as u64 - 1;
  		}
  		false
@@@ -4170,11 -4177,7 +4168,11 @@@
  	/// those explicitly stated to be allowed after shutdown completes, eg some simple getters).
  	/// Also returns the list of payment_hashes for channels which we can safely fail backwards
  	/// immediately (others we will have to allow to time out).
- 	pub fn force_shutdown(&mut self, should_broadcast: bool) -> (Option<OutPoint>, ChannelMonitorUpdate, Vec<(HTLCSource, PaymentHash)>) {
+ 	pub fn force_shutdown(&mut self, should_broadcast: bool) -> (Option<(OutPoint, ChannelMonitorUpdate)>, Vec<(HTLCSource, PaymentHash)>) {
+ 		// Note that we MUST only generate a monitor update that indicates force-closure - we're
+ 		// called during initialization prior to the chain_monitor in the encompassing ChannelManager
+ 		// being fully configured in some cases. Thus, it's likely any monitor events we generate will
+ 		// be delayed in being processed! See the docs for `ChannelManagerReadArgs` for more.
  		assert!(self.channel_state != ChannelState::ShutdownComplete as u32);

  		// We go ahead and "free" any holding cell HTLCs or HTLCs we haven't yet committed to and
@@@ -4188,7 -4191,7 +4186,7 @@@
  				_ => {}
  			}
  		}
- 		let funding_txo = if let Some(funding_txo) = self.get_funding_txo() {
+ 		let monitor_update = if let Some(funding_txo) = self.get_funding_txo() {
  			// If we haven't yet exchanged funding signatures (ie channel_state < FundingSent),
  			// returning a channel monitor update here would imply a channel monitor update before
  			// we even registered the channel monitor to begin with, which is invalid.
@@@ -4197,17 -4200,17 +4195,17 @@@
  			// monitor update to the user, even if we return one).
  			// See test_duplicate_chan_id and test_pre_lockin_no_chan_closed_update for more.
  			if self.channel_state & (ChannelState::FundingSent as u32 | ChannelState::ChannelFunded as u32 | ChannelState::ShutdownComplete as u32) != 0 {
- 				Some(funding_txo.clone())
+ 				self.latest_monitor_update_id += 1;
+ 				Some((funding_txo, ChannelMonitorUpdate {
+ 					update_id: self.latest_monitor_update_id,
+ 					updates: vec![ChannelMonitorUpdateStep::ChannelForceClosed { should_broadcast }],
+ 				}))
  			} else { None }
  		} else { None };

  		self.channel_state = ChannelState::ShutdownComplete as u32;
  		self.update_time_counter += 1;
- 		self.latest_monitor_update_id += 1;
- 		(funding_txo, ChannelMonitorUpdate {
- 			update_id: self.latest_monitor_update_id,
- 			updates: vec![ChannelMonitorUpdateStep::ChannelForceClosed { should_broadcast }],
- 		}, dropped_outbound_htlcs)
+ 		(monitor_update, dropped_outbound_htlcs)
  	}
  }

@@@ -4426,6 -4429,8 +4424,6 @@@ impl<Signer: Sign> Writeable for Channel<Signer
  		self.funding_tx_confirmed_in.write(writer)?;
  		self.short_channel_id.write(writer)?;
-
- 		self.last_block_connected.write(writer)?;
  		self.funding_tx_confirmations.write(writer)?;

  		self.counterparty_dust_limit_satoshis.write(writer)?;
@@@ -4586,6 -4591,8 +4584,6 @@@ impl<'a, Signer: Sign, K: Deref> ReadableArgs<&'a K> for Channel<Signer
  		let funding_tx_confirmed_in = Readable::read(reader)?;
  		let short_channel_id = Readable::read(reader)?;
-
- 		let last_block_connected = Readable::read(reader)?;
  		let funding_tx_confirmations = Readable::read(reader)?;

  		let counterparty_dust_limit_satoshis = Readable::read(reader)?;
@@@ -4656,6 -4663,7 +4654,6 @@@
  			funding_tx_confirmed_in,
  			short_channel_id,
- 			last_block_connected,
  			funding_tx_confirmations,

  			counterparty_dust_limit_satoshis,
@@@ -4910,8 -4918,6 +4908,8 @@@ mod tests
  		let secp_ctx = Secp256k1::new();
  		let seed = [42; 32];
  		let network = Network::Testnet;
+ 		let chain_hash = genesis_block(network).header.block_hash();
+ 		let last_block_hash = chain_hash;
  		let keys_provider = test_utils::TestKeysInterface::new(&seed, network);

  		// Go through the flow of opening a channel between two nodes.
@@@ -4922,7 -4928,7 +4920,7 @@@
  		let mut node_a_chan = Channel::<EnforcingSigner>::new_outbound(&&feeest, &&keys_provider, node_b_node_id, 10000000, 100000, 42, &config).unwrap();

  		// Create Node B's channel by receiving Node A's open_channel message
- 		let open_channel_msg = node_a_chan.get_open_channel(genesis_block(network).header.block_hash());
+ 		let open_channel_msg = node_a_chan.get_open_channel(chain_hash);
  		let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
  		let mut node_b_chan = Channel::<EnforcingSigner>::new_from_req(&&feeest, &&keys_provider, node_b_node_id, InitFeatures::known(), &open_channel_msg, 7, &config).unwrap();

@@@ -4937,10 -4943,10 +4935,10 @@@
  		}]};
  		let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 };
  		let funding_created_msg = node_a_chan.get_outbound_funding_created(funding_outpoint, &&logger).unwrap();
- 		let (funding_signed_msg, _) = node_b_chan.funding_created(&funding_created_msg, &&logger).unwrap();
+ 		let (funding_signed_msg, _) = node_b_chan.funding_created(&funding_created_msg, last_block_hash, &&logger).unwrap();

  		// Node B --> Node A: funding signed
- 		let _ = node_a_chan.funding_signed(&funding_signed_msg, &&logger);
+ 		let _ = node_a_chan.funding_signed(&funding_signed_msg, last_block_hash, &&logger);

  		// Now disconnect the two nodes and check that the commitment point in
  		// Node B's channel_reestablish message is sane.
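[Note on the channel.rs changes above: the open-coded short-channel-id packing (height << 5*8 | tx index << 2*8 | output index) is replaced by the new `util::scid_utils::scid_from_parts` helper, which also range-checks all three fields instead of panicking on only two of them. A minimal sketch of that packing logic follows — the bounds are taken from the panic message in the diff, but the error type here is a placeholder and the real `scid_utils` implementation may differ in detail:]

/// Sketch of short-channel-id packing: 24 bits of block height,
/// 24 bits of transaction index, 16 bits of output index, in one u64.
fn scid_from_parts(block: u64, tx_index: u64, vout_index: u64) -> Result<u64, ()> {
    if block > 0x00ff_ffff || tx_index > 0x00ff_ffff || vout_index > 0xffff {
        // Mirrors the panic above: height > 16 million, > 16 million
        // transactions, or > 65k outputs cannot be represented.
        return Err(());
    }
    Ok((block << 40) | (tx_index << 16) | vout_index)
}

#[test]
fn scid_round_trips() {
    let scid = scid_from_parts(500_000, 1_026, 1).unwrap();
    assert_eq!(scid >> 40, 500_000);               // block height
    assert_eq!((scid >> 16) & 0x00ff_ffff, 1_026); // transaction index
    assert_eq!(scid & 0xffff, 1);                  // output index
}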
diff --combined lightning/src/ln/channelmanager.rs
index faae2c51,3109b852..970ad2d1
--- a/lightning/src/ln/channelmanager.rs
+++ b/lightning/src/ln/channelmanager.rs
@@@ -206,7 -206,7 +206,7 @@@ pub struct PaymentPreimage(pub [u8;32])
  #[derive(Hash, Copy, Clone, PartialEq, Eq, Debug)]
  pub struct PaymentSecret(pub [u8;32]);

- type ShutdownResult = (Option<OutPoint>, ChannelMonitorUpdate, Vec<(HTLCSource, PaymentHash)>);
+ type ShutdownResult = (Option<(OutPoint, ChannelMonitorUpdate)>, Vec<(HTLCSource, PaymentHash)>);

  /// Error type returned across the channel_state mutex boundary. When an Err is generated for a
  /// Channel, we generally end up with a ChannelError::Close for which we have to close the channel
@@@ -333,15 -333,6 +333,15 @@@ pub(super) struct ChannelHolder<Signer: Sign
  	pub(super) pending_msg_events: Vec<MessageSendEvent>,
  }

+ /// Events which we process internally but cannot be processed immediately at the generation site
+ /// for some reason. They are handled in timer_chan_freshness_every_min, so may be processed with
+ /// quite some time lag.
+ enum BackgroundEvent {
+ 	/// Handle a ChannelMonitorUpdate that closes a channel, broadcasting its current latest holder
+ 	/// commitment transaction.
+ 	ClosingMonitorUpdate((OutPoint, ChannelMonitorUpdate)),
+ }
+
  /// State we hold per-peer. In the future we should put channels in here, but for now we only hold
  /// the latest Init features we heard from the peer.
  struct PeerState {
@@@ -389,7 -380,7 +389,7 @@@ pub type SimpleRefChannelManager<'a, 'b
  /// ChannelMonitors passed by reference to read(), those channels will be force-closed based on the
  /// ChannelMonitor state and no funds will be lost (mod on-chain transaction fees).
  ///
- /// Note that the deserializer is only implemented for (Option<BlockHash>, ChannelManager), which
+ /// Note that the deserializer is only implemented for (BlockHash, ChannelManager), which
  /// tells you the last block hash which was block_connect()ed. You MUST rescan any blocks along
  /// the "reorg path" (ie call block_disconnected() until you get to a common block and then call
  /// block_connected() to step towards your best block) upon deserialization before using the
@@@ -423,7 -414,7 +423,7 @@@ pub struct ChannelManager<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref
  	latest_block_height: AtomicUsize,
- 	last_block_hash: Mutex<BlockHash>,
+ 	last_block_hash: RwLock<BlockHash>,
  	secp_ctx: Secp256k1<secp256k1::All>,

  	#[cfg(any(test, feature = "_test_utils"))]
@@@ -445,7 -436,6 +445,7 @@@
  	per_peer_state: RwLock<HashMap<PublicKey, Mutex<PeerState>>>,

  	pending_events: Mutex<Vec<events::Event>>,
+ 	pending_background_events: Mutex<Vec<BackgroundEvent>>,
  	/// Used when we have to take a BIG lock to make sure everything is self-consistent.
  	/// Essentially just when we're serializing ourselves out.
  	/// Taken first everywhere where we are making changes before any other locks.
@@@ -461,30 -451,11 +461,30 @@@
  	logger: L,
  }

+ /// Chain-related parameters used to construct a new `ChannelManager`.
+ ///
+ /// Typically, the block-specific parameters are derived from the best block hash for the network,
+ /// as a newly constructed `ChannelManager` will not have created any channels yet. These parameters
+ /// are not needed when deserializing a previously constructed `ChannelManager`.
+ pub struct ChainParameters {
+ 	/// The network for determining the `chain_hash` in Lightning messages.
+ 	pub network: Network,
+
+ 	/// The hash of the latest block successfully connected.
+ 	pub latest_hash: BlockHash,
+
+ 	/// The height of the latest block successfully connected.
+ 	///
+ 	/// Used to track on-chain channel funding outputs and send payments with reliable timelocks.
+ 	pub latest_height: usize,
+ }
+
  /// Whenever we release the `ChannelManager`'s `total_consistency_lock`, from read mode, it is
- /// desirable to notify any listeners on `wait_timeout`/`wait` that new updates are available for
- /// persistence. Therefore, this struct is responsible for locking the total consistency lock and,
- /// upon going out of scope, sending the aforementioned notification (since the lock being released
- /// indicates that the updates are ready for persistence).
+ /// desirable to notify any listeners on `await_persistable_update_timeout`/
+ /// `await_persistable_update` that new updates are available for persistence. Therefore, this
+ /// struct is responsible for locking the total consistency lock and, upon going out of scope,
+ /// sending the aforementioned notification (since the lock being released indicates that the
+ /// updates are ready for persistence).
  struct PersistenceNotifierGuard<'a> {
  	persistence_notifier: &'a PersistenceNotifier,
  	// We hold onto this result so the lock doesn't get released immediately.
@@@ -508,12 -479,11 +508,12 @@@ impl<'a> Drop for PersistenceNotifierGu
  	}
  }

- /// The amount of time we require our counterparty wait to claim their money (ie time between when
- /// we, or our watchtower, must check for them having broadcast a theft transaction).
+ /// The amount of time in blocks we require our counterparty wait to claim their money (ie time
+ /// between when we, or our watchtower, must check for them having broadcast a theft transaction).
  pub(crate) const BREAKDOWN_TIMEOUT: u16 = 6 * 24;
- /// The amount of time we're willing to wait to claim money back to us
- pub(crate) const MAX_LOCAL_BREAKDOWN_TIMEOUT: u16 = 6 * 24 * 7;
+ /// The amount of time in blocks we're willing to wait to claim money back to us. This matches
+ /// the maximum required amount in lnd as of March 2021.
+ pub(crate) const MAX_LOCAL_BREAKDOWN_TIMEOUT: u16 = 2 * 6 * 24 * 7;

  /// The minimum number of blocks between an inbound HTLC's CLTV and the corresponding outbound
  /// HTLC's CLTV. This should always be a few blocks greater than channelmonitor::CLTV_CLAIM_BUFFER,
@@@ -790,22 -760,24 +790,22 @@@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelManager<Signer, M, T, K, F, L
  	/// panics if channel_value_satoshis is >= `MAX_FUNDING_SATOSHIS`!
  	///
- 	/// Users must provide the current blockchain height from which to track onchain channel
- 	/// funding outpoints and send payments with reliable timelocks.
- 	///
  	/// Users need to notify the new ChannelManager when a new block is connected or
- 	/// disconnected using its `block_connected` and `block_disconnected` methods.
- 	pub fn new(network: Network, fee_est: F, chain_monitor: M, tx_broadcaster: T, logger: L, keys_manager: K, config: UserConfig, current_blockchain_height: usize) -> Self {
+ 	/// disconnected using its `block_connected` and `block_disconnected` methods, starting
+ 	/// from after `params.latest_hash`.
+ 	pub fn new(fee_est: F, chain_monitor: M, tx_broadcaster: T, logger: L, keys_manager: K, config: UserConfig, params: ChainParameters) -> Self {
  		let mut secp_ctx = Secp256k1::new();
  		secp_ctx.seeded_randomize(&keys_manager.get_secure_random_bytes());

  		ChannelManager {
  			default_configuration: config.clone(),
- 			genesis_hash: genesis_block(network).header.block_hash(),
+ 			genesis_hash: genesis_block(params.network).header.block_hash(),
  			fee_estimator: fee_est,
  			chain_monitor,
  			tx_broadcaster,

- 			latest_block_height: AtomicUsize::new(current_blockchain_height),
- 			last_block_hash: Mutex::new(Default::default()),
+ 			latest_block_height: AtomicUsize::new(params.latest_height),
+ 			last_block_hash: RwLock::new(params.latest_hash),
  			secp_ctx,

  			channel_state: Mutex::new(ChannelHolder{
@@@ -822,7 -794,6 +822,7 @@@
  			per_peer_state: RwLock::new(HashMap::new()),

  			pending_events: Mutex::new(Vec::new()),
+ 			pending_background_events: Mutex::new(Vec::new()),
  			total_consistency_lock: RwLock::new(()),
  			persistence_notifier: PersistenceNotifier::new(),

@@@ -971,12 -942,12 +971,12 @@@
  	#[inline]
  	fn finish_force_close_channel(&self, shutdown_res: ShutdownResult) {
- 		let (funding_txo_option, monitor_update, mut failed_htlcs) = shutdown_res;
+ 		let (monitor_update_option, mut failed_htlcs) = shutdown_res;
  		log_trace!(self.logger, "Finishing force-closure of channel with {} HTLCs to fail", failed_htlcs.len());
  		for htlc_source in failed_htlcs.drain(..) {
  			self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_source.0, &htlc_source.1, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() });
  		}
- 		if let Some(funding_txo) = funding_txo_option {
+ 		if let Some((funding_txo, monitor_update)) = monitor_update_option {
  			// There isn't anything we can do if we get an update failure - we're already
  			// force-closing. The monitor update on the required in-memory copy should broadcast
  			// the latest local state, which is the best we can do anyway. Thus, it is safe to
@@@ -1883,42 -1854,13 +1883,42 @@@
  		events.append(&mut new_events);
  	}

+ 	/// Free the background events, generally called from timer_chan_freshness_every_min.
+ 	///
+ 	/// Exposed for testing to allow us to process events quickly without generating accidental
+ 	/// BroadcastChannelUpdate events in timer_chan_freshness_every_min.
+ 	///
+ 	/// Expects the caller to have a total_consistency_lock read lock.
+ 	fn process_background_events(&self) {
+ 		let mut background_events = Vec::new();
+ 		mem::swap(&mut *self.pending_background_events.lock().unwrap(), &mut background_events);
+ 		for event in background_events.drain(..) {
+ 			match event {
+ 				BackgroundEvent::ClosingMonitorUpdate((funding_txo, update)) => {
+ 					// The channel has already been closed, so no use bothering to care about the
+ 					// monitor updating completing.
+ 					let _ = self.chain_monitor.update_channel(funding_txo, update);
+ 				},
+ 			}
+ 		}
+ 	}
+
+ 	#[cfg(any(test, feature = "_test_utils"))]
+ 	pub(crate) fn test_process_background_events(&self) {
+ 		self.process_background_events();
+ 	}
+
  	/// If a peer is disconnected we mark any channels with that peer as 'disabled'.
  	/// After some time, if channels are still disabled we need to broadcast a ChannelUpdate
  	/// to inform the network about the uselessness of these channels.
  	///
  	/// This method handles all the details, and must be called roughly once per minute.
+ 	///
+ 	/// Note that in some rare cases this may generate a `chain::Watch::update_channel` call.
  	pub fn timer_chan_freshness_every_min(&self) {
  		let _persistence_guard = PersistenceNotifierGuard::new(&self.total_consistency_lock, &self.persistence_notifier);
+ 		self.process_background_events();
+
  		let mut channel_state_lock = self.channel_state.lock().unwrap();
  		let channel_state = &mut *channel_state_lock;
  		for (_, chan) in channel_state.by_id.iter_mut() {
@@@ -2011,10 -1953,6 +2011,10 @@@
  		//identify whether we sent it or not based on the (I presume) very different runtime
  		//between the branches here. We should make this async and move it into the forward HTLCs
  		//timer handling.
+
+ 		// Note that we MUST NOT end up calling methods on self.chain_monitor here - we're called
+ 		// from block_connected which may run during initialization prior to the chain_monitor
+ 		// being fully configured. See the docs for `ChannelManagerReadArgs` for more.
  		match source {
  			HTLCSource::OutboundRoute { ref path, .. } => {
  				log_trace!(self.logger, "Failing outbound payment HTLC with payment_hash {}", log_bytes!(payment_hash.0));
@@@ -2456,7 -2394,6 +2456,7 @@@
  	fn internal_funding_created(&self, counterparty_node_id: &PublicKey, msg: &msgs::FundingCreated) -> Result<(), MsgHandleErrInternal> {
  		let ((funding_msg, monitor), mut chan) = {
+ 			let last_block_hash = *self.last_block_hash.read().unwrap();
  			let mut channel_lock = self.channel_state.lock().unwrap();
  			let channel_state = &mut *channel_lock;
  			match channel_state.by_id.entry(msg.temporary_channel_id.clone()) {
@@@ -2464,7 -2401,7 +2464,7 @@@
  					if chan.get().get_counterparty_node_id() != *counterparty_node_id {
  						return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.temporary_channel_id));
  					}
- 					(try_chan_entry!(self, chan.get_mut().funding_created(msg, &self.logger), channel_state, chan), chan.remove())
+ 					(try_chan_entry!(self, chan.get_mut().funding_created(msg, last_block_hash, &self.logger), channel_state, chan), chan.remove())
  				},
  				hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.temporary_channel_id))
  			}
@@@ -2481,7 -2418,7 +2481,7 @@@
  				// We do not do a force-close here as that would generate a monitor update for
  				// a monitor that we didn't manage to store (and that we don't care about - we
  				// don't respond with the funding_signed so the channel can never go on chain).
- 				let (_funding_txo_option, _monitor_update, failed_htlcs) = chan.force_shutdown(true);
+ 				let (_monitor_update, failed_htlcs) = chan.force_shutdown(true);
  				assert!(failed_htlcs.is_empty());
  				return Err(MsgHandleErrInternal::send_err_msg_no_close("ChannelMonitor storage failure".to_owned(), funding_msg.channel_id));
  			},
@@@ -2513,7 -2450,6 +2513,7 @@@
  	fn internal_funding_signed(&self, counterparty_node_id: &PublicKey, msg: &msgs::FundingSigned) -> Result<(), MsgHandleErrInternal> {
  		let (funding_txo, user_id) = {
+ 			let last_block_hash = *self.last_block_hash.read().unwrap();
  			let mut channel_lock = self.channel_state.lock().unwrap();
  			let channel_state = &mut *channel_lock;
  			match channel_state.by_id.entry(msg.channel_id) {
@@@ -2521,7 -2457,7 +2521,7 @@@
  					if chan.get().get_counterparty_node_id() != *counterparty_node_id {
  						return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id));
  					}
- 					let monitor = match chan.get_mut().funding_signed(&msg, &self.logger) {
+ 					let monitor = match chan.get_mut().funding_signed(&msg, last_block_hash, &self.logger) {
  						Ok(update) => update,
  						Err(e) => try_chan_entry!(self, Err(e), channel_state, chan),
  					};
@@@ -3164,29 -3100,6 +3164,29 @@@
  			self.finish_force_close_channel(failure);
  		}
  	}
+
+ 	/// Handle a list of channel failures during a block_connected or block_disconnected call,
+ 	/// pushing the channel monitor update (if any) to the background events queue and removing the
+ 	/// Channel object.
+ 	fn handle_init_event_channel_failures(&self, mut failed_channels: Vec<ShutdownResult>) {
+ 		for mut failure in failed_channels.drain(..) {
+ 			// Either a commitment transaction has been confirmed on-chain or
+ 			// Channel::block_disconnected detected that the funding transaction has been
+ 			// reorganized out of the main chain.
+ 			// We cannot broadcast our latest local state via monitor update (as
+ 			// Channel::force_shutdown tries to make us do) as we may still be in initialization,
+ 			// so we track the update internally and handle it when the user next calls
+ 			// timer_chan_freshness_every_min, guaranteeing we're running normally.
+ 			if let Some((funding_txo, update)) = failure.0.take() {
+ 				assert_eq!(update.updates.len(), 1);
+ 				if let ChannelMonitorUpdateStep::ChannelForceClosed { should_broadcast } = update.updates[0] {
+ 					assert!(should_broadcast);
+ 				} else { unreachable!(); }
+ 				self.pending_background_events.lock().unwrap().push(BackgroundEvent::ClosingMonitorUpdate((funding_txo, update)));
+ 			}
+ 			self.finish_force_close_channel(failure);
+ 		}
+ 	}
  }

  impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> MessageSendEventsProvider for ChannelManager<Signer, M, T, K, F, L>
@@@ -3254,17 -3167,9 +3254,17 @@@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelManage
- 	pub fn wait_timeout(&self, max_wait: Duration) -> bool {
+ 	pub fn await_persistable_update_timeout(&self, max_wait: Duration) -> bool {
  		self.persistence_notifier.wait_timeout(max_wait)
  	}

- 	/// Blocks until ChannelManager needs to be persisted. Only one listener on `wait` is
- 	/// guaranteed to be woken up.
- 	pub fn wait(&self) {
+ 	/// Blocks until ChannelManager needs to be persisted. Only one listener on
+ 	/// `await_persistable_update` or `await_persistable_update_timeout` is guaranteed to be woken
+ 	/// up.
+ 	pub fn await_persistable_update(&self) {
  		self.persistence_notifier.wait()
  	}

@@@ -3596,6 -3499,7 +3596,7 @@@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMessageHandle
  					&events::MessageSendEvent::PaymentFailureNetworkUpdate { .. } => true,
  					&events::MessageSendEvent::SendChannelRangeQuery { .. } => false,
  					&events::MessageSendEvent::SendShortIdsQuery { .. } => false,
+ 					&events::MessageSendEvent::SendReplyChannelRange { .. } => false,
  				}
  			});
  		}
@@@ -3673,7 -3577,7 +3674,7 @@@
  }

  /// Used to signal to the ChannelManager persister that the manager needs to be re-persisted to
- /// disk/backups, through `wait_timeout` and `wait`.
+ /// disk/backups, through `await_persistable_update_timeout` and `await_persistable_update`.
  struct PersistenceNotifier {
  	/// Users won't access the persistence_lock directly, but rather wait on its bool using
  	/// `wait_timeout` and `wait`.
@@@ -3963,7 -3867,7 +3964,7 @@@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> Writeable f
+ 		let background_events = self.pending_background_events.lock().unwrap();
+ 		(background_events.len() as u64).write(writer)?;
+ 		for event in background_events.iter() {
+ 			match event {
+ 				BackgroundEvent::ClosingMonitorUpdate((funding_txo, monitor_update)) => {
+ 					0u8.write(writer)?;
+ 					funding_txo.write(writer)?;
+ 					monitor_update.write(writer)?;
+ 				},
+ 			}
+ 		}
+
  		(self.last_node_announcement_serial.load(Ordering::Acquire) as u32).write(writer)?;

  		Ok(())
@@@ -4034,26 -3926,15 +4035,26 @@@
  /// At a high-level, the process for deserializing a ChannelManager and resuming normal operation
  /// is:
  /// 1) Deserialize all stored ChannelMonitors.
- /// 2) Deserialize the ChannelManager by filling in this struct and calling <(Option<BlockHash>,
- ///    ChannelManager)>::read(reader, args).
+ /// 2) Deserialize the ChannelManager by filling in this struct and calling:
+ ///    <(BlockHash, ChannelManager)>::read(reader, args)
  /// This may result in closing some Channels if the ChannelMonitor is newer than the stored
  /// ChannelManager state to ensure no loss of funds. Thus, transactions may be broadcasted.
- /// 3) Register all relevant ChannelMonitor outpoints with your chain watch mechanism using
- ///    ChannelMonitor::get_outputs_to_watch() and ChannelMonitor::get_funding_txo().
+ /// 3) If you are not fetching full blocks, register all relevant ChannelMonitor outpoints the same
+ ///    way you would handle a `chain::Filter` call using ChannelMonitor::get_outputs_to_watch() and
+ ///    ChannelMonitor::get_funding_txo().
  /// 4) Reconnect blocks on your ChannelMonitors.
- /// 5) Move the ChannelMonitors into your local chain::Watch.
- /// 6) Disconnect/connect blocks on the ChannelManager.
+ /// 5) Disconnect/connect blocks on the ChannelManager.
+ /// 6) Move the ChannelMonitors into your local chain::Watch.
+ ///
+ /// Note that the ordering of #4-6 is not of importance, however all three must occur before you
+ /// call any other methods on the newly-deserialized ChannelManager.
+ ///
+ /// Note that because some channels may be closed during deserialization, it is critical that you
+ /// always deserialize only the latest version of a ChannelManager and ChannelMonitors available to
+ /// you. If you deserialize an old ChannelManager (during which force-closure transactions may be
+ /// broadcast), and then later deserialize a newer version of the same ChannelManager (which will
+ /// not force-close the same channels but consider them live), you may end up revoking a state for
+ /// which you've already broadcasted the transaction.
pub struct ChannelManagerReadArgs<'a, Signer: 'a + Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
  	where M::Target: chain::Watch<Signer>,
  	T::Target: BroadcasterInterface,
@@@ -4126,7 -4007,7 +4127,7 @@@ impl<'a, Signer: 'a + Sign, M: Deref, T
// Implement ReadableArgs for an Arc'd ChannelManager to make it a bit easier to work with the
// SimpleArcChannelManager type:
impl<'a, Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
- 	ReadableArgs<ChannelManagerReadArgs<'a, Signer, M, T, K, F, L>> for (Option<BlockHash>, Arc<ChannelManager<Signer, M, T, K, F, L>>)
+ 	ReadableArgs<ChannelManagerReadArgs<'a, Signer, M, T, K, F, L>> for (BlockHash, Arc<ChannelManager<Signer, M, T, K, F, L>>)
  	where M::Target: chain::Watch<Signer>,
  	T::Target: BroadcasterInterface,
  	K::Target: KeysInterface<Signer = Signer>,
@@@ -4134,13 -4015,13 +4135,13 @@@
  	L::Target: Logger,
{
  	fn read<R: ::std::io::Read>(reader: &mut R, args: ChannelManagerReadArgs<'a, Signer, M, T, K, F, L>) -> Result<Self, DecodeError> {
- 		let (blockhash, chan_manager) = <(Option<BlockHash>, ChannelManager<Signer, M, T, K, F, L>)>::read(reader, args)?;
+ 		let (blockhash, chan_manager) = <(BlockHash, ChannelManager<Signer, M, T, K, F, L>)>::read(reader, args)?;
  		Ok((blockhash, Arc::new(chan_manager)))
  	}
}

impl<'a, Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
- 	ReadableArgs<ChannelManagerReadArgs<'a, Signer, M, T, K, F, L>> for (Option<BlockHash>, ChannelManager<Signer, M, T, K, F, L>)
+ 	ReadableArgs<ChannelManagerReadArgs<'a, Signer, M, T, K, F, L>> for (BlockHash, ChannelManager<Signer, M, T, K, F, L>)
  	where M::Target: chain::Watch<Signer>,
  	T::Target: BroadcasterInterface,
  	K::Target: KeysInterface<Signer = Signer>,
@@@ -4166,6 -4047,10 +4167,6 @@@
  		let mut short_to_id = HashMap::with_capacity(cmp::min(channel_count as usize, 128));
  		for _ in 0..channel_count {
  			let mut channel: Channel<Signer> = Channel::read(reader, &args.keys_manager)?;
- 			if channel.last_block_connected != Default::default() && channel.last_block_connected != last_block_hash {
- 				return Err(DecodeError::InvalidValue);
- 			}
-
  			let funding_txo = channel.get_funding_txo().ok_or(DecodeError::InvalidValue)?;
  			funding_txo_set.insert(funding_txo.clone());
  			if let Some(ref mut monitor) = args.channel_monitors.get_mut(&funding_txo) {
@@@ -4180,7 -4065,7 +4181,7 @@@
  						channel.get_cur_counterparty_commitment_transaction_number() > monitor.get_cur_counterparty_commitment_number() ||
  						channel.get_latest_monitor_update_id() < monitor.get_latest_update_id() {
  					// But if the channel is behind of the monitor, close the channel:
- 					let (_, _, mut new_failed_htlcs) = channel.force_shutdown(true);
+ 					let (_, mut new_failed_htlcs) = channel.force_shutdown(true);
  					failed_htlcs.append(&mut new_failed_htlcs);
  					monitor.broadcast_latest_holder_commitment_txn(&args.tx_broadcaster, &args.logger);
  				} else {
@@@ -4244,15 -4129,6 +4245,15 @@@
  			}
  		}

+ 		let background_event_count: u64 = Readable::read(reader)?;
+ 		let mut pending_background_events_read: Vec<BackgroundEvent> = Vec::with_capacity(cmp::min(background_event_count as usize, MAX_ALLOC_SIZE/mem::size_of::<BackgroundEvent>()));
+ 		for _ in 0..background_event_count {
+ 			match <u8 as Readable>::read(reader)? {
+ 				0 => pending_background_events_read.push(BackgroundEvent::ClosingMonitorUpdate((Readable::read(reader)?, Readable::read(reader)?))),
+ 				_ => return Err(DecodeError::InvalidValue),
+ 			}
+ 		}
+
  		let last_node_announcement_serial: u32 = Readable::read(reader)?;

  		let mut secp_ctx = Secp256k1::new();
@@@ -4265,7 -4141,7 +4266,7 @@@
  			tx_broadcaster: args.tx_broadcaster,

  			latest_block_height: AtomicUsize::new(latest_block_height as usize),
- 			last_block_hash: Mutex::new(last_block_hash),
+ 			last_block_hash: RwLock::new(last_block_hash),
  			secp_ctx,

  			channel_state: Mutex::new(ChannelHolder {
@@@ -4282,7 -4158,6 +4283,7 @@@
  			per_peer_state: RwLock::new(per_peer_state),

  			pending_events: Mutex::new(pending_events_read),
+ 			pending_background_events: Mutex::new(pending_background_events_read),
  			total_consistency_lock: RwLock::new(()),
  			persistence_notifier: PersistenceNotifier::new(),

@@@ -4298,7 -4173,12 +4299,7 @@@
  		//TODO: Broadcast channel update for closed channels, but only after we've made a
  		//connection or two.

- 		let last_seen_block_hash = if last_block_hash == Default::default() {
- 			None
- 		} else {
- 			Some(last_block_hash)
- 		};
- 		Ok((last_seen_block_hash, channel_manager))
+ 		Ok((last_block_hash.clone(), channel_manager))
  	}
}

diff --combined lightning/src/ln/peer_handler.rs
index 03470588,dacae671..0e3f0ed4
--- a/lightning/src/ln/peer_handler.rs
+++ b/lightning/src/ln/peer_handler.rs
@@@ -42,7 -42,7 +42,7 @@@ use bitcoin::hashes::{HashEngine, Hash}
  /// A dummy struct which implements `RoutingMessageHandler` without storing any routing information
  /// or doing any processing. You can provide one of these as the route_handler in a MessageHandler.
- struct IgnoringMessageHandler{}
+ pub struct IgnoringMessageHandler{}
  impl MessageSendEventsProvider for IgnoringMessageHandler {
  	fn get_and_clear_pending_msg_events(&self) -> Vec<MessageSendEvent> { Vec::new() }
  }
@@@ -67,7 -67,7 +67,7 @@@ impl Deref for IgnoringMessageHandler
  /// A dummy struct which implements `ChannelMessageHandler` without having any channels.
  /// You can provide one of these as the route_handler in a MessageHandler.
- struct ErroringMessageHandler {
+ pub struct ErroringMessageHandler {
  	message_queue: Mutex<Vec<msgs::ErrorMessage>>
  }
  impl ErroringMessageHandler {
@@@ -1281,6 -1281,17 +1281,17 @@@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, L: Deref> PeerManager<Descriptor, CM, RM, L
+ 				MessageSendEvent::SendReplyChannelRange { ref node_id, ref msg } => {
+ 					log_trace!(self.logger, "Handling SendReplyChannelRange event in peer_handler for node {} with num_scids={} first_blocknum={} number_of_blocks={}, sync_complete={}",
+ 						log_pubkey!(node_id),
+ 						msg.short_channel_ids.len(),
+ 						msg.first_blocknum,
+ 						msg.number_of_blocks,
+ 						msg.sync_complete);
+ 					let (mut descriptor, peer) = get_peer_for_forwarding!(node_id, {});
+ 					peer.pending_outbound_buffer.push_back(peer.channel_encryptor.encrypt_message(&encode_msg!(msg)));
+ 					self.do_attempt_write_data(&mut descriptor, peer);
+ 				}
  			}
  		}
@@@ -1345,7 -1356,7 +1356,7 @@@
  	/// It will send pings to each peer and disconnect those which did not respond to the last round of pings.
  	/// Will most likely call send_data on all of the registered descriptors, thus, be very careful with reentrancy issues!
- 	pub fn timer_tick_occured(&self) {
+ 	pub fn timer_tick_occurred(&self) {
  		let mut peers_lock = self.peers.lock().unwrap();
  		{
  			let peers = &mut *peers_lock;
@@@ -1518,11 -1529,11 +1529,11 @@@ mod tests
  		assert_eq!(peers[0].peers.lock().unwrap().peers.len(), 1);

  		// peers[0] awaiting_pong is set to true, but the Peer is still connected
- 		peers[0].timer_tick_occured();
+ 		peers[0].timer_tick_occurred();
  		assert_eq!(peers[0].peers.lock().unwrap().peers.len(), 1);

- 		// Since timer_tick_occured() is called again when awaiting_pong is true, all Peers are disconnected
- 		peers[0].timer_tick_occured();
+ 		// Since timer_tick_occurred() is called again when awaiting_pong is true, all Peers are disconnected
+ 		peers[0].timer_tick_occurred();
  		assert_eq!(peers[0].peers.lock().unwrap().peers.len(), 0);
  	}

diff --combined lightning/src/util/mod.rs
index a6609884,612bd940..04b77872
--- a/lightning/src/util/mod.rs
+++ b/lightning/src/util/mod.rs
@@@ -22,13 -22,14 +22,14 @@@ pub(crate) mod chacha20
  pub(crate) mod poly1305;
  pub(crate) mod chacha20poly1305rfc;
  pub(crate) mod transaction_utils;
+ pub(crate) mod scid_utils;

  #[macro_use]
  pub(crate) mod ser_macros;
  /// Logging macro utilities.
  #[macro_use]
- pub mod macro_logger;
+ pub(crate) mod macro_logger;

  // These have to come after macro_logger to build
  pub mod logger;
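[Note on the `wait`/`wait_timeout` → `await_persistable_update`/`await_persistable_update_timeout` rename in channelmanager.rs above: the `PersistenceNotifier` behind those methods is a small flag-plus-condvar primitive, tripped by `PersistenceNotifierGuard` whenever the `total_consistency_lock` is released. A self-contained sketch of that pattern follows — the struct and field names mirror the diff, but the method bodies are illustrative, not rust-lightning's exact implementation:]

use std::sync::{Condvar, Mutex};
use std::time::Duration;

/// Sketch of the notifier pattern behind await_persistable_update{,_timeout}:
/// a bool guarded by a Mutex, plus a Condvar to wake a waiting persister.
struct PersistenceNotifier {
    persistence_lock: (Mutex<bool>, Condvar),
}

impl PersistenceNotifier {
    fn new() -> Self {
        PersistenceNotifier { persistence_lock: (Mutex::new(false), Condvar::new()) }
    }

    /// Called when the total_consistency_lock is released: set the flag and wake waiters
    /// (only one waiter is guaranteed to observe the flag before it is cleared).
    fn notify(&self) {
        let (lock, cvar) = &self.persistence_lock;
        *lock.lock().unwrap() = true;
        cvar.notify_all();
    }

    /// Block until an update needs persisting, then clear the flag.
    fn wait(&self) {
        let (lock, cvar) = &self.persistence_lock;
        let mut guard = lock.lock().unwrap();
        while !*guard {
            guard = cvar.wait(guard).unwrap();
        }
        *guard = false;
    }

    /// Timeout-bounded variant; returns whether an update is ready to persist.
    fn wait_timeout(&self, max_wait: Duration) -> bool {
        let (lock, cvar) = &self.persistence_lock;
        let mut guard = lock.lock().unwrap();
        if !*guard {
            // A spurious wakeup may return early here; acceptable for a sketch.
            let (g, _timed_out) = cvar.wait_timeout(guard, max_wait).unwrap();
            guard = g;
        }
        let ready = *guard;
        *guard = false;
        ready
    }
}

[A persister thread would simply loop on `await_persistable_update()` (backed by `wait()` here) and re-serialize the ChannelManager each time it wakes.]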