From: Matt Corallo
Date: Wed, 10 Mar 2021 03:05:21 +0000 (-0500)
Subject: Handle 1-conf funding_locked in channel no matter the event order
X-Git-Tag: v0.0.14~38^2
X-Git-Url: http://git.bitcoin.ninja/?a=commitdiff_plain;h=refs%2Fheads%2F2021-03-skip-blocks;p=rust-lightning

Handle 1-conf funding_locked in channel no matter the event order

See comment in the diff for more details
---

diff --git a/fuzz/src/chanmon_consistency.rs b/fuzz/src/chanmon_consistency.rs
index 64933d325..55c2ffd57 100644
--- a/fuzz/src/chanmon_consistency.rs
+++ b/fuzz/src/chanmon_consistency.rs
@@ -436,11 +436,10 @@ pub fn do_test<Out: Output>(data: &[u8], out: Out) {
 			let mut header = BlockHeader { version: 0x20000000, prev_blockhash: chain_hash, merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
 			let txdata: Vec<_> = channel_txn.iter().enumerate().map(|(i, tx)| (i + 1, tx)).collect();
 			$node.transactions_confirmed(&header, 1, &txdata);
-			$node.update_best_block(&header, 1);
-			for i in 2..100 {
+			for _ in 2..100 {
 				header = BlockHeader { version: 0x20000000, prev_blockhash: header.block_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
-				$node.update_best_block(&header, i);
 			}
+			$node.update_best_block(&header, 99);
 		} }
 	}
 }
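The fuzz change above mirrors clients that jump straight to the chain tip: the funding transaction is reported via transactions_confirmed at height 1, and update_best_block is then called once at height 99, with no per-block calls in between. That is safe because confirmation counting is pure height arithmetic, not a count of connect calls. A minimal sketch of that arithmetic (the helper name here is ours; the formula matches the one used by check_get_funding_locked below):

// Confirmation count as the channel logic computes it: the block containing
// the transaction is itself the first confirmation. Signed arithmetic so that
// a reorg (tip below the funding height) yields a non-positive count.
fn confirmations(best_height: u32, funding_height: u32) -> i64 {
    best_height as i64 - funding_height as i64 + 1
}

fn main() {
    // Funding tx confirmed at height 1, tip then jumped straight to 99:
    // same result as connecting heights 2 through 99 one block at a time.
    assert_eq!(confirmations(99, 1), 99);
    // 1-conf channels: the block containing the tx is already enough.
    assert_eq!(confirmations(1, 1), 1);
}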
diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs
index 0f09fd330..174eed9b9 100644
--- a/lightning/src/ln/channel.rs
+++ b/lightning/src/ln/channel.rs
@@ -3501,11 +3501,57 @@ impl<Signer: Sign> Channel<Signer> {
 		self.network_sync == UpdateStatus::DisabledMarked
 	}
 
+	fn check_get_funding_locked(&mut self, height: u32) -> Option<msgs::FundingLocked> {
+		if self.funding_tx_confirmation_height == 0 {
+			return None;
+		}
+
+		let funding_tx_confirmations = height as i64 - self.funding_tx_confirmation_height as i64 + 1;
+		if funding_tx_confirmations <= 0 {
+			self.funding_tx_confirmation_height = 0;
+		}
+
+		if funding_tx_confirmations < self.minimum_depth as i64 {
+			return None;
+		}
+
+		let non_shutdown_state = self.channel_state & (!MULTI_STATE_FLAGS);
+		let need_commitment_update = if non_shutdown_state == ChannelState::FundingSent as u32 {
+			self.channel_state |= ChannelState::OurFundingLocked as u32;
+			true
+		} else if non_shutdown_state == (ChannelState::FundingSent as u32 | ChannelState::TheirFundingLocked as u32) {
+			self.channel_state = ChannelState::ChannelFunded as u32 | (self.channel_state & MULTI_STATE_FLAGS);
+			self.update_time_counter += 1;
+			true
+		} else if non_shutdown_state == (ChannelState::FundingSent as u32 | ChannelState::OurFundingLocked as u32) {
+			// We got a reorg but not enough to trigger a force close, just ignore.
+			false
+		} else if self.channel_state < ChannelState::ChannelFunded as u32 {
+			panic!("Started confirming a channel in a state pre-FundingSent?: {}", self.channel_state);
+		} else {
+			// We got a reorg but not enough to trigger a force close, just ignore.
+			false
+		};
+
+		if need_commitment_update {
+			if self.channel_state & (ChannelState::MonitorUpdateFailed as u32) == 0 {
+				let next_per_commitment_point = self.holder_signer.get_per_commitment_point(self.cur_holder_commitment_transaction_number, &self.secp_ctx);
+				return Some(msgs::FundingLocked {
+					channel_id: self.channel_id,
+					next_per_commitment_point,
+				});
+			} else {
+				self.monitor_pending_funding_locked = true;
+			}
+		}
+		None
+	}
+
 	/// When a transaction is confirmed, we check whether it is or spends the funding transaction
 	/// In the first case, we store the confirmation height and calculating the short channel id.
 	/// In the second, we simply return an Err indicating we need to be force-closed now.
 	pub fn transactions_confirmed<L: Deref>(&mut self, block_hash: &BlockHash, height: u32, txdata: &TransactionData, logger: &L)
-	-> Result<(), msgs::ErrorMessage> where L::Target: Logger {
+	-> Result<Option<msgs::FundingLocked>, msgs::ErrorMessage> where L::Target: Logger {
 		let non_shutdown_state = self.channel_state & (!MULTI_STATE_FLAGS);
 		for &(index_in_block, tx) in txdata.iter() {
 			if let Some(funding_txo) = self.get_funding_txo() {
@@ -3550,6 +3596,12 @@ impl<Signer: Sign> Channel<Signer> {
 						}
 					}
 				}
+				// If we allow 1-conf funding, we may need to check for funding_locked here and
+				// send it immediately instead of waiting for an update_best_block call (which
+				// may have already happened for this block).
+				if let Some(funding_locked) = self.check_get_funding_locked(height) {
+					return Ok(Some(funding_locked));
+				}
 			}
 			for inp in tx.input.iter() {
 				if inp.previous_output == funding_txo.into_bitcoin_outpoint() {
@@ -3562,7 +3614,7 @@ impl<Signer: Sign> Channel<Signer> {
 				}
 			}
 		}
-		Ok(())
+		Ok(None)
 	}
 
 	/// When a new block is connected, we check the height of the block against outbound holding
@@ -3592,59 +3644,32 @@ impl<Signer: Sign> Channel<Signer> {
 			});
 		self.update_time_counter = cmp::max(self.update_time_counter, highest_header_time);
-		if self.funding_tx_confirmation_height > 0 {
-			let funding_tx_confirmations = height as i64 - self.funding_tx_confirmation_height as i64 + 1;
-			if funding_tx_confirmations <= 0 {
-				self.funding_tx_confirmation_height = 0;
+
+		if let Some(funding_locked) = self.check_get_funding_locked(height) {
+			return Ok((Some(funding_locked), timed_out_htlcs));
+		}
+
+		let non_shutdown_state = self.channel_state & (!MULTI_STATE_FLAGS);
+		if non_shutdown_state >= ChannelState::ChannelFunded as u32 ||
+		   (non_shutdown_state & ChannelState::OurFundingLocked as u32) == ChannelState::OurFundingLocked as u32 {
+			let mut funding_tx_confirmations = height as i64 - self.funding_tx_confirmation_height as i64 + 1;
+			if self.funding_tx_confirmation_height == 0 {
+				// Note that check_get_funding_locked may reset funding_tx_confirmation_height to
+				// zero if it has been reorged out, however in either case, our state flags
+				// indicate we've already sent a funding_locked
+				funding_tx_confirmations = 0;
 			}
 
-			let non_shutdown_state = self.channel_state & (!MULTI_STATE_FLAGS);
-			if (non_shutdown_state >= ChannelState::ChannelFunded as u32 ||
-			   (non_shutdown_state & ChannelState::OurFundingLocked as u32) == ChannelState::OurFundingLocked as u32) &&
-				funding_tx_confirmations < self.minimum_depth as i64 / 2 {
+			// If we've sent funding_locked (or have both sent and received funding_locked), and
+			// the funding transaction's confirmation count has dipped below minimum_depth / 2,
+			// close the channel and hope we can get the latest state on chain (because presumably
+			// the funding transaction is at least still in the mempool of most nodes).
+			if funding_tx_confirmations < self.minimum_depth as i64 / 2 {
 				return Err(msgs::ErrorMessage {
 					channel_id: self.channel_id(),
 					data: format!("Funding transaction was un-confirmed. Locked at {} confs, now have {} confs.", self.minimum_depth, funding_tx_confirmations),
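To see why routing both event paths through a single check fixes the ordering dependence, here is a small self-contained model. It is not rust-lightning code (ChannelModel and its fields are simplified stand-ins for the channel_state flags above), but it shows the exactly-once shape: whichever of transactions_confirmed or update_best_block fires first for a 1-conf channel, one funding_locked comes out.

// Simplified model of check_get_funding_locked: callable from both the
// transaction-confirmation path and the best-block path, idempotent via state.
#[derive(Default)]
struct ChannelModel {
    funding_tx_confirmation_height: u32, // 0 = funding tx not yet confirmed
    minimum_depth: u32,
    funding_locked_sent: bool, // the real code tracks this in channel_state flags
}

impl ChannelModel {
    // Returns true exactly once, as soon as the funding tx has minimum_depth
    // confirmations at the given best-block height.
    fn check_get_funding_locked(&mut self, height: u32) -> bool {
        if self.funding_tx_confirmation_height == 0 || self.funding_locked_sent {
            return false;
        }
        let confs = height as i64 - self.funding_tx_confirmation_height as i64 + 1;
        if confs >= self.minimum_depth as i64 {
            self.funding_locked_sent = true;
            true
        } else {
            false
        }
    }
}

fn main() {
    // Order A: the block's transactions are processed first (1-conf: the
    // funding_locked must come out of transactions_confirmed itself).
    let mut a = ChannelModel { minimum_depth: 1, ..Default::default() };
    a.funding_tx_confirmation_height = 1; // transactions_confirmed saw the funding tx
    assert!(a.check_get_funding_locked(1)); // sent from the transactions path
    assert!(!a.check_get_funding_locked(1)); // later update_best_block: no duplicate

    // Order B: the best block is updated before the transactions are reported.
    let mut b = ChannelModel { minimum_depth: 1, ..Default::default() };
    assert!(!b.check_get_funding_locked(1)); // funding tx not yet known
    b.funding_tx_confirmation_height = 1; // now transactions_confirmed runs
    assert!(b.check_get_funding_locked(1)); // sent, still exactly once
}

The real implementation additionally zeroes funding_tx_confirmation_height when the funding transaction is reorged out, and defers the message via monitor_pending_funding_locked while a monitor update is failed, but the idempotence argument is the same.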
 				});
 			}
-
-			if funding_tx_confirmations == self.minimum_depth as i64 {
-				let need_commitment_update = if non_shutdown_state == ChannelState::FundingSent as u32 {
-					self.channel_state |= ChannelState::OurFundingLocked as u32;
-					true
-				} else if non_shutdown_state == (ChannelState::FundingSent as u32 | ChannelState::TheirFundingLocked as u32) {
-					self.channel_state = ChannelState::ChannelFunded as u32 | (self.channel_state & MULTI_STATE_FLAGS);
-					self.update_time_counter += 1;
-					true
-				} else if non_shutdown_state == (ChannelState::FundingSent as u32 | ChannelState::OurFundingLocked as u32) {
-					// We got a reorg but not enough to trigger a force close, just update
-					// funding_tx_confirmed_in and return.
-					false
-				} else if self.channel_state < ChannelState::ChannelFunded as u32 {
-					panic!("Started confirming a channel in a state pre-FundingSent?: {}", self.channel_state);
-				} else {
-					// We got a reorg but not enough to trigger a force close, just update
-					// funding_tx_confirmed_in and return.
-					false
-				};
-
-				//TODO: Note that this must be a duplicate of the previous commitment point they sent us,
-				//as otherwise we will have a commitment transaction that they can't revoke (well, kinda,
-				//they can by sending two revoke_and_acks back-to-back, but not really). This appears to be
-				//a protocol oversight, but I assume I'm just missing something.
-				if need_commitment_update {
-					if self.channel_state & (ChannelState::MonitorUpdateFailed as u32) == 0 {
-						let next_per_commitment_point = self.holder_signer.get_per_commitment_point(self.cur_holder_commitment_transaction_number, &self.secp_ctx);
-						return Ok((Some(msgs::FundingLocked {
-							channel_id: self.channel_id,
-							next_per_commitment_point,
-						}), timed_out_htlcs));
-					} else {
-						self.monitor_pending_funding_locked = true;
-						return Ok((None, timed_out_htlcs));
-					}
-				}
-			}
 		}
 
 		Ok((None, timed_out_htlcs))
diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs
index 9f943979d..31c8dccae 100644
--- a/lightning/src/ln/channelmanager.rs
+++ b/lightning/src/ln/channelmanager.rs
@@ -3436,7 +3436,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
 		log_trace!(self.logger, "{} transactions included in block {} at height {} provided", txdata.len(), block_hash, height);
 
 		let _persistence_guard = PersistenceNotifierGuard::new(&self.total_consistency_lock, &self.persistence_notifier);
-		self.do_chain_event(height, |channel| channel.transactions_confirmed(&block_hash, height, txdata, &self.logger).map(|_| (None, Vec::new())));
+		self.do_chain_event(height, |channel| channel.transactions_confirmed(&block_hash, height, txdata, &self.logger).map(|a| (a, Vec::new())));
 	}
 
 	/// Updates channel state with the current best blockchain tip. You should attempt to call this
diff --git a/lightning/src/ln/functional_tests.rs b/lightning/src/ln/functional_tests.rs
index 96dd30f02..293ccf9f3 100644
--- a/lightning/src/ln/functional_tests.rs
+++ b/lightning/src/ln/functional_tests.rs
@@ -394,8 +394,7 @@ fn test_multi_flight_update_fee() {
 	check_added_monitors!(nodes[1], 1);
 }
 
-#[test]
-fn test_1_conf_open() {
+fn do_test_1_conf_open(connect_style: ConnectStyle) {
 	// Previously, if the minium_depth config was set to 1, we'd never send a funding_locked. This
 	// tests that we properly send one in that case.
 	let mut alice_config = UserConfig::default();
@@ -409,7 +408,8 @@
 	let chanmon_cfgs = create_chanmon_cfgs(2);
 	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
 	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(alice_config), Some(bob_config)]);
-	let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+	let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+	*nodes[0].connect_style.borrow_mut() = connect_style;
 
 	let tx = create_chan_between_nodes_with_value_init(&nodes[0], &nodes[1], 100000, 10001, InitFeatures::known(), InitFeatures::known());
 	mine_transaction(&nodes[1], &tx);
@@ -425,6 +425,12 @@
 		node.net_graph_msg_handler.handle_channel_update(&bs_update).unwrap();
 	}
 }
+#[test]
+fn test_1_conf_open() {
+	do_test_1_conf_open(ConnectStyle::BestBlockFirst);
+	do_test_1_conf_open(ConnectStyle::TransactionsFirst);
+	do_test_1_conf_open(ConnectStyle::FullBlockViaListen);
+}
 
 fn do_test_sanity_on_in_flight_opens(steps: u8) {
 	// Previously, we had issues deserializing channels when we hadn't connected the first block
diff --git a/lightning/src/ln/reorg_tests.rs b/lightning/src/ln/reorg_tests.rs
index 6437db744..c9573a67c 100644
--- a/lightning/src/ln/reorg_tests.rs
+++ b/lightning/src/ln/reorg_tests.rs
@@ -184,7 +184,7 @@ fn test_onchain_htlc_timeout_delay_remote_commitment() {
 	do_test_onchain_htlc_reorg(false, false);
 }
 
-fn do_test_unconf_chan(reload_node: bool, reorg_after_reload: bool) {
+fn do_test_unconf_chan(reload_node: bool, reorg_after_reload: bool, connect_style: ConnectStyle) {
 	// After creating a chan between nodes, we disconnect all blocks previously seen to force a
 	// channel close on nodes[0] side. We also use this to provide very basic testing of logic
 	// around freeing background events which store monitor updates during block_[dis]connected.
@@ -195,6 +195,8 @@ fn do_test_unconf_chan(reload_node: bool, reorg_after_reload: bool) {
 	let new_chain_monitor: test_utils::TestChainMonitor;
 	let nodes_0_deserialized: ChannelManager<EnforcingSigner, &test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>;
 	let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+	*nodes[0].connect_style.borrow_mut() = connect_style;
+
 	let chan_id = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()).2;
 
 	let channel_state = nodes[0].node.channel_state.lock().unwrap();
@@ -272,10 +274,18 @@
 
 #[test]
 fn test_unconf_chan() {
-	do_test_unconf_chan(true, true);
-	do_test_unconf_chan(false, true);
-	do_test_unconf_chan(true, false);
-	do_test_unconf_chan(false, false);
+	do_test_unconf_chan(true, true, ConnectStyle::BestBlockFirstSkippingBlocks);
+	do_test_unconf_chan(false, true, ConnectStyle::BestBlockFirstSkippingBlocks);
+	do_test_unconf_chan(true, false, ConnectStyle::BestBlockFirstSkippingBlocks);
+	do_test_unconf_chan(false, false, ConnectStyle::BestBlockFirstSkippingBlocks);
+}
+
+#[test]
+fn test_unconf_chan_via_listen() {
+	do_test_unconf_chan(true, true, ConnectStyle::FullBlockViaListen);
+	do_test_unconf_chan(false, true, ConnectStyle::FullBlockViaListen);
+	do_test_unconf_chan(true, false, ConnectStyle::FullBlockViaListen);
+	do_test_unconf_chan(false, false, ConnectStyle::FullBlockViaListen);
 }
 
 #[test]
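The test changes follow one pattern: a single scenario body re-run under each block-delivery style, so an ordering regression in any path fails a named test. A distilled sketch of that matrix (this ConnectStyle is a hypothetical copy for illustration; the real enum lives in the test utilities and has more variants, including the BestBlockFirstSkippingBlocks used above):

// One scenario, three delivery orders. In the real harness the style controls
// whether update_best_block runs before transactions_confirmed, after it, or
// whether whole blocks are fed in via a block-at-a-time listen interface.
#[derive(Clone, Copy, Debug)]
enum ConnectStyle {
    BestBlockFirst,
    TransactionsFirst,
    FullBlockViaListen,
}

fn do_test_1_conf_open(style: ConnectStyle) {
    // Stand-in for the real test body; it must pass for every style.
    println!("1-conf open under {:?}: ok", style);
}

fn main() {
    for style in &[ConnectStyle::BestBlockFirst, ConnectStyle::TransactionsFirst, ConnectStyle::FullBlockViaListen] {
        do_test_1_conf_open(*style);
    }
}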