self.network_sync == UpdateStatus::DisabledMarked
}
+ fn check_get_funding_locked(&mut self, height: u32) -> Option<msgs::FundingLocked> {
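+ // A confirmation height of 0 indicates the funding transaction hasn't confirmed yet
+ // (or was reorged out), so there's no depth to check.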
+ if self.funding_tx_confirmation_height == 0 {
+ return None;
+ }
+
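+ // The block at funding_tx_confirmation_height itself counts as the first
+ // confirmation, hence the +1.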
+ let funding_tx_confirmations = height as i64 - self.funding_tx_confirmation_height as i64 + 1;
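+ // A non-positive count means a reorg has removed the funding transaction from the
+ // chain; reset our stored height so we re-track it if it confirms again.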
+ if funding_tx_confirmations <= 0 {
+ self.funding_tx_confirmation_height = 0;
+ }
+
+ if funding_tx_confirmations < self.minimum_depth as i64 {
+ return None;
+ }
+
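+ // Mask out the flags which can be set in any funding state (shutdown, peer
+ // disconnection, pending monitor updates) so we can match on the funding state alone.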
+ let non_shutdown_state = self.channel_state & (!MULTI_STATE_FLAGS);
+ let need_commitment_update = if non_shutdown_state == ChannelState::FundingSent as u32 {
+ self.channel_state |= ChannelState::OurFundingLocked as u32;
+ true
+ } else if non_shutdown_state == (ChannelState::FundingSent as u32 | ChannelState::TheirFundingLocked as u32) {
+ self.channel_state = ChannelState::ChannelFunded as u32 | (self.channel_state & MULTI_STATE_FLAGS);
+ self.update_time_counter += 1;
+ true
+ } else if non_shutdown_state == (ChannelState::FundingSent as u32 | ChannelState::OurFundingLocked as u32) {
+ // We got a reorg but not enough to trigger a force close, just ignore.
+ false
+ } else if self.channel_state < ChannelState::ChannelFunded as u32 {
+ panic!("Started confirming a channel in a state pre-FundingSent?: {}", self.channel_state);
+ } else {
+ // We got a reorg but not enough to trigger a force close, just ignore.
+ false
+ };
+
+ if need_commitment_update {
+ if self.channel_state & (ChannelState::MonitorUpdateFailed as u32) == 0 {
+ let next_per_commitment_point = self.holder_signer.get_per_commitment_point(self.cur_holder_commitment_transaction_number, &self.secp_ctx);
+ return Some(msgs::FundingLocked {
+ channel_id: self.channel_id,
+ next_per_commitment_point,
+ });
+ } else {
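+ // We can't send messages while a monitor update is pending, so note that we owe
+ // our peer a funding_locked once the monitor update completes.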
+ self.monitor_pending_funding_locked = true;
+ }
+ }
+ None
+ }
+
/// When a transaction is confirmed, we check whether it is or spends the funding transaction.
/// In the first case, we store the confirmation height and calculate the short channel id.
/// In the second, we simply return an Err indicating we need to be force-closed now.
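+ /// Returns an optional funding_locked if, upon this confirmation, the channel has reached
+ /// its minimum depth and we should send one to our peer (e.g. with minimum_depth set to 1).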
pub fn transactions_confirmed<L: Deref>(&mut self, block_hash: &BlockHash, height: u32, txdata: &TransactionData, logger: &L)
- -> Result<(), msgs::ErrorMessage> where L::Target: Logger {
+ -> Result<Option<msgs::FundingLocked>, msgs::ErrorMessage> where L::Target: Logger {
let non_shutdown_state = self.channel_state & (!MULTI_STATE_FLAGS);
for &(index_in_block, tx) in txdata.iter() {
if let Some(funding_txo) = self.get_funding_txo() {
}
}
}
+ // If we allow 1-conf funding, we may need to check for funding_locked here and
+ // send it immediately instead of waiting for an update_best_block call (which
+ // may have already happened for this block).
+ if let Some(funding_locked) = self.check_get_funding_locked(height) {
+ return Ok(Some(funding_locked));
+ }
}
for inp in tx.input.iter() {
if inp.previous_output == funding_txo.into_bitcoin_outpoint() {
}
}
}
- Ok(())
+ Ok(None)
}
/// When a new block is connected, we check the height of the block against outbound holding
});
self.update_time_counter = cmp::max(self.update_time_counter, highest_header_time);
- if self.funding_tx_confirmation_height > 0 {
- let funding_tx_confirmations = height as i64 - self.funding_tx_confirmation_height as i64 + 1;
- if funding_tx_confirmations <= 0 {
- self.funding_tx_confirmation_height = 0;
+
+ if let Some(funding_locked) = self.check_get_funding_locked(height) {
+ return Ok((Some(funding_locked), timed_out_htlcs));
+ }
+
+ let non_shutdown_state = self.channel_state & (!MULTI_STATE_FLAGS);
+ if non_shutdown_state >= ChannelState::ChannelFunded as u32 ||
+ (non_shutdown_state & ChannelState::OurFundingLocked as u32) == ChannelState::OurFundingLocked as u32 {
+ let mut funding_tx_confirmations = height as i64 - self.funding_tx_confirmation_height as i64 + 1;
+ if self.funding_tx_confirmation_height == 0 {
+ // Note that check_get_funding_locked may reset funding_tx_confirmation_height to
+ // zero if it has been reorged out. However, in either case, our state flags
+ // indicate we've already sent a funding_locked.
+ funding_tx_confirmations = 0;
}
- let non_shutdown_state = self.channel_state & (!MULTI_STATE_FLAGS);
- if (non_shutdown_state >= ChannelState::ChannelFunded as u32 ||
- (non_shutdown_state & ChannelState::OurFundingLocked as u32) == ChannelState::OurFundingLocked as u32) &&
- funding_tx_confirmations < self.minimum_depth as i64 / 2 {
+ // If we've sent funding_locked (or have both sent and received funding_locked), and
+ // the funding transaction's confirmation count has dipped below minimum_depth / 2,
+ // close the channel and hope we can get the latest state on chain (because presumably
+ // the funding transaction is at least still in the mempool of most nodes).
+ if funding_tx_confirmations < self.minimum_depth as i64 / 2 {
return Err(msgs::ErrorMessage {
channel_id: self.channel_id(),
data: format!("Funding transaction was un-confirmed. Locked at {} confs, now have {} confs.", self.minimum_depth, funding_tx_confirmations),
});
}
-
- if funding_tx_confirmations == self.minimum_depth as i64 {
- let need_commitment_update = if non_shutdown_state == ChannelState::FundingSent as u32 {
- self.channel_state |= ChannelState::OurFundingLocked as u32;
- true
- } else if non_shutdown_state == (ChannelState::FundingSent as u32 | ChannelState::TheirFundingLocked as u32) {
- self.channel_state = ChannelState::ChannelFunded as u32 | (self.channel_state & MULTI_STATE_FLAGS);
- self.update_time_counter += 1;
- true
- } else if non_shutdown_state == (ChannelState::FundingSent as u32 | ChannelState::OurFundingLocked as u32) {
- // We got a reorg but not enough to trigger a force close, just update
- // funding_tx_confirmed_in and return.
- false
- } else if self.channel_state < ChannelState::ChannelFunded as u32 {
- panic!("Started confirming a channel in a state pre-FundingSent?: {}", self.channel_state);
- } else {
- // We got a reorg but not enough to trigger a force close, just update
- // funding_tx_confirmed_in and return.
- false
- };
-
- //TODO: Note that this must be a duplicate of the previous commitment point they sent us,
- //as otherwise we will have a commitment transaction that they can't revoke (well, kinda,
- //they can by sending two revoke_and_acks back-to-back, but not really). This appears to be
- //a protocol oversight, but I assume I'm just missing something.
- if need_commitment_update {
- if self.channel_state & (ChannelState::MonitorUpdateFailed as u32) == 0 {
- let next_per_commitment_point = self.holder_signer.get_per_commitment_point(self.cur_holder_commitment_transaction_number, &self.secp_ctx);
- return Ok((Some(msgs::FundingLocked {
- channel_id: self.channel_id,
- next_per_commitment_point,
- }), timed_out_htlcs));
- } else {
- self.monitor_pending_funding_locked = true;
- return Ok((None, timed_out_htlcs));
- }
- }
- }
}
Ok((None, timed_out_htlcs))
check_added_monitors!(nodes[1], 1);
}
-#[test]
-fn test_1_conf_open() {
+fn do_test_1_conf_open(connect_style: ConnectStyle) {
// Previously, if the minimum_depth config was set to 1, we'd never send a funding_locked. This
// tests that we properly send one in that case.
let mut alice_config = UserConfig::default();
let chanmon_cfgs = create_chanmon_cfgs(2);
let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(alice_config), Some(bob_config)]);
- let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+ let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+ *nodes[0].connect_style.borrow_mut() = connect_style;
let tx = create_chan_between_nodes_with_value_init(&nodes[0], &nodes[1], 100000, 10001, InitFeatures::known(), InitFeatures::known());
mine_transaction(&nodes[1], &tx);
node.net_graph_msg_handler.handle_channel_update(&bs_update).unwrap();
}
}
+#[test]
+fn test_1_conf_open() {
+ do_test_1_conf_open(ConnectStyle::BestBlockFirst);
+ do_test_1_conf_open(ConnectStyle::TransactionsFirst);
+ do_test_1_conf_open(ConnectStyle::FullBlockViaListen);
+}
fn do_test_sanity_on_in_flight_opens(steps: u8) {
// Previously, we had issues deserializing channels when we hadn't connected the first block
do_test_onchain_htlc_reorg(false, false);
}
-fn do_test_unconf_chan(reload_node: bool, reorg_after_reload: bool) {
+fn do_test_unconf_chan(reload_node: bool, reorg_after_reload: bool, connect_style: ConnectStyle) {
// After creating a chan between nodes, we disconnect all blocks previously seen to force a
// channel close on nodes[0]'s side. We also use this to provide very basic testing of logic
// around freeing background events which store monitor updates during block_[dis]connected.
let new_chain_monitor: test_utils::TestChainMonitor;
let nodes_0_deserialized: ChannelManager<EnforcingSigner, &test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>;
let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+ *nodes[0].connect_style.borrow_mut() = connect_style;
+
let chan_id = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()).2;
let channel_state = nodes[0].node.channel_state.lock().unwrap();
#[test]
fn test_unconf_chan() {
- do_test_unconf_chan(true, true);
- do_test_unconf_chan(false, true);
- do_test_unconf_chan(true, false);
- do_test_unconf_chan(false, false);
+ do_test_unconf_chan(true, true, ConnectStyle::BestBlockFirstSkippingBlocks);
+ do_test_unconf_chan(false, true, ConnectStyle::BestBlockFirstSkippingBlocks);
+ do_test_unconf_chan(true, false, ConnectStyle::BestBlockFirstSkippingBlocks);
+ do_test_unconf_chan(false, false, ConnectStyle::BestBlockFirstSkippingBlocks);
+}
+
+#[test]
+fn test_unconf_chan_via_listen() {
+ do_test_unconf_chan(true, true, ConnectStyle::FullBlockViaListen);
+ do_test_unconf_chan(false, true, ConnectStyle::FullBlockViaListen);
+ do_test_unconf_chan(true, false, ConnectStyle::FullBlockViaListen);
+ do_test_unconf_chan(false, false, ConnectStyle::FullBlockViaListen);
}
#[test]