From: Matt Corallo
Date: Tue, 5 Sep 2023 21:13:07 +0000 (+0000)
Subject: Handle sign_counterparty_commitment failing during inb funding
X-Git-Url: http://git.bitcoin.ninja/?a=commitdiff_plain;h=8ac4804da2b7d42c23fb95ae442b644365c4ba43;p=rust-lightning

Handle sign_counterparty_commitment failing during inb funding

If sign_counterparty_commitment fails (e.g. because the signer is temporarily
disconnected), this really indicates that we should retry sending the message
which required the signature later, rather than force-closing the channel
(which probably won't even work if the signer is missing).

Here we add initial handling of sign_counterparty_commitment failing during
inbound channel funding, setting a flag in `ChannelContext` which indicates we
should retry sending the `funding_signed` later. We don't yet add any ability
to do that retry.
---

diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs
index 5af38393e..81f453243 100644
--- a/lightning/src/ln/channel.rs
+++ b/lightning/src/ln/channel.rs
@@ -6654,7 +6654,7 @@ impl<SP: Deref> InboundV1Channel<SP> where SP::Target: SignerProvider {
 		self.generate_accept_channel_message()
 	}
 
-	fn funding_created_signature<L: Deref>(&mut self, sig: &Signature, logger: &L) -> Result<(CommitmentTransaction, CommitmentTransaction, Signature), ChannelError> where L::Target: Logger {
+	fn funding_created_signature<L: Deref>(&mut self, sig: &Signature, logger: &L) -> Result<(CommitmentTransaction, CommitmentTransaction, Option<Signature>), ChannelError> where L::Target: Logger {
 		let funding_script = self.context.get_funding_redeemscript();
 
 		let keys = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number);
@@ -6683,7 +6683,7 @@ impl<SP: Deref> InboundV1Channel<SP> where SP::Target: SignerProvider {
 			// TODO (arik): move match into calling method for Taproot
 			ChannelSignerType::Ecdsa(ecdsa) => {
 				let counterparty_signature = ecdsa.sign_counterparty_commitment(&counterparty_initial_commitment_tx, Vec::new(), &self.context.secp_ctx)
-					.map_err(|_| ChannelError::Close("Failed to get signatures for new commitment_signed".to_owned()))?.0;
+					.map(|(sig, _)| sig).ok();
 
 				// We sign "counterparty" commitment transaction, allowing them to broadcast the tx if they wish.
 				Ok((counterparty_initial_commitment_tx, initial_commitment_tx, counterparty_signature))
@@ -6693,7 +6693,7 @@ impl<SP: Deref> InboundV1Channel<SP> where SP::Target: SignerProvider {
 
 	pub fn funding_created<L: Deref>(
 		mut self, msg: &msgs::FundingCreated, best_block: BestBlock, signer_provider: &SP, logger: &L
-	) -> Result<(Channel<SP>, msgs::FundingSigned, ChannelMonitor<<SP::Target as SignerProvider>::Signer>), (Self, ChannelError)>
+	) -> Result<(Channel<SP>, Option<msgs::FundingSigned>, ChannelMonitor<<SP::Target as SignerProvider>::Signer>), (Self, ChannelError)>
 	where
 		L::Target: Logger
 	{
@@ -6718,7 +6718,7 @@ impl<SP: Deref> InboundV1Channel<SP> where SP::Target: SignerProvider {
 		// funding_created_signature may fail.
 		self.context.holder_signer.as_mut().provide_channel_parameters(&self.context.channel_transaction_parameters);
 
-		let (counterparty_initial_commitment_tx, initial_commitment_tx, signature) = match self.funding_created_signature(&msg.signature, logger) {
+		let (counterparty_initial_commitment_tx, initial_commitment_tx, sig_opt) = match self.funding_created_signature(&msg.signature, logger) {
 			Ok(res) => res,
 			Err(ChannelError::Close(e)) => {
 				self.context.channel_transaction_parameters.funding_outpoint = None;
@@ -6782,12 +6782,19 @@ impl<SP: Deref> InboundV1Channel<SP> where SP::Target: SignerProvider {
 		let need_channel_ready = channel.check_get_channel_ready(0).is_some();
 		channel.monitor_updating_paused(false, false, need_channel_ready, Vec::new(), Vec::new(), Vec::new());
 
-		Ok((channel, msgs::FundingSigned {
-			channel_id,
-			signature,
-			#[cfg(taproot)]
-			partial_signature_with_nonce: None,
-		}, channel_monitor))
+		let funding_signed = if let Some(signature) = sig_opt {
+			Some(msgs::FundingSigned {
+				channel_id,
+				signature,
+				#[cfg(taproot)]
+				partial_signature_with_nonce: None,
+			})
+		} else {
+			channel.context.signer_pending_funding = true;
+			None
+		};
+
+		Ok((channel, funding_signed, channel_monitor))
 	}
 }
 
@@ -7875,7 +7882,7 @@ mod tests {
 		let (_, funding_signed_msg, _) = node_b_chan.funding_created(&funding_created_msg.unwrap(), best_block, &&keys_provider, &&logger).map_err(|_| ()).unwrap();
 
 		// Node B --> Node A: funding signed
-		let _ = node_a_chan.funding_signed(&funding_signed_msg, best_block, &&keys_provider, &&logger).unwrap();
+		let _ = node_a_chan.funding_signed(&funding_signed_msg.unwrap(), best_block, &&keys_provider, &&logger).unwrap();
 
 		// Put some inbound and outbound HTLCs in A's channel.
 		let htlc_amount_msat = 11_092_000; // put an amount below A's effective dust limit but above B's.
@@ -8002,7 +8009,7 @@ mod tests {
 		let (mut node_b_chan, funding_signed_msg, _) = node_b_chan.funding_created(&funding_created_msg.unwrap(), best_block, &&keys_provider, &&logger).map_err(|_| ()).unwrap();
 
 		// Node B --> Node A: funding signed
-		let _ = node_a_chan.funding_signed(&funding_signed_msg, best_block, &&keys_provider, &&logger).unwrap();
+		let _ = node_a_chan.funding_signed(&funding_signed_msg.unwrap(), best_block, &&keys_provider, &&logger).unwrap();
 
 		// Now disconnect the two nodes and check that the commitment point in
 		// Node B's channel_reestablish message is sane.
@@ -8190,7 +8197,7 @@ mod tests {
 		let (_, funding_signed_msg, _) = node_b_chan.funding_created(&funding_created_msg.unwrap(), best_block, &&keys_provider, &&logger).map_err(|_| ()).unwrap();
 
 		// Node B --> Node A: funding signed
-		let _ = node_a_chan.funding_signed(&funding_signed_msg, best_block, &&keys_provider, &&logger).unwrap();
+		let _ = node_a_chan.funding_signed(&funding_signed_msg.unwrap(), best_block, &&keys_provider, &&logger).unwrap();
 
 		// Make sure that receiving a channel update will update the Channel as expected.
 		let update = ChannelUpdate {
@@ -9275,7 +9282,7 @@ mod tests {
 		// Receive funding_signed, but the channel will be configured to hold sending channel_ready and
 		// broadcasting the funding transaction until the batch is ready.
 		let _ = node_a_chan.funding_signed(
-			&funding_signed_msg,
+			&funding_signed_msg.unwrap(),
 			best_block,
 			&&keys_provider,
 			&&logger,
diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs
index 892e64eea..56820a6c4 100644
--- a/lightning/src/ln/channelmanager.rs
+++ b/lightning/src/ln/channelmanager.rs
@@ -6232,7 +6232,7 @@ where
 		let mut peer_state_lock = peer_state_mutex.lock().unwrap();
 		let peer_state = &mut *peer_state_lock;
 
-		let (chan, funding_msg, monitor) =
+		let (chan, funding_msg_opt, monitor) =
 			match peer_state.channel_by_id.remove(&msg.temporary_channel_id) {
 				Some(ChannelPhase::UnfundedInboundV1(inbound_chan)) => {
 					match inbound_chan.funding_created(msg, best_block, &self.signer_provider, &self.logger) {
@@ -6255,9 +6255,12 @@ where
 				None => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.temporary_channel_id))
 			};
 
-		match peer_state.channel_by_id.entry(funding_msg.channel_id) {
+		match peer_state.channel_by_id.entry(chan.context.channel_id()) {
 			hash_map::Entry::Occupied(_) => {
-				Err(MsgHandleErrInternal::send_err_msg_no_close("Already had channel with the new channel_id".to_owned(), funding_msg.channel_id))
+				Err(MsgHandleErrInternal::send_err_msg_no_close(
+					"Already had channel with the new channel_id".to_owned(),
+					chan.context.channel_id()
+				))
 			},
 			hash_map::Entry::Vacant(e) => {
 				let mut id_to_peer_lock = self.id_to_peer.lock().unwrap();
@@ -6265,7 +6268,7 @@ where
 					hash_map::Entry::Occupied(_) => {
 						return Err(MsgHandleErrInternal::send_err_msg_no_close(
 							"The funding_created message had the same funding_txid as an existing channel - funding is not possible".to_owned(),
-							funding_msg.channel_id))
+							chan.context.channel_id()))
 					},
 					hash_map::Entry::Vacant(i_e) => {
 						let monitor_res = self.chain_monitor.watch_channel(monitor.get_funding_txo().0, monitor);
@@ -6277,10 +6280,12 @@ where
 						// hasn't persisted to disk yet - we can't lose money on a transaction that we haven't
 						// accepted payment from yet. We do, however, need to wait to send our channel_ready
 						// until we have persisted our monitor.
-						peer_state.pending_msg_events.push(events::MessageSendEvent::SendFundingSigned {
-							node_id: counterparty_node_id.clone(),
-							msg: funding_msg,
-						});
+						if let Some(msg) = funding_msg_opt {
+							peer_state.pending_msg_events.push(events::MessageSendEvent::SendFundingSigned {
+								node_id: counterparty_node_id.clone(),
+								msg,
+							});
+						}
 
 						if let ChannelPhase::Funded(chan) = e.insert(ChannelPhase::Funded(chan)) {
 							handle_new_monitor_update!(self, persist_state, peer_state_lock, peer_state,
@@ -6291,9 +6296,13 @@ where
 							Ok(())
 						} else {
 							log_error!(self.logger, "Persisting initial ChannelMonitor failed, implying the funding outpoint was duplicated");
+							let channel_id = match funding_msg_opt {
+								Some(msg) => msg.channel_id,
+								None => chan.context.channel_id(),
+							};
 							return Err(MsgHandleErrInternal::send_err_msg_no_close(
 								"The funding_created message had the same funding_txid as an existing channel - funding is not possible".to_owned(),
-								funding_msg.channel_id));
+								channel_id));
 						}
 					}
 				}
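
Note: the core pattern this patch introduces is to treat a signer failure as
"message not ready yet" rather than as a reason to close the channel: the
signing result becomes an Option, and when it is None a
`signer_pending_funding` flag is recorded so `funding_signed` can be built and
sent later. As a rough illustration only, the sketch below models that flow
with simplified, hypothetical types (AsyncSigner, FundingSignedMsg and
InboundChannel are stand-ins invented for this note, not rust-lightning APIs).

// Simplified sketch of the "defer instead of force-close" pattern in this patch.
// All names here are hypothetical stand-ins, not rust-lightning APIs.

/// A signer whose backend may be temporarily unreachable, mirroring how
/// sign_counterparty_commitment can fail while the signer is offline.
trait AsyncSigner {
    fn sign_counterparty_commitment(&self, commitment_tx: &[u8]) -> Result<[u8; 64], ()>;
}

/// Stand-in for msgs::FundingSigned.
struct FundingSignedMsg {
    channel_id: [u8; 32],
    signature: [u8; 64],
}

/// Stand-in for the channel plus its ChannelContext flag.
struct InboundChannel<S: AsyncSigner> {
    channel_id: [u8; 32],
    signer: S,
    /// Mirrors ChannelContext::signer_pending_funding: set when we still owe the
    /// counterparty a funding_signed because the signer was unavailable.
    signer_pending_funding: bool,
}

impl<S: AsyncSigner> InboundChannel<S> {
    /// Handle funding_created: rather than failing the channel when signing fails,
    /// return None and record that the message must be sent later.
    fn funding_created(&mut self, commitment_tx: &[u8]) -> Option<FundingSignedMsg> {
        // .ok() turns a signer error into "no signature yet", the same shape as the
        // .map(|(sig, _)| sig).ok() change in funding_created_signature above.
        let sig_opt = self.signer.sign_counterparty_commitment(commitment_tx).ok();
        match sig_opt {
            Some(signature) => Some(FundingSignedMsg { channel_id: self.channel_id, signature }),
            None => {
                // Signer unavailable: flag the pending message instead of closing.
                self.signer_pending_funding = true;
                None
            }
        }
    }
}

A caller of such a method only enqueues its SendFundingSigned-style event when
the returned message is Some, which is what the channelmanager.rs hunk above
does with funding_msg_opt; the retry itself is intentionally not implemented
yet, as the commit message notes.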