From: Matt Corallo
Date: Sat, 3 Dec 2022 03:15:04 +0000 (+0000)
Subject: Use the new monitor persistence flow for `funding_created` handling
X-Git-Tag: v0.0.114-beta~9^2~1
X-Git-Url: http://git.bitcoin.ninja/?a=commitdiff_plain;h=685b08d8c13c62a3a4c4cf283c3d86b96fd3de23;p=rust-lightning

Use the new monitor persistence flow for `funding_created` handling

Building on the previous commits, this finishes our transition to
doing all message-sending in the monitor update completion
pipeline, unifying our immediate- and async- `ChannelMonitor`
update and persistence flows.
---
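To make the shape of that pipeline concrete, the following is a minimal,
self-contained sketch of the idea, using invented names (`ToyChannel`,
`held_channel_ready`) rather than LDK's real types: `funding_created` parks
any outbound messages behind the pending monitor update, and a single
restore-style completion path releases them whether persistence finished
synchronously or asynchronously.

    // A sketch with invented names, not LDK's real API: one completion path
    // releases messages that were generated under a pending monitor update.
    struct ToyChannel {
        monitor_update_pending: bool,
        held_channel_ready: bool, // generated but deliberately unsent message
    }

    impl ToyChannel {
        // Analogue of `funding_created` after this patch: always pause, and
        // never hand an optional `channel_ready` back to the caller.
        fn funding_created(&mut self) {
            self.monitor_update_pending = true;
            // Stand-in for `check_get_channel_ready(0).is_some()`.
            self.held_channel_ready = true;
        }

        // Analogue of `monitor_updating_restored`: regenerate and release
        // the held messages once the monitor update completes.
        fn monitor_updating_restored(&mut self) -> Vec<&'static str> {
            self.monitor_update_pending = false;
            if std::mem::take(&mut self.held_channel_ready) {
                vec!["channel_ready"]
            } else {
                Vec::new()
            }
        }
    }

    fn main() {
        let mut chan = ToyChannel { monitor_update_pending: false, held_channel_ready: false };
        chan.funding_created();

        // With `ChannelMonitorUpdateStatus::Completed` this runs right away;
        // with `InProgress` the same call runs later, when the persister
        // reports completion. Either way it is the same code path.
        assert_eq!(chan.monitor_updating_restored(), vec!["channel_ready"]);
        assert!(!chan.monitor_update_pending);
    }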
diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs
index 5ae6f81a6..515e10264 100644
--- a/lightning/src/ln/channel.rs
+++ b/lightning/src/ln/channel.rs
@@ -2268,9 +2268,9 @@ impl<Signer: Sign> Channel<Signer> {
 
 	pub fn funding_created<SP: Deref, L: Deref>(
 		&mut self, msg: &msgs::FundingCreated, best_block: BestBlock, signer_provider: &SP, logger: &L
-	) -> Result<(msgs::FundingSigned, ChannelMonitor<<SP::Target as SignerProvider>::Signer>, Option<msgs::ChannelReady>), ChannelError>
+	) -> Result<(msgs::FundingSigned, ChannelMonitor<Signer>), ChannelError>
 	where
-		SP::Target: SignerProvider,
+		SP::Target: SignerProvider<Signer = Signer>,
 		L::Target: Logger
 	{
 		if self.is_outbound() {
@@ -2346,10 +2346,13 @@ impl<Signer: Sign> Channel<Signer> {
 
 		log_info!(logger, "Generated funding_signed for peer for channel {}", log_bytes!(self.channel_id()));
 
+		let need_channel_ready = self.check_get_channel_ready(0).is_some();
+		self.monitor_updating_paused(false, false, need_channel_ready, Vec::new(), Vec::new(), Vec::new());
+
 		Ok((msgs::FundingSigned {
 			channel_id: self.channel_id,
 			signature
-		}, channel_monitor, self.check_get_channel_ready(0)))
+		}, channel_monitor))
 	}
 
 	/// Handles a funding_signed message from the remote end.
@@ -3740,15 +3743,17 @@ impl<Signer: Sign> Channel<Signer> {
 	}
 
 	/// Indicates that a ChannelMonitor update is in progress and has not yet been fully persisted.
-	/// This must be called immediately after the [`chain::Watch`] call which returned
-	/// [`ChannelMonitorUpdateStatus::InProgress`].
+	/// This must be called before we return the [`ChannelMonitorUpdate`] back to the
+	/// [`ChannelManager`], which will call [`Self::monitor_updating_restored`] once the monitor
+	/// update completes (potentially immediately).
 	/// The messages which were generated with the monitor update must *not* have been sent to the
 	/// remote end, and must instead have been dropped. They will be regenerated when
 	/// [`Self::monitor_updating_restored`] is called.
 	///
+	/// [`ChannelManager`]: super::channelmanager::ChannelManager
 	/// [`chain::Watch`]: crate::chain::Watch
 	/// [`ChannelMonitorUpdateStatus::InProgress`]: crate::chain::ChannelMonitorUpdateStatus::InProgress
-	pub fn monitor_updating_paused(&mut self, resend_raa: bool, resend_commitment: bool,
+	fn monitor_updating_paused(&mut self, resend_raa: bool, resend_commitment: bool,
 		resend_channel_ready: bool, mut pending_forwards: Vec<(PendingHTLCInfo, u64)>,
 		mut pending_fails: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>,
 		mut pending_finalized_claimed_htlcs: Vec<HTLCSource>
@@ -7189,7 +7194,7 @@ mod tests {
 		}]};
 		let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 };
 		let funding_created_msg = node_a_chan.get_outbound_funding_created(tx.clone(), funding_outpoint, &&logger).unwrap();
-		let (funding_signed_msg, _, _) = node_b_chan.funding_created(&funding_created_msg, best_block, &&keys_provider, &&logger).unwrap();
+		let (funding_signed_msg, _) = node_b_chan.funding_created(&funding_created_msg, best_block, &&keys_provider, &&logger).unwrap();
 
 		// Node B --> Node A: funding signed
 		let _ = node_a_chan.funding_signed(&funding_signed_msg, best_block, &&keys_provider, &&logger);
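On the signature change itself: the new `SP::Target: SignerProvider<Signer =
Signer>` bound ties the provider's associated signer type to the channel's
own `Signer` parameter, which is what lets the return type be written as
`ChannelMonitor<Signer>` instead of `ChannelMonitor<<SP::Target as
SignerProvider>::Signer>`. A compilable toy version of that pattern, where
every name (`Provider`, `Monitor`, `KeysManager`) is an invented stand-in:

    use std::marker::PhantomData;
    use std::ops::Deref;

    trait Provider { type Signer; }

    struct Monitor<S>(PhantomData<S>);
    struct Channel<Signer>(PhantomData<Signer>);

    impl<Signer> Channel<Signer> {
        // With `SP::Target: Provider<Signer = Signer>`, the compiler knows
        // the provider's signer *is* this channel's `Signer`, so the return
        // type can name `Signer` directly.
        fn funding_created<SP: Deref>(&mut self, _signer_provider: &SP) -> Monitor<Signer>
        where
            SP::Target: Provider<Signer = Signer>,
        {
            Monitor(PhantomData)
        }
    }

    struct KeysManager;
    impl Provider for KeysManager { type Signer = u8; }

    fn main() {
        let mut chan: Channel<u8> = Channel(PhantomData);
        // `&&KeysManager`: SP = &KeysManager, SP::Target = KeysManager,
        // whose associated Signer (u8) matches the channel's own parameter.
        let _monitor: Monitor<u8> = chan.funding_created(&&KeysManager);
    }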
diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs
index 2071fd51b..bdfd29530 100644
--- a/lightning/src/ln/channelmanager.rs
+++ b/lightning/src/ln/channelmanager.rs
@@ -4352,60 +4352,31 @@ where
 	}
 
 	fn internal_funding_created(&self, counterparty_node_id: &PublicKey, msg: &msgs::FundingCreated) -> Result<(), MsgHandleErrInternal> {
+		let best_block = *self.best_block.read().unwrap();
+
 		let per_peer_state = self.per_peer_state.read().unwrap();
 		let peer_state_mutex = per_peer_state.get(counterparty_node_id)
 			.ok_or_else(|| {
 				debug_assert!(false);
 				MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), msg.temporary_channel_id)
 			})?;
-		let ((funding_msg, monitor, mut channel_ready), mut chan) = {
-			let best_block = *self.best_block.read().unwrap();
-			let mut peer_state_lock = peer_state_mutex.lock().unwrap();
-			let peer_state = &mut *peer_state_lock;
+
+		let mut peer_state_lock = peer_state_mutex.lock().unwrap();
+		let peer_state = &mut *peer_state_lock;
+		let ((funding_msg, monitor), chan) =
 			match peer_state.channel_by_id.entry(msg.temporary_channel_id) {
 				hash_map::Entry::Occupied(mut chan) => {
 					(try_chan_entry!(self, chan.get_mut().funding_created(msg, best_block, &self.signer_provider, &self.logger), chan), chan.remove())
 				},
 				hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.temporary_channel_id))
-			}
-		};
-		// Because we have exclusive ownership of the channel here we can release the peer_state
-		// lock before watch_channel
-		match self.chain_monitor.watch_channel(monitor.get_funding_txo().0, monitor) {
-			ChannelMonitorUpdateStatus::Completed => {},
-			ChannelMonitorUpdateStatus::PermanentFailure => {
-				// Note that we reply with the new channel_id in error messages if we gave up on the
-				// channel, not the temporary_channel_id. This is compatible with ourselves, but the
-				// spec is somewhat ambiguous here. Not a huge deal since we'll send error messages for
-				// any messages referencing a previously-closed channel anyway.
-				// We do not propagate the monitor update to the user as it would be for a monitor
-				// that we didn't manage to store (and that we don't care about - we don't respond
-				// with the funding_signed so the channel can never go on chain).
-				let (_monitor_update, failed_htlcs) = chan.force_shutdown(false);
-				assert!(failed_htlcs.is_empty());
-				return Err(MsgHandleErrInternal::send_err_msg_no_close("ChannelMonitor storage failure".to_owned(), funding_msg.channel_id));
-			},
-			ChannelMonitorUpdateStatus::InProgress => {
-				// There's no problem signing a counterparty's funding transaction if our monitor
-				// hasn't persisted to disk yet - we can't lose money on a transaction that we haven't
-				// accepted payment from yet. We do, however, need to wait to send our channel_ready
-				// until we have persisted our monitor.
-				chan.monitor_updating_paused(false, false, channel_ready.is_some(), Vec::new(), Vec::new(), Vec::new());
-				channel_ready = None; // Don't send the channel_ready now
-			},
-		}
-		// It's safe to unwrap as we've held the `per_peer_state` read lock since checking that the
-		// peer exists, despite the inner PeerState potentially having no channels after removing
-		// the channel above.
-		let mut peer_state_lock = peer_state_mutex.lock().unwrap();
-		let peer_state = &mut *peer_state_lock;
+			};
+
 		match peer_state.channel_by_id.entry(funding_msg.channel_id) {
 			hash_map::Entry::Occupied(_) => {
-				return Err(MsgHandleErrInternal::send_err_msg_no_close("Already had channel with the new channel_id".to_owned(), funding_msg.channel_id))
+				Err(MsgHandleErrInternal::send_err_msg_no_close("Already had channel with the new channel_id".to_owned(), funding_msg.channel_id))
 			},
 			hash_map::Entry::Vacant(e) => {
-				let mut id_to_peer = self.id_to_peer.lock().unwrap();
-				match id_to_peer.entry(chan.channel_id()) {
+				match self.id_to_peer.lock().unwrap().entry(chan.channel_id()) {
 					hash_map::Entry::Occupied(_) => {
 						return Err(MsgHandleErrInternal::send_err_msg_no_close(
 							"The funding_created message had the same funding_txid as an existing channel - funding is not possible".to_owned(),
@@ -4415,17 +4386,35 @@ where
 						i_e.insert(chan.get_counterparty_node_id());
 					}
 				}
+
+				// There's no problem signing a counterparty's funding transaction if our monitor
+				// hasn't persisted to disk yet - we can't lose money on a transaction that we haven't
+				// accepted payment from yet. We do, however, need to wait to send our channel_ready
+				// until we have persisted our monitor.
+				let new_channel_id = funding_msg.channel_id;
 				peer_state.pending_msg_events.push(events::MessageSendEvent::SendFundingSigned {
 					node_id: counterparty_node_id.clone(),
 					msg: funding_msg,
 				});
-				if let Some(msg) = channel_ready {
-					send_channel_ready!(self, peer_state.pending_msg_events, chan, msg);
+
+				let monitor_res = self.chain_monitor.watch_channel(monitor.get_funding_txo().0, monitor);
+
+				let chan = e.insert(chan);
+				let mut res = handle_new_monitor_update!(self, monitor_res, 0, peer_state_lock, peer_state, chan, MANUALLY_REMOVING, { peer_state.channel_by_id.remove(&new_channel_id) });
+
+				// Note that we reply with the new channel_id in error messages if we gave up on the
+				// channel, not the temporary_channel_id. This is compatible with ourselves, but the
+				// spec is somewhat ambiguous here. Not a huge deal since we'll send error messages for
+				// any messages referencing a previously-closed channel anyway.
+				// We do not propagate the monitor update to the user as it would be for a monitor
+				// that we didn't manage to store (and that we don't care about - we don't respond
+				// with the funding_signed so the channel can never go on chain).
+				if let Err(MsgHandleErrInternal { shutdown_finish: Some((res, _)), .. }) = &mut res {
+					res.0 = None;
 				}
-				e.insert(chan);
+				res
 			}
 		}
-		Ok(())
 	}
 
 	fn internal_funding_signed(&self, counterparty_node_id: &PublicKey, msg: &msgs::FundingSigned) -> Result<(), MsgHandleErrInternal> {
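The last hunk above has one subtle step: if `handle_new_monitor_update!`
fails and produces a shutdown result, the monitor update inside it is
cleared (`res.0 = None`) because it belongs to a monitor the user never
stored. A simplified, compilable model of just that manipulation, with
invented types standing in for `MsgHandleErrInternal` and friends:

    // Invented stand-ins, only to show the `res.0 = None` move: keep the
    // error, drop the monitor update for the never-stored monitor.
    struct MonitorUpdate;

    // Field 0: the monitor update (if any); field 1: failed HTLCs (elided).
    struct ShutdownResult(Option<MonitorUpdate>, Vec<&'static str>);

    struct HandleError {
        shutdown_finish: Option<(ShutdownResult, Option<&'static str>)>,
    }

    fn strip_unstored_monitor_update(mut res: Result<(), HandleError>) -> Result<(), HandleError> {
        if let Err(HandleError { shutdown_finish: Some((shutdown, _)) }) = &mut res {
            // The monitor was never persisted, so no update reaches the user.
            shutdown.0 = None;
        }
        res
    }

    fn main() {
        let failing: Result<(), HandleError> = Err(HandleError {
            shutdown_finish: Some((ShutdownResult(Some(MonitorUpdate), Vec::new()), None)),
        });
        match strip_unstored_monitor_update(failing) {
            Err(HandleError { shutdown_finish: Some((shutdown, _)) }) => {
                assert!(shutdown.0.is_none()); // no monitor update leaks out
                assert!(shutdown.1.is_empty());
            }
            _ => unreachable!(),
        }
    }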
diff --git a/lightning/src/ln/functional_tests.rs b/lightning/src/ln/functional_tests.rs
index fbd0e133f..aa258b571 100644
--- a/lightning/src/ln/functional_tests.rs
+++ b/lightning/src/ln/functional_tests.rs
@@ -8736,9 +8736,9 @@ fn test_duplicate_chan_id() {
 	};
 	check_added_monitors!(nodes[0], 0);
 	nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created);
-	// At this point we'll try to add a duplicate channel monitor, which will be rejected, but
-	// still needs to be cleared here.
-	check_added_monitors!(nodes[1], 1);
+	// At this point we'll look up if the channel_id is present and immediately fail the channel
+	// without trying to persist the `ChannelMonitor`.
+	check_added_monitors!(nodes[1], 0);
 
 	// ...still, nodes[1] will reject the duplicate channel.
 	{
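The updated expectation mirrors the new control flow: a duplicate
channel_id is now rejected before `watch_channel` is ever called, so no
monitor is added. A toy version of what the test asserts, where `Node`,
`handle_funding_created`, and the counter are invented stand-ins for the
test harness:

    use std::collections::HashMap;

    struct Node {
        channels: HashMap<u32, &'static str>,
        monitors_added: usize, // analogue of what check_added_monitors! counts
    }

    impl Node {
        fn handle_funding_created(&mut self, channel_id: u32) -> Result<(), &'static str> {
            if self.channels.contains_key(&channel_id) {
                // Post-change: fail fast, before any monitor persistence.
                return Err("Already had channel with the new channel_id");
            }
            self.channels.insert(channel_id, "open");
            self.monitors_added += 1; // only reached for genuinely new channels
            Ok(())
        }
    }

    fn main() {
        let mut node = Node { channels: HashMap::new(), monitors_added: 0 };
        node.handle_funding_created(42).unwrap();
        assert_eq!(node.monitors_added, 1);

        // Duplicate: rejected up front, counter unchanged, the analogue of
        // `check_added_monitors!(nodes[1], 0)` in the updated test.
        assert!(node.handle_funding_created(42).is_err());
        assert_eq!(node.monitors_added, 1);
    }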