Use the new monitor persistence flow for `funding_created` handling
author    Matt Corallo <git@bluematt.me>
          Sat, 3 Dec 2022 03:15:04 +0000 (03:15 +0000)
committer Matt Corallo <git@bluematt.me>
          Wed, 22 Feb 2023 17:34:46 +0000 (17:34 +0000)
Building on the previous commits, this finishes our transition to
doing all message-sending in the monitor update completion
pipeline, unifying our immediate and async `ChannelMonitor`
update and persistence flows.
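
In miniature, the flow this series converges on looks something like the
sketch below. Everything here is a simplified stand-in rather than LDK's
real API; only the names `monitor_updating_paused`,
`monitor_updating_restored`, and `ChannelMonitorUpdateStatus` mirror the
code touched by this commit.

    // Toy model of the unified monitor-update completion pipeline. All
    // types are simplified stand-ins for LDK's internals.
    enum ChannelMonitorUpdateStatus { Completed, InProgress }

    #[derive(Default)]
    struct Channel { monitor_pending_channel_ready: bool }

    impl Channel {
        // Mirrors `monitor_updating_paused`: note which messages must
        // *not* be sent yet and must be regenerated once the monitor
        // write completes.
        fn monitor_updating_paused(&mut self, resend_channel_ready: bool) {
            self.monitor_pending_channel_ready |= resend_channel_ready;
        }

        // Mirrors `monitor_updating_restored`: runs when persistence
        // completes, returning the messages to (re)send now.
        fn monitor_updating_restored(&mut self) -> Vec<&'static str> {
            let mut msgs = Vec::new();
            if self.monitor_pending_channel_ready {
                self.monitor_pending_channel_ready = false;
                msgs.push("channel_ready");
            }
            msgs
        }
    }

    fn main() {
        let mut chan = Channel::default();
        // The channel *always* pauses before the monitor is handed off...
        chan.monitor_updating_paused(true);
        // ...so sync and async persisters share one code path:
        match ChannelMonitorUpdateStatus::Completed {
            // `Completed` just means the restore step runs immediately.
            ChannelMonitorUpdateStatus::Completed => {
                for msg in chan.monitor_updating_restored() {
                    println!("sending {} to peer", msg);
                }
            }
            // `InProgress`: hold everything until the persister signals
            // completion, at which point the same restore path runs.
            ChannelMonitorUpdateStatus::InProgress => {}
        }
    }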

lightning/src/ln/channel.rs
lightning/src/ln/channelmanager.rs
lightning/src/ln/functional_tests.rs

diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs
index 5ae6f81a605ed5fa619aaeea158cb4b5c53c7edc..515e10264fac63070d0588d9b0a3c57471d27eeb 100644
--- a/lightning/src/ln/channel.rs
+++ b/lightning/src/ln/channel.rs
@@ -2268,9 +2268,9 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
 
        pub fn funding_created<SP: Deref, L: Deref>(
                &mut self, msg: &msgs::FundingCreated, best_block: BestBlock, signer_provider: &SP, logger: &L
-       ) -> Result<(msgs::FundingSigned, ChannelMonitor<<SP::Target as SignerProvider>::Signer>, Option<msgs::ChannelReady>), ChannelError>
+       ) -> Result<(msgs::FundingSigned, ChannelMonitor<Signer>), ChannelError>
        where
-               SP::Target: SignerProvider,
+               SP::Target: SignerProvider<Signer = Signer>,
                L::Target: Logger
        {
                if self.is_outbound() {
@@ -2346,10 +2346,13 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
 
                log_info!(logger, "Generated funding_signed for peer for channel {}", log_bytes!(self.channel_id()));
 
+               let need_channel_ready = self.check_get_channel_ready(0).is_some();
+               self.monitor_updating_paused(false, false, need_channel_ready, Vec::new(), Vec::new(), Vec::new());
+
                Ok((msgs::FundingSigned {
                        channel_id: self.channel_id,
                        signature
-               }, channel_monitor, self.check_get_channel_ready(0)))
+               }, channel_monitor))
        }
 
        /// Handles a funding_signed message from the remote end.
@@ -3740,15 +3743,17 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
        }
 
        /// Indicates that a ChannelMonitor update is in progress and has not yet been fully persisted.
-       /// This must be called immediately after the [`chain::Watch`] call which returned
-       /// [`ChannelMonitorUpdateStatus::InProgress`].
+       /// This must be called before we return the [`ChannelMonitorUpdate`] back to the
+       /// [`ChannelManager`], which will call [`Self::monitor_updating_restored`] once the monitor
+       /// update completes (potentially immediately).
        /// The messages which were generated with the monitor update must *not* have been sent to the
        /// remote end, and must instead have been dropped. They will be regenerated when
        /// [`Self::monitor_updating_restored`] is called.
        ///
+       /// [`ChannelManager`]: super::channelmanager::ChannelManager
        /// [`chain::Watch`]: crate::chain::Watch
        /// [`ChannelMonitorUpdateStatus::InProgress`]: crate::chain::ChannelMonitorUpdateStatus::InProgress
-       pub fn monitor_updating_paused(&mut self, resend_raa: bool, resend_commitment: bool,
+       fn monitor_updating_paused(&mut self, resend_raa: bool, resend_commitment: bool,
                resend_channel_ready: bool, mut pending_forwards: Vec<(PendingHTLCInfo, u64)>,
                mut pending_fails: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>,
                mut pending_finalized_claimed_htlcs: Vec<HTLCSource>
@@ -7189,7 +7194,7 @@ mod tests {
                }]};
                let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 };
                let funding_created_msg = node_a_chan.get_outbound_funding_created(tx.clone(), funding_outpoint, &&logger).unwrap();
-               let (funding_signed_msg, _, _) = node_b_chan.funding_created(&funding_created_msg, best_block, &&keys_provider, &&logger).unwrap();
+               let (funding_signed_msg, _) = node_b_chan.funding_created(&funding_created_msg, best_block, &&keys_provider, &&logger).unwrap();
 
                // Node B --> Node A: funding signed
                let _ = node_a_chan.funding_signed(&funding_signed_msg, best_block, &&keys_provider, &&logger);
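
A subtle piece of the first hunk above is the tightened bound
`SP::Target: SignerProvider<Signer = Signer>`: returning
`ChannelMonitor<Signer>` instead of
`ChannelMonitor<<SP::Target as SignerProvider>::Signer>` only typechecks
once the provider's associated signer type is pinned to the channel's own
`Signer` parameter. A stripped-down illustration with toy traits (nothing
below is LDK's actual API):

    use std::marker::PhantomData;
    use std::ops::Deref;

    // Toy stand-ins for the real LDK types.
    trait SignerProvider { type Signer; }
    struct ChannelMonitor<S>(PhantomData<S>);
    struct Channel<S>(PhantomData<S>);

    impl<Signer> Channel<Signer> {
        fn funding_created<SP: Deref>(
            &mut self, _signer_provider: &SP,
        ) -> ChannelMonitor<Signer>
        where
            // Without `<Signer = Signer>` the compiler has no way to know
            // the provider's signer and the channel's signer are the same
            // type, so `ChannelMonitor<Signer>` would not typecheck here.
            SP::Target: SignerProvider<Signer = Signer>,
        {
            ChannelMonitor(PhantomData)
        }
    }

    struct Keys;
    struct Provider;
    impl SignerProvider for Provider { type Signer = Keys; }

    fn main() {
        let provider = Provider;
        let mut chan: Channel<Keys> = Channel(PhantomData);
        // The monitor's signer type now unifies with the channel's.
        let _monitor: ChannelMonitor<Keys> = chan.funding_created(&&provider);
    }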
diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs
index 2071fd51bbb4a29de0f910d0de26e3b34c305dc4..bdfd29530db05426ade0390fa5079dfd3ede5790 100644
--- a/lightning/src/ln/channelmanager.rs
+++ b/lightning/src/ln/channelmanager.rs
@@ -4352,60 +4352,31 @@ where
        }
 
        fn internal_funding_created(&self, counterparty_node_id: &PublicKey, msg: &msgs::FundingCreated) -> Result<(), MsgHandleErrInternal> {
+               let best_block = *self.best_block.read().unwrap();
+
                let per_peer_state = self.per_peer_state.read().unwrap();
                let peer_state_mutex = per_peer_state.get(counterparty_node_id)
                        .ok_or_else(|| {
                                debug_assert!(false);
                                MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), msg.temporary_channel_id)
                        })?;
-               let ((funding_msg, monitor, mut channel_ready), mut chan) = {
-                       let best_block = *self.best_block.read().unwrap();
-                       let mut peer_state_lock = peer_state_mutex.lock().unwrap();
-                       let peer_state = &mut *peer_state_lock;
+
+               let mut peer_state_lock = peer_state_mutex.lock().unwrap();
+               let peer_state = &mut *peer_state_lock;
+               let ((funding_msg, monitor), chan) =
                        match peer_state.channel_by_id.entry(msg.temporary_channel_id) {
                                hash_map::Entry::Occupied(mut chan) => {
                                        (try_chan_entry!(self, chan.get_mut().funding_created(msg, best_block, &self.signer_provider, &self.logger), chan), chan.remove())
                                },
                                hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.temporary_channel_id))
-                       }
-               };
-               // Because we have exclusive ownership of the channel here we can release the peer_state
-               // lock before watch_channel
-               match self.chain_monitor.watch_channel(monitor.get_funding_txo().0, monitor) {
-                       ChannelMonitorUpdateStatus::Completed => {},
-                       ChannelMonitorUpdateStatus::PermanentFailure => {
-                               // Note that we reply with the new channel_id in error messages if we gave up on the
-                               // channel, not the temporary_channel_id. This is compatible with ourselves, but the
-                               // spec is somewhat ambiguous here. Not a huge deal since we'll send error messages for
-                               // any messages referencing a previously-closed channel anyway.
-                               // We do not propagate the monitor update to the user as it would be for a monitor
-                               // that we didn't manage to store (and that we don't care about - we don't respond
-                               // with the funding_signed so the channel can never go on chain).
-                               let (_monitor_update, failed_htlcs) = chan.force_shutdown(false);
-                               assert!(failed_htlcs.is_empty());
-                               return Err(MsgHandleErrInternal::send_err_msg_no_close("ChannelMonitor storage failure".to_owned(), funding_msg.channel_id));
-                       },
-                       ChannelMonitorUpdateStatus::InProgress => {
-                               // There's no problem signing a counterparty's funding transaction if our monitor
-                               // hasn't persisted to disk yet - we can't lose money on a transaction that we haven't
-                               // accepted payment from yet. We do, however, need to wait to send our channel_ready
-                               // until we have persisted our monitor.
-                               chan.monitor_updating_paused(false, false, channel_ready.is_some(), Vec::new(), Vec::new(), Vec::new());
-                               channel_ready = None; // Don't send the channel_ready now
-                       },
-               }
-               // It's safe to unwrap as we've held the `per_peer_state` read lock since checking that the
-               // peer exists, despite the inner PeerState potentially having no channels after removing
-               // the channel above.
-               let mut peer_state_lock = peer_state_mutex.lock().unwrap();
-               let peer_state = &mut *peer_state_lock;
+                       };
+
                match peer_state.channel_by_id.entry(funding_msg.channel_id) {
                        hash_map::Entry::Occupied(_) => {
-                               return Err(MsgHandleErrInternal::send_err_msg_no_close("Already had channel with the new channel_id".to_owned(), funding_msg.channel_id))
+                               Err(MsgHandleErrInternal::send_err_msg_no_close("Already had channel with the new channel_id".to_owned(), funding_msg.channel_id))
                        },
                        hash_map::Entry::Vacant(e) => {
-                               let mut id_to_peer = self.id_to_peer.lock().unwrap();
-                               match id_to_peer.entry(chan.channel_id()) {
+                               match self.id_to_peer.lock().unwrap().entry(chan.channel_id()) {
                                        hash_map::Entry::Occupied(_) => {
                                                return Err(MsgHandleErrInternal::send_err_msg_no_close(
                                                        "The funding_created message had the same funding_txid as an existing channel - funding is not possible".to_owned(),
@@ -4415,17 +4386,35 @@ where
                                                i_e.insert(chan.get_counterparty_node_id());
                                        }
                                }
+
+                               // There's no problem signing a counterparty's funding transaction if our monitor
+                               // hasn't persisted to disk yet - we can't lose money on a transaction that we haven't
+                               // accepted payment from yet. We do, however, need to wait to send our channel_ready
+                               // until we have persisted our monitor.
+                               let new_channel_id = funding_msg.channel_id;
                                peer_state.pending_msg_events.push(events::MessageSendEvent::SendFundingSigned {
                                        node_id: counterparty_node_id.clone(),
                                        msg: funding_msg,
                                });
-                               if let Some(msg) = channel_ready {
-                                       send_channel_ready!(self, peer_state.pending_msg_events, chan, msg);
+
+                               let monitor_res = self.chain_monitor.watch_channel(monitor.get_funding_txo().0, monitor);
+
+                               let chan = e.insert(chan);
+                               let mut res = handle_new_monitor_update!(self, monitor_res, 0, peer_state_lock, peer_state, chan, MANUALLY_REMOVING, { peer_state.channel_by_id.remove(&new_channel_id) });
+
+                               // Note that we reply with the new channel_id in error messages if we gave up on the
+                               // channel, not the temporary_channel_id. This is compatible with ourselves, but the
+                               // spec is somewhat ambiguous here. Not a huge deal since we'll send error messages for
+                               // any messages referencing a previously-closed channel anyway.
+                               // We do not propagate the monitor update to the user as it would be for a monitor
+                               // that we didn't manage to store (and that we don't care about - we don't respond
+                               // with the funding_signed so the channel can never go on chain).
+                               if let Err(MsgHandleErrInternal { shutdown_finish: Some((res, _)), .. }) = &mut res {
+                                       res.0 = None;
                                }
-                               e.insert(chan);
+                               res
                        }
                }
-               Ok(())
        }
 
        fn internal_funding_signed(&self, counterparty_node_id: &PublicKey, msg: &msgs::FundingSigned) -> Result<(), MsgHandleErrInternal> {
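
The `res.0 = None` rewrite near the end of the new
`internal_funding_created` is easy to misread, so here it is in isolation,
with minimal stand-in types rather than LDK's real ones: if
`handle_new_monitor_update!` failed the channel, the shutdown result would
carry a `ChannelMonitorUpdate`, but the monitor itself was never stored,
so there is nothing for the user to apply the update to and it is dropped
before the error propagates.

    // Minimal stand-ins: the first field of the shutdown result models
    // the optional monitor update LDK would hand back to the user.
    struct ShutdownResult(Option<&'static str>, Vec<u8>);
    struct MsgHandleErrInternal { shutdown_finish: Option<(ShutdownResult, ())> }

    fn strip_unpersisted_update(res: &mut Result<(), MsgHandleErrInternal>) {
        // We never sent `funding_signed`, so this channel can never go on
        // chain: an update for a monitor we failed to store is meaningless.
        if let Err(MsgHandleErrInternal { shutdown_finish: Some((res, _)) }) = res {
            res.0 = None;
        }
    }

    fn main() {
        let mut res: Result<(), MsgHandleErrInternal> = Err(MsgHandleErrInternal {
            shutdown_finish: Some((ShutdownResult(Some("update"), Vec::new()), ())),
        });
        strip_unpersisted_update(&mut res);
        if let Err(MsgHandleErrInternal { shutdown_finish: Some((inner, _)) }) = &res {
            // The error no longer carries the unstored monitor update.
            assert!(inner.0.is_none());
        }
    }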
diff --git a/lightning/src/ln/functional_tests.rs b/lightning/src/ln/functional_tests.rs
index fbd0e133fe50923a60f2b9764825cf9d92e3a1b3..aa258b5713988ec56119f6caaa2920d271c0d4f8 100644
--- a/lightning/src/ln/functional_tests.rs
+++ b/lightning/src/ln/functional_tests.rs
@@ -8736,9 +8736,9 @@ fn test_duplicate_chan_id() {
        };
        check_added_monitors!(nodes[0], 0);
        nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created);
-       // At this point we'll try to add a duplicate channel monitor, which will be rejected, but
-       // still needs to be cleared here.
-       check_added_monitors!(nodes[1], 1);
+       // At this point we'll look up if the channel_id is present and immediately fail the channel
+       // without trying to persist the `ChannelMonitor`.
+       check_added_monitors!(nodes[1], 0);
 
        // ...still, nodes[1] will reject the duplicate channel.
        {
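
The updated assertion reflects the reordering in
`internal_funding_created`: the duplicate `channel_id` is now caught by
the `channel_by_id` entry lookup before `watch_channel` runs, so the
rejected duplicate never hands a monitor to the persister. A toy version
of that ordering, with hypothetical types in place of the real ones:

    use std::collections::{hash_map, HashMap};

    // Returns Err *before* any monitor is persisted when the channel_id
    // is already taken -- the ordering this commit's test change asserts.
    fn handle_funding_created(
        channel_by_id: &mut HashMap<u64, ()>,
        channel_id: u64,
        monitors_added: &mut u32,
    ) -> Result<(), &'static str> {
        match channel_by_id.entry(channel_id) {
            hash_map::Entry::Occupied(_) =>
                Err("Already had channel with the new channel_id"),
            hash_map::Entry::Vacant(e) => {
                // Stand-in for `chain_monitor.watch_channel(..)`: only
                // reached once we know the channel_id is fresh.
                *monitors_added += 1;
                e.insert(());
                Ok(())
            }
        }
    }

    fn main() {
        let (mut channels, mut monitors_added) = (HashMap::new(), 0u32);
        assert!(handle_funding_created(&mut channels, 42, &mut monitors_added).is_ok());
        assert!(handle_funding_created(&mut channels, 42, &mut monitors_added).is_err());
        // Matches the updated `check_added_monitors!(nodes[1], 0)`: the
        // duplicate attempt added no monitor.
        assert_eq!(monitors_added, 1);
    }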