Return the counterparty node_id as a part of a force-shutdown res
author     Matt Corallo <git@bluematt.me>
           Wed, 10 May 2023 00:45:08 +0000 (00:45 +0000)
committer  Matt Corallo <git@bluematt.me>
           Tue, 30 May 2023 18:15:32 +0000 (18:15 +0000)
In the coming commits we'll need the counterparty node_id when
handling a background monitor update as we may need to resume
normal channel operation as a result. Thus, we go ahead and pipe it
through from the shutdown end, as it makes the codepaths
consistent.

Sadly, the monitor-originated shutdown case doesn't allow for a
required counterparty node_id as some versions of LDK didn't have
it present in the ChannelMonitor.
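
As a quick illustration of the resulting shape (a minimal sketch using simplified stand-in types, not the actual bitcoin/lightning definitions), the first element of the shutdown result now carries the counterparty's public key, which callers can ignore when they only need the monitor update:

// Sketch only: PublicKey, OutPoint and ChannelMonitorUpdate are simplified
// stand-ins for the real bitcoin/lightning structs.
#[derive(Clone, Debug)]
struct PublicKey([u8; 33]);
#[derive(Clone, Debug)]
struct OutPoint { txid: [u8; 32], index: u16 }
#[derive(Clone, Debug)]
struct ChannelMonitorUpdate { update_id: u64 }

// Previously the first element was Option<(OutPoint, ChannelMonitorUpdate)>;
// the counterparty node_id now rides along with the force-close update.
type ShutdownResult = (
    Option<(PublicKey, OutPoint, ChannelMonitorUpdate)>,
    Vec<([u8; 32], PublicKey)>, // failed HTLCs, heavily simplified
);

fn handle_shutdown(res: ShutdownResult) {
    if let Some((counterparty_node_id, funding_txo, update)) = res.0 {
        // Callers which don't need the key can destructure it as `_`; callers
        // which later resume per-peer operation keep it alongside the update.
        println!("force-close update {} for {:?} (peer {:?})",
            update.update_id, funding_txo, counterparty_node_id);
    }
}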

lightning/src/ln/channel.rs
lightning/src/ln/channelmanager.rs

diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs
index 2f8f418f27f4b06f313e05dc392fa00f6e1a0d2d..18ad49f3271109f9d5be43c2f5f947d80ebbef1e 100644
--- a/lightning/src/ln/channel.rs
+++ b/lightning/src/ln/channel.rs
@@ -434,7 +434,7 @@ pub(super) struct ReestablishResponses {
 
 /// The return type of `force_shutdown`
 pub(crate) type ShutdownResult = (
-       Option<(OutPoint, ChannelMonitorUpdate)>,
+       Option<(PublicKey, OutPoint, ChannelMonitorUpdate)>,
        Vec<(HTLCSource, PaymentHash, PublicKey, [u8; 32])>
 );
 
@@ -6263,7 +6263,7 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
                        // See test_duplicate_chan_id and test_pre_lockin_no_chan_closed_update for more.
                        if self.channel_state & (ChannelState::FundingSent as u32 | ChannelState::ChannelReady as u32 | ChannelState::ShutdownComplete as u32) != 0 {
                                self.latest_monitor_update_id = CLOSED_CHANNEL_UPDATE_ID;
-                               Some((funding_txo, ChannelMonitorUpdate {
+                               Some((self.get_counterparty_node_id(), funding_txo, ChannelMonitorUpdate {
                                        update_id: self.latest_monitor_update_id,
                                        updates: vec![ChannelMonitorUpdateStep::ChannelForceClosed { should_broadcast }],
                                }))
diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs
index c06a5872df5eec874bebddad45c0d387eb3a10c3..29d2ae81671086b47271a6fc78980ac33438dbdb 100644
--- a/lightning/src/ln/channelmanager.rs
+++ b/lightning/src/ln/channelmanager.rs
@@ -499,11 +499,26 @@ struct ClaimablePayments {
 /// for some reason. They are handled in timer_tick_occurred, so may be processed with
 /// quite some time lag.
 enum BackgroundEvent {
-       /// Handle a ChannelMonitorUpdate
+       /// Handle a ChannelMonitorUpdate which closes the channel. This is only separated from
+       /// [`Self::MonitorUpdateRegeneratedOnStartup`] as the non-closing variant needs a public key
+       /// to handle channel resumption, whereas if the channel has been force-closed we do not need
+       /// the counterparty node_id.
        ///
        /// Note that any such events are lost on shutdown, so in general they must be updates which
        /// are regenerated on startup.
-       MonitorUpdateRegeneratedOnStartup((OutPoint, ChannelMonitorUpdate)),
+       ClosingMonitorUpdateRegeneratedOnStartup((OutPoint, ChannelMonitorUpdate)),
+       /// Handle a ChannelMonitorUpdate which may or may not close the channel. In general this
+       /// should be used rather than [`Self::ClosingMonitorUpdateRegeneratedOnStartup`], however in
+       /// cases where the `counterparty_node_id` is not available as the channel has closed from a
+       /// [`ChannelMonitor`] error the other variant is acceptable.
+       ///
+       /// Note that any such events are lost on shutdown, so in general they must be updates which
+       /// are regenerated on startup.
+       MonitorUpdateRegeneratedOnStartup {
+               counterparty_node_id: PublicKey,
+               funding_txo: OutPoint,
+               update: ChannelMonitorUpdate
+       },
 }
 
 #[derive(Debug)]
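
The reason for splitting the two variants is spelled out in the docs above: the non-closing variant keeps the counterparty_node_id so that later commits can resume per-peer channel operation once the update completes. Below is a rough, hypothetical sketch of that kind of handler (simplified stand-in types, not ChannelManager's actual per_peer_state fields):

use std::collections::HashMap;

// Simplified stand-ins, as in the sketch under the commit message.
#[derive(Clone, Debug, PartialEq, Eq, Hash)]
struct PublicKey([u8; 33]);
#[derive(Clone, Debug)]
struct OutPoint { txid: [u8; 32], index: u16 }
#[derive(Clone, Debug)]
struct ChannelMonitorUpdate { update_id: u64 }
struct PeerState; // per-peer channel state, elided

enum BackgroundEvent {
    ClosingMonitorUpdateRegeneratedOnStartup((OutPoint, ChannelMonitorUpdate)),
    MonitorUpdateRegeneratedOnStartup {
        counterparty_node_id: PublicKey,
        funding_txo: OutPoint,
        update: ChannelMonitorUpdate,
    },
}

fn handle_background_event(peers: &mut HashMap<PublicKey, PeerState>, event: BackgroundEvent) {
    match event {
        // Channel already force-closed: just replay the update, no peer state needed.
        BackgroundEvent::ClosingMonitorUpdateRegeneratedOnStartup((funding_txo, update)) => {
            println!("replaying close update {} for {:?}", update.update_id, funding_txo);
        },
        // Channel may still be live: the node_id locates the peer whose channel
        // can be resumed once the monitor update completes.
        BackgroundEvent::MonitorUpdateRegeneratedOnStartup { counterparty_node_id, funding_txo, update } => {
            if let Some(_peer) = peers.get_mut(&counterparty_node_id) {
                println!("replaying update {} for {:?}", update.update_id, funding_txo);
            }
        },
    }
}
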
@@ -2193,7 +2208,7 @@ where
                        let receiver = HTLCDestination::NextHopChannel { node_id: Some(counterparty_node_id), channel_id };
                        self.fail_htlc_backwards_internal(&source, &payment_hash, &reason, receiver);
                }
-               if let Some((funding_txo, monitor_update)) = monitor_update_option {
+               if let Some((_, funding_txo, monitor_update)) = monitor_update_option {
                        // There isn't anything we can do if we get an update failure - we're already
                        // force-closing. The monitor update on the required in-memory copy should broadcast
                        // the latest local state, which is the best we can do anyway. Thus, it is safe to
@@ -3774,7 +3789,12 @@ where
 
                for event in background_events.drain(..) {
                        match event {
-                               BackgroundEvent::MonitorUpdateRegeneratedOnStartup((funding_txo, update)) => {
+                               BackgroundEvent::ClosingMonitorUpdateRegeneratedOnStartup((funding_txo, update)) => {
+                                       // The channel has already been closed, so no use bothering to care about the
+                                       // monitor updating completing.
+                                       let _ = self.chain_monitor.update_channel(funding_txo, &update);
+                               },
+                               BackgroundEvent::MonitorUpdateRegeneratedOnStartup { funding_txo, update, .. } => {
                                        // The channel has already been closed, so no use bothering to care about the
                                        // monitor updating completing.
                                        let _ = self.chain_monitor.update_channel(funding_txo, &update);
@@ -5689,12 +5709,15 @@ where
                        // Channel::force_shutdown tries to make us do) as we may still be in initialization,
                        // so we track the update internally and handle it when the user next calls
                        // timer_tick_occurred, guaranteeing we're running normally.
-                       if let Some((funding_txo, update)) = failure.0.take() {
+                       if let Some((counterparty_node_id, funding_txo, update)) = failure.0.take() {
                                assert_eq!(update.updates.len(), 1);
                                if let ChannelMonitorUpdateStep::ChannelForceClosed { should_broadcast } = update.updates[0] {
                                        assert!(should_broadcast);
                                } else { unreachable!(); }
-                               self.pending_background_events.lock().unwrap().push(BackgroundEvent::MonitorUpdateRegeneratedOnStartup((funding_txo, update)));
+                               self.pending_background_events.lock().unwrap().push(
+                                       BackgroundEvent::MonitorUpdateRegeneratedOnStartup {
+                                               counterparty_node_id, funding_txo, update
+                                       });
                        }
                        self.finish_force_close_channel(failure);
                }
@@ -7767,8 +7790,10 @@ where
                                        log_error!(args.logger, " The ChannelMonitor for channel {} is at update_id {} but the ChannelManager is at update_id {}.",
                                                log_bytes!(channel.channel_id()), monitor.get_latest_update_id(), channel.get_latest_monitor_update_id());
                                        let (monitor_update, mut new_failed_htlcs) = channel.force_shutdown(true);
-                                       if let Some(monitor_update) = monitor_update {
-                                               pending_background_events.push(BackgroundEvent::MonitorUpdateRegeneratedOnStartup(monitor_update));
+                                       if let Some((counterparty_node_id, funding_txo, update)) = monitor_update {
+                                               pending_background_events.push(BackgroundEvent::MonitorUpdateRegeneratedOnStartup {
+                                                       counterparty_node_id, funding_txo, update
+                                               });
                                        }
                                        failed_htlcs.append(&mut new_failed_htlcs);
                                        channel_closures.push_back((events::Event::ChannelClosed {
@@ -7843,7 +7868,7 @@ where
                                        update_id: CLOSED_CHANNEL_UPDATE_ID,
                                        updates: vec![ChannelMonitorUpdateStep::ChannelForceClosed { should_broadcast: true }],
                                };
-                               pending_background_events.push(BackgroundEvent::MonitorUpdateRegeneratedOnStartup((*funding_txo, monitor_update)));
+                               pending_background_events.push(BackgroundEvent::ClosingMonitorUpdateRegeneratedOnStartup((*funding_txo, monitor_update)));
                        }
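
The final hunk is the case the commit message flags: for monitors whose channels are already gone, the only possible source of a counterparty node_id is the ChannelMonitor itself, and monitors written by older LDK versions never stored one, so the closing-only variant is used. A hypothetical sketch of the fallback this implies (simplified stand-in types; the Option-typed monitor_counterparty parameter is an assumption for illustration, not an accessor added by this commit):

// Simplified stand-ins, as in the sketches above.
#[derive(Clone, Debug)]
struct PublicKey([u8; 33]);
#[derive(Clone, Debug)]
struct OutPoint { txid: [u8; 32], index: u16 }
#[derive(Clone, Debug)]
struct ChannelMonitorUpdate { update_id: u64 }

enum BackgroundEvent {
    ClosingMonitorUpdateRegeneratedOnStartup((OutPoint, ChannelMonitorUpdate)),
    MonitorUpdateRegeneratedOnStartup {
        counterparty_node_id: PublicKey,
        funding_txo: OutPoint,
        update: ChannelMonitorUpdate,
    },
}

fn force_close_event(
    monitor_counterparty: Option<PublicKey>, // None for monitors from old LDK versions
    funding_txo: OutPoint,
    update: ChannelMonitorUpdate,
) -> BackgroundEvent {
    match monitor_counterparty {
        Some(counterparty_node_id) => BackgroundEvent::MonitorUpdateRegeneratedOnStartup {
            counterparty_node_id, funding_txo, update,
        },
        // No node_id available: the closing-only variant is the best we can do.
        None => BackgroundEvent::ClosingMonitorUpdateRegeneratedOnStartup((funding_txo, update)),
    }
}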
                }