Move `Channel::force_shutdown` to `ChannelContext` impl
author    Duncan Dean <git@dunxen.dev>
          Tue, 13 Jun 2023 15:31:28 +0000 (17:31 +0200)
committer Duncan Dean <git@dunxen.dev>
          Thu, 15 Jun 2023 10:51:45 +0000 (12:51 +0200)
lightning/src/ln/channel.rs
lightning/src/ln/channelmanager.rs
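
In short, the method body moves near-verbatim from `Channel` (where it read `self.context.*`) into `ChannelContext` (where it reads `self.*`), and every call site changes from `chan.force_shutdown(...)` to `chan.context.force_shutdown(...)`. A minimal sketch of the shape of the refactor follows; the struct fields and method body below are illustrative assumptions, not the real definitions (the real types are also generic over a `Signer`, omitted here):

    // Sketch only: the real structs carry many more fields.
    struct ChannelContext { channel_state: u32, update_time_counter: u32 }
    struct Channel { context: ChannelContext }

    impl ChannelContext {
        // Moved here from `impl Channel`: the body now accesses `self.field`
        // directly instead of `self.context.field`.
        pub fn force_shutdown(&mut self, _should_broadcast: bool) {
            self.update_time_counter += 1; // stands in for the real body
        }
    }

    fn call_site(chan: &mut Channel) {
        // before the commit: chan.force_shutdown(true);
        chan.context.force_shutdown(true); // after the commit
    }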

diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs
index 7e983fa7788643b30407aaf55303d5fb5165f91f..bedaa576e1fdf7bd9d9e816981e84f32db4b6b18 100644
--- a/lightning/src/ln/channel.rs
+++ b/lightning/src/ln/channel.rs
@@ -1874,6 +1874,52 @@ impl<Signer: ChannelSigner> ChannelContext<Signer> {
                        None
                }
        }
+
+       /// Gets the latest commitment transaction and any dependent transactions for relay (forcing
+       /// shutdown of this channel - no more calls into this Channel may be made afterwards except
+       /// those explicitly stated to be allowed after shutdown completes, e.g. some simple getters).
+       /// Also returns the list of payment_hashes for HTLCs which we can safely fail backwards
+       /// immediately (others we will have to allow to time out).
+       pub fn force_shutdown(&mut self, should_broadcast: bool) -> ShutdownResult {
+               // Note that we MUST only generate a monitor update that indicates force-closure - we're
+               // called during initialization prior to the chain_monitor in the encompassing ChannelManager
+               // being fully configured in some cases. Thus, it's likely any monitor events we generate will
+               // be delayed in being processed! See the docs for `ChannelManagerReadArgs` for more.
+               assert!(self.channel_state != ChannelState::ShutdownComplete as u32);
+
+               // We go ahead and "free" any holding cell HTLCs or HTLCs we haven't yet committed to and
+               // return them to fail the payment.
+               let mut dropped_outbound_htlcs = Vec::with_capacity(self.holding_cell_htlc_updates.len());
+               let counterparty_node_id = self.get_counterparty_node_id();
+               for htlc_update in self.holding_cell_htlc_updates.drain(..) {
+                       match htlc_update {
+                               HTLCUpdateAwaitingACK::AddHTLC { source, payment_hash, .. } => {
+                                       dropped_outbound_htlcs.push((source, payment_hash, counterparty_node_id, self.channel_id));
+                               },
+                               _ => {}
+                       }
+               }
+               let monitor_update = if let Some(funding_txo) = self.get_funding_txo() {
+                       // If we haven't yet exchanged funding signatures (i.e. channel_state < FundingSent),
+                       // returning a channel monitor update here would imply a channel monitor update before
+                       // we even registered the channel monitor to begin with, which is invalid.
+                       // Thus, if we aren't actually at a point where we could conceivably broadcast the
+                       // funding transaction, don't return a funding txo (which prevents providing the
+                       // monitor update to the user, even if we return one).
+                       // See test_duplicate_chan_id and test_pre_lockin_no_chan_closed_update for more.
+                       if self.channel_state & (ChannelState::FundingSent as u32 | ChannelState::ChannelReady as u32 | ChannelState::ShutdownComplete as u32) != 0 {
+                               self.latest_monitor_update_id = CLOSED_CHANNEL_UPDATE_ID;
+                               Some((self.get_counterparty_node_id(), funding_txo, ChannelMonitorUpdate {
+                                       update_id: self.latest_monitor_update_id,
+                                       updates: vec![ChannelMonitorUpdateStep::ChannelForceClosed { should_broadcast }],
+                               }))
+                       } else { None }
+               } else { None };
+
+               self.channel_state = ChannelState::ShutdownComplete as u32;
+               self.update_time_counter += 1;
+               (monitor_update, dropped_outbound_htlcs)
+       }
 }
 
 // Internal utility functions for channels
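
For orientation, the `ShutdownResult` returned on the last line of the moved method is the pair `(monitor_update, dropped_outbound_htlcs)`, where `monitor_update` is an `Option` of `(counterparty_node_id, funding_txo, ChannelMonitorUpdate)`. A hedged sketch of how a caller unpacks it, mirroring the destructuring in the channelmanager.rs hunks further down (the fragment assumes a `chan` with a `context` in scope; the comments describe intent, not verified behaviour):

    let (monitor_update, dropped_outbound_htlcs) = chan.context.force_shutdown(true);
    if let Some((counterparty_node_id, funding_txo, update)) = monitor_update {
        // A ChannelForceClosed update was generated. Per the comment in the
        // method body, it may have to be queued rather than applied at once,
        // e.g. while the chain_monitor is not yet fully configured.
    }
    for (source, payment_hash, _counterparty_node_id, _channel_id) in dropped_outbound_htlcs {
        // Holding-cell HTLCs that were never committed to: these can be
        // failed backwards immediately; committed HTLCs must time out.
    }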
@@ -5823,52 +5869,6 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
                Ok((shutdown, monitor_update, dropped_outbound_htlcs))
        }
 
-       /// Gets the latest commitment transaction and any dependent transactions for relay (forcing
-       /// shutdown of this channel - no more calls into this Channel may be made afterwards except
-       /// those explicitly stated to be allowed after shutdown completes, e.g. some simple getters).
-       /// Also returns the list of payment_hashes for HTLCs which we can safely fail backwards
-       /// immediately (others we will have to allow to time out).
-       pub fn force_shutdown(&mut self, should_broadcast: bool) -> ShutdownResult {
-               // Note that we MUST only generate a monitor update that indicates force-closure - we're
-               // called during initialization prior to the chain_monitor in the encompassing ChannelManager
-               // being fully configured in some cases. Thus, it's likely any monitor events we generate will
-               // be delayed in being processed! See the docs for `ChannelManagerReadArgs` for more.
-               assert!(self.context.channel_state != ChannelState::ShutdownComplete as u32);
-
-               // We go ahead and "free" any holding cell HTLCs or HTLCs we haven't yet committed to and
-               // return them to fail the payment.
-               let mut dropped_outbound_htlcs = Vec::with_capacity(self.context.holding_cell_htlc_updates.len());
-               let counterparty_node_id = self.context.get_counterparty_node_id();
-               for htlc_update in self.context.holding_cell_htlc_updates.drain(..) {
-                       match htlc_update {
-                               HTLCUpdateAwaitingACK::AddHTLC { source, payment_hash, .. } => {
-                                       dropped_outbound_htlcs.push((source, payment_hash, counterparty_node_id, self.context.channel_id));
-                               },
-                               _ => {}
-                       }
-               }
-               let monitor_update = if let Some(funding_txo) = self.context.get_funding_txo() {
-                       // If we haven't yet exchanged funding signatures (i.e. channel_state < FundingSent),
-                       // returning a channel monitor update here would imply a channel monitor update before
-                       // we even registered the channel monitor to begin with, which is invalid.
-                       // Thus, if we aren't actually at a point where we could conceivably broadcast the
-                       // funding transaction, don't return a funding txo (which prevents providing the
-                       // monitor update to the user, even if we return one).
-                       // See test_duplicate_chan_id and test_pre_lockin_no_chan_closed_update for more.
-                       if self.context.channel_state & (ChannelState::FundingSent as u32 | ChannelState::ChannelReady as u32 | ChannelState::ShutdownComplete as u32) != 0 {
-                               self.context.latest_monitor_update_id = CLOSED_CHANNEL_UPDATE_ID;
-                               Some((self.context.get_counterparty_node_id(), funding_txo, ChannelMonitorUpdate {
-                                       update_id: self.context.latest_monitor_update_id,
-                                       updates: vec![ChannelMonitorUpdateStep::ChannelForceClosed { should_broadcast }],
-                               }))
-                       } else { None }
-               } else { None };
-
-               self.context.channel_state = ChannelState::ShutdownComplete as u32;
-               self.context.update_time_counter += 1;
-               (monitor_update, dropped_outbound_htlcs)
-       }
-
        pub fn inflight_htlc_sources(&self) -> impl Iterator<Item=(&HTLCSource, &PaymentHash)> {
                self.context.holding_cell_htlc_updates.iter()
                        .flat_map(|htlc_update| {
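
One detail worth flagging in the moved body above: a monitor update is only produced when `channel_state` has at least one of the `FundingSent`, `ChannelReady`, or `ShutdownComplete` bits set, because before funding signatures are exchanged no `ChannelMonitor` was ever registered. A self-contained sketch of that bitflag test; the numeric values are invented for illustration, only the variant names come from the diff:

    // Illustrative discriminants; the real ChannelState enum in channel.rs
    // defines its own bit values.
    const FUNDING_SENT: u32 = 1 << 3;
    const CHANNEL_READY: u32 = 1 << 4;
    const SHUTDOWN_COMPLETE: u32 = 1 << 5;

    /// True once funding signatures have been exchanged, meaning a
    /// ChannelMonitor exists for a ChannelForceClosed update to apply to.
    fn monitor_was_registered(channel_state: u32) -> bool {
        channel_state & (FUNDING_SENT | CHANNEL_READY | SHUTDOWN_COMPLETE) != 0
    }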
diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs
index e8c767204b96ec00d965124aaa98ea463dcc84e8..39ff93dd066883e73e1dd51ef019b338303c9b84 100644
--- a/lightning/src/ln/channelmanager.rs
+++ b/lightning/src/ln/channelmanager.rs
@@ -1646,7 +1646,7 @@ macro_rules! convert_chan_err {
                        ChannelError::Close(msg) => {
                                log_error!($self.logger, "Closing channel {} due to close-required error: {}", log_bytes!($channel_id[..]), msg);
                                update_maps_on_chan_removal!($self, $channel);
-                               let shutdown_res = $channel.force_shutdown(true);
+                               let shutdown_res = $channel.context.force_shutdown(true);
                                (true, MsgHandleErrInternal::from_finish_shutdown(msg, *$channel_id, $channel.context.get_user_id(),
                                        shutdown_res, $self.get_channel_update_for_broadcast(&$channel).ok()))
                        },
@@ -1813,7 +1813,7 @@ macro_rules! handle_new_monitor_update {
                                update_maps_on_chan_removal!($self, $chan);
                                let res: Result<(), _> = Err(MsgHandleErrInternal::from_finish_shutdown(
                                        "ChannelMonitor storage failure".to_owned(), $chan.context.channel_id(),
-                                       $chan.context.get_user_id(), $chan.force_shutdown(false),
+                                       $chan.context.get_user_id(), $chan.context.force_shutdown(false),
                                        $self.get_channel_update_for_broadcast(&$chan).ok()));
                                $remove;
                                res
@@ -2345,7 +2345,7 @@ where
                        }
                };
                log_error!(self.logger, "Force-closing channel {}", log_bytes!(channel_id[..]));
-               self.finish_force_close_channel(chan.force_shutdown(broadcast));
+               self.finish_force_close_channel(chan.context.force_shutdown(broadcast));
                if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
                        let mut peer_state = peer_state_mutex.lock().unwrap();
                        peer_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
@@ -3106,7 +3106,7 @@ where
 
                                let funding_res = chan.get_outbound_funding_created(funding_transaction, funding_txo, &self.logger)
                                        .map_err(|e| if let ChannelError::Close(msg) = e {
-                                               MsgHandleErrInternal::from_finish_shutdown(msg, chan.context.channel_id(), chan.context.get_user_id(), chan.force_shutdown(true), None)
+                                               MsgHandleErrInternal::from_finish_shutdown(msg, chan.context.channel_id(), chan.context.get_user_id(), chan.context.force_shutdown(true), None)
                                        } else { unreachable!(); });
                                match funding_res {
                                        Ok(funding_msg) => (funding_msg, chan),
@@ -5686,7 +5686,7 @@ where
                                                                let pending_msg_events = &mut peer_state.pending_msg_events;
                                                                if let hash_map::Entry::Occupied(chan_entry) = peer_state.channel_by_id.entry(funding_outpoint.to_channel_id()) {
                                                                        let mut chan = remove_channel!(self, chan_entry);
-                                                                       failed_channels.push(chan.force_shutdown(false));
+                                                                       failed_channels.push(chan.context.force_shutdown(false));
                                                                        if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
                                                                                pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
                                                                                        msg: update
@@ -6533,7 +6533,7 @@ where
                                                update_maps_on_chan_removal!(self, channel);
                                                // It looks like our counterparty went on-chain or funding transaction was
                                                // reorged out of the main chain. Close the channel.
-                                               failed_channels.push(channel.force_shutdown(true));
+                                               failed_channels.push(channel.context.force_shutdown(true));
                                                if let Ok(update) = self.get_channel_update_for_broadcast(&channel) {
                                                        pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
                                                                msg: update
@@ -7963,7 +7963,7 @@ where
                                        log_error!(args.logger, " The channel will be force-closed and the latest commitment transaction from the ChannelMonitor broadcast.");
                                        log_error!(args.logger, " The ChannelMonitor for channel {} is at update_id {} but the ChannelManager is at update_id {}.",
                                                log_bytes!(channel.context.channel_id()), monitor.get_latest_update_id(), channel.context.get_latest_monitor_update_id());
-                                       let (monitor_update, mut new_failed_htlcs) = channel.force_shutdown(true);
+                                       let (monitor_update, mut new_failed_htlcs) = channel.context.force_shutdown(true);
                                        if let Some((counterparty_node_id, funding_txo, update)) = monitor_update {
                                                pending_background_events.push(BackgroundEvent::MonitorUpdateRegeneratedOnStartup {
                                                        counterparty_node_id, funding_txo, update
@@ -8021,7 +8021,7 @@ where
                                // If we were persisted and shut down while the initial ChannelMonitor persistence
                                // was in-progress, we never broadcasted the funding transaction and can still
                                // safely discard the channel.
-                               let _ = channel.force_shutdown(false);
+                               let _ = channel.context.force_shutdown(false);
                                channel_closures.push_back((events::Event::ChannelClosed {
                                        channel_id: channel.context.channel_id(),
                                        user_channel_id: channel.context.get_user_id(),
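
Finally, the two deserialization call sites above show the two uses of the `should_broadcast` flag: `true` when the `ChannelMonitor` is ahead of the `ChannelManager` and the latest commitment transaction should be broadcast, `false` when the funding transaction never made it on-chain and the channel can be discarded quietly. A recap in fragment form (comments are interpretive, not from the source):

    // Stale ChannelManager: force-close; the monitor update tells the
    // ChannelMonitor to broadcast the latest commitment transaction.
    let (monitor_update, mut new_failed_htlcs) = channel.context.force_shutdown(true);

    // Initial monitor persistence never completed: the funding transaction
    // was never broadcast, so drop the channel and only emit ChannelClosed.
    let _ = channel.context.force_shutdown(false);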