From: Matt Corallo Date: Fri, 29 Dec 2023 03:23:59 +0000 (+0000) Subject: Move ChannelClosed generation into finish_close_channel X-Git-Tag: v0.0.120~13^2~3 X-Git-Url: http://git.bitcoin.ninja/index.cgi?a=commitdiff_plain;h=080865dff975a57cc9b88a7678eee2f963847bc4;p=rust-lightning Move ChannelClosed generation into finish_close_channel Currently the channel shutdown sequence has a number of steps which all the shutdown callsites have to call. Because many shutdown cases are rare error cases, it's relatively easy to miss a call and leave users without `Event`s or miss some important cleanup. One of those steps, calling `issue_channel_close_events`, is rather easy to remove, as it only generates two events, which can simply be moved to another shutdown step. Here we remove `issue_channel_close_events` by moving `ChannelClosed` event generation into `finish_close_channel`. --- diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index dee972f39..7026ab6d3 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -814,6 +814,7 @@ pub(super) struct ReestablishResponses { /// The result of a shutdown that should be handled. #[must_use] pub(crate) struct ShutdownResult { + pub(crate) closure_reason: ClosureReason, /// A channel monitor update to apply. pub(crate) monitor_update: Option<(PublicKey, OutPoint, ChannelMonitorUpdate)>, /// A list of dropped outbound HTLCs that can safely be failed backwards immediately. @@ -822,6 +823,8 @@ pub(crate) struct ShutdownResult { /// propagated to the remainder of the batch. 
pub(crate) unbroadcasted_batch_funding_txid: Option, pub(crate) channel_id: ChannelId, + pub(crate) user_channel_id: u128, + pub(crate) channel_capacity_satoshis: u64, pub(crate) counterparty_node_id: PublicKey, pub(crate) unbroadcasted_funding_tx: Option, } @@ -2362,7 +2365,7 @@ impl ChannelContext where SP::Target: SignerProvider { /// those explicitly stated to be allowed after shutdown completes, eg some simple getters). /// Also returns the list of payment_hashes for channels which we can safely fail backwards /// immediately (others we will have to allow to time out). - pub fn force_shutdown(&mut self, should_broadcast: bool) -> ShutdownResult { + pub fn force_shutdown(&mut self, should_broadcast: bool, closure_reason: ClosureReason) -> ShutdownResult { // Note that we MUST only generate a monitor update that indicates force-closure - we're // called during initialization prior to the chain_monitor in the encompassing ChannelManager // being fully configured in some cases. Thus, its likely any monitor events we generate will @@ -2408,10 +2411,13 @@ impl ChannelContext where SP::Target: SignerProvider { self.channel_state = ChannelState::ShutdownComplete; self.update_time_counter += 1; ShutdownResult { + closure_reason, monitor_update, dropped_outbound_htlcs, unbroadcasted_batch_funding_txid, channel_id: self.channel_id, + user_channel_id: self.user_id, + channel_capacity_satoshis: self.channel_value_satoshis, counterparty_node_id: self.counterparty_node_id, unbroadcasted_funding_tx, } @@ -4941,10 +4947,13 @@ impl Channel where if let Some((last_fee, sig)) = self.context.last_sent_closing_fee { if last_fee == msg.fee_satoshis { let shutdown_result = ShutdownResult { + closure_reason: ClosureReason::CooperativeClosure, monitor_update: None, dropped_outbound_htlcs: Vec::new(), unbroadcasted_batch_funding_txid: self.context.unbroadcasted_batch_funding_txid(), channel_id: self.context.channel_id, + user_channel_id: self.context.user_id, + 
channel_capacity_satoshis: self.context.channel_value_satoshis, counterparty_node_id: self.context.counterparty_node_id, unbroadcasted_funding_tx: self.context.unbroadcasted_funding(), }; @@ -4972,10 +4981,13 @@ impl Channel where .map_err(|_| ChannelError::Close("External signer refused to sign closing transaction".to_owned()))?; let (signed_tx, shutdown_result) = if $new_fee == msg.fee_satoshis { let shutdown_result = ShutdownResult { + closure_reason: ClosureReason::CooperativeClosure, monitor_update: None, dropped_outbound_htlcs: Vec::new(), unbroadcasted_batch_funding_txid: self.context.unbroadcasted_batch_funding_txid(), channel_id: self.context.channel_id, + user_channel_id: self.context.user_id, + channel_capacity_satoshis: self.context.channel_value_satoshis, counterparty_node_id: self.context.counterparty_node_id, unbroadcasted_funding_tx: self.context.unbroadcasted_funding(), }; diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 51ea72ff4..f19878f51 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -1966,14 +1966,6 @@ macro_rules! handle_error { msg: update }); } - if let Some((channel_id, user_channel_id)) = chan_id { - $self.pending_events.lock().unwrap().push_back((events::Event::ChannelClosed { - channel_id, user_channel_id, - reason: ClosureReason::ProcessingError { err: err.err.clone() }, - counterparty_node_id: Some($counterparty_node_id), - channel_capacity_sats: channel_capacity, - }, None)); - } } let logger = WithContext::from( @@ -2039,7 +2031,8 @@ macro_rules! 
convert_chan_phase_err { let logger = WithChannelContext::from(&$self.logger, &$channel.context); log_error!(logger, "Closing channel {} due to close-required error: {}", $channel_id, msg); update_maps_on_chan_removal!($self, $channel.context); - let shutdown_res = $channel.context.force_shutdown(true); + let reason = ClosureReason::ProcessingError { err: msg.clone() }; + let shutdown_res = $channel.context.force_shutdown(true, reason); let user_id = $channel.context.get_user_id(); let channel_capacity_satoshis = $channel.context.get_value_satoshis(); @@ -2701,18 +2694,6 @@ where .collect() } - /// Helper function that issues the channel close events - fn issue_channel_close_events(&self, context: &ChannelContext, closure_reason: ClosureReason) { - let mut pending_events_lock = self.pending_events.lock().unwrap(); - pending_events_lock.push_back((events::Event::ChannelClosed { - channel_id: context.channel_id(), - user_channel_id: context.get_user_id(), - reason: closure_reason, - counterparty_node_id: Some(context.get_counterparty_node_id()), - channel_capacity_sats: Some(context.get_value_satoshis()), - }, None)); - } - fn close_channel_internal(&self, channel_id: &ChannelId, counterparty_node_id: &PublicKey, target_feerate_sats_per_1000_weight: Option, override_shutdown_script: Option) -> Result<(), APIError> { let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self); @@ -2754,9 +2735,8 @@ where peer_state_lock, peer_state, per_peer_state, chan); } } else { - self.issue_channel_close_events(chan_phase_entry.get().context(), ClosureReason::HolderForceClosed); let mut chan_phase = remove_channel_phase!(self, chan_phase_entry); - shutdown_result = Some(chan_phase.context_mut().force_shutdown(false)); + shutdown_result = Some(chan_phase.context_mut().force_shutdown(false, ClosureReason::HolderForceClosed)); } }, hash_map::Entry::Vacant(_) => { @@ -2853,6 +2833,7 @@ where let logger = WithContext::from( &self.logger, 
Some(shutdown_res.counterparty_node_id), Some(shutdown_res.channel_id), ); + log_debug!(logger, "Finishing closure of channel with {} HTLCs to fail", shutdown_res.dropped_outbound_htlcs.len()); for htlc_source in shutdown_res.dropped_outbound_htlcs.drain(..) { let (source, payment_hash, counterparty_node_id, channel_id) = htlc_source; @@ -2878,8 +2859,7 @@ where let mut peer_state = peer_state_mutex.lock().unwrap(); if let Some(mut chan) = peer_state.channel_by_id.remove(&channel_id) { update_maps_on_chan_removal!(self, &chan.context()); - self.issue_channel_close_events(&chan.context(), ClosureReason::FundingBatchClosure); - shutdown_results.push(chan.context_mut().force_shutdown(false)); + shutdown_results.push(chan.context_mut().force_shutdown(false, ClosureReason::FundingBatchClosure)); } } has_uncompleted_channel = Some(has_uncompleted_channel.map_or(!state, |v| v || !state)); @@ -2892,6 +2872,14 @@ where { let mut pending_events = self.pending_events.lock().unwrap(); + pending_events.push_back((events::Event::ChannelClosed { + channel_id: shutdown_res.channel_id, + user_channel_id: shutdown_res.user_channel_id, + reason: shutdown_res.closure_reason, + counterparty_node_id: Some(shutdown_res.counterparty_node_id), + channel_capacity_sats: Some(shutdown_res.channel_capacity_satoshis), + }, None)); + if let Some(transaction) = shutdown_res.unbroadcasted_funding_tx { pending_events.push_back((events::Event::DiscardFunding { channel_id: shutdown_res.channel_id, transaction @@ -2920,17 +2908,16 @@ where let logger = WithContext::from(&self.logger, Some(*peer_node_id), Some(*channel_id)); if let hash_map::Entry::Occupied(chan_phase_entry) = peer_state.channel_by_id.entry(channel_id.clone()) { log_error!(logger, "Force-closing channel {}", channel_id); - self.issue_channel_close_events(&chan_phase_entry.get().context(), closure_reason); let mut chan_phase = remove_channel_phase!(self, chan_phase_entry); mem::drop(peer_state); mem::drop(per_peer_state); match 
chan_phase { ChannelPhase::Funded(mut chan) => { - self.finish_close_channel(chan.context.force_shutdown(broadcast)); + self.finish_close_channel(chan.context.force_shutdown(broadcast, closure_reason)); (self.get_channel_update_for_broadcast(&chan).ok(), chan.context.get_counterparty_node_id()) }, ChannelPhase::UnfundedOutboundV1(_) | ChannelPhase::UnfundedInboundV1(_) => { - self.finish_close_channel(chan_phase.context_mut().force_shutdown(false)); + self.finish_close_channel(chan_phase.context_mut().force_shutdown(false, closure_reason)); // Unfunded channel has no update (None, chan_phase.context().get_counterparty_node_id()) }, @@ -3760,7 +3747,8 @@ where .map_err(|(mut chan, e)| if let ChannelError::Close(msg) = e { let channel_id = chan.context.channel_id(); let user_id = chan.context.get_user_id(); - let shutdown_res = chan.context.force_shutdown(false); + let reason = ClosureReason::ProcessingError { err: msg.clone() }; + let shutdown_res = chan.context.force_shutdown(false, reason); let channel_capacity = chan.context.get_value_satoshis(); (chan, MsgHandleErrInternal::from_finish_shutdown(msg, channel_id, user_id, shutdown_res, None, channel_capacity)) } else { unreachable!(); }); @@ -3967,8 +3955,8 @@ where .and_then(|mut peer_state| peer_state.channel_by_id.remove(&channel_id)) .map(|mut chan| { update_maps_on_chan_removal!(self, &chan.context()); - self.issue_channel_close_events(&chan.context(), ClosureReason::ProcessingError { err: e.clone() }); - shutdown_results.push(chan.context_mut().force_shutdown(false)); + let closure_reason = ClosureReason::ProcessingError { err: e.clone() }; + shutdown_results.push(chan.context_mut().force_shutdown(false, closure_reason)); }); } } @@ -4890,8 +4878,7 @@ where log_error!(logger, "Force-closing pending channel with ID {} for not establishing in a timely manner", chan_id); update_maps_on_chan_removal!(self, &context); - self.issue_channel_close_events(&context, ClosureReason::HolderForceClosed); - 
shutdown_channels.push(context.force_shutdown(false)); + shutdown_channels.push(context.force_shutdown(false, ClosureReason::HolderForceClosed)); pending_msg_events.push(MessageSendEvent::HandleError { node_id: counterparty_node_id, action: msgs::ErrorAction::SendErrorMessage { @@ -6511,9 +6498,8 @@ where let context = phase.context_mut(); let logger = WithChannelContext::from(&self.logger, context); log_error!(logger, "Immediately closing unfunded channel {} as peer asked to cooperatively shut it down (which is unnecessary)", &msg.channel_id); - self.issue_channel_close_events(&context, ClosureReason::CounterpartyCoopClosedUnfundedChannel); let mut chan = remove_channel_phase!(self, chan_phase_entry); - finish_shutdown = Some(chan.context_mut().force_shutdown(false)); + finish_shutdown = Some(chan.context_mut().force_shutdown(false, ClosureReason::CounterpartyCoopClosedUnfundedChannel)); }, } } else { @@ -6582,7 +6568,6 @@ where msg: update }); } - self.issue_channel_close_events(&chan.context, ClosureReason::CooperativeClosure); } mem::drop(per_peer_state); if let Some(shutdown_result) = shutdown_result { @@ -7234,13 +7219,12 @@ where let pending_msg_events = &mut peer_state.pending_msg_events; if let hash_map::Entry::Occupied(chan_phase_entry) = peer_state.channel_by_id.entry(funding_outpoint.to_channel_id()) { if let ChannelPhase::Funded(mut chan) = remove_channel_phase!(self, chan_phase_entry) { - failed_channels.push(chan.context.force_shutdown(false)); + failed_channels.push(chan.context.force_shutdown(false, ClosureReason::HolderForceClosed)); if let Ok(update) = self.get_channel_update_for_broadcast(&chan) { pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate { msg: update }); } - self.issue_channel_close_events(&chan.context, ClosureReason::HolderForceClosed); pending_msg_events.push(events::MessageSendEvent::HandleError { node_id: chan.context.get_counterparty_node_id(), action: msgs::ErrorAction::DisconnectPeer { @@ -7427,8 +7411,6 
@@ where }); } - self.issue_channel_close_events(&chan.context, ClosureReason::CooperativeClosure); - log_info!(logger, "Broadcasting {}", log_tx!(tx)); self.tx_broadcaster.broadcast_transactions(&[&tx]); update_maps_on_chan_removal!(self, &chan.context); @@ -8441,14 +8423,13 @@ where update_maps_on_chan_removal!(self, &channel.context); // It looks like our counterparty went on-chain or funding transaction was // reorged out of the main chain. Close the channel. - failed_channels.push(channel.context.force_shutdown(true)); + let reason_message = format!("{}", reason); + failed_channels.push(channel.context.force_shutdown(true, reason)); if let Ok(update) = self.get_channel_update_for_broadcast(&channel) { pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate { msg: update }); } - let reason_message = format!("{}", reason); - self.issue_channel_close_events(&channel.context, reason); pending_msg_events.push(events::MessageSendEvent::HandleError { node_id: channel.context.get_counterparty_node_id(), action: msgs::ErrorAction::DisconnectPeer { @@ -8846,8 +8827,7 @@ where }; // Clean up for removal. 
update_maps_on_chan_removal!(self, &context); - self.issue_channel_close_events(&context, ClosureReason::DisconnectedPeer); - failed_channels.push(context.force_shutdown(false)); + failed_channels.push(context.force_shutdown(false, ClosureReason::DisconnectedPeer)); false }); // Note that we don't bother generating any events for pre-accept channels - @@ -10293,7 +10273,7 @@ where log_error!(logger, " The ChannelMonitor for channel {} is at counterparty commitment transaction number {} but the ChannelManager is at counterparty commitment transaction number {}.", &channel.context.channel_id(), monitor.get_cur_counterparty_commitment_number(), channel.get_cur_counterparty_commitment_transaction_number()); } - let mut shutdown_result = channel.context.force_shutdown(true); + let mut shutdown_result = channel.context.force_shutdown(true, ClosureReason::OutdatedChannelManager); if shutdown_result.unbroadcasted_batch_funding_txid.is_some() { return Err(DecodeError::InvalidValue); } @@ -10355,7 +10335,7 @@ where // If we were persisted and shut down while the initial ChannelMonitor persistence // was in-progress, we never broadcasted the funding transaction and can still // safely discard the channel. 
- let _ = channel.context.force_shutdown(false); + let _ = channel.context.force_shutdown(false, ClosureReason::DisconnectedPeer); channel_closures.push_back((events::Event::ChannelClosed { channel_id: channel.context.channel_id(), user_channel_id: channel.context.get_user_id(), diff --git a/lightning/src/ln/functional_tests.rs b/lightning/src/ln/functional_tests.rs index 2ad53faa8..ef0fcc926 100644 --- a/lightning/src/ln/functional_tests.rs +++ b/lightning/src/ln/functional_tests.rs @@ -3328,22 +3328,18 @@ fn do_test_commitment_revoked_fail_backward_exhaustive(deliver_bs_raa: bool, use let events = nodes[1].node.get_and_clear_pending_events(); assert_eq!(events.len(), if deliver_bs_raa { 3 + nodes.len() - 1 } else { 4 + nodes.len() }); - match events[0] { - Event::ChannelClosed { reason: ClosureReason::CommitmentTxConfirmed, .. } => { }, - _ => panic!("Unexepected event"), - } - match events[1] { - Event::PaymentPathFailed { ref payment_hash, .. } => { - assert_eq!(*payment_hash, fourth_payment_hash); - }, - _ => panic!("Unexpected event"), - } - match events[2] { - Event::PaymentFailed { ref payment_hash, .. } => { - assert_eq!(*payment_hash, fourth_payment_hash); - }, - _ => panic!("Unexpected event"), - } + assert!(events.iter().any(|ev| matches!( + ev, + Event::ChannelClosed { reason: ClosureReason::CommitmentTxConfirmed, .. } + ))); + assert!(events.iter().any(|ev| matches!( + ev, + Event::PaymentPathFailed { ref payment_hash, .. } if *payment_hash == fourth_payment_hash + ))); + assert!(events.iter().any(|ev| matches!( + ev, + Event::PaymentFailed { ref payment_hash, .. 
} if *payment_hash == fourth_payment_hash + ))); nodes[1].node.process_pending_htlc_forwards(); check_added_monitors!(nodes[1], 1); @@ -9131,16 +9127,16 @@ fn test_duplicate_chan_id() { chan.get_funding_created(tx.clone(), funding_outpoint, false, &&logger).map_err(|_| ()).unwrap() }, _ => panic!("Unexpected ChannelPhase variant"), - } + }.unwrap() }; check_added_monitors!(nodes[0], 0); - nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created.unwrap()); + nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created); // At this point we'll look up if the channel_id is present and immediately fail the channel // without trying to persist the `ChannelMonitor`. check_added_monitors!(nodes[1], 0); check_closed_events(&nodes[1], &[ - ExpectedCloseEvent::from_id_reason(channel_id, false, ClosureReason::ProcessingError { + ExpectedCloseEvent::from_id_reason(funding_created.temporary_channel_id, false, ClosureReason::ProcessingError { err: "Already had channel with the new channel_id".to_owned() }) ]);