/// The result of a shutdown that should be handled.
#[must_use]
pub(crate) struct ShutdownResult {
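+ /// The reason the channel was closed, later reported to the user via
+ /// `Event::ChannelClosed`.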
+ pub(crate) closure_reason: ClosureReason,
/// A channel monitor update to apply.
pub(crate) monitor_update: Option<(PublicKey, OutPoint, ChannelMonitorUpdate)>,
/// A list of dropped outbound HTLCs that can safely be failed backwards immediately.
pub(crate) dropped_outbound_htlcs: Vec<(HTLCSource, PaymentHash, PublicKey, ChannelId)>,
/// An unbroadcasted batch funding transaction id. The closure of this channel should be
/// propagated to the remainder of the batch.
pub(crate) unbroadcasted_batch_funding_txid: Option<Txid>,
pub(crate) channel_id: ChannelId,
+ pub(crate) user_channel_id: u128,
+ pub(crate) channel_capacity_satoshis: u64,
pub(crate) counterparty_node_id: PublicKey,
pub(crate) unbroadcasted_funding_tx: Option<Transaction>,
}
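// Illustrative sketch, not part of this change: because `ShutdownResult` is `#[must_use]`,
// callers are expected to hand it to `finish_close_channel`, which fails back the dropped
// HTLCs, applies the monitor update, and emits the `Event::ChannelClosed` built from the
// fields above. For example:
//
//     let shutdown_res = chan.context_mut().force_shutdown(false, ClosureReason::HolderForceClosed);
//     self.finish_close_channel(shutdown_res);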
/// Gets the latest commitment transaction and any dependent transactions for relay (forcing
/// shutdown of this channel - no more calls into this Channel may be made afterwards except
/// those explicitly stated to be allowed after shutdown completes, eg some simple getters).
/// Also returns the list of payment_hashes for channels which we can safely fail backwards
/// immediately (others we will have to allow to time out).
- pub fn force_shutdown(&mut self, should_broadcast: bool) -> ShutdownResult {
+ pub fn force_shutdown(&mut self, should_broadcast: bool, closure_reason: ClosureReason) -> ShutdownResult {
// Note that we MUST only generate a monitor update that indicates force-closure - we're
// called during initialization prior to the chain_monitor in the encompassing ChannelManager
// being fully configured in some cases. Thus, it's likely any monitor events we generate will
// be delayed in being processed! See the docs for `ChannelManagerReadArgs` for more.
self.channel_state = ChannelState::ShutdownComplete;
self.update_time_counter += 1;
ShutdownResult {
+ closure_reason,
monitor_update,
dropped_outbound_htlcs,
unbroadcasted_batch_funding_txid,
channel_id: self.channel_id,
+ user_channel_id: self.user_id,
+ channel_capacity_satoshis: self.channel_value_satoshis,
counterparty_node_id: self.counterparty_node_id,
unbroadcasted_funding_tx,
}
if let Some((last_fee, sig)) = self.context.last_sent_closing_fee {
if last_fee == msg.fee_satoshis {
let shutdown_result = ShutdownResult {
+ closure_reason: ClosureReason::CooperativeClosure,
monitor_update: None,
dropped_outbound_htlcs: Vec::new(),
unbroadcasted_batch_funding_txid: self.context.unbroadcasted_batch_funding_txid(),
channel_id: self.context.channel_id,
+ user_channel_id: self.context.user_id,
+ channel_capacity_satoshis: self.context.channel_value_satoshis,
counterparty_node_id: self.context.counterparty_node_id,
unbroadcasted_funding_tx: self.context.unbroadcasted_funding(),
};
.map_err(|_| ChannelError::Close("External signer refused to sign closing transaction".to_owned()))?;
let (signed_tx, shutdown_result) = if $new_fee == msg.fee_satoshis {
let shutdown_result = ShutdownResult {
+ closure_reason: ClosureReason::CooperativeClosure,
monitor_update: None,
dropped_outbound_htlcs: Vec::new(),
unbroadcasted_batch_funding_txid: self.context.unbroadcasted_batch_funding_txid(),
channel_id: self.context.channel_id,
+ user_channel_id: self.context.user_id,
+ channel_capacity_satoshis: self.context.channel_value_satoshis,
counterparty_node_id: self.context.counterparty_node_id,
unbroadcasted_funding_tx: self.context.unbroadcasted_funding(),
};
msg: update
});
}
- if let Some((channel_id, user_channel_id)) = chan_id {
- $self.pending_events.lock().unwrap().push_back((events::Event::ChannelClosed {
- channel_id, user_channel_id,
- reason: ClosureReason::ProcessingError { err: err.err.clone() },
- counterparty_node_id: Some($counterparty_node_id),
- channel_capacity_sats: channel_capacity,
- }, None));
- }
}
let logger = WithChannelContext::from(&$self.logger, &$channel.context);
log_error!(logger, "Closing channel {} due to close-required error: {}", $channel_id, msg);
update_maps_on_chan_removal!($self, $channel.context);
- let shutdown_res = $channel.context.force_shutdown(true);
+ let reason = ClosureReason::ProcessingError { err: msg.clone() };
+ let shutdown_res = $channel.context.force_shutdown(true, reason);
let user_id = $channel.context.get_user_id();
let channel_capacity_satoshis = $channel.context.get_value_satoshis();
.collect()
}
- /// Helper function that issues the channel close events
- fn issue_channel_close_events(&self, context: &ChannelContext<SP>, closure_reason: ClosureReason) {
- let mut pending_events_lock = self.pending_events.lock().unwrap();
- pending_events_lock.push_back((events::Event::ChannelClosed {
- channel_id: context.channel_id(),
- user_channel_id: context.get_user_id(),
- reason: closure_reason,
- counterparty_node_id: Some(context.get_counterparty_node_id()),
- channel_capacity_sats: Some(context.get_value_satoshis()),
- }, None));
- }
-
fn close_channel_internal(&self, channel_id: &ChannelId, counterparty_node_id: &PublicKey, target_feerate_sats_per_1000_weight: Option<u32>, override_shutdown_script: Option<ShutdownScript>) -> Result<(), APIError> {
let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
peer_state_lock, peer_state, per_peer_state, chan);
}
} else {
- self.issue_channel_close_events(chan_phase_entry.get().context(), ClosureReason::HolderForceClosed);
let mut chan_phase = remove_channel_phase!(self, chan_phase_entry);
- shutdown_result = Some(chan_phase.context_mut().force_shutdown(false));
+ shutdown_result = Some(chan_phase.context_mut().force_shutdown(false, ClosureReason::HolderForceClosed));
}
},
hash_map::Entry::Vacant(_) => {
let logger = WithContext::from(
&self.logger, Some(shutdown_res.counterparty_node_id), Some(shutdown_res.channel_id),
);
+
log_debug!(logger, "Finishing closure of channel with {} HTLCs to fail", shutdown_res.dropped_outbound_htlcs.len());
for htlc_source in shutdown_res.dropped_outbound_htlcs.drain(..) {
let (source, payment_hash, counterparty_node_id, channel_id) = htlc_source;
let mut peer_state = peer_state_mutex.lock().unwrap();
if let Some(mut chan) = peer_state.channel_by_id.remove(&channel_id) {
update_maps_on_chan_removal!(self, &chan.context());
- self.issue_channel_close_events(&chan.context(), ClosureReason::FundingBatchClosure);
- shutdown_results.push(chan.context_mut().force_shutdown(false));
+ shutdown_results.push(chan.context_mut().force_shutdown(false, ClosureReason::FundingBatchClosure));
}
}
has_uncompleted_channel = Some(has_uncompleted_channel.map_or(!state, |v| v || !state));
{
let mut pending_events = self.pending_events.lock().unwrap();
+ pending_events.push_back((events::Event::ChannelClosed {
+ channel_id: shutdown_res.channel_id,
+ user_channel_id: shutdown_res.user_channel_id,
+ reason: shutdown_res.closure_reason,
+ counterparty_node_id: Some(shutdown_res.counterparty_node_id),
+ channel_capacity_sats: Some(shutdown_res.channel_capacity_satoshis),
+ }, None));
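// Illustrative sketch, not part of this change: a downstream event consumer can surface the
// closure reason carried by the event. `ClosureReason` implements `Display` (see the
// `format!("{}", reason)` call further below), so a handler might do:
//
//     Event::ChannelClosed { channel_id, reason, .. } =>
//         println!("channel {} closed: {}", channel_id, reason),
//     _ => {},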
+
if let Some(transaction) = shutdown_res.unbroadcasted_funding_tx {
pending_events.push_back((events::Event::DiscardFunding {
channel_id: shutdown_res.channel_id, transaction
let logger = WithContext::from(&self.logger, Some(*peer_node_id), Some(*channel_id));
if let hash_map::Entry::Occupied(chan_phase_entry) = peer_state.channel_by_id.entry(channel_id.clone()) {
log_error!(logger, "Force-closing channel {}", channel_id);
- self.issue_channel_close_events(&chan_phase_entry.get().context(), closure_reason);
let mut chan_phase = remove_channel_phase!(self, chan_phase_entry);
mem::drop(peer_state);
mem::drop(per_peer_state);
match chan_phase {
ChannelPhase::Funded(mut chan) => {
- self.finish_close_channel(chan.context.force_shutdown(broadcast));
+ self.finish_close_channel(chan.context.force_shutdown(broadcast, closure_reason));
(self.get_channel_update_for_broadcast(&chan).ok(), chan.context.get_counterparty_node_id())
},
ChannelPhase::UnfundedOutboundV1(_) | ChannelPhase::UnfundedInboundV1(_) => {
- self.finish_close_channel(chan_phase.context_mut().force_shutdown(false));
+ self.finish_close_channel(chan_phase.context_mut().force_shutdown(false, closure_reason));
// Unfunded channel has no update
(None, chan_phase.context().get_counterparty_node_id())
},
.map_err(|(mut chan, e)| if let ChannelError::Close(msg) = e {
let channel_id = chan.context.channel_id();
let user_id = chan.context.get_user_id();
- let shutdown_res = chan.context.force_shutdown(false);
+ let reason = ClosureReason::ProcessingError { err: msg.clone() };
+ let shutdown_res = chan.context.force_shutdown(false, reason);
let channel_capacity = chan.context.get_value_satoshis();
(chan, MsgHandleErrInternal::from_finish_shutdown(msg, channel_id, user_id, shutdown_res, None, channel_capacity))
} else { unreachable!(); });
.and_then(|mut peer_state| peer_state.channel_by_id.remove(&channel_id))
.map(|mut chan| {
update_maps_on_chan_removal!(self, &chan.context());
- self.issue_channel_close_events(&chan.context(), ClosureReason::ProcessingError { err: e.clone() });
- shutdown_results.push(chan.context_mut().force_shutdown(false));
+ let closure_reason = ClosureReason::ProcessingError { err: e.clone() };
+ shutdown_results.push(chan.context_mut().force_shutdown(false, closure_reason));
});
}
}
log_error!(logger,
"Force-closing pending channel with ID {} for not establishing in a timely manner", chan_id);
update_maps_on_chan_removal!(self, &context);
- self.issue_channel_close_events(&context, ClosureReason::HolderForceClosed);
- shutdown_channels.push(context.force_shutdown(false));
+ shutdown_channels.push(context.force_shutdown(false, ClosureReason::HolderForceClosed));
pending_msg_events.push(MessageSendEvent::HandleError {
node_id: counterparty_node_id,
action: msgs::ErrorAction::SendErrorMessage {
let context = phase.context_mut();
let logger = WithChannelContext::from(&self.logger, context);
log_error!(logger, "Immediately closing unfunded channel {} as peer asked to cooperatively shut it down (which is unnecessary)", &msg.channel_id);
- self.issue_channel_close_events(&context, ClosureReason::CounterpartyCoopClosedUnfundedChannel);
let mut chan = remove_channel_phase!(self, chan_phase_entry);
- finish_shutdown = Some(chan.context_mut().force_shutdown(false));
+ finish_shutdown = Some(chan.context_mut().force_shutdown(false, ClosureReason::CounterpartyCoopClosedUnfundedChannel));
},
}
} else {
msg: update
});
}
- self.issue_channel_close_events(&chan.context, ClosureReason::CooperativeClosure);
}
mem::drop(per_peer_state);
if let Some(shutdown_result) = shutdown_result {
let pending_msg_events = &mut peer_state.pending_msg_events;
if let hash_map::Entry::Occupied(chan_phase_entry) = peer_state.channel_by_id.entry(funding_outpoint.to_channel_id()) {
if let ChannelPhase::Funded(mut chan) = remove_channel_phase!(self, chan_phase_entry) {
- failed_channels.push(chan.context.force_shutdown(false));
+ failed_channels.push(chan.context.force_shutdown(false, ClosureReason::HolderForceClosed));
if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
msg: update
});
}
- self.issue_channel_close_events(&chan.context, ClosureReason::HolderForceClosed);
pending_msg_events.push(events::MessageSendEvent::HandleError {
node_id: chan.context.get_counterparty_node_id(),
action: msgs::ErrorAction::DisconnectPeer {
});
}
- self.issue_channel_close_events(&chan.context, ClosureReason::CooperativeClosure);
-
log_info!(logger, "Broadcasting {}", log_tx!(tx));
self.tx_broadcaster.broadcast_transactions(&[&tx]);
update_maps_on_chan_removal!(self, &chan.context);
update_maps_on_chan_removal!(self, &channel.context);
// It looks like our counterparty went on-chain or funding transaction was
// reorged out of the main chain. Close the channel.
- failed_channels.push(channel.context.force_shutdown(true));
+ let reason_message = format!("{}", reason);
+ failed_channels.push(channel.context.force_shutdown(true, reason));
if let Ok(update) = self.get_channel_update_for_broadcast(&channel) {
pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
msg: update
});
}
- let reason_message = format!("{}", reason);
- self.issue_channel_close_events(&channel.context, reason);
pending_msg_events.push(events::MessageSendEvent::HandleError {
node_id: channel.context.get_counterparty_node_id(),
action: msgs::ErrorAction::DisconnectPeer {
};
// Clean up for removal.
update_maps_on_chan_removal!(self, &context);
- self.issue_channel_close_events(&context, ClosureReason::DisconnectedPeer);
- failed_channels.push(context.force_shutdown(false));
+ failed_channels.push(context.force_shutdown(false, ClosureReason::DisconnectedPeer));
false
});
// Note that we don't bother generating any events for pre-accept channels -
// they're not considered "channels" yet from the PoV of our events interface.
log_error!(logger, " The ChannelMonitor for channel {} is at counterparty commitment transaction number {} but the ChannelManager is at counterparty commitment transaction number {}.",
&channel.context.channel_id(), monitor.get_cur_counterparty_commitment_number(), channel.get_cur_counterparty_commitment_transaction_number());
}
- let mut shutdown_result = channel.context.force_shutdown(true);
+ let mut shutdown_result = channel.context.force_shutdown(true, ClosureReason::OutdatedChannelManager);
if shutdown_result.unbroadcasted_batch_funding_txid.is_some() {
return Err(DecodeError::InvalidValue);
}
// If we were persisted and shut down while the initial ChannelMonitor persistence
// was in-progress, we never broadcasted the funding transaction and can still
// safely discard the channel.
- let _ = channel.context.force_shutdown(false);
+ let _ = channel.context.force_shutdown(false, ClosureReason::DisconnectedPeer);
channel_closures.push_back((events::Event::ChannelClosed {
channel_id: channel.context.channel_id(),
user_channel_id: channel.context.get_user_id(),
reason: ClosureReason::DisconnectedPeer,
counterparty_node_id: Some(channel.context.get_counterparty_node_id()),
channel_capacity_sats: Some(channel.context.get_value_satoshis()),
}, None));
let events = nodes[1].node.get_and_clear_pending_events();
assert_eq!(events.len(), if deliver_bs_raa { 3 + nodes.len() - 1 } else { 4 + nodes.len() });
- match events[0] {
- Event::ChannelClosed { reason: ClosureReason::CommitmentTxConfirmed, .. } => { },
- _ => panic!("Unexepected event"),
- }
- match events[1] {
- Event::PaymentPathFailed { ref payment_hash, .. } => {
- assert_eq!(*payment_hash, fourth_payment_hash);
- },
- _ => panic!("Unexpected event"),
- }
- match events[2] {
- Event::PaymentFailed { ref payment_hash, .. } => {
- assert_eq!(*payment_hash, fourth_payment_hash);
- },
- _ => panic!("Unexpected event"),
- }
+ assert!(events.iter().any(|ev| matches!(
+ ev,
+ Event::ChannelClosed { reason: ClosureReason::CommitmentTxConfirmed, .. }
+ )));
+ assert!(events.iter().any(|ev| matches!(
+ ev,
+ Event::PaymentPathFailed { ref payment_hash, .. } if *payment_hash == fourth_payment_hash
+ )));
+ assert!(events.iter().any(|ev| matches!(
+ ev,
+ Event::PaymentFailed { ref payment_hash, .. } if *payment_hash == fourth_payment_hash
+ )));
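+ // These assertions check for the presence of each event rather than a fixed index,
+ // making the test robust to reordering of the closure and payment-failure events.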
nodes[1].node.process_pending_htlc_forwards();
check_added_monitors!(nodes[1], 1);
chan.get_funding_created(tx.clone(), funding_outpoint, false, &&logger).map_err(|_| ()).unwrap()
},
_ => panic!("Unexpected ChannelPhase variant"),
- }
+ }.unwrap()
};
check_added_monitors!(nodes[0], 0);
- nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created.unwrap());
+ nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created);
// At this point we'll look up if the channel_id is present and immediately fail the channel
// without trying to persist the `ChannelMonitor`.
check_added_monitors!(nodes[1], 0);
check_closed_events(&nodes[1], &[
- ExpectedCloseEvent::from_id_reason(channel_id, false, ClosureReason::ProcessingError {
+ ExpectedCloseEvent::from_id_reason(funding_created.temporary_channel_id, false, ClosureReason::ProcessingError {
err: "Already had channel with the new channel_id".to_owned()
})
]);