In general, we should never automatically force-close our users'
channels unless there is some immediate risk of funds loss (i.e.
because some HTLC(s) are timing out soon). In any other case, we
should trust the user to figure out what is going on and close
their channels manually, instead of trying to be overly clever and
automating closures when we think a channel is useless.

In this case, even if a peer requires some feature that prevents us
from communicating with them, there is a strong possibility that a
future LDK upgrade will let us do so. In the meantime, there is no
reason to go on-chain unless the user needs funds immediately. In
that case, the user should already have logic to force-close
channels with peers which are unavailable for any reason.
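As an example of that user-side logic, an application that urgently
needs its funds might do something like the following sketch. Only
list_channels, ChannelDetails::channel_id, counterparty.node_id, and
force_close_channel are LDK API here (as of this era);
need_funds_now and peer_unavailable_too_long are hypothetical
application helpers:

// `channel_manager` is the application's LDK ChannelManager.
for chan in channel_manager.list_channels() {
    // Hypothetical application policy - LDK provides neither helper.
    if need_funds_now() && peer_unavailable_too_long(&chan.counterparty.node_id) {
        // Goes on-chain by broadcasting our latest commitment transaction.
        let _ = channel_manager.force_close_channel(&chan.channel_id);
    }
}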
let channel_state = &mut *channel_state_lock;
let pending_msg_events = &mut channel_state.pending_msg_events;
let short_to_id = &mut channel_state.short_to_id;
- if no_connection_possible {
- log_debug!(self.logger, "Failing all channels with {} due to no_connection_possible", log_pubkey!(counterparty_node_id));
- channel_state.by_id.retain(|_, chan| {
- if chan.get_counterparty_node_id() == *counterparty_node_id {
+ log_debug!(self.logger, "Marking channels with {} disconnected and generating channel_updates. We believe we {} make future connections to this peer.",
+ log_pubkey!(counterparty_node_id), if no_connection_possible { "cannot" } else { "can" });
+ channel_state.by_id.retain(|_, chan| {
+ if chan.get_counterparty_node_id() == *counterparty_node_id {
+ chan.remove_uncommitted_htlcs_and_mark_paused(&self.logger);
+ if chan.is_shutdown() {
update_maps_on_chan_removal!(self, short_to_id, chan);
- failed_channels.push(chan.force_shutdown(true));
- if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
- pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
- msg: update
- });
- }
self.issue_channel_close_events(chan, ClosureReason::DisconnectedPeer);
- false
- } else {
- true
- }
- });
- } else {
- log_debug!(self.logger, "Marking channels with {} disconnected and generating channel_updates", log_pubkey!(counterparty_node_id));
- channel_state.by_id.retain(|_, chan| {
- if chan.get_counterparty_node_id() == *counterparty_node_id {
- chan.remove_uncommitted_htlcs_and_mark_paused(&self.logger);
- if chan.is_shutdown() {
- update_maps_on_chan_removal!(self, short_to_id, chan);
- self.issue_channel_close_events(chan, ClosureReason::DisconnectedPeer);
- return false;
- } else {
- no_channels_remain = false;
- }
- }
- true
- });
- }
+ return false;
+ } else {
+ no_channels_remain = false;
+ }
+ }
+ true
+ });
pending_msg_events.retain(|msg| {
match msg {
&events::MessageSendEvent::SendAcceptChannel { ref node_id, .. } => node_id != counterparty_node_id,
send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2], &nodes[3], &nodes[4])[..], 8000000);
// Simple case with no pending HTLCs:
- nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), true);
+ nodes[1].node.force_close_channel(&chan_1.2).unwrap();
check_added_monitors!(nodes[1], 1);
- check_closed_broadcast!(nodes[1], false);
+ check_closed_broadcast!(nodes[1], true);
{
let mut node_txn = test_txn_broadcast(&nodes[1], &chan_1, None, HTLCType::NONE);
assert_eq!(node_txn.len(), 1);
assert_eq!(nodes[0].node.list_channels().len(), 0);
assert_eq!(nodes[1].node.list_channels().len(), 1);
check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
- check_closed_event!(nodes[1], 1, ClosureReason::DisconnectedPeer);
+ check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed);
// One pending HTLC is discarded by the force-close:
let payment_preimage_1 = route_payment(&nodes[1], &vec!(&nodes[2], &nodes[3])[..], 3000000).0;
// Simple case of one pending HTLC to HTLC-Timeout (note that the HTLC-Timeout is not
// broadcasted until we reach the timelock time).
- nodes[1].node.peer_disconnected(&nodes[2].node.get_our_node_id(), true);
- check_closed_broadcast!(nodes[1], false);
+ nodes[1].node.force_close_channel(&chan_2.2).unwrap();
+ check_closed_broadcast!(nodes[1], true);
check_added_monitors!(nodes[1], 1);
{
let mut node_txn = test_txn_broadcast(&nodes[1], &chan_2, None, HTLCType::NONE);
check_closed_broadcast!(nodes[2], true);
assert_eq!(nodes[1].node.list_channels().len(), 0);
assert_eq!(nodes[2].node.list_channels().len(), 1);
- check_closed_event!(nodes[1], 1, ClosureReason::DisconnectedPeer);
+ check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed);
check_closed_event!(nodes[2], 1, ClosureReason::CommitmentTxConfirmed);
macro_rules! claim_funds {
// nodes[3] gets the preimage, but nodes[2] already disconnected, resulting in a nodes[2]
// HTLC-Timeout and a nodes[3] claim against it (+ its own announces)
- nodes[2].node.peer_disconnected(&nodes[3].node.get_our_node_id(), true);
+ nodes[2].node.force_close_channel(&chan_3.2).unwrap();
check_added_monitors!(nodes[2], 1);
- check_closed_broadcast!(nodes[2], false);
+ check_closed_broadcast!(nodes[2], true);
let node2_commitment_txid;
{
let node_txn = test_txn_broadcast(&nodes[2], &chan_3, None, HTLCType::NONE);
check_closed_broadcast!(nodes[3], true);
assert_eq!(nodes[2].node.list_channels().len(), 0);
assert_eq!(nodes[3].node.list_channels().len(), 1);
- check_closed_event!(nodes[2], 1, ClosureReason::DisconnectedPeer);
+ check_closed_event!(nodes[2], 1, ClosureReason::HolderForceClosed);
check_closed_event!(nodes[3], 1, ClosureReason::CommitmentTxConfirmed);
// Drop the ChannelMonitor for the previous channel to avoid it broadcasting transactions and
/// descriptor.
#[derive(Clone)]
pub struct PeerHandleError {
- /// Used to indicate that we probably can't make any future connections to this peer, implying
- /// we should go ahead and force-close any channels we have with it.
+ /// Used to indicate that we probably can't make any future connections to this peer (e.g.
+ /// because we required features that our peer was missing, or vice versa).
+ ///
+ /// While LDK's [`ChannelManager`] will not do it automatically, you likely wish to force-close
+ /// any channels with this peer or check for new versions of LDK.
+ ///
+ /// [`ChannelManager`]: crate::ln::channelmanager::ChannelManager
pub no_connection_possible: bool,
}
impl fmt::Debug for PeerHandleError {
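For reference, a sketch of how an application might react to this
flag when an LDK socket-handling call fails. read_event and
PeerHandleError are LDK API of this era; their_node_id and the
closure policy are hypothetical application code:

match peer_manager.read_event(&mut descriptor, data) {
    Ok(_pause_read) => { /* proceed as usual */ },
    Err(PeerHandleError { no_connection_possible: true }) => {
        // Mutually incompatible required features: reconnecting won't
        // help, so the application may choose to reclaim funds on-chain.
        for chan in channel_manager.list_channels() {
            if chan.counterparty.node_id == their_node_id {
                let _ = channel_manager.force_close_channel(&chan.channel_id);
            }
        }
    },
    Err(PeerHandleError { no_connection_possible: false }) => {
        // Transient failure - the peer may accept a future connection.
    },
}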
/// A developer-readable error message which we generated.
err: String,
},
- /// The `PeerManager` informed us that we've disconnected from the peer. We close channels
- /// if the `PeerManager` informed us that it is unlikely we'll be able to connect to the
- /// peer again in the future or if the peer disconnected before we finished negotiating
- /// the channel open. The first case may be caused by incompatible features which our
- /// counterparty, or we, require.
- //TODO: split between PeerUnconnectable/PeerDisconnected ?
+ /// The peer disconnected prior to funding completing. In this case the spec mandates that we
+ /// forget the channel entirely - we can attempt again if the peer reconnects.
+ ///
+ /// In LDK versions prior to 0.0.107 this could also occur if we were unable to connect to the
+ /// peer because of mutual incompatibility between us and our channel counterparty.
DisconnectedPeer,
/// Closure generated from `ChannelManager::read` if the ChannelMonitor is newer than
/// the ChannelManager deserialized.
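A sketch of consuming this reason in an application's event handler
(Event and ClosureReason are from lightning::util::events as of this
era; the handler itself and its logging are hypothetical):

use lightning::util::events::{Event, ClosureReason};

fn handle_event(event: Event) {
    if let Event::ChannelClosed { channel_id, reason, .. } = event {
        match reason {
            // Peer disconnected pre-funding: the channel is simply
            // forgotten and nothing goes on-chain; we may retry once
            // the peer reconnects.
            ClosureReason::DisconnectedPeer => println!("Forgot unfunded channel {:?}", channel_id),
            // We force-closed: our commitment transaction was broadcast.
            ClosureReason::HolderForceClosed => println!("Force-closed channel {:?}", channel_id),
            other => println!("Channel {:?} closed: {:?}", channel_id, other),
        }
    }
}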