git.bitcoin.ninja Git - rust-lightning/commitdiff
Do not force-close channels when we cannot communicate with peers (branch: 2022-04-drop-no-conn-possible)
author Matt Corallo <git@bluematt.me>
Mon, 18 Apr 2022 02:10:44 +0000 (02:10 +0000)
committer Matt Corallo <git@bluematt.me>
Thu, 28 Apr 2022 02:50:06 +0000 (02:50 +0000)
In general, we should never be automatically force-closing our
users' channels unless there is some immediate risk of funds loss
(i.e. because of some HTLC(s) which are timing out soon). In any
other case, we should trust the user to be able to figure out what
is going on and close their channels manually instead of trying to
be overly clever and automate closures if we think the channel is
useless.

In this case, even if a peer requires a feature that we do not
support, making it impossible for us to communicate with them,
there is a strong possibility that some future LDK upgrade will
allow us to do so. In the meantime, there is no reason to go
on-chain unless the user needs funds immediately. In such a case,
the user should already have logic to force-close channels with
peers which are unavailable for any reason.
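
(For illustration only, not part of this patch: a minimal sketch of
such application-side logic, assuming `channel_manager` is the
application's ChannelManager and `unreachable_peer` is the peer's
PublicKey. It uses ChannelManager::list_channels and the
channel-id-only force_close_channel call seen in the tests updated
below; field names such as counterparty.node_id and the exact
signatures may differ in other LDK versions.)

    // Sketch only: application-defined policy for a peer we have decided is
    // permanently unreachable. `channel_manager` and `unreachable_peer` are
    // assumed to be defined by the surrounding application code.
    for details in channel_manager.list_channels() {
        if details.counterparty.node_id == unreachable_peer {
            // LDK no longer does this automatically; the application decides
            // when going on-chain is actually warranted.
            let _ = channel_manager.force_close_channel(&details.channel_id);
        }
    }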

lightning/src/ln/channelmanager.rs
lightning/src/ln/functional_tests.rs
lightning/src/ln/peer_handler.rs
lightning/src/util/events.rs

index d7f2aeb6e4132374cc7a16d1c04361f65a3db358..cc299607236f676392f587a6253561be521b1050 100644 (file)
@@ -5699,39 +5699,21 @@ impl<Signer: Sign, M: Deref , T: Deref , K: Deref , F: Deref , L: Deref >
                        let channel_state = &mut *channel_state_lock;
                        let pending_msg_events = &mut channel_state.pending_msg_events;
                        let short_to_id = &mut channel_state.short_to_id;
-                       if no_connection_possible {
-                               log_debug!(self.logger, "Failing all channels with {} due to no_connection_possible", log_pubkey!(counterparty_node_id));
-                               channel_state.by_id.retain(|_, chan| {
-                                       if chan.get_counterparty_node_id() == *counterparty_node_id {
+                       log_debug!(self.logger, "Marking channels with {} disconnected and generating channel_updates. We believe we {} make future connections to this peer.",
+                               log_pubkey!(counterparty_node_id), if no_connection_possible { "cannot" } else { "can" });
+                       channel_state.by_id.retain(|_, chan| {
+                               if chan.get_counterparty_node_id() == *counterparty_node_id {
+                                       chan.remove_uncommitted_htlcs_and_mark_paused(&self.logger);
+                                       if chan.is_shutdown() {
                                                update_maps_on_chan_removal!(self, short_to_id, chan);
-                                               failed_channels.push(chan.force_shutdown(true));
-                                               if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
-                                                       pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
-                                                               msg: update
-                                                       });
-                                               }
                                                self.issue_channel_close_events(chan, ClosureReason::DisconnectedPeer);
-                                               false
+                                               return false;
                                        } else {
-                                               true
-                                       }
-                               });
-                       } else {
-                               log_debug!(self.logger, "Marking channels with {} disconnected and generating channel_updates", log_pubkey!(counterparty_node_id));
-                               channel_state.by_id.retain(|_, chan| {
-                                       if chan.get_counterparty_node_id() == *counterparty_node_id {
-                                               chan.remove_uncommitted_htlcs_and_mark_paused(&self.logger);
-                                               if chan.is_shutdown() {
-                                                       update_maps_on_chan_removal!(self, short_to_id, chan);
-                                                       self.issue_channel_close_events(chan, ClosureReason::DisconnectedPeer);
-                                                       return false;
-                                               } else {
-                                                       no_channels_remain = false;
-                                               }
+                                               no_channels_remain = false;
                                        }
-                                       true
-                               })
-                       }
+                               }
+                               true
+                       });
                        pending_msg_events.retain(|msg| {
                                match msg {
                                        &events::MessageSendEvent::SendAcceptChannel { ref node_id, .. } => node_id != counterparty_node_id,
index d7bebca0e4dff41088c4583632a302f700124314..6cc0c45bd0180b74d86b11d20c348d196d323117 100644 (file)
@@ -2156,9 +2156,9 @@ fn channel_monitor_network_test() {
        send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2], &nodes[3], &nodes[4])[..], 8000000);
 
        // Simple case with no pending HTLCs:
-       nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), true);
+       nodes[1].node.force_close_channel(&chan_1.2).unwrap();
        check_added_monitors!(nodes[1], 1);
-       check_closed_broadcast!(nodes[1], false);
+       check_closed_broadcast!(nodes[1], true);
        {
                let mut node_txn = test_txn_broadcast(&nodes[1], &chan_1, None, HTLCType::NONE);
                assert_eq!(node_txn.len(), 1);
@@ -2170,15 +2170,15 @@ fn channel_monitor_network_test() {
        assert_eq!(nodes[0].node.list_channels().len(), 0);
        assert_eq!(nodes[1].node.list_channels().len(), 1);
        check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
-       check_closed_event!(nodes[1], 1, ClosureReason::DisconnectedPeer);
+       check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed);
 
        // One pending HTLC is discarded by the force-close:
        let payment_preimage_1 = route_payment(&nodes[1], &vec!(&nodes[2], &nodes[3])[..], 3000000).0;
 
        // Simple case of one pending HTLC to HTLC-Timeout (note that the HTLC-Timeout is not
        // broadcasted until we reach the timelock time).
-       nodes[1].node.peer_disconnected(&nodes[2].node.get_our_node_id(), true);
-       check_closed_broadcast!(nodes[1], false);
+       nodes[1].node.force_close_channel(&chan_2.2).unwrap();
+       check_closed_broadcast!(nodes[1], true);
        check_added_monitors!(nodes[1], 1);
        {
                let mut node_txn = test_txn_broadcast(&nodes[1], &chan_2, None, HTLCType::NONE);
@@ -2191,7 +2191,7 @@ fn channel_monitor_network_test() {
        check_closed_broadcast!(nodes[2], true);
        assert_eq!(nodes[1].node.list_channels().len(), 0);
        assert_eq!(nodes[2].node.list_channels().len(), 1);
-       check_closed_event!(nodes[1], 1, ClosureReason::DisconnectedPeer);
+       check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed);
        check_closed_event!(nodes[2], 1, ClosureReason::CommitmentTxConfirmed);
 
        macro_rules! claim_funds {
@@ -2216,9 +2216,9 @@ fn channel_monitor_network_test() {
 
        // nodes[3] gets the preimage, but nodes[2] already disconnected, resulting in a nodes[2]
        // HTLC-Timeout and a nodes[3] claim against it (+ its own announces)
-       nodes[2].node.peer_disconnected(&nodes[3].node.get_our_node_id(), true);
+       nodes[2].node.force_close_channel(&chan_3.2).unwrap();
        check_added_monitors!(nodes[2], 1);
-       check_closed_broadcast!(nodes[2], false);
+       check_closed_broadcast!(nodes[2], true);
        let node2_commitment_txid;
        {
                let node_txn = test_txn_broadcast(&nodes[2], &chan_3, None, HTLCType::NONE);
@@ -2235,7 +2235,7 @@ fn channel_monitor_network_test() {
        check_closed_broadcast!(nodes[3], true);
        assert_eq!(nodes[2].node.list_channels().len(), 0);
        assert_eq!(nodes[3].node.list_channels().len(), 1);
-       check_closed_event!(nodes[2], 1, ClosureReason::DisconnectedPeer);
+       check_closed_event!(nodes[2], 1, ClosureReason::HolderForceClosed);
        check_closed_event!(nodes[3], 1, ClosureReason::CommitmentTxConfirmed);
 
        // Drop the ChannelMonitor for the previous channel to avoid it broadcasting transactions and
index c09df17525972ea242fefc1184a5babf070c9802..99d302b5d292a337bdf67edd00395d85b08f4910 100644 (file)
@@ -257,8 +257,13 @@ pub trait SocketDescriptor : cmp::Eq + hash::Hash + Clone {
 /// descriptor.
 #[derive(Clone)]
 pub struct PeerHandleError {
-       /// Used to indicate that we probably can't make any future connections to this peer, implying
-       /// we should go ahead and force-close any channels we have with it.
+       /// Used to indicate that we probably can't make any future connections to this peer (e.g.
+       /// because we required features that our peer was missing, or vice versa).
+       ///
+       /// While LDK's [`ChannelManager`] will not do it automatically, you likely wish to force-close
+       /// any channels with this peer or check for new versions of LDK.
+       ///
+       /// [`ChannelManager`]: crate::ln::channelmanager::ChannelManager
        pub no_connection_possible: bool,
 }
 impl fmt::Debug for PeerHandleError {
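
(Illustrative sketch, not from this patch: one way an application's
socket read loop might react to this flag now that LDK no longer
force-closes on its own. `peer_manager`, `channel_manager`,
`descriptor`, `node_id` and `data` are assumed to be provided by the
application; PeerManager::read_event is assumed to return
PeerHandleError on failure as in this LDK era.)

    // Sketch: reacting to PeerHandleError in an application read loop.
    match peer_manager.read_event(&mut descriptor, &data) {
        Ok(_pause_read) => {},
        Err(e) => {
            if e.no_connection_possible {
                // Optional, application-defined policy: either wait for an
                // LDK upgrade that understands the peer's required features,
                // or go on-chain now if the funds are needed immediately.
                for details in channel_manager.list_channels() {
                    if details.counterparty.node_id == node_id {
                        let _ = channel_manager.force_close_channel(&details.channel_id);
                    }
                }
            }
        }
    }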
index f35dc14e03555bed83d94e48c9702e6d50a36bc9..fe4e77439e0431e1c433e8ca5e7db09f3cd2dd6e 100644 (file)
@@ -100,12 +100,11 @@ pub enum ClosureReason {
                /// A developer-readable error message which we generated.
                err: String,
        },
-       /// The `PeerManager` informed us that we've disconnected from the peer. We close channels
-       /// if the `PeerManager` informed us that it is unlikely we'll be able to connect to the
-       /// peer again in the future or if the peer disconnected before we finished negotiating
-       /// the channel open. The first case may be caused by incompatible features which our
-       /// counterparty, or we, require.
-       //TODO: split between PeerUnconnectable/PeerDisconnected ?
+       /// The peer disconnected prior to funding completing. In this case the spec mandates that we
+       /// forget the channel entirely - we can attempt again if the peer reconnects.
+       ///
+       /// In LDK versions prior to 0.0.107 this could also occur if we were unable to connect to the
+       /// peer because of mutual incompatibility between us and our channel counterparty.
        DisconnectedPeer,
        /// Closure generated from `ChannelManager::read` if the ChannelMonitor is newer than
        /// the ChannelManager deserialized.
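
(Illustrative sketch, not from this patch: an application event
handler distinguishing the narrowed DisconnectedPeer reason from a
holder-initiated force-close. `handle_closed` is a hypothetical
helper name, and the Event::ChannelClosed { channel_id, reason, .. }
shape is assumed to match this LDK era.)

    use lightning::util::events::{ClosureReason, Event};

    // Sketch only: how an application might tell the two closure reasons apart.
    fn handle_closed(event: Event) {
        if let Event::ChannelClosed { channel_id, reason, .. } = event {
            match reason {
                // After this change this only means the peer disconnected before
                // funding completed; the channel was forgotten, nothing on-chain.
                ClosureReason::DisconnectedPeer =>
                    println!("channel {:?} forgotten (peer disconnected pre-funding)", channel_id),
                // A manual ChannelManager::force_close_channel call (as in the
                // updated tests above) now surfaces as HolderForceClosed.
                ClosureReason::HolderForceClosed =>
                    println!("channel {:?} force-closed by us", channel_id),
                _ => {},
            }
        }
    }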