let channel_state = &mut *channel_state_lock;
let pending_msg_events = &mut channel_state.pending_msg_events;
let short_to_id = &mut channel_state.short_to_id;
- if no_connection_possible {
- log_debug!(self.logger, "Failing all channels with {} due to no_connection_possible", log_pubkey!(counterparty_node_id));
- channel_state.by_id.retain(|_, chan| {
- if chan.get_counterparty_node_id() == *counterparty_node_id {
+ log_debug!(self.logger, "Marking channels with {} disconnected and generating channel_updates. We believe we {} make future connections to this peer.",
+ log_pubkey!(counterparty_node_id), if no_connection_possible { "cannot" } else { "can" });
+ channel_state.by_id.retain(|_, chan| {
+ if chan.get_counterparty_node_id() == *counterparty_node_id {
+ chan.remove_uncommitted_htlcs_and_mark_paused(&self.logger);
+ if chan.is_shutdown() {
update_maps_on_chan_removal!(self, short_to_id, chan);
- failed_channels.push(chan.force_shutdown(true));
- if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
- pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
- msg: update
- });
- }
self.issue_channel_close_events(chan, ClosureReason::DisconnectedPeer);
- false
+ return false;
} else {
- true
- }
- });
- } else {
- log_debug!(self.logger, "Marking channels with {} disconnected and generating channel_updates", log_pubkey!(counterparty_node_id));
- channel_state.by_id.retain(|_, chan| {
- if chan.get_counterparty_node_id() == *counterparty_node_id {
- chan.remove_uncommitted_htlcs_and_mark_paused(&self.logger);
- if chan.is_shutdown() {
- update_maps_on_chan_removal!(self, short_to_id, chan);
- self.issue_channel_close_events(chan, ClosureReason::DisconnectedPeer);
- return false;
- } else {
- no_channels_remain = false;
- }
+ no_channels_remain = false;
}
- true
- })
- }
+ }
+ true
+ });
pending_msg_events.retain(|msg| {
match msg {
&events::MessageSendEvent::SendAcceptChannel { ref node_id, .. } => node_id != counterparty_node_id,
send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2], &nodes[3], &nodes[4])[..], 8000000);
// Simple case with no pending HTLCs:
- nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), true);
+ nodes[1].node.force_close_channel(&chan_1.2).unwrap();
check_added_monitors!(nodes[1], 1);
- check_closed_broadcast!(nodes[1], false);
+ check_closed_broadcast!(nodes[1], true);
{
let mut node_txn = test_txn_broadcast(&nodes[1], &chan_1, None, HTLCType::NONE);
assert_eq!(node_txn.len(), 1);
assert_eq!(nodes[0].node.list_channels().len(), 0);
assert_eq!(nodes[1].node.list_channels().len(), 1);
check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
- check_closed_event!(nodes[1], 1, ClosureReason::DisconnectedPeer);
+ check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed);
// One pending HTLC is discarded by the force-close:
let payment_preimage_1 = route_payment(&nodes[1], &vec!(&nodes[2], &nodes[3])[..], 3000000).0;
// Simple case of one pending HTLC to HTLC-Timeout (note that the HTLC-Timeout is not
// broadcasted until we reach the timelock time).
- nodes[1].node.peer_disconnected(&nodes[2].node.get_our_node_id(), true);
- check_closed_broadcast!(nodes[1], false);
+ nodes[1].node.force_close_channel(&chan_2.2).unwrap();
+ check_closed_broadcast!(nodes[1], true);
check_added_monitors!(nodes[1], 1);
{
let mut node_txn = test_txn_broadcast(&nodes[1], &chan_2, None, HTLCType::NONE);
check_closed_broadcast!(nodes[2], true);
assert_eq!(nodes[1].node.list_channels().len(), 0);
assert_eq!(nodes[2].node.list_channels().len(), 1);
- check_closed_event!(nodes[1], 1, ClosureReason::DisconnectedPeer);
+ check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed);
check_closed_event!(nodes[2], 1, ClosureReason::CommitmentTxConfirmed);
macro_rules! claim_funds {
- // nodes[3] gets the preimage, but nodes[2] already disconnected, resulting in a nodes[2]
- // HTLC-Timeout and a nodes[3] claim against it (+ its own announces)
+ // nodes[3] gets the preimage, but nodes[2] has already force-closed the channel, resulting in a
+ // nodes[2] HTLC-Timeout and a nodes[3] claim against it (+ its own announces)
- nodes[2].node.peer_disconnected(&nodes[3].node.get_our_node_id(), true);
+ nodes[2].node.force_close_channel(&chan_3.2).unwrap();
check_added_monitors!(nodes[2], 1);
- check_closed_broadcast!(nodes[2], false);
+ check_closed_broadcast!(nodes[2], true);
let node2_commitment_txid;
{
let node_txn = test_txn_broadcast(&nodes[2], &chan_3, None, HTLCType::NONE);
check_closed_broadcast!(nodes[3], true);
assert_eq!(nodes[2].node.list_channels().len(), 0);
assert_eq!(nodes[3].node.list_channels().len(), 1);
- check_closed_event!(nodes[2], 1, ClosureReason::DisconnectedPeer);
+ check_closed_event!(nodes[2], 1, ClosureReason::HolderForceClosed);
check_closed_event!(nodes[3], 1, ClosureReason::CommitmentTxConfirmed);
// Drop the ChannelMonitor for the previous channel to avoid it broadcasting transactions and
/// A developer-readable error message which we generated.
err: String,
},
- /// The `PeerManager` informed us that we've disconnected from the peer. We close channels
- /// if the `PeerManager` informed us that it is unlikely we'll be able to connect to the
- /// peer again in the future or if the peer disconnected before we finished negotiating
- /// the channel open. The first case may be caused by incompatible features which our
- /// counterparty, or we, require.
- //TODO: split between PeerUnconnectable/PeerDisconnected ?
+ /// The peer disconnected prior to funding completing. In this case the spec mandates that we
+ /// forget the channel entirely - we can attempt to open the channel again if the peer
+ /// reconnects.
+ ///
+ /// In LDK versions prior to 0.0.107 this could also occur if we believed we would be unable to
+ /// reconnect to the peer, e.g. due to mutual feature incompatibility between us and our channel
+ /// counterparty.
DisconnectedPeer,
/// Closure generated from `ChannelManager::read` if the ChannelMonitor is newer than
/// the ChannelManager deserialized.