}
// Force all channels onto the chain (and time out claim txn)
- self.manager.force_close_all_channels();
+ self.manager.force_close_all_channels_broadcasting_latest_txn();
}
}
}
let channel_id = get_slice!(1)[0] as usize;
if channel_id >= channels.len() { return; }
channels.sort_by(|a, b| { a.channel_id.cmp(&b.channel_id) });
- channelmanager.force_close_channel(&channels[channel_id].channel_id, &channels[channel_id].counterparty.node_id).unwrap();
+ channelmanager.force_close_broadcasting_latest_txn(&channels[channel_id].channel_id, &channels[channel_id].counterparty.node_id).unwrap();
},
// 15 is above
_ => return,
}
// Force-close the channel.
- nodes[0].node.force_close_channel(&OutPoint { txid: tx.txid(), index: 0 }.to_channel_id(), &nodes[1].node.get_our_node_id()).unwrap();
+ nodes[0].node.force_close_broadcasting_latest_txn(&OutPoint { txid: tx.txid(), index: 0 }.to_channel_id(), &nodes[1].node.get_our_node_id()).unwrap();
// Check that the force-close updates are persisted.
check_persisted_data!(nodes[0].node, filepath.clone());
let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].no_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));
// Force close the channel and check that the SpendableOutputs event was handled.
- nodes[0].node.force_close_channel(&nodes[0].node.list_channels()[0].channel_id, &nodes[1].node.get_our_node_id()).unwrap();
+ nodes[0].node.force_close_broadcasting_latest_txn(&nodes[0].node.list_channels()[0].channel_id, &nodes[1].node.get_our_node_id()).unwrap();
let commitment_tx = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().pop().unwrap();
confirm_transaction_depth(&mut nodes[0], &commitment_tx, BREAKDOWN_TIMEOUT as u32);
let event = receiver
// Force close because cooperative close doesn't result in any persisted
// updates.
- nodes[0].node.force_close_channel(&nodes[0].node.list_channels()[0].channel_id, &nodes[1].node.get_our_node_id()).unwrap();
+ nodes[0].node.force_close_broadcasting_latest_txn(&nodes[0].node.list_channels()[0].channel_id, &nodes[1].node.get_our_node_id()).unwrap();
check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed);
check_closed_broadcast!(nodes[0], true);
check_added_monitors!(nodes[0], 1);
let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
let chan = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
- nodes[1].node.force_close_channel(&chan.2, &nodes[0].node.get_our_node_id()).unwrap();
+ nodes[1].node.force_close_broadcasting_latest_txn(&chan.2, &nodes[0].node.get_our_node_id()).unwrap();
check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed);
let mut added_monitors = nodes[1].chain_monitor.added_monitors.lock().unwrap();
let update_map = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap();
let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
let chan = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
- nodes[1].node.force_close_channel(&chan.2, &nodes[0].node.get_our_node_id()).unwrap();
+ nodes[1].node.force_close_broadcasting_latest_txn(&chan.2, &nodes[0].node.get_our_node_id()).unwrap();
check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed);
let mut added_monitors = nodes[1].chain_monitor.added_monitors.lock().unwrap();
let update_map = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap();
}
// ...and make sure we can force-close a frozen channel
- nodes[0].node.force_close_channel(&channel_id, &nodes[1].node.get_our_node_id()).unwrap();
+ nodes[0].node.force_close_broadcasting_latest_txn(&channel_id, &nodes[1].node.get_our_node_id()).unwrap();
check_added_monitors!(nodes[0], 1);
check_closed_broadcast!(nodes[0], true);
/// `peer_msg` should be set when we receive a message from a peer, but left unset when the
/// user closes; it will be re-exposed as the `ChannelClosed` reason.
- fn force_close_channel_with_peer(&self, channel_id: &[u8; 32], peer_node_id: &PublicKey, peer_msg: Option<&String>) -> Result<PublicKey, APIError> {
+ fn force_close_channel_with_peer(&self, channel_id: &[u8; 32], peer_node_id: &PublicKey, peer_msg: Option<&String>, broadcast: bool)
+ -> Result<PublicKey, APIError> {
let mut chan = {
let mut channel_state_lock = self.channel_state.lock().unwrap();
let channel_state = &mut *channel_state_lock;
}
};
log_error!(self.logger, "Force-closing channel {}", log_bytes!(channel_id[..]));
- self.finish_force_close_channel(chan.force_shutdown(true));
+ self.finish_force_close_channel(chan.force_shutdown(broadcast));
if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
let mut channel_state = self.channel_state.lock().unwrap();
channel_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
Ok(chan.get_counterparty_node_id())
}
- /// Force closes a channel, immediately broadcasting the latest local commitment transaction to
- /// the chain and rejecting new HTLCs on the given channel. Fails if `channel_id` is unknown to
- /// the manager, or if the `counterparty_node_id` isn't the counterparty of the corresponding
- /// channel.
- pub fn force_close_channel(&self, channel_id: &[u8; 32], counterparty_node_id: &PublicKey) -> Result<(), APIError> {
+ fn force_close_sending_error(&self, channel_id: &[u8; 32], counterparty_node_id: &PublicKey, broadcast: bool) -> Result<(), APIError> {
let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
- match self.force_close_channel_with_peer(channel_id, counterparty_node_id, None) {
+ match self.force_close_channel_with_peer(channel_id, counterparty_node_id, None, broadcast) {
Ok(counterparty_node_id) => {
self.channel_state.lock().unwrap().pending_msg_events.push(
events::MessageSendEvent::HandleError {
}
}
+ /// Force closes a channel, immediately broadcasting the latest local transaction(s) and
+ /// rejecting new HTLCs on the given channel. Fails if `channel_id` is unknown to
+ /// the manager, or if the `counterparty_node_id` isn't the counterparty of the corresponding
+ /// channel.
+ pub fn force_close_broadcasting_latest_txn(&self, channel_id: &[u8; 32], counterparty_node_id: &PublicKey)
+ -> Result<(), APIError> {
+ self.force_close_sending_error(channel_id, counterparty_node_id, true)
+ }
+
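
A minimal call-site sketch of the broadcasting variant, mirroring the test usage elsewhere in this diff (`channel_manager` stands in for any `ChannelManager` handle):

// Look up a channel and force-close it, putting our latest commitment
// transaction on chain. Errors if the id/counterparty pair is unknown.
let chan = &channel_manager.list_channels()[0];
channel_manager
    .force_close_broadcasting_latest_txn(&chan.channel_id, &chan.counterparty.node_id)
    .expect("known channel_id and matching counterparty");
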
+ /// Force closes a channel, rejecting new HTLCs on the given channel but without broadcasting
+ /// the latest local transaction(s). Fails if `channel_id` is unknown to the manager, or if the
+ /// `counterparty_node_id` isn't the counterparty of the corresponding channel.
+ ///
+ /// You can always get the latest local transaction(s) to broadcast from
+ /// [`ChannelMonitor::get_latest_holder_commitment_txn`].
+ pub fn force_close_without_broadcasting_txn(&self, channel_id: &[u8; 32], counterparty_node_id: &PublicKey)
+ -> Result<(), APIError> {
+ self.force_close_sending_error(channel_id, counterparty_node_id, false)
+ }
+
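
A sketch of the non-broadcasting path combined with manual recovery of the commitment transaction(s), as the doc above suggests; `monitor`, `logger`, and `broadcaster` are assumed handles to the channel's `ChannelMonitor`, a `Logger`, and a `BroadcasterInterface`:

// Stop accepting HTLCs without committing our latest state to the chain.
channel_manager.force_close_without_broadcasting_txn(&channel_id, &counterparty_node_id)?;
// If we later decide to go on chain, the holder commitment transaction(s) are
// still available from the ChannelMonitor and can be broadcast by hand.
for tx in monitor.get_latest_holder_commitment_txn(&logger) {
    broadcaster.broadcast_transaction(&tx);
}
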
/// Force close all channels, immediately broadcasting the latest local commitment transaction
/// for each to the chain and rejecting new HTLCs on each.
- pub fn force_close_all_channels(&self) {
+ pub fn force_close_all_channels_broadcasting_latest_txn(&self) {
+ for chan in self.list_channels() {
+ let _ = self.force_close_broadcasting_latest_txn(&chan.channel_id, &chan.counterparty.node_id);
+ }
+ }
+
+ /// Force close all channels, rejecting new HTLCs on each but without broadcasting the latest
+ /// local transaction(s).
+ pub fn force_close_all_channels_without_broadcasting_txn(&self) {
for chan in self.list_channels() {
- let _ = self.force_close_channel(&chan.channel_id, &chan.counterparty.node_id);
+ let _ = self.force_close_without_broadcasting_txn(&chan.channel_id, &chan.counterparty.node_id);
}
}
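
Both all-channels helpers are thin wrappers that loop over `list_channels` and ignore per-channel errors (note the `let _ =` above), so they are safe on shutdown paths. A hedged sketch of the two alternatives:

// Ordinary shutdown: put every channel on chain with its latest state.
channel_manager.force_close_all_channels_broadcasting_latest_txn();
// Or, when local state may be stale (e.g. restored from an old backup), close
// without broadcasting and recover via each channel's ChannelMonitor instead.
channel_manager.force_close_all_channels_without_broadcasting_txn();
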
for chan in self.list_channels() {
if chan.counterparty.node_id == *counterparty_node_id {
// Untrusted messages from peer; we throw away the error if the id points to a non-existent channel
- let _ = self.force_close_channel_with_peer(&chan.channel_id, counterparty_node_id, Some(&msg.data));
+ let _ = self.force_close_channel_with_peer(&chan.channel_id, counterparty_node_id, Some(&msg.data), true);
}
}
} else {
}
// Untrusted messages from peer; we throw away the error if the id points to a non-existent channel
- let _ = self.force_close_channel_with_peer(&msg.channel_id, counterparty_node_id, Some(&msg.data));
+ let _ = self.force_close_channel_with_peer(&msg.channel_id, counterparty_node_id, Some(&msg.data), true);
}
}
}
send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2], &nodes[3], &nodes[4])[..], 8000000);
// Simple case with no pending HTLCs:
- nodes[1].node.force_close_channel(&chan_1.2, &nodes[0].node.get_our_node_id()).unwrap();
+ nodes[1].node.force_close_broadcasting_latest_txn(&chan_1.2, &nodes[0].node.get_our_node_id()).unwrap();
check_added_monitors!(nodes[1], 1);
check_closed_broadcast!(nodes[1], true);
{
// Simple case of one pending HTLC to HTLC-Timeout (note that the HTLC-Timeout is not
// broadcasted until we reach the timelock time).
- nodes[1].node.force_close_channel(&chan_2.2, &nodes[2].node.get_our_node_id()).unwrap();
+ nodes[1].node.force_close_broadcasting_latest_txn(&chan_2.2, &nodes[2].node.get_our_node_id()).unwrap();
check_closed_broadcast!(nodes[1], true);
check_added_monitors!(nodes[1], 1);
{
// nodes[3] gets the preimage, but nodes[2] already disconnected, resulting in a nodes[2]
// HTLC-Timeout and a nodes[3] claim against it (+ its own announces)
- nodes[2].node.force_close_channel(&chan_3.2, &nodes[3].node.get_our_node_id()).unwrap();
+ nodes[2].node.force_close_broadcasting_latest_txn(&chan_3.2, &nodes[3].node.get_our_node_id()).unwrap();
check_added_monitors!(nodes[2], 1);
check_closed_broadcast!(nodes[2], true);
let node2_commitment_txid;
create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
route_payment(&nodes[0], &[&nodes[1]], 10000000);
- nodes[0].node.force_close_channel(&nodes[0].node.list_channels()[0].channel_id, &nodes[1].node.get_our_node_id()).unwrap();
+ nodes[0].node.force_close_broadcasting_latest_txn(&nodes[0].node.list_channels()[0].channel_id, &nodes[1].node.get_our_node_id()).unwrap();
connect_blocks(&nodes[0], TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS + 1);
check_closed_broadcast!(nodes[0], true);
check_added_monitors!(nodes[0], 1);
// state or updated nodes[1]'s state. Now force-close and broadcast that commitment/HTLC
// transaction and ensure nodes[1] doesn't fail-backwards (this was originally a bug!).
- nodes[2].node.force_close_channel(&payment_event.commitment_msg.channel_id, &nodes[1].node.get_our_node_id()).unwrap();
+ nodes[2].node.force_close_broadcasting_latest_txn(&payment_event.commitment_msg.channel_id, &nodes[1].node.get_our_node_id()).unwrap();
check_closed_broadcast!(nodes[2], true);
check_added_monitors!(nodes[2], 1);
check_closed_event!(nodes[2], 1, ClosureReason::HolderForceClosed);
let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100_000, 98_000_000, InitFeatures::known(), InitFeatures::known());
- nodes[1].node.force_close_channel(&chan.2, &nodes[0].node.get_our_node_id()).unwrap();
+ nodes[1].node.force_close_broadcasting_latest_txn(&chan.2, &nodes[0].node.get_our_node_id()).unwrap();
check_closed_broadcast!(nodes[1], true);
check_added_monitors!(nodes[1], 1);
check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed);
let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100_000, 98_000_000, InitFeatures::known(), InitFeatures::known());
- nodes[0].node.force_close_channel(&chan.2, &nodes[1].node.get_our_node_id()).unwrap();
+ nodes[0].node.force_close_broadcasting_latest_txn(&chan.2, &nodes[1].node.get_our_node_id()).unwrap();
check_closed_broadcast!(nodes[0], true);
check_added_monitors!(nodes[0], 1);
check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed);
_ => panic!("Unexpected event"),
}
- nodes[1].node.force_close_channel(&temp_channel_id, &nodes[0].node.get_our_node_id()).unwrap();
+ nodes[1].node.force_close_broadcasting_latest_txn(&temp_channel_id, &nodes[0].node.get_our_node_id()).unwrap();
let close_msg_ev = nodes[1].node.get_and_clear_pending_msg_events();
assert_eq!(close_msg_ev.len(), 1);
let events = nodes[1].node.get_and_clear_pending_events();
match events[0] {
Event::OpenChannelRequest { temporary_channel_id, .. } => {
- nodes[1].node.force_close_channel(&temporary_channel_id, &nodes[0].node.get_our_node_id()).unwrap();
+ nodes[1].node.force_close_broadcasting_latest_txn(&temporary_channel_id, &nodes[0].node.get_our_node_id()).unwrap();
}
_ => panic!("Unexpected event"),
}
force_closing_node = 1;
counterparty_node = 0;
}
- nodes[force_closing_node].node.force_close_channel(&chan_ab.2, &nodes[counterparty_node].node.get_our_node_id()).unwrap();
+ nodes[force_closing_node].node.force_close_broadcasting_latest_txn(&chan_ab.2, &nodes[counterparty_node].node.get_our_node_id()).unwrap();
check_closed_broadcast!(nodes[force_closing_node], true);
check_added_monitors!(nodes[force_closing_node], 1);
check_closed_event!(nodes[force_closing_node], 1, ClosureReason::HolderForceClosed);
nodes[1].node.peer_disconnected(&nodes[2].node.get_our_node_id(), false);
nodes[2].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
- nodes[1].node.force_close_channel(&channel_id, &nodes[2].node.get_our_node_id()).unwrap();
+ nodes[1].node.force_close_broadcasting_latest_txn(&channel_id, &nodes[2].node.get_our_node_id()).unwrap();
check_closed_broadcast!(nodes[1], true);
check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed);
check_added_monitors!(nodes[1], 1);
// Route a payment, but force-close the channel before the HTLC fulfill message arrives at
// nodes[0].
let (payment_preimage, payment_hash, _) = route_payment(&nodes[0], &[&nodes[1]], 10_000_000);
- nodes[0].node.force_close_channel(&nodes[0].node.list_channels()[0].channel_id, &nodes[1].node.get_our_node_id()).unwrap();
+ nodes[0].node.force_close_broadcasting_latest_txn(&nodes[0].node.list_channels()[0].channel_id, &nodes[1].node.get_our_node_id()).unwrap();
check_closed_broadcast!(nodes[0], true);
check_added_monitors!(nodes[0], 1);
check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed);
// We can use the channel immediately, but won't generate a channel_update until we get confs
send_payment(&nodes[0], &[&nodes[1]], 100_000);
- nodes[0].node.force_close_all_channels();
+ nodes[0].node.force_close_all_channels_broadcasting_latest_txn();
check_added_monitors!(nodes[0], 1);
check_closed_event!(&nodes[0], 1, ClosureReason::HolderForceClosed);
let _ = get_err_msg!(nodes[0], nodes[1].node.get_our_node_id());
/// Indicates a request to open a new channel by a peer.
///
/// To accept the request, call [`ChannelManager::accept_inbound_channel`]. To reject the
- /// request, call [`ChannelManager::force_close_channel`].
+ /// request, call [`ChannelManager::force_close_without_broadcasting_txn`].
///
/// The event is only triggered when a new open channel request is received and the
/// [`UserConfig::manually_accept_inbound_channels`] config flag is set to true.
///
/// [`ChannelManager::accept_inbound_channel`]: crate::ln::channelmanager::ChannelManager::accept_inbound_channel
- /// [`ChannelManager::force_close_channel`]: crate::ln::channelmanager::ChannelManager::force_close_channel
+ /// [`ChannelManager::force_close_without_broadcasting_txn`]: crate::ln::channelmanager::ChannelManager::force_close_without_broadcasting_txn
/// [`UserConfig::manually_accept_inbound_channels`]: crate::util::config::UserConfig::manually_accept_inbound_channels
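
A hedged handler sketch for this event (the `peer_is_trusted` policy helper is hypothetical, and `accept_inbound_channel`'s exact argument list has varied between releases):

match event {
    Event::OpenChannelRequest { temporary_channel_id, counterparty_node_id, .. } => {
        if peer_is_trusted(&counterparty_node_id) {
            channel_manager.accept_inbound_channel(&temporary_channel_id, 0).unwrap();
        } else {
            // Nothing is on chain yet, so reject without broadcasting anything.
            channel_manager
                .force_close_without_broadcasting_txn(&temporary_channel_id, &counterparty_node_id)
                .unwrap();
        }
    },
    _ => {},
}
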
OpenChannelRequest {
/// The temporary channel ID of the channel requested to be opened.
///
/// When responding to the request, the `temporary_channel_id` should be passed
/// back to the ChannelManager through [`ChannelManager::accept_inbound_channel`] to accept,
- /// or through [`ChannelManager::force_close_channel`] to reject.
+ /// or through [`ChannelManager::force_close_without_broadcasting_txn`] to reject.
///
/// [`ChannelManager::accept_inbound_channel`]: crate::ln::channelmanager::ChannelManager::accept_inbound_channel
- /// [`ChannelManager::force_close_channel`]: crate::ln::channelmanager::ChannelManager::force_close_channel
+ /// [`ChannelManager::force_close_without_broadcasting_txn`]: crate::ln::channelmanager::ChannelManager::force_close_without_broadcasting_txn
temporary_channel_id: [u8; 32],
/// The node_id of the counterparty requesting to open the channel.
///
/// When responding to the request, the `counterparty_node_id` should be passed
/// back to the `ChannelManager` through [`ChannelManager::accept_inbound_channel`] to
- /// accept the request, or through [`ChannelManager::force_close_channel`] to reject the
+ /// accept the request, or through [`ChannelManager::force_close_without_broadcasting_txn`] to reject the
/// request.
///
/// [`ChannelManager::accept_inbound_channel`]: crate::ln::channelmanager::ChannelManager::accept_inbound_channel
- /// [`ChannelManager::force_close_channel`]: crate::ln::channelmanager::ChannelManager::force_close_channel
+ /// [`ChannelManager::force_close_without_broadcasting_txn`]: crate::ln::channelmanager::ChannelManager::force_close_without_broadcasting_txn
counterparty_node_id: PublicKey,
/// The channel value of the requested channel.
funding_satoshis: u64,