}
// Force all channels onto the chain (and time out claim txn)
- self.manager.force_close_all_channels();
+ self.manager.force_close_all_channels_broadcasting_latest_txn();
}
}
}
let channel_id = get_slice!(1)[0] as usize;
if channel_id >= channels.len() { return; }
channels.sort_by(|a, b| { a.channel_id.cmp(&b.channel_id) });
- channelmanager.force_close_channel(&channels[channel_id].channel_id, &channels[channel_id].counterparty.node_id).unwrap();
+ channelmanager.force_close_broadcasting_latest_txn(&channels[channel_id].channel_id, &channels[channel_id].counterparty.node_id).unwrap();
},
// 15 is above
_ => return,
}
// Force-close the channel.
- nodes[0].node.force_close_channel(&OutPoint { txid: tx.txid(), index: 0 }.to_channel_id(), &nodes[1].node.get_our_node_id()).unwrap();
+ nodes[0].node.force_close_broadcasting_latest_txn(&OutPoint { txid: tx.txid(), index: 0 }.to_channel_id(), &nodes[1].node.get_our_node_id()).unwrap();
// Check that the force-close updates are persisted.
check_persisted_data!(nodes[0].node, filepath.clone());
let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].no_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));
// Force close the channel and check that the SpendableOutputs event was handled.
- nodes[0].node.force_close_channel(&nodes[0].node.list_channels()[0].channel_id, &nodes[1].node.get_our_node_id()).unwrap();
+ nodes[0].node.force_close_broadcasting_latest_txn(&nodes[0].node.list_channels()[0].channel_id, &nodes[1].node.get_our_node_id()).unwrap();
let commitment_tx = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().pop().unwrap();
confirm_transaction_depth(&mut nodes[0], &commitment_tx, BREAKDOWN_TIMEOUT as u32);
let event = receiver
// Force close because cooperative close doesn't result in any persisted
// updates.
- nodes[0].node.force_close_channel(&nodes[0].node.list_channels()[0].channel_id, &nodes[1].node.get_our_node_id()).unwrap();
+ nodes[0].node.force_close_broadcasting_latest_txn(&nodes[0].node.list_channels()[0].channel_id, &nodes[1].node.get_our_node_id()).unwrap();
check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed);
check_closed_broadcast!(nodes[0], true);
check_added_monitors!(nodes[0], 1);
let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
let chan = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
- nodes[1].node.force_close_channel(&chan.2, &nodes[0].node.get_our_node_id()).unwrap();
+ nodes[1].node.force_close_broadcasting_latest_txn(&chan.2, &nodes[0].node.get_our_node_id()).unwrap();
check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed);
let mut added_monitors = nodes[1].chain_monitor.added_monitors.lock().unwrap();
let update_map = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap();
let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
let chan = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
- nodes[1].node.force_close_channel(&chan.2, &nodes[0].node.get_our_node_id()).unwrap();
+ nodes[1].node.force_close_broadcasting_latest_txn(&chan.2, &nodes[0].node.get_our_node_id()).unwrap();
check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed);
let mut added_monitors = nodes[1].chain_monitor.added_monitors.lock().unwrap();
let update_map = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap();
}
// ...and make sure we can force-close a frozen channel
- nodes[0].node.force_close_channel(&channel_id, &nodes[1].node.get_our_node_id()).unwrap();
+ nodes[0].node.force_close_broadcasting_latest_txn(&channel_id, &nodes[1].node.get_our_node_id()).unwrap();
check_added_monitors!(nodes[0], 1);
check_closed_broadcast!(nodes[0], true);
Ignore(String),
Warn(String),
Close(String),
- CloseDelayBroadcast(String),
}
impl fmt::Debug for ChannelError {
&ChannelError::Ignore(ref e) => write!(f, "Ignore : {}", e),
&ChannelError::Warn(ref e) => write!(f, "Warn : {}", e),
&ChannelError::Close(ref e) => write!(f, "Close : {}", e),
- &ChannelError::CloseDelayBroadcast(ref e) => write!(f, "CloseDelayBroadcast : {}", e)
}
}
}
/// May panic if some calls other than message-handling calls (which will all Err immediately)
/// have been called between remove_uncommitted_htlcs_and_mark_paused and this call.
+ ///
+ /// Some links printed in log lines are included here to check them during build (when run with
+ /// `cargo doc --document-private-items`):
+ /// [`super::channelmanager::ChannelManager::force_close_without_broadcasting_txn`] and
+ /// [`super::channelmanager::ChannelManager::force_close_all_channels_without_broadcasting_txn`].
pub fn channel_reestablish<L: Deref>(&mut self, msg: &msgs::ChannelReestablish, logger: &L,
node_pk: PublicKey, genesis_block_hash: BlockHash, best_block: &BestBlock)
-> Result<ReestablishResponses, ChannelError> where L::Target: Logger {
return Err(ChannelError::Close("Peer sent a garbage channel_reestablish with secret key not matching the commitment height provided".to_owned()));
}
if msg.next_remote_commitment_number > INITIAL_COMMITMENT_NUMBER - self.cur_holder_commitment_transaction_number {
- return Err(ChannelError::CloseDelayBroadcast(
- "We have fallen behind - we have received proof that if we broadcast remote is going to claim our funds - we can't do any automated broadcasting".to_owned()
- ));
+ macro_rules! log_and_panic {
+ ($err_msg: expr) => {
+ log_error!(logger, $err_msg, log_bytes!(self.channel_id), log_pubkey!(self.counterparty_node_id));
+ panic!($err_msg, log_bytes!(self.channel_id), log_pubkey!(self.counterparty_node_id));
+ }
+ }
+ log_and_panic!("We have fallen behind - we have received proof that if we broadcast our counterparty is going to claim all our funds.\n\
+ This implies you have restarted with lost ChannelMonitor and ChannelManager state, the first of which is a violation of the LDK chain::Watch requirements.\n\
+ More specifically, this means you have a bug in your implementation that can cause loss of funds, or you are running with an old backup, which is unsafe.\n\
+ If you have restored from an old backup and wish to force-close channels and return to operation, you should start up, call\n\
+ ChannelManager::force_close_without_broadcasting_txn on channel {} with counterparty {} or\n\
+ ChannelManager::force_close_all_channels_without_broadcasting_txn, then reconnect to peer(s).\n\
+ Note that due to a long-standing bug in lnd, you may have to reach out to peers running lnd-based nodes to ask them to manually force-close channels.\n\
+ See https://github.com/lightningdevkit/rust-lightning/issues/1565 for more info.");
}
},
OptionalField::Absent => {}
// now!
match self.free_holding_cell_htlcs(logger) {
Err(ChannelError::Close(msg)) => Err(ChannelError::Close(msg)),
- Err(ChannelError::Warn(_)) | Err(ChannelError::Ignore(_)) | Err(ChannelError::CloseDelayBroadcast(_)) =>
+ Err(ChannelError::Warn(_)) | Err(ChannelError::Ignore(_)) =>
panic!("Got non-channel-failing result from free_holding_cell_htlcs"),
Ok((Some((commitment_update, monitor_update)), holding_cell_failed_htlcs)) => {
Ok(ReestablishResponses {
},
},
},
- ChannelError::CloseDelayBroadcast(msg) => LightningError {
- err: msg.clone(),
- action: msgs::ErrorAction::SendErrorMessage {
- msg: msgs::ErrorMessage {
- channel_id,
- data: msg
- },
- },
- },
},
chan_id: None,
shutdown_finish: None,
(true, MsgHandleErrInternal::from_finish_shutdown(msg, *$channel_id, $channel.get_user_id(),
shutdown_res, $self.get_channel_update_for_broadcast(&$channel).ok()))
},
- ChannelError::CloseDelayBroadcast(msg) => {
- log_error!($self.logger, "Channel {} need to be shutdown but closing transactions not broadcast due to {}", log_bytes!($channel_id[..]), msg);
- update_maps_on_chan_removal!($self, $short_to_id, $channel);
- let shutdown_res = $channel.force_shutdown(false);
- (true, MsgHandleErrInternal::from_finish_shutdown(msg, *$channel_id, $channel.get_user_id(),
- shutdown_res, $self.get_channel_update_for_broadcast(&$channel).ok()))
- }
}
}
}
/// `peer_msg` should be set when we receive a message from a peer, but not set when the
/// user closes, which will be re-exposed as the `ChannelClosed` reason.
- fn force_close_channel_with_peer(&self, channel_id: &[u8; 32], peer_node_id: &PublicKey, peer_msg: Option<&String>) -> Result<PublicKey, APIError> {
+ fn force_close_channel_with_peer(&self, channel_id: &[u8; 32], peer_node_id: &PublicKey, peer_msg: Option<&String>, broadcast: bool)
+ -> Result<PublicKey, APIError> {
let mut chan = {
let mut channel_state_lock = self.channel_state.lock().unwrap();
let channel_state = &mut *channel_state_lock;
}
};
log_error!(self.logger, "Force-closing channel {}", log_bytes!(channel_id[..]));
- self.finish_force_close_channel(chan.force_shutdown(true));
+ self.finish_force_close_channel(chan.force_shutdown(broadcast));
if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
let mut channel_state = self.channel_state.lock().unwrap();
channel_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
Ok(chan.get_counterparty_node_id())
}
- /// Force closes a channel, immediately broadcasting the latest local commitment transaction to
- /// the chain and rejecting new HTLCs on the given channel. Fails if `channel_id` is unknown to
- /// the manager, or if the `counterparty_node_id` isn't the counterparty of the corresponding
- /// channel.
- pub fn force_close_channel(&self, channel_id: &[u8; 32], counterparty_node_id: &PublicKey) -> Result<(), APIError> {
+ fn force_close_sending_error(&self, channel_id: &[u8; 32], counterparty_node_id: &PublicKey, broadcast: bool) -> Result<(), APIError> {
let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
- match self.force_close_channel_with_peer(channel_id, counterparty_node_id, None) {
+ match self.force_close_channel_with_peer(channel_id, counterparty_node_id, None, broadcast) {
Ok(counterparty_node_id) => {
self.channel_state.lock().unwrap().pending_msg_events.push(
events::MessageSendEvent::HandleError {
}
}
+ /// Force closes a channel, immediately broadcasting the latest local transaction(s) and
+ /// rejecting new HTLCs on the given channel. Fails if `channel_id` is unknown to
+ /// the manager, or if the `counterparty_node_id` isn't the counterparty of the corresponding
+ /// channel.
+ pub fn force_close_broadcasting_latest_txn(&self, channel_id: &[u8; 32], counterparty_node_id: &PublicKey)
+ -> Result<(), APIError> {
+ self.force_close_sending_error(channel_id, counterparty_node_id, true)
+ }
+
+ /// Force closes a channel, rejecting new HTLCs on the given channel but without broadcasting
+ /// the latest local transaction(s). Fails if `channel_id` is unknown to the manager, or if the
+ /// `counterparty_node_id` isn't the counterparty of the corresponding channel.
+ ///
+ /// You can always get the latest local transaction(s) to broadcast from
+ /// [`ChannelMonitor::get_latest_holder_commitment_txn`].
+ pub fn force_close_without_broadcasting_txn(&self, channel_id: &[u8; 32], counterparty_node_id: &PublicKey)
+ -> Result<(), APIError> {
+ self.force_close_sending_error(channel_id, counterparty_node_id, false)
+ }
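A minimal usage sketch of the recovery flow this API enables, assuming `channel_manager`, `monitor` (the relevant `ChannelMonitor`), `logger`, and `tx_broadcaster` (a `BroadcasterInterface`) are bindings the application already holds:

```rust
// Close every channel without broadcasting the (possibly stale) holder commitment
// transactions, e.g. after restarting from an old backup.
for chan in channel_manager.list_channels() {
    let _ = channel_manager.force_close_without_broadcasting_txn(
        &chan.channel_id, &chan.counterparty.node_id);
}
// If a particular channel's state is known to be current, its latest holder commitment
// transaction(s) can still be fetched from the ChannelMonitor and broadcast manually.
for tx in monitor.get_latest_holder_commitment_txn(&logger) {
    tx_broadcaster.broadcast_transaction(&tx);
}
```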
+
/// Force close all channels, immediately broadcasting the latest local commitment transaction
/// for each to the chain and rejecting new HTLCs on each.
- pub fn force_close_all_channels(&self) {
+ pub fn force_close_all_channels_broadcasting_latest_txn(&self) {
+ for chan in self.list_channels() {
+ let _ = self.force_close_broadcasting_latest_txn(&chan.channel_id, &chan.counterparty.node_id);
+ }
+ }
+
+ /// Force close all channels, rejecting new HTLCs on each but without broadcasting the latest
+ /// local transaction(s).
+ pub fn force_close_all_channels_without_broadcasting_txn(&self) {
for chan in self.list_channels() {
- let _ = self.force_close_channel(&chan.channel_id, &chan.counterparty.node_id);
+ let _ = self.force_close_without_broadcasting_txn(&chan.channel_id, &chan.counterparty.node_id);
}
}
// ChannelClosed event is generated by handle_error for us.
Err(MsgHandleErrInternal::from_finish_shutdown(msg, channel.channel_id(), channel.get_user_id(), channel.force_shutdown(true), self.get_channel_update_for_broadcast(&channel).ok()))
},
- ChannelError::CloseDelayBroadcast(_) => { panic!("Wait is only generated on receipt of channel_reestablish, which is handled by try_chan_entry, we don't bother to support it here"); }
};
handle_errors.push((counterparty_node_id, err));
continue;
for chan in self.list_channels() {
if chan.counterparty.node_id == *counterparty_node_id {
// Untrusted messages from peer; we throw away the error if the id points to a non-existent channel
- let _ = self.force_close_channel_with_peer(&chan.channel_id, counterparty_node_id, Some(&msg.data));
+ let _ = self.force_close_channel_with_peer(&chan.channel_id, counterparty_node_id, Some(&msg.data), true);
}
}
} else {
}
// Untrusted messages from peer; we throw away the error if the id points to a non-existent channel
- let _ = self.force_close_channel_with_peer(&msg.channel_id, counterparty_node_id, Some(&msg.data));
+ let _ = self.force_close_channel_with_peer(&msg.channel_id, counterparty_node_id, Some(&msg.data), true);
}
}
}
send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2], &nodes[3], &nodes[4])[..], 8000000);
// Simple case with no pending HTLCs:
- nodes[1].node.force_close_channel(&chan_1.2, &nodes[0].node.get_our_node_id()).unwrap();
+ nodes[1].node.force_close_broadcasting_latest_txn(&chan_1.2, &nodes[0].node.get_our_node_id()).unwrap();
check_added_monitors!(nodes[1], 1);
check_closed_broadcast!(nodes[1], true);
{
// Simple case of one pending HTLC to HTLC-Timeout (note that the HTLC-Timeout is not
// broadcasted until we reach the timelock time).
- nodes[1].node.force_close_channel(&chan_2.2, &nodes[2].node.get_our_node_id()).unwrap();
+ nodes[1].node.force_close_broadcasting_latest_txn(&chan_2.2, &nodes[2].node.get_our_node_id()).unwrap();
check_closed_broadcast!(nodes[1], true);
check_added_monitors!(nodes[1], 1);
{
// nodes[3] gets the preimage, but nodes[2] already disconnected, resulting in a nodes[2]
// HTLC-Timeout and a nodes[3] claim against it (+ its own announces)
- nodes[2].node.force_close_channel(&chan_3.2, &nodes[3].node.get_our_node_id()).unwrap();
+ nodes[2].node.force_close_broadcasting_latest_txn(&chan_3.2, &nodes[3].node.get_our_node_id()).unwrap();
check_added_monitors!(nodes[2], 1);
check_closed_broadcast!(nodes[2], true);
let node2_commitment_txid;
create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
route_payment(&nodes[0], &[&nodes[1]], 10000000);
- nodes[0].node.force_close_channel(&nodes[0].node.list_channels()[0].channel_id, &nodes[1].node.get_our_node_id()).unwrap();
+ nodes[0].node.force_close_broadcasting_latest_txn(&nodes[0].node.list_channels()[0].channel_id, &nodes[1].node.get_our_node_id()).unwrap();
connect_blocks(&nodes[0], TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS + 1);
check_closed_broadcast!(nodes[0], true);
check_added_monitors!(nodes[0], 1);
// state or updated nodes[1]'s state. Now force-close and broadcast that commitment/HTLC
// transaction and ensure nodes[1] doesn't fail-backwards (this was originally a bug!).
- nodes[2].node.force_close_channel(&payment_event.commitment_msg.channel_id, &nodes[1].node.get_our_node_id()).unwrap();
+ nodes[2].node.force_close_broadcasting_latest_txn(&payment_event.commitment_msg.channel_id, &nodes[1].node.get_our_node_id()).unwrap();
check_closed_broadcast!(nodes[2], true);
check_added_monitors!(nodes[2], 1);
check_closed_event!(nodes[2], 1, ClosureReason::HolderForceClosed);
let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100_000, 98_000_000, InitFeatures::known(), InitFeatures::known());
- nodes[1].node.force_close_channel(&chan.2, &nodes[0].node.get_our_node_id()).unwrap();
+ nodes[1].node.force_close_broadcasting_latest_txn(&chan.2, &nodes[0].node.get_our_node_id()).unwrap();
check_closed_broadcast!(nodes[1], true);
check_added_monitors!(nodes[1], 1);
check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed);
let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100_000, 98_000_000, InitFeatures::known(), InitFeatures::known());
- nodes[0].node.force_close_channel(&chan.2, &nodes[1].node.get_our_node_id()).unwrap();
+ nodes[0].node.force_close_broadcasting_latest_txn(&chan.2, &nodes[1].node.get_our_node_id()).unwrap();
check_closed_broadcast!(nodes[0], true);
check_added_monitors!(nodes[0], 1);
check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed);
} else { assert!(false); }
}
-#[test]
-fn test_data_loss_protect() {
- // We want to be sure that :
- // * we don't broadcast our Local Commitment Tx in case of fallen behind
- // (but this is not quite true - we broadcast during Drop because chanmon is out of sync with chanmgr)
- // * we close channel in case of detecting other being fallen behind
- // * we are able to claim our own outputs thanks to to_remote being static
- // TODO: this test is incomplete and the data_loss_protect implementation is incomplete - see issue #775
+fn do_test_data_loss_protect(reconnect_panicing: bool) {
+ // When we get a data_loss_protect proving we're behind, we immediately panic as the
+ // chain::Watch API requirements have been violated (e.g. the user restored from a backup). The
+ // panic message informs the user they should force-close without broadcasting, which is tested
+ // if `reconnect_panicing` is not set.
let persister;
let logger;
let fee_estimator;
check_added_monitors!(nodes[0], 1);
- nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty(), remote_network_address: None });
- nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty(), remote_network_address: None });
+ if reconnect_panicing {
+ nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty(), remote_network_address: None });
+ nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty(), remote_network_address: None });
- let reestablish_0 = get_chan_reestablish_msgs!(nodes[1], nodes[0]);
+ let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]);
- // Check we don't broadcast any transactions following learning of per_commitment_point from B
- nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &reestablish_0[0]);
- check_added_monitors!(nodes[0], 1);
+ // Check that B sends a warning message (rather than force-closing) when it detects that A
+ // has fallen behind, giving A the possibility to recover.
+ nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &reestablish_1[0]);
+ let warn_msg = "Peer attempted to reestablish channel with a very old local commitment transaction".to_owned();
+ assert!(check_warn_msg!(nodes[1], nodes[0].node.get_our_node_id(), chan.2).contains(&warn_msg));
+
+ {
+ let mut node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
+ // Node B should not broadcast the transaction to force-close the channel!
+ assert!(node_txn.is_empty());
+ }
+
+ let reestablish_0 = get_chan_reestablish_msgs!(nodes[1], nodes[0]);
+ // Check A panics upon seeing proof it has fallen behind.
+ nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &reestablish_0[0]);
+ return; // By this point we should have panic'ed!
+ }
+ nodes[0].node.force_close_without_broadcasting_txn(&chan.2, &nodes[1].node.get_our_node_id()).unwrap();
+ check_added_monitors!(nodes[0], 1);
+ check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed);
{
- let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
+ let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
assert_eq!(node_txn.len(), 0);
}
- let mut reestablish_1 = Vec::with_capacity(1);
for msg in nodes[0].node.get_and_clear_pending_msg_events() {
- if let MessageSendEvent::SendChannelReestablish { ref node_id, ref msg } = msg {
- assert_eq!(*node_id, nodes[1].node.get_our_node_id());
- reestablish_1.push(msg.clone());
- } else if let MessageSendEvent::BroadcastChannelUpdate { .. } = msg {
+ if let MessageSendEvent::BroadcastChannelUpdate { .. } = msg {
} else if let MessageSendEvent::HandleError { ref action, .. } = msg {
match action {
&ErrorAction::SendErrorMessage { ref msg } => {
- assert_eq!(msg.data, "We have fallen behind - we have received proof that if we broadcast remote is going to claim our funds - we can't do any automated broadcasting");
+ assert_eq!(msg.data, "Channel force-closed");
},
_ => panic!("Unexpected event!"),
}
} else {
- panic!("Unexpected event")
+ panic!("Unexpected event {:?}", msg)
}
}
- // Check we close channel detecting A is fallen-behind
- // Check that we sent the warning message when we detected that A has fallen behind,
- // and give the possibility for A to recover from the warning.
- nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &reestablish_1[0]);
- let warn_msg = "Peer attempted to reestablish channel with a very old local commitment transaction".to_owned();
- assert!(check_warn_msg!(nodes[1], nodes[0].node.get_our_node_id(), chan.2).contains(&warn_msg));
-
- // Check A is able to claim to_remote output
- let mut node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
- // The node B should not broadcast the transaction to force close the channel!
- assert!(node_txn.is_empty());
- // B should now detect that there is something wrong and should force close the channel.
- let exp_err = "We have fallen behind - we have received proof that if we broadcast remote is going to claim our funds - we can\'t do any automated broadcasting";
- check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: exp_err.to_string() });
-
// After the warning message sent by B, we should not be able to
// use the channel or successfully reconnect to it.
assert!(nodes[0].node.list_usable_channels().is_empty());
check_closed_broadcast!(nodes[1], false);
}
+#[test]
+#[should_panic]
+fn test_data_loss_protect_showing_stale_state_panics() {
+ do_test_data_loss_protect(true);
+}
+
+#[test]
+fn test_force_close_without_broadcast() {
+ do_test_data_loss_protect(false);
+}
+
#[test]
fn test_check_htlc_underpaying() {
// Send payment through A -> B but A is maliciously
_ => panic!("Unexpected event"),
}
- nodes[1].node.force_close_channel(&temp_channel_id, &nodes[0].node.get_our_node_id()).unwrap();
+ nodes[1].node.force_close_broadcasting_latest_txn(&temp_channel_id, &nodes[0].node.get_our_node_id()).unwrap();
let close_msg_ev = nodes[1].node.get_and_clear_pending_msg_events();
assert_eq!(close_msg_ev.len(), 1);
let events = nodes[1].node.get_and_clear_pending_events();
match events[0] {
Event::OpenChannelRequest { temporary_channel_id, .. } => {
- nodes[1].node.force_close_channel(&temporary_channel_id, &nodes[0].node.get_our_node_id()).unwrap();
+ nodes[1].node.force_close_broadcasting_latest_txn(&temporary_channel_id, &nodes[0].node.get_our_node_id()).unwrap();
}
_ => panic!("Unexpected event"),
}
force_closing_node = 1;
counterparty_node = 0;
}
- nodes[force_closing_node].node.force_close_channel(&chan_ab.2, &nodes[counterparty_node].node.get_our_node_id()).unwrap();
+ nodes[force_closing_node].node.force_close_broadcasting_latest_txn(&chan_ab.2, &nodes[counterparty_node].node.get_our_node_id()).unwrap();
check_closed_broadcast!(nodes[force_closing_node], true);
check_added_monitors!(nodes[force_closing_node], 1);
check_closed_event!(nodes[force_closing_node], 1, ClosureReason::HolderForceClosed);
nodes[1].node.peer_disconnected(&nodes[2].node.get_our_node_id(), false);
nodes[2].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
- nodes[1].node.force_close_channel(&channel_id, &nodes[2].node.get_our_node_id()).unwrap();
+ nodes[1].node.force_close_broadcasting_latest_txn(&channel_id, &nodes[2].node.get_our_node_id()).unwrap();
check_closed_broadcast!(nodes[1], true);
check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed);
check_added_monitors!(nodes[1], 1);
// Route a payment, but force-close the channel before the HTLC fulfill message arrives at
// nodes[0].
let (payment_preimage, payment_hash, _) = route_payment(&nodes[0], &[&nodes[1]], 10_000_000);
- nodes[0].node.force_close_channel(&nodes[0].node.list_channels()[0].channel_id, &nodes[1].node.get_our_node_id()).unwrap();
+ nodes[0].node.force_close_broadcasting_latest_txn(&nodes[0].node.list_channels()[0].channel_id, &nodes[1].node.get_our_node_id()).unwrap();
check_closed_broadcast!(nodes[0], true);
check_added_monitors!(nodes[0], 1);
check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed);
// We can use the channel immediately, but won't generate a channel_update until we get confs
send_payment(&nodes[0], &[&nodes[1]], 100_000);
- nodes[0].node.force_close_all_channels();
+ nodes[0].node.force_close_all_channels_broadcasting_latest_txn();
check_added_monitors!(nodes[0], 1);
check_closed_event!(&nodes[0], 1, ClosureReason::HolderForceClosed);
let _ = get_err_msg!(nodes[0], nodes[1].node.get_our_node_id());
/// Indicates a request to open a new channel by a peer.
///
/// To accept the request, call [`ChannelManager::accept_inbound_channel`]. To reject the
- /// request, call [`ChannelManager::force_close_channel`].
+ /// request, call [`ChannelManager::force_close_without_broadcasting_txn`].
///
/// The event is only triggered when a new open channel request is received and the
/// [`UserConfig::manually_accept_inbound_channels`] config flag is set to true.
///
/// [`ChannelManager::accept_inbound_channel`]: crate::ln::channelmanager::ChannelManager::accept_inbound_channel
- /// [`ChannelManager::force_close_channel`]: crate::ln::channelmanager::ChannelManager::force_close_channel
+ /// [`ChannelManager::force_close_without_broadcasting_txn`]: crate::ln::channelmanager::ChannelManager::force_close_without_broadcasting_txn
/// [`UserConfig::manually_accept_inbound_channels`]: crate::util::config::UserConfig::manually_accept_inbound_channels
OpenChannelRequest {
/// The temporary channel ID of the channel requested to be opened.
///
/// When responding to the request, the `temporary_channel_id` should be passed
/// back to the ChannelManager through [`ChannelManager::accept_inbound_channel`] to accept,
- /// or through [`ChannelManager::force_close_channel`] to reject.
+ /// or through [`ChannelManager::force_close_without_broadcasting_txn`] to reject.
///
/// [`ChannelManager::accept_inbound_channel`]: crate::ln::channelmanager::ChannelManager::accept_inbound_channel
- /// [`ChannelManager::force_close_channel`]: crate::ln::channelmanager::ChannelManager::force_close_channel
+ /// [`ChannelManager::force_close_without_broadcasting_txn`]: crate::ln::channelmanager::ChannelManager::force_close_without_broadcasting_txn
temporary_channel_id: [u8; 32],
/// The node_id of the counterparty requesting to open the channel.
///
/// When responding to the request, the `counterparty_node_id` should be passed
/// back to the `ChannelManager` through [`ChannelManager::accept_inbound_channel`] to
- /// accept the request, or through [`ChannelManager::force_close_channel`] to reject the
+ /// accept the request, or through [`ChannelManager::force_close_without_broadcasting_txn`] to reject the
/// request.
///
/// [`ChannelManager::accept_inbound_channel`]: crate::ln::channelmanager::ChannelManager::accept_inbound_channel
- /// [`ChannelManager::force_close_channel`]: crate::ln::channelmanager::ChannelManager::force_close_channel
+ /// [`ChannelManager::force_close_without_broadcasting_txn`]: crate::ln::channelmanager::ChannelManager::force_close_without_broadcasting_txn
counterparty_node_id: PublicKey,
/// The channel value of the requested channel.
funding_satoshis: u64,
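A minimal sketch of handling the manual-accept flow described above, assuming a `channel_manager` binding and a hypothetical `min_acceptable_sats` threshold; the accept path only references [`ChannelManager::accept_inbound_channel`] rather than spelling out its signature:

```rust
// Hypothetical handler for a manually-accepted inbound channel request.
match event {
    Event::OpenChannelRequest { temporary_channel_id, counterparty_node_id, funding_satoshis, .. } => {
        if funding_satoshis >= min_acceptable_sats {
            // Accept via ChannelManager::accept_inbound_channel.
        } else {
            // Reject by force-closing the not-yet-funded channel; nothing is broadcast.
            let _ = channel_manager.force_close_without_broadcasting_txn(
                &temporary_channel_id, &counterparty_node_id);
        }
    },
    _ => {},
}
```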