let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
let error_message = "Channel force-closed";
nodes[1].node.force_close_broadcasting_latest_txn(&chan.2, &nodes[0].node.get_our_node_id(), error_message.to_string()).unwrap();
- check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed, [nodes[0].node.get_our_node_id()], 100000);
+ check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }, [nodes[0].node.get_our_node_id()], 100000);
let mut added_monitors = nodes[1].chain_monitor.added_monitors.lock().unwrap();
// Set the store's directory to read-only, which should result in
// a failure when we then attempt to persist a channel update.
let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
let error_message = "Channel force-closed";
nodes[1].node.force_close_broadcasting_latest_txn(&chan.2, &nodes[0].node.get_our_node_id(), error_message.to_string()).unwrap();
- check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed, [nodes[0].node.get_our_node_id()], 100000);
+ check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }, [nodes[0].node.get_our_node_id()], 100000);
let mut added_monitors = nodes[1].chain_monitor.added_monitors.lock().unwrap();
let update_map = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap();
let update_id = update_map.get(&added_monitors[0].1.channel_id()).unwrap();
// updates.
let error_message = "Channel force-closed";
nodes[0].node.force_close_broadcasting_latest_txn(&nodes[0].node.list_channels()[0].channel_id, &nodes[1].node.get_our_node_id(), error_message.to_string()).unwrap();
- check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed, [nodes[1].node.get_our_node_id()], 100000);
+ check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }, [nodes[1].node.get_our_node_id()], 100000);
check_closed_broadcast!(nodes[0], true);
check_added_monitors!(nodes[0], 1);
F::Target: FeeEstimator,
L::Target: Logger,
{
- let (claimable_outpoints, _) = self.generate_claimable_outpoints_and_watch_outputs(ClosureReason::HolderForceClosed);
+ let (claimable_outpoints, _) = self.generate_claimable_outpoints_and_watch_outputs(ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) });
self.onchain_tx_handler.update_claims_view_from_requests(
claimable_outpoints, self.best_block.height, self.best_block.height, broadcaster,
fee_estimator, logger
/// Closure generated from [`ChannelManager::force_close_channel`], called by the user.
///
/// [`ChannelManager::force_close_channel`]: crate::ln::channelmanager::ChannelManager::force_close_channel
- HolderForceClosed,
+ HolderForceClosed {
+ /// Whether or not the latest transaction was broadcasted when the channel was force
+ /// closed.
+ ///
+ /// Channels closed using [`ChannelManager::force_close_broadcasting_latest_txn`] will have
+ /// this field set to true, whereas channels closed using [`ChannelManager::force_close_without_broadcasting_txn`]
+ /// or force-closed prior to being funded will have this field set to false.
+ ///
+ /// This will be `None` for objects generated or written by LDK 0.0.123 and
+ /// earlier.
+ ///
+ /// [`ChannelManager::force_close_broadcasting_latest_txn`]: crate::ln::channelmanager::ChannelManager::force_close_broadcasting_latest_txn
+ /// [`ChannelManager::force_close_without_broadcasting_txn`]: crate::ln::channelmanager::ChannelManager::force_close_without_broadcasting_txn
+ broadcasted_latest_txn: Option<bool>,
+ },
/// The channel was closed after negotiating a cooperative close and we've now broadcasted
/// the cooperative close transaction. Note the shutdown may have been initiated by us.
///
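For context (not part of this diff): a minimal sketch of how a downstream event consumer can match on the new struct variant, assuming the crate is consumed as `lightning` at this revision. The `on_event` function and log strings are hypothetical; only `Event::ChannelClosed`, `ClosureReason::HolderForceClosed`, and `ChannelId`'s `Display` impl come from LDK.

```rust
use lightning::events::{ClosureReason, Event};

// Hypothetical dispatcher, not LDK API: inspect why a channel closed.
fn on_event(event: Event) {
    if let Event::ChannelClosed { channel_id, reason, .. } = event {
        match reason {
            // We force-closed and put the latest commitment transaction on chain.
            ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) } =>
                println!("channel {} force-closed; latest tx broadcast", channel_id),
            // Closed via force_close_without_broadcasting_txn, or before funding.
            ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(false) } =>
                println!("channel {} force-closed; nothing broadcast", channel_id),
            // Deserialized from LDK 0.0.123 or earlier: broadcast status unknown.
            ClosureReason::HolderForceClosed { broadcasted_latest_txn: None } =>
                println!("channel {} force-closed; broadcast status unknown", channel_id),
            _ => {},
        }
    }
}
```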
ClosureReason::CounterpartyForceClosed { peer_msg } => {
f.write_fmt(format_args!("counterparty force-closed with message: {}", peer_msg))
},
- ClosureReason::HolderForceClosed => f.write_str("user force-closed the channel"),
+ ClosureReason::HolderForceClosed { broadcasted_latest_txn } => {
+ f.write_str("user force-closed the channel")?;
+ if let Some(broadcasted) = broadcasted_latest_txn {
+ write!(f, " and {} the latest transaction", if *broadcasted { "broadcasted" } else { "did not broadcast" })
+ } else {
+ Ok(())
+ }
+ },
ClosureReason::LegacyCooperativeClosure => f.write_str("the channel was cooperatively closed"),
ClosureReason::CounterpartyInitiatedCooperativeClosure => f.write_str("the channel was cooperatively closed by our peer"),
ClosureReason::LocallyInitiatedCooperativeClosure => f.write_str("the channel was cooperatively closed by us"),
impl_writeable_tlv_based_enum_upgradable!(ClosureReason,
(0, CounterpartyForceClosed) => { (1, peer_msg, required) },
(1, FundingTimedOut) => {},
- (2, HolderForceClosed) => {},
+ (2, HolderForceClosed) => { (1, broadcasted_latest_txn, option) },
(6, CommitmentTxConfirmed) => {},
(4, LegacyCooperativeClosure) => {},
(8, ProcessingError) => { (1, err, required) },
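Serialization note: `broadcasted_latest_txn` is written as TLV type 1 (odd) inside the existing variant type 2, so under LDK's even/odd TLV convention this should be downgrade-safe — readers from 0.0.123 and earlier skip the unknown odd field, while this version decodes pre-existing `HolderForceClosed` data with the field as `None`, matching the documentation above.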
nodes[1].node.force_close_broadcasting_latest_txn(&chan_id, &nodes[0].node.get_our_node_id(), error_message.to_string()).unwrap();
check_added_monitors(&nodes[1], 1);
check_closed_broadcast(&nodes[1], 1, true);
- check_closed_event(&nodes[1], 1, ClosureReason::HolderForceClosed, false, &[nodes[0].node.get_our_node_id()], 100_000);
+ check_closed_event(&nodes[1], 1, ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }, false, &[nodes[0].node.get_our_node_id()], 100_000);
} else {
// We'll connect blocks until the sender has to go onchain to time out the HTLC.
connect_blocks(&nodes[0], TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS + 1);
// PaymentPathFailed event
assert_eq!(nodes[0].node.list_channels().len(), 0);
- check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed, [nodes[1].node.get_our_node_id()], 100000);
+ check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }, [nodes[1].node.get_our_node_id()], 100000);
}
#[test]
chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
nodes[1].node.force_close_broadcasting_latest_txn(&chan_id_bc, &nodes[2].node.get_our_node_id(), error_message.to_string()).unwrap();
check_closed_broadcast(&nodes[1], 1, true);
- check_closed_event(&nodes[1], 1, ClosureReason::HolderForceClosed, false, &[nodes[2].node.get_our_node_id()], 100000);
+ check_closed_event(&nodes[1], 1, ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }, false, &[nodes[2].node.get_our_node_id()], 100000);
}
chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
nodes[1].node.force_close_broadcasting_latest_txn(&chan_id_ab, &nodes[0].node.get_our_node_id(), error_message.to_string()).unwrap();
check_closed_broadcast(&nodes[1], 1, true);
- check_closed_event(&nodes[1], 1, ClosureReason::HolderForceClosed, false, &[nodes[0].node.get_our_node_id()], 100000);
+ check_closed_event(&nodes[1], 1, ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }, false, &[nodes[0].node.get_our_node_id()], 100000);
}
// Now reload node B
let error_message = "Channel force-closed";
nodes[0].node.force_close_broadcasting_latest_txn(&chan_id_ab, &nodes[1].node.get_our_node_id(), error_message.to_string()).unwrap();
- check_closed_event(&nodes[0], 1, ClosureReason::HolderForceClosed, false, &[nodes[1].node.get_our_node_id()], 100000);
+ check_closed_event(&nodes[0], 1, ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }, false, &[nodes[1].node.get_our_node_id()], 100000);
let as_closing_tx = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
assert_eq!(as_closing_tx.len(), 1);
nodes[0].node.force_close_broadcasting_latest_txn(&chan_id_ab, &nodes[1].node.get_our_node_id(), error_message.to_string()).unwrap();
check_added_monitors!(nodes[0], 1);
check_closed_broadcast!(nodes[0], true);
- check_closed_event(&nodes[0], 1, ClosureReason::HolderForceClosed, false, &[nodes[1].node.get_our_node_id()], 100_000);
+ check_closed_event(&nodes[0], 1, ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }, false, &[nodes[1].node.get_our_node_id()], 100_000);
let as_closing_tx = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
mine_transaction_without_consistency_checks(&nodes[1], &as_closing_tx[0]);
}
}
} else {
let mut chan_phase = remove_channel_phase!(self, chan_phase_entry);
- shutdown_result = Some(chan_phase.context_mut().force_shutdown(false, ClosureReason::HolderForceClosed));
+ shutdown_result = Some(chan_phase.context_mut().force_shutdown(false, ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(false) }));
}
},
hash_map::Entry::Vacant(_) => {
let closure_reason = if let Some(peer_msg) = peer_msg {
ClosureReason::CounterpartyForceClosed { peer_msg: UntrustedString(peer_msg.to_string()) }
} else {
- ClosureReason::HolderForceClosed
+ ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(broadcast) }
};
let logger = WithContext::from(&self.logger, Some(*peer_node_id), Some(*channel_id), None);
if let hash_map::Entry::Occupied(chan_phase_entry) = peer_state.channel_by_id.entry(channel_id.clone()) {
log_error!(logger,
"Force-closing pending channel with ID {} for not establishing in a timely manner", chan_id);
update_maps_on_chan_removal!(self, &context);
- shutdown_channels.push(context.force_shutdown(false, ClosureReason::HolderForceClosed));
+ shutdown_channels.push(context.force_shutdown(false, ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(false) }));
pending_msg_events.push(MessageSendEvent::HandleError {
node_id: counterparty_node_id,
action: msgs::ErrorAction::SendErrorMessage {
let reason = if let MonitorEvent::HolderForceClosedWithInfo { reason, .. } = monitor_event {
reason
} else {
- ClosureReason::HolderForceClosed
+ ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }
};
failed_channels.push(chan.context.force_shutdown(false, reason.clone()));
if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
nodes[0].node.force_close_channel_with_peer(&chan.2, &nodes[1].node.get_our_node_id(), None, true).unwrap();
check_added_monitors!(nodes[0], 1);
- check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed, [nodes[1].node.get_our_node_id()], 100000);
+ check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }, [nodes[1].node.get_our_node_id()], 100000);
// Confirm that the channel_update was not sent immediately to node[1] but was cached.
let node_1_events = nodes[1].node.get_and_clear_pending_msg_events();
nodes[0].node.force_close_broadcasting_latest_txn(&chan.2, &nodes[1].node.get_our_node_id(), error_message.to_string()).unwrap();
check_closed_broadcast!(nodes[0], true);
check_added_monitors!(nodes[0], 1);
- check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed, [nodes[1].node.get_our_node_id()], 100000);
+ check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }, [nodes[1].node.get_our_node_id()], 100000);
{
// Assert that nodes[1] is awaiting removal for nodes[0] once nodes[1] has been
nodes[0].node.force_close_broadcasting_latest_txn(&chan_id, &nodes[1].node.get_our_node_id(), error_message.to_string()).unwrap();
check_closed_broadcast(&nodes[0], 1, true);
check_added_monitors(&nodes[0], 1);
- check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed, [nodes[1].node.get_our_node_id()], 100000);
+ check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }, [nodes[1].node.get_our_node_id()], 100000);
{
let txn = nodes[0].tx_broadcaster.txn_broadcast();
assert_eq!(txn.len(), 1);
nodes[1].node.force_close_broadcasting_latest_txn(&chan_1.2, &nodes[0].node.get_our_node_id(), error_message.to_string()).unwrap();
check_added_monitors!(nodes[1], 1);
check_closed_broadcast!(nodes[1], true);
- check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed, [nodes[0].node.get_our_node_id()], 100000);
+ check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }, [nodes[0].node.get_our_node_id()], 100000);
{
let mut node_txn = test_txn_broadcast(&nodes[1], &chan_1, None, HTLCType::NONE);
assert_eq!(node_txn.len(), 1);
check_closed_broadcast!(nodes[2], true);
assert_eq!(nodes[1].node.list_channels().len(), 0);
assert_eq!(nodes[2].node.list_channels().len(), 1);
- check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed, [nodes[2].node.get_our_node_id()], 100000);
+ check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }, [nodes[2].node.get_our_node_id()], 100000);
check_closed_event!(nodes[2], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
macro_rules! claim_funds {
check_closed_broadcast!(nodes[3], true);
assert_eq!(nodes[2].node.list_channels().len(), 0);
assert_eq!(nodes[3].node.list_channels().len(), 1);
- check_closed_event!(nodes[2], 1, ClosureReason::HolderForceClosed, [nodes[3].node.get_our_node_id()], 100000);
+ check_closed_event!(nodes[2], 1, ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }, [nodes[3].node.get_our_node_id()], 100000);
check_closed_event!(nodes[3], 1, ClosureReason::CommitmentTxConfirmed, [nodes[2].node.get_our_node_id()], 100000);
// Drop the ChannelMonitor for the previous channel to avoid it broadcasting transactions and
connect_blocks(&nodes[0], TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS + 1);
check_closed_broadcast!(nodes[0], true);
check_added_monitors!(nodes[0], 1);
- check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed, [nodes[1].node.get_our_node_id()], 100000);
+ check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }, [nodes[1].node.get_our_node_id()], 100000);
let node_txn = nodes[0].tx_broadcaster.unique_txn_broadcast();
assert_eq!(node_txn.len(), 2);
nodes[2].node.force_close_broadcasting_latest_txn(&payment_event.commitment_msg.channel_id, &nodes[1].node.get_our_node_id(), error_message.to_string()).unwrap();
check_closed_broadcast!(nodes[2], true);
check_added_monitors!(nodes[2], 1);
- check_closed_event!(nodes[2], 1, ClosureReason::HolderForceClosed, [nodes[1].node.get_our_node_id()], 100000);
+ check_closed_event!(nodes[2], 1, ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }, [nodes[1].node.get_our_node_id()], 100000);
let commitment_tx = {
let mut node_txn = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap();
// Note that we don't bother broadcasting the HTLC-Success transaction here as we don't
nodes[1].node.force_close_broadcasting_latest_txn(&chan.2, &nodes[0].node.get_our_node_id(), error_message.to_string()).unwrap();
check_closed_broadcast!(nodes[1], true);
check_added_monitors!(nodes[1], 1);
- check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed, [nodes[0].node.get_our_node_id()], 100000);
+ check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }, [nodes[0].node.get_our_node_id()], 100000);
let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
assert_eq!(node_txn.len(), 1);
check_spends!(node_txn[0], chan.3);
nodes[0].node.force_close_broadcasting_latest_txn(&chan.2, &nodes[1].node.get_our_node_id(), error_message.to_string()).unwrap();
check_closed_broadcast!(nodes[0], true);
check_added_monitors!(nodes[0], 1);
- check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed, [nodes[1].node.get_our_node_id()], 100000);
+ check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }, [nodes[1].node.get_our_node_id()], 100000);
let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
assert_eq!(node_txn.len(), 1);
nodes[force_closing_node].node.force_close_broadcasting_latest_txn(&chan_ab.2, &nodes[counterparty_node].node.get_our_node_id(), error_message.to_string()).unwrap();
check_closed_broadcast!(nodes[force_closing_node], true);
check_added_monitors!(nodes[force_closing_node], 1);
- check_closed_event!(nodes[force_closing_node], 1, ClosureReason::HolderForceClosed, [nodes[counterparty_node].node.get_our_node_id()], 100000);
+ check_closed_event!(nodes[force_closing_node], 1, ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }, [nodes[counterparty_node].node.get_our_node_id()], 100000);
if go_onchain_before_fulfill {
let txn_to_broadcast = match broadcast_alice {
true => alice_txn.clone(),
let error_message = "Channel force-closed";
nodes[1].node.force_close_broadcasting_latest_txn(&channel_id, &nodes[2].node.get_our_node_id(), error_message.to_string()).unwrap();
check_closed_broadcast!(nodes[1], true);
- check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed, [nodes[2].node.get_our_node_id()], 100000);
+ check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }, [nodes[2].node.get_our_node_id()], 100000);
check_added_monitors!(nodes[1], 1);
let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
assert_eq!(node_txn.len(), 1);
},
_ => panic!("Unexpected event"),
}
- check_closed_event(&nodes[0], 1, ClosureReason::HolderForceClosed, false, &[nodes[1].node.get_our_node_id()], 100000);
+ check_closed_event(&nodes[0], 1, ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(false) }, false, &[nodes[1].node.get_our_node_id()], 100000);
}
#[test]
},
_ => panic!("Unexpected event"),
}
- check_closed_event(&nodes[1], 1, ClosureReason::HolderForceClosed, false, &[nodes[0].node.get_our_node_id()], 100000);
+ check_closed_event(&nodes[1], 1, ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(false) }, false, &[nodes[0].node.get_our_node_id()], 100000);
}
#[test]
// Since we disconnected from peer and did not connect back within time,
// we should have forced-closed the channel by now.
- check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed, [nodes[1].node.get_our_node_id()], 100000);
+ check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(false) }, [nodes[1].node.get_our_node_id()], 100000);
assert_eq!(nodes[0].node.list_channels().len(), 0);
{
_ => panic!("Unexpected event"),
}
check_added_monitors(closing_node, 1);
- check_closed_event(closing_node, 1, ClosureReason::HolderForceClosed, false, &[other_node.node.get_our_node_id()], 1_000_000);
+ check_closed_event(closing_node, 1, ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }, false, &[other_node.node.get_our_node_id()], 1_000_000);
let commitment_tx = {
let mut txn = closing_node.tx_broadcaster.txn_broadcast();
assert_eq!(nodes[0].chain_monitor.chain_monitor.list_monitors().len(), 1);
// First archive should set balances_empty_height to current block height
nodes[0].chain_monitor.chain_monitor.archive_fully_resolved_channel_monitors();
assert_eq!(nodes[0].chain_monitor.chain_monitor.list_monitors().len(), 1);
connect_blocks(&nodes[0], 4032);
// Second call after 4032 blocks, should archive the monitor
nodes[0].node.force_close_broadcasting_latest_txn(&chan_id, &nodes[1].node.get_our_node_id(), error_message.to_string()).unwrap();
check_added_monitors!(nodes[0], 1);
check_closed_broadcast!(nodes[0], true);
- check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed, [nodes[1].node.get_our_node_id()], 1000000);
+ check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }, [nodes[1].node.get_our_node_id()], 1000000);
let commitment_tx = {
let mut txn = nodes[0].tx_broadcaster.unique_txn_broadcast();
assert_eq!(txn.len(), 1);
nodes[0].node.force_close_broadcasting_latest_txn(&chan_id, &nodes[1].node.get_our_node_id(), error_message.to_string()).unwrap();
check_added_monitors(&nodes[0], 1);
check_closed_broadcast(&nodes[0], 1, true);
- check_closed_event!(&nodes[0], 1, ClosureReason::HolderForceClosed, false,
+ check_closed_event!(&nodes[0], 1, ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }, false,
[nodes[1].node.get_our_node_id()], 100000);
let commitment_tx = {
nodes[0].node.force_close_broadcasting_latest_txn(&chan_id, &nodes[1].node.get_our_node_id(), error_message.to_string()).unwrap();
check_added_monitors(&nodes[0], 1);
check_closed_broadcast(&nodes[0], 1, true);
- check_closed_event!(&nodes[0], 1, ClosureReason::HolderForceClosed, false,
+ check_closed_event!(&nodes[0], 1, ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }, false,
[nodes[1].node.get_our_node_id()], 100000);
let commitment_tx = {
nodes[0].node.force_close_broadcasting_latest_txn(&nodes[0].node.list_channels()[0].channel_id, &nodes[1].node.get_our_node_id(), error_message.to_string()).unwrap();
check_closed_broadcast!(nodes[0], true);
check_added_monitors!(nodes[0], 1);
- check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed, [nodes[1].node.get_our_node_id()], 100000);
+ check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }, [nodes[1].node.get_our_node_id()], 100000);
nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
pass_failed_payment_back(&nodes[0], &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]], false, payment_hash, PaymentFailureReason::RecipientRejected);
} else {
nodes[1].node.force_close_broadcasting_latest_txn(&chan_bd, &nodes[3].node.get_our_node_id(), error_message.to_string()).unwrap();
- check_closed_event!(&nodes[1], 1, ClosureReason::HolderForceClosed, false,
+ check_closed_event!(&nodes[1], 1, ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }, false,
[nodes[3].node.get_our_node_id()], 1000000);
check_closed_broadcast(&nodes[1], 1, true);
let bs_tx = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
nodes[0].node.force_close_all_channels_broadcasting_latest_txn(error_message.to_string());
check_added_monitors!(nodes[0], 1);
- check_closed_event!(&nodes[0], 1, ClosureReason::HolderForceClosed, [nodes[1].node.get_our_node_id()], 100000);
+ check_closed_event!(&nodes[0], 1, ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }, [nodes[1].node.get_our_node_id()], 100000);
let _ = get_err_msg(&nodes[0], &nodes[1].node.get_our_node_id());
}
nodes[0].node.force_close_without_broadcasting_txn(&chan.2, &nodes[1].node.get_our_node_id(), error_message.to_string()).unwrap();
check_added_monitors!(nodes[0], 1);
- check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed, [nodes[1].node.get_our_node_id()], 1000000);
+ check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(false) }, [nodes[1].node.get_our_node_id()], 1000000);
{
let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
assert_eq!(node_txn.len(), 0);
assert_eq!(cs_commitment_tx.len(), if claim_htlc { 2 } else { 1 });
check_added_monitors!(nodes[2], 1);
- check_closed_event!(nodes[2], 1, ClosureReason::HolderForceClosed, [nodes[1].node.get_our_node_id()], 100000);
+ check_closed_event!(nodes[2], 1, ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }, [nodes[1].node.get_our_node_id()], 100000);
check_closed_broadcast!(nodes[2], true);
let chan_0_monitor_serialized = get_monitor!(nodes[1], chan_id_1).encode();
nodes[0].node.force_close_broadcasting_latest_txn(&chan_id, &nodes[1].node.get_our_node_id(), error_message.to_string()).unwrap();
check_closed_broadcast(&nodes[0], 1, true);
check_added_monitors(&nodes[0], 1);
- check_closed_event(&nodes[0], 1, ClosureReason::HolderForceClosed, false, &[nodes[1].node.get_our_node_id()], 100000);
+ check_closed_event(&nodes[0], 1, ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }, false, &[nodes[1].node.get_our_node_id()], 100000);
nodes[1].node.force_close_broadcasting_latest_txn(&chan_id, &nodes[0].node.get_our_node_id(), error_message.to_string()).unwrap();
check_closed_broadcast(&nodes[1], 1, true);
check_added_monitors(&nodes[1], 1);
- check_closed_event(&nodes[1], 1, ClosureReason::HolderForceClosed, false, &[nodes[0].node.get_our_node_id()], 100000);
+ check_closed_event(&nodes[1], 1, ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }, false, &[nodes[0].node.get_our_node_id()], 100000);
let mut txn = nodes[0].tx_broadcaster.txn_broadcast();
assert_eq!(txn.len(), 1);
nodes[0].node.force_close_broadcasting_latest_txn(&chan_id, &nodes[1].node.get_our_node_id(), error_message.to_string()).unwrap();
check_closed_broadcast(&nodes[0], 1, true);
check_added_monitors(&nodes[0], 1);
- check_closed_event(&nodes[0], 1, ClosureReason::HolderForceClosed, false, &[nodes[1].node.get_our_node_id()], 100000);
+ check_closed_event(&nodes[0], 1, ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }, false, &[nodes[1].node.get_our_node_id()], 100000);
let mut txn = nodes[0].tx_broadcaster.txn_broadcast();
assert_eq!(txn.len(), 1);
nodes[1].node.force_close_broadcasting_latest_txn(&chan_id, &nodes[0].node.get_our_node_id(), error_message.to_string()).unwrap();
check_closed_broadcast(&nodes[1], 1, true);
check_added_monitors(&nodes[1], 1);
- check_closed_event(&nodes[1], 1, ClosureReason::HolderForceClosed, false, &[nodes[0].node.get_our_node_id()], 100_000);
+ check_closed_event(&nodes[1], 1, ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }, false, &[nodes[0].node.get_our_node_id()], 100_000);
let commitment_b = {
let mut txn = nodes[1].tx_broadcaster.txn_broadcast();
let _open_chan = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
nodes[0].node.close_channel(&chan_id, &nodes[1].node.get_our_node_id()).unwrap();
- check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed, [nodes[1].node.get_our_node_id()], 1_000_000);
+ check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(false) }, [nodes[1].node.get_our_node_id()], 1_000_000);
}
#[test]
assert!(nodes[1].node.list_channels().is_empty());
check_closed_broadcast!(nodes[0], true);
check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
- check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed, [nodes[0].node.get_our_node_id()], 100000);
+ check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }, [nodes[0].node.get_our_node_id()], 100000);
}
#[test]
let error_message = "Channel force-closed";
nodes[0].node.force_close_broadcasting_latest_txn(&nodes[0].node.list_channels()[0].channel_id, &nodes[1].node.get_our_node_id(), error_message.to_string()).unwrap();
- check_closed_event(&nodes[0], 1, ClosureReason::HolderForceClosed, false, &[nodes[1].node.get_our_node_id()], 100000);
+ check_closed_event(&nodes[0], 1, ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }, false, &[nodes[1].node.get_our_node_id()], 100000);
check_closed_broadcast!(nodes[0], true);
check_added_monitors!(nodes[0], 1);
let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
let error_message = "Channel force-closed";
nodes[1].node.force_close_broadcasting_latest_txn(&chan.2, &nodes[0].node.get_our_node_id(), error_message.to_string()).unwrap();
- check_closed_event(&nodes[1], 1, ClosureReason::HolderForceClosed, false, &[nodes[0].node.get_our_node_id()], 100000);
+ check_closed_event(&nodes[1], 1, ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }, false, &[nodes[0].node.get_our_node_id()], 100000);
{
let mut added_monitors = nodes[1].chain_monitor.added_monitors.lock().unwrap();
let cmu_map = nodes[1].chain_monitor.monitor_updates.lock().unwrap();
// Force close.
let error_message = "Channel force-closed";
nodes[0].node.force_close_broadcasting_latest_txn(&nodes[0].node.list_channels()[0].channel_id, &nodes[1].node.get_our_node_id(), error_message.to_string()).unwrap();
- check_closed_event(&nodes[0], 1, ClosureReason::HolderForceClosed, false, &[nodes[1].node.get_our_node_id()], 100000);
+ check_closed_event(&nodes[0], 1, ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }, false, &[nodes[1].node.get_our_node_id()], 100000);
check_closed_broadcast!(nodes[0], true);
check_added_monitors!(nodes[0], 1);