/// much smaller than a full [`ChannelMonitor`]. However, updates covering a single commitment
/// transaction can still be large: with hundreds of HTLCs pending on the commitment transaction,
/// a single update may reach upwards of 1 MiB in serialized size.
-#[cfg_attr(any(test, fuzzing, feature = "_test_utils"), derive(PartialEq, Eq))]
-#[derive(Clone)]
+#[derive(Clone, PartialEq, Eq)]
#[must_use]
pub struct ChannelMonitorUpdate {
pub(crate) updates: Vec<ChannelMonitorUpdateStep>,
}
-#[cfg_attr(any(test, fuzzing, feature = "_test_utils"), derive(PartialEq, Eq))]
-#[derive(Clone)]
+#[derive(Clone, PartialEq, Eq)]
pub(crate) enum ChannelMonitorUpdateStep {
LatestHolderCommitmentTXInfo {
commitment_tx: HolderCommitmentTransaction,
{
log_info!(logger, "Applying update to monitor {}, bringing update_id from {} to {} with {} changes.",
log_funding_info!(self), self.latest_update_id, updates.update_id, updates.updates.len());
- // ChannelMonitor updates may be applied after force close if we receive a
- // preimage for a broadcasted commitment transaction HTLC output that we'd
- // like to claim on-chain. If this is the case, we no longer have guaranteed
- // access to the monitor's update ID, so we use a sentinel value instead.
+ // ChannelMonitor updates may be applied after force close if we receive a preimage for a
+ // broadcasted commitment transaction HTLC output that we'd like to claim on-chain. If this
+ // is the case, we no longer have guaranteed access to the monitor's update ID, so we use a
+ // sentinel value instead.
+ //
+ // The `ChannelManager` may also queue redundant `ChannelForceClosed` updates if it still
+ // thinks the channel needs to have its commitment transaction broadcast, so we'll allow
+ // them as well.
if updates.update_id == CLOSED_CHANNEL_UPDATE_ID {
assert_eq!(updates.updates.len(), 1);
match updates.updates[0] {
let mut id_to_peer = HashMap::with_capacity(cmp::min(channel_count as usize, 128));
let mut short_to_chan_info = HashMap::with_capacity(cmp::min(channel_count as usize, 128));
let mut channel_closures = Vec::new();
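+ // Monitor updates queued while reading (e.g. to force-close stale channels) are collected here
+ // and handed to the deserialized ChannelManager as background events.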
+ let mut pending_background_events = Vec::new();
for _ in 0..channel_count {
let mut channel: Channel<<SP::Target as SignerProvider>::Signer> = Channel::read(reader, (
&args.entropy_source, &args.signer_provider, best_block_height, &provided_channel_type_features(&args.default_config)
log_error!(args.logger, " The channel will be force-closed and the latest commitment transaction from the ChannelMonitor broadcast.");
log_error!(args.logger, " The ChannelMonitor for channel {} is at update_id {} but the ChannelManager is at update_id {}.",
log_bytes!(channel.channel_id()), monitor.get_latest_update_id(), channel.get_latest_monitor_update_id());
- let (_, mut new_failed_htlcs) = channel.force_shutdown(true);
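+ // Rather than broadcasting the monitor's latest commitment transaction directly, queue the
+ // force-close monitor update so it is applied as a background event once deserialization
+ // completes.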
+ let (monitor_update, mut new_failed_htlcs) = channel.force_shutdown(true);
+ if let Some(monitor_update) = monitor_update {
+ pending_background_events.push(BackgroundEvent::ClosingMonitorUpdate(monitor_update));
+ }
failed_htlcs.append(&mut new_failed_htlcs);
- monitor.broadcast_latest_holder_commitment_txn(&args.tx_broadcaster, &args.logger);
channel_closures.push(events::Event::ChannelClosed {
channel_id: channel.channel_id(),
user_channel_id: channel.get_user_id(),
}
}
- for (funding_txo, monitor) in args.channel_monitors.iter_mut() {
+ for (funding_txo, _) in args.channel_monitors.iter() {
if !funding_txo_set.contains(funding_txo) {
- log_info!(args.logger, "Broadcasting latest holder commitment transaction for closed channel {}", log_bytes!(funding_txo.to_channel_id()));
- monitor.broadcast_latest_holder_commitment_txn(&args.tx_broadcaster, &args.logger);
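+ // This monitor has no corresponding channel, so queue a ChannelForceClosed update at the
+ // sentinel update ID to have its latest holder commitment transaction broadcast.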
+ let monitor_update = ChannelMonitorUpdate {
+ update_id: CLOSED_CHANNEL_UPDATE_ID,
+ updates: vec![ChannelMonitorUpdateStep::ChannelForceClosed { should_broadcast: true }],
+ };
+ pending_background_events.push(BackgroundEvent::ClosingMonitorUpdate((*funding_txo, monitor_update)));
}
}
}
let background_event_count: u64 = Readable::read(reader)?;
- let mut pending_background_events_read: Vec<BackgroundEvent> = Vec::with_capacity(cmp::min(background_event_count as usize, MAX_ALLOC_SIZE/mem::size_of::<BackgroundEvent>()));
for _ in 0..background_event_count {
match <u8 as Readable>::read(reader)? {
- 0 => pending_background_events_read.push(BackgroundEvent::ClosingMonitorUpdate((Readable::read(reader)?, Readable::read(reader)?))),
+ 0 => {
+ let (funding_txo, monitor_update): (OutPoint, ChannelMonitorUpdate) = (Readable::read(reader)?, Readable::read(reader)?);
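+ // Skip any event matching a force-close update we already queued above, as older
+ // serializations may have persisted the same update.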
+ if pending_background_events.iter().find(|e| {
+ let BackgroundEvent::ClosingMonitorUpdate((pending_funding_txo, pending_monitor_update)) = e;
+ *pending_funding_txo == funding_txo && *pending_monitor_update == monitor_update
+ }).is_none() {
+ pending_background_events.push(BackgroundEvent::ClosingMonitorUpdate((funding_txo, monitor_update)));
+ }
+ }
_ => return Err(DecodeError::InvalidValue),
}
}
per_peer_state: FairRwLock::new(per_peer_state),
pending_events: Mutex::new(pending_events_read),
- pending_background_events: Mutex::new(pending_background_events_read),
+ pending_background_events: Mutex::new(pending_background_events),
total_consistency_lock: RwLock::new(()),
persistence_notifier: Notifier::new(),
let chan_a = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 20_000_000);
let chan_b = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 20_000_000);
+ // Serialize Bob with the initial state of both channels, which we'll use later.
+ let bob_serialized = nodes[1].node.encode();
+
// Route two payments for each channel from Alice to Bob to lock in the HTLCs.
let payment_a = route_payment(&nodes[0], &[&nodes[1]], 50_000_000);
let payment_b = route_payment(&nodes[0], &[&nodes[1]], 50_000_000);
let payment_c = route_payment(&nodes[0], &[&nodes[1]], 50_000_000);
let payment_d = route_payment(&nodes[0], &[&nodes[1]], 50_000_000);
- // Serialize Bob with the HTLCs locked in. We'll restart Bob later on with the state at this
- // point such that he broadcasts a revoked commitment transaction.
- let bob_serialized = nodes[1].node.encode();
+ // Serialize Bob's monitors with the HTLCs locked in. We'll restart Bob later on with the state
+ // at this point such that he broadcasts a revoked commitment transaction with the HTLCs
+ // present.
let bob_serialized_monitor_a = get_monitor!(nodes[1], chan_a.2).encode();
let bob_serialized_monitor_b = get_monitor!(nodes[1], chan_b.2).encode();
}
}
- // Bob force closes by broadcasting his revoked state for each channel.
- nodes[1].node.force_close_broadcasting_latest_txn(&chan_a.2, &nodes[0].node.get_our_node_id()).unwrap();
- check_added_monitors(&nodes[1], 1);
- check_closed_broadcast(&nodes[1], 1, true);
- check_closed_event!(&nodes[1], 1, ClosureReason::HolderForceClosed);
- let revoked_commitment_a = {
- let mut txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
- assert_eq!(txn.len(), 1);
- let revoked_commitment = txn.pop().unwrap();
- assert_eq!(revoked_commitment.output.len(), 6); // 2 HTLC outputs + 1 to_self output + 1 to_remote output + 2 anchor outputs
- check_spends!(revoked_commitment, chan_a.3);
- revoked_commitment
- };
- nodes[1].node.force_close_broadcasting_latest_txn(&chan_b.2, &nodes[0].node.get_our_node_id()).unwrap();
- check_added_monitors(&nodes[1], 1);
- check_closed_broadcast(&nodes[1], 1, true);
- check_closed_event!(&nodes[1], 1, ClosureReason::HolderForceClosed);
- let revoked_commitment_b = {
- let mut txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
- assert_eq!(txn.len(), 1);
- let revoked_commitment = txn.pop().unwrap();
- assert_eq!(revoked_commitment.output.len(), 6); // 2 HTLC outputs + 1 to_self output + 1 to_remote output + 2 anchor outputs
- check_spends!(revoked_commitment, chan_b.3);
- revoked_commitment
+ // Bob force closes by restarting with the outdated state, prompting the ChannelMonitors to
+ // broadcast the latest commitment transaction known to them, which in our case is the one with
+ // the HTLCs still pending.
+ nodes[1].node.timer_tick_occurred();
+ check_added_monitors(&nodes[1], 2);
+ check_closed_event!(&nodes[1], 2, ClosureReason::OutdatedChannelManager);
+ let (revoked_commitment_a, revoked_commitment_b) = {
+ let txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
+ assert_eq!(txn.len(), 2);
+ assert_eq!(txn[0].output.len(), 6); // 2 HTLC outputs + 1 to_self output + 1 to_remote output + 2 anchor outputs
+ assert_eq!(txn[1].output.len(), 6); // 2 HTLC outputs + 1 to_self output + 1 to_remote output + 2 anchor outputs
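+ // The two monitors may broadcast in either order, so match each commitment transaction to
+ // its channel by the funding output it spends.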
+ if txn[0].input[0].previous_output.txid == chan_a.3.txid() {
+ check_spends!(&txn[0], &chan_a.3);
+ check_spends!(&txn[1], &chan_b.3);
+ (txn[0].clone(), txn[1].clone())
+ } else {
+ check_spends!(&txn[1], &chan_a.3);
+ check_spends!(&txn[0], &chan_b.3);
+ (txn[1].clone(), txn[0].clone())
+ }
};
// Bob should now receive two events to bump the fees of his revoked commitment transactions.
check_closed_event!(nodes[0], 1, ClosureReason::OutdatedChannelManager);
assert!(nodes[0].node.list_channels().is_empty());
assert!(nodes[0].node.has_pending_payments());
- let as_broadcasted_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
- assert_eq!(as_broadcasted_txn.len(), 1);
- assert_eq!(as_broadcasted_txn[0], as_commitment_tx);
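+ // The commitment transaction is now broadcast via a background monitor update, which is only
+ // processed on the next timer tick.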
+ nodes[0].node.timer_tick_occurred();
+ if !confirm_before_reload {
+ let as_broadcasted_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
+ assert_eq!(as_broadcasted_txn.len(), 1);
+ assert_eq!(as_broadcasted_txn[0], as_commitment_tx);
+ } else {
+ assert!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().is_empty());
+ }
+ check_added_monitors!(nodes[0], 1);
nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }, true).unwrap();
// On reload, the ChannelManager should realize it is stale compared to the ChannelMonitor and
// force-close the channel.
check_closed_event!(nodes[0], 1, ClosureReason::OutdatedChannelManager);
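+ // Processing the queued background monitor update on the timer tick broadcasts the
+ // commitment transaction.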
+ nodes[0].node.timer_tick_occurred();
assert!(nodes[0].node.list_channels().is_empty());
assert!(nodes[0].node.has_pending_payments());
assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0).len(), 1);
+ check_added_monitors!(nodes[0], 1);
nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }, true).unwrap();
assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
if let Event::PaymentSent { payment_preimage, .. } = events[1] { assert_eq!(payment_preimage, our_payment_preimage); } else { panic!(); }
// Note that we don't get a PaymentPathSuccessful here as we leave the HTLC pending to avoid
// the double-claim that would otherwise appear at the end of this test.
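+ // A timer tick is needed to process the background monitor update that broadcasts the
+ // commitment transaction.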
+ nodes[0].node.timer_tick_occurred();
let as_broadcasted_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
assert_eq!(as_broadcasted_txn.len(), 1);
nodes_0_deserialized = nodes_0_deserialized_tmp;
assert!(nodes_0_read.is_empty());
- { // Channel close should result in a commitment tx
- let txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
- assert_eq!(txn.len(), 1);
- check_spends!(txn[0], funding_tx);
- assert_eq!(txn[0].input[0].previous_output.txid, funding_tx.txid());
- }
-
for monitor in node_0_monitors.drain(..) {
assert_eq!(nodes[0].chain_monitor.watch_channel(monitor.get_funding_txo().0, monitor),
ChannelMonitorUpdateStatus::Completed);
check_added_monitors!(nodes[0], 1);
}
nodes[0].node = &nodes_0_deserialized;
+
check_closed_event!(nodes[0], 1, ClosureReason::OutdatedChannelManager);
+ { // Channel close should result in a commitment tx
+ nodes[0].node.timer_tick_occurred();
+ let txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
+ assert_eq!(txn.len(), 1);
+ check_spends!(txn[0], funding_tx);
+ assert_eq!(txn[0].input[0].previous_output.txid, funding_tx.txid());
+ }
+ check_added_monitors!(nodes[0], 1);
// nodes[1] and nodes[2] have no lost state with nodes[0]...
reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
});
}
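+ // Tick the timer to process the background monitor update, prompting the broadcast of Bob's
+ // commitment transaction.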
+ nodes[1].node.timer_tick_occurred();
let bs_commitment_tx = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
assert_eq!(bs_commitment_tx.len(), 1);
+ check_added_monitors!(nodes[1], 1);
nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
let chan_0_monitor_serialized = get_monitor!(nodes[0], chan.2).encode();
reload_node!(nodes[0], *nodes[0].node.get_current_default_configuration(), &nodes_0_serialized, &[&chan_0_monitor_serialized], persister, new_chain_monitor, nodes_0_deserialized);
- if !reorg_after_reload {
- // If the channel is already closed when we reload the node, we'll broadcast a closing
- // transaction via the ChannelMonitor which is missing a corresponding channel.
- assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().len(), 1);
- nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().clear();
- }
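+ // The force-close monitor update is now queued as a background event on reload, so nothing is
+ // broadcast until the next timer tick.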
+ assert!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().is_empty());
}
if reorg_after_reload {