From bd4eb0da76207a68396de8b36fb640467255cc31 Mon Sep 17 00:00:00 2001
From: Wilmer Paulino
Date: Tue, 28 Feb 2023 10:45:48 -0800
Subject: [PATCH] Queue BackgroundEvent to force close channels upon
 ChannelManager::read

This results in a new, potentially redundant, `ChannelMonitorUpdate` that
must be applied to `ChannelMonitor`s to broadcast the holder's latest
commitment transaction.

This is a behavior change for anchor channels since their commitments may
require additional fees to be attached through a child anchor transaction.
Recall that anchor transactions are only generated by the event consumer
after processing a `BumpTransactionEvent::ChannelClose` event, which is
yielded after applying a `ChannelMonitorUpdateStep::ChannelForceClosed`
monitor update. Assuming the node operator is not watching the mempool to
generate these anchor transactions without LDK, an anchor channel which we
had to fail when deserializing our `ChannelManager` would have its
commitment transaction broadcast by itself, potentially exposing the node
operator to loss of funds if the commitment transaction's fee is not
enough to be accepted into the network's mempools.
---
 lightning/src/chain/channelmonitor.rs | 18 +++++----
 lightning/src/ln/channelmanager.rs    | 29 +++++++++++----
 lightning/src/ln/monitor_tests.rs     | 53 +++++++++++++--------------
 lightning/src/ln/payment_tests.rs     | 15 ++++++--
 lightning/src/ln/reload_tests.rs      | 18 +++++----
 lightning/src/ln/reorg_tests.rs       |  7 +---
 6 files changed, 81 insertions(+), 59 deletions(-)

diff --git a/lightning/src/chain/channelmonitor.rs b/lightning/src/chain/channelmonitor.rs
index cd6b4c941..df5160df0 100644
--- a/lightning/src/chain/channelmonitor.rs
+++ b/lightning/src/chain/channelmonitor.rs
@@ -69,8 +69,7 @@ use crate::sync::{Mutex, LockTestExt};
 /// much smaller than a full [`ChannelMonitor`]. However, for large single commitment transaction
 /// updates (e.g. ones during which there are hundreds of HTLCs pending on the commitment
 /// transaction), a single update may reach upwards of 1 MiB in serialized size.
-#[cfg_attr(any(test, fuzzing, feature = "_test_utils"), derive(PartialEq, Eq))]
-#[derive(Clone)]
+#[derive(Clone, PartialEq, Eq)]
 #[must_use]
 pub struct ChannelMonitorUpdate {
 	pub(crate) updates: Vec<ChannelMonitorUpdateStep>,
@@ -491,8 +490,7 @@ impl_writeable_tlv_based_enum_upgradable!(OnchainEvent,
 );
 
-#[cfg_attr(any(test, fuzzing, feature = "_test_utils"), derive(PartialEq, Eq))]
-#[derive(Clone)]
+#[derive(Clone, PartialEq, Eq)]
 pub(crate) enum ChannelMonitorUpdateStep {
 	LatestHolderCommitmentTXInfo {
 		commitment_tx: HolderCommitmentTransaction,
@@ -2268,10 +2266,14 @@ impl<Signer: WriteableEcdsaChannelSigner> ChannelMonitorImpl<Signer> {
 	{
 		log_info!(logger, "Applying update to monitor {}, bringing update_id from {} to {} with {} changes.",
 			log_funding_info!(self), self.latest_update_id, updates.update_id, updates.updates.len());
-		// ChannelMonitor updates may be applied after force close if we receive a
-		// preimage for a broadcasted commitment transaction HTLC output that we'd
-		// like to claim on-chain. If this is the case, we no longer have guaranteed
-		// access to the monitor's update ID, so we use a sentinel value instead.
+		// ChannelMonitor updates may be applied after force close if we receive a preimage for a
+		// broadcasted commitment transaction HTLC output that we'd like to claim on-chain. If this
+		// is the case, we no longer have guaranteed access to the monitor's update ID, so we use a
+		// sentinel value instead.
+		//
+		// The `ChannelManager` may also queue redundant `ChannelForceClosed` updates if it still
+		// thinks the channel needs to have its commitment transaction broadcast, so we'll allow
+		// them as well.
 		if updates.update_id == CLOSED_CHANNEL_UPDATE_ID {
 			assert_eq!(updates.updates.len(), 1);
 			match updates.updates[0] {
diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs
index 90afc0169..2c62cc74d 100644
--- a/lightning/src/ln/channelmanager.rs
+++ b/lightning/src/ln/channelmanager.rs
@@ -7310,6 +7310,7 @@ where
 		let mut id_to_peer = HashMap::with_capacity(cmp::min(channel_count as usize, 128));
 		let mut short_to_chan_info = HashMap::with_capacity(cmp::min(channel_count as usize, 128));
 		let mut channel_closures = Vec::new();
+		let mut pending_background_events = Vec::new();
 		for _ in 0..channel_count {
 			let mut channel: Channel<<SP::Target as SignerProvider>::Signer> = Channel::read(reader, (
 				&args.entropy_source, &args.signer_provider, best_block_height, &provided_channel_type_features(&args.default_config)
@@ -7339,9 +7340,11 @@ where
 				log_error!(args.logger, " The channel will be force-closed and the latest commitment transaction from the ChannelMonitor broadcast.");
 				log_error!(args.logger, " The ChannelMonitor for channel {} is at update_id {} but the ChannelManager is at update_id {}.",
 					log_bytes!(channel.channel_id()), monitor.get_latest_update_id(), channel.get_latest_monitor_update_id());
-				let (_, mut new_failed_htlcs) = channel.force_shutdown(true);
+				let (monitor_update, mut new_failed_htlcs) = channel.force_shutdown(true);
+				if let Some(monitor_update) = monitor_update {
+					pending_background_events.push(BackgroundEvent::ClosingMonitorUpdate(monitor_update));
+				}
 				failed_htlcs.append(&mut new_failed_htlcs);
-				monitor.broadcast_latest_holder_commitment_txn(&args.tx_broadcaster, &args.logger);
 				channel_closures.push(events::Event::ChannelClosed {
 					channel_id: channel.channel_id(),
 					user_channel_id: channel.get_user_id(),
@@ -7406,10 +7409,13 @@ where
 			}
 		}
 
-		for (funding_txo, monitor) in args.channel_monitors.iter_mut() {
+		for (funding_txo, _) in args.channel_monitors.iter() {
 			if !funding_txo_set.contains(funding_txo) {
-				log_info!(args.logger, "Broadcasting latest holder commitment transaction for closed channel {}", log_bytes!(funding_txo.to_channel_id()));
-				monitor.broadcast_latest_holder_commitment_txn(&args.tx_broadcaster, &args.logger);
+				let monitor_update = ChannelMonitorUpdate {
+					update_id: CLOSED_CHANNEL_UPDATE_ID,
+					updates: vec![ChannelMonitorUpdateStep::ChannelForceClosed { should_broadcast: true }],
+				};
+				pending_background_events.push(BackgroundEvent::ClosingMonitorUpdate((*funding_txo, monitor_update)));
 			}
 		}
 
@@ -7462,10 +7468,17 @@ where
 		}
 
 		let background_event_count: u64 = Readable::read(reader)?;
-		let mut pending_background_events_read: Vec<BackgroundEvent> = Vec::with_capacity(cmp::min(background_event_count as usize, MAX_ALLOC_SIZE/mem::size_of::<BackgroundEvent>()));
 		for _ in 0..background_event_count {
 			match <u8 as Readable>::read(reader)? {
-				0 => pending_background_events_read.push(BackgroundEvent::ClosingMonitorUpdate((Readable::read(reader)?, Readable::read(reader)?))),
+				0 => {
+					let (funding_txo, monitor_update): (OutPoint, ChannelMonitorUpdate) = (Readable::read(reader)?, Readable::read(reader)?);
+					if pending_background_events.iter().find(|e| {
+						let BackgroundEvent::ClosingMonitorUpdate((pending_funding_txo, pending_monitor_update)) = e;
+						*pending_funding_txo == funding_txo && *pending_monitor_update == monitor_update
+					}).is_none() {
+						pending_background_events.push(BackgroundEvent::ClosingMonitorUpdate((funding_txo, monitor_update)));
+					}
+				}
 				_ => return Err(DecodeError::InvalidValue),
 			}
 		}
@@ -7840,7 +7853,7 @@ where
 			per_peer_state: FairRwLock::new(per_peer_state),
 
 			pending_events: Mutex::new(pending_events_read),
-			pending_background_events: Mutex::new(pending_background_events_read),
+			pending_background_events: Mutex::new(pending_background_events),
 			total_consistency_lock: RwLock::new(()),
 			persistence_notifier: Notifier::new(),
 
diff --git a/lightning/src/ln/monitor_tests.rs b/lightning/src/ln/monitor_tests.rs
index 3bd50293f..7ed996820 100644
--- a/lightning/src/ln/monitor_tests.rs
+++ b/lightning/src/ln/monitor_tests.rs
@@ -1859,15 +1859,18 @@ fn test_anchors_aggregated_revoked_htlc_tx() {
 	let chan_a = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 20_000_000);
 	let chan_b = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 20_000_000);
 
+	// Serialize Bob with the initial state of both channels, which we'll use later.
+	let bob_serialized = nodes[1].node.encode();
+
 	// Route two payments for each channel from Alice to Bob to lock in the HTLCs.
 	let payment_a = route_payment(&nodes[0], &[&nodes[1]], 50_000_000);
 	let payment_b = route_payment(&nodes[0], &[&nodes[1]], 50_000_000);
 	let payment_c = route_payment(&nodes[0], &[&nodes[1]], 50_000_000);
 	let payment_d = route_payment(&nodes[0], &[&nodes[1]], 50_000_000);
 
-	// Serialize Bob with the HTLCs locked in. We'll restart Bob later on with the state at this
-	// point such that he broadcasts a revoked commitment transaction.
-	let bob_serialized = nodes[1].node.encode();
+	// Serialize Bob's monitors with the HTLCs locked in. We'll restart Bob later on with the state
+	// at this point such that he broadcasts a revoked commitment transaction with the HTLCs
+	// present.
 	let bob_serialized_monitor_a = get_monitor!(nodes[1], chan_a.2).encode();
 	let bob_serialized_monitor_b = get_monitor!(nodes[1], chan_b.2).encode();
 
@@ -1897,30 +1900,26 @@ fn test_anchors_aggregated_revoked_htlc_tx() {
 		}
 	}
 
-	// Bob force closes by broadcasting his revoked state for each channel.
-	nodes[1].node.force_close_broadcasting_latest_txn(&chan_a.2, &nodes[0].node.get_our_node_id()).unwrap();
-	check_added_monitors(&nodes[1], 1);
-	check_closed_broadcast(&nodes[1], 1, true);
-	check_closed_event!(&nodes[1], 1, ClosureReason::HolderForceClosed);
-	let revoked_commitment_a = {
-		let mut txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
-		assert_eq!(txn.len(), 1);
-		let revoked_commitment = txn.pop().unwrap();
-		assert_eq!(revoked_commitment.output.len(), 6); // 2 HTLC outputs + 1 to_self output + 1 to_remote output + 2 anchor outputs
-		check_spends!(revoked_commitment, chan_a.3);
-		revoked_commitment
-	};
-	nodes[1].node.force_close_broadcasting_latest_txn(&chan_b.2, &nodes[0].node.get_our_node_id()).unwrap();
-	check_added_monitors(&nodes[1], 1);
-	check_closed_broadcast(&nodes[1], 1, true);
-	check_closed_event!(&nodes[1], 1, ClosureReason::HolderForceClosed);
-	let revoked_commitment_b = {
-		let mut txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
-		assert_eq!(txn.len(), 1);
-		let revoked_commitment = txn.pop().unwrap();
-		assert_eq!(revoked_commitment.output.len(), 6); // 2 HTLC outputs + 1 to_self output + 1 to_remote output + 2 anchor outputs
-		check_spends!(revoked_commitment, chan_b.3);
-		revoked_commitment
+	// Bob force closes by restarting with the outdated state, prompting the ChannelMonitors to
+	// broadcast the latest commitment transaction known to them, which in our case is the one with
+	// the HTLCs still pending.
+	nodes[1].node.timer_tick_occurred();
+	check_added_monitors(&nodes[1], 2);
+	check_closed_event!(&nodes[1], 2, ClosureReason::OutdatedChannelManager);
+	let (revoked_commitment_a, revoked_commitment_b) = {
+		let txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
+		assert_eq!(txn.len(), 2);
+		assert_eq!(txn[0].output.len(), 6); // 2 HTLC outputs + 1 to_self output + 1 to_remote output + 2 anchor outputs
+		assert_eq!(txn[1].output.len(), 6); // 2 HTLC outputs + 1 to_self output + 1 to_remote output + 2 anchor outputs
+		if txn[0].input[0].previous_output.txid == chan_a.3.txid() {
+			check_spends!(&txn[0], &chan_a.3);
+			check_spends!(&txn[1], &chan_b.3);
+			(txn[0].clone(), txn[1].clone())
+		} else {
+			check_spends!(&txn[1], &chan_a.3);
+			check_spends!(&txn[0], &chan_b.3);
+			(txn[1].clone(), txn[0].clone())
+		}
 	};
 
 	// Bob should now receive two events to bump his revoked commitment transaction fees.
diff --git a/lightning/src/ln/payment_tests.rs b/lightning/src/ln/payment_tests.rs
index 15361b98a..8d5c4b54b 100644
--- a/lightning/src/ln/payment_tests.rs
+++ b/lightning/src/ln/payment_tests.rs
@@ -334,9 +334,15 @@ fn do_retry_with_no_persist(confirm_before_reload: bool) {
 	check_closed_event!(nodes[0], 1, ClosureReason::OutdatedChannelManager);
 	assert!(nodes[0].node.list_channels().is_empty());
 	assert!(nodes[0].node.has_pending_payments());
-	let as_broadcasted_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
-	assert_eq!(as_broadcasted_txn.len(), 1);
-	assert_eq!(as_broadcasted_txn[0], as_commitment_tx);
+	nodes[0].node.timer_tick_occurred();
+	if !confirm_before_reload {
+		let as_broadcasted_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
+		assert_eq!(as_broadcasted_txn.len(), 1);
+		assert_eq!(as_broadcasted_txn[0], as_commitment_tx);
+	} else {
+		assert!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().is_empty());
+	}
+	check_added_monitors!(nodes[0], 1);
 
 	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
 	nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }, true).unwrap();
@@ -499,9 +505,11 @@ fn do_test_completed_payment_not_retryable_on_reload(use_dust: bool) {
 	// On reload, the ChannelManager should realize it is stale compared to the ChannelMonitor and
 	// force-close the channel.
 	check_closed_event!(nodes[0], 1, ClosureReason::OutdatedChannelManager);
+	nodes[0].node.timer_tick_occurred();
 	assert!(nodes[0].node.list_channels().is_empty());
 	assert!(nodes[0].node.has_pending_payments());
 	assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0).len(), 1);
+	check_added_monitors!(nodes[0], 1);
 
 	nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }, true).unwrap();
 	assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
@@ -2794,6 +2802,7 @@ fn do_no_missing_sent_on_midpoint_reload(persist_manager_with_payment: bool) {
 	if let Event::PaymentSent { payment_preimage, .. } = events[1] { assert_eq!(payment_preimage, our_payment_preimage); } else { panic!(); }
 	// Note that we don't get a PaymentPathSuccessful here as we leave the HTLC pending to avoid
 	// the double-claim that would otherwise appear at the end of this test.
+	nodes[0].node.timer_tick_occurred();
 	let as_broadcasted_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
 	assert_eq!(as_broadcasted_txn.len(), 1);
 
diff --git a/lightning/src/ln/reload_tests.rs b/lightning/src/ln/reload_tests.rs
index 14deefd3f..70570c68b 100644
--- a/lightning/src/ln/reload_tests.rs
+++ b/lightning/src/ln/reload_tests.rs
@@ -422,20 +422,22 @@ fn test_manager_serialize_deserialize_inconsistent_monitor() {
 	nodes_0_deserialized = nodes_0_deserialized_tmp;
 	assert!(nodes_0_read.is_empty());
 
-	{ // Channel close should result in a commitment tx
-		let txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
-		assert_eq!(txn.len(), 1);
-		check_spends!(txn[0], funding_tx);
-		assert_eq!(txn[0].input[0].previous_output.txid, funding_tx.txid());
-	}
-
 	for monitor in node_0_monitors.drain(..) {
 		assert_eq!(nodes[0].chain_monitor.watch_channel(monitor.get_funding_txo().0, monitor),
 			ChannelMonitorUpdateStatus::Completed);
 		check_added_monitors!(nodes[0], 1);
 	}
 	nodes[0].node = &nodes_0_deserialized;
 
+	check_closed_event!(nodes[0], 1, ClosureReason::OutdatedChannelManager);
+	{ // Channel close should result in a commitment tx
+		nodes[0].node.timer_tick_occurred();
+		let txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
+		assert_eq!(txn.len(), 1);
+		check_spends!(txn[0], funding_tx);
+		assert_eq!(txn[0].input[0].previous_output.txid, funding_tx.txid());
+	}
+	check_added_monitors!(nodes[0], 1);
+
 	// nodes[1] and nodes[2] have no lost state with nodes[0]...
 	reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
@@ -920,8 +922,10 @@ fn do_forwarded_payment_no_manager_persistence(use_cs_commitment: bool, claim_ht
 		});
 	}
 
+	nodes[1].node.timer_tick_occurred();
 	let bs_commitment_tx = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
 	assert_eq!(bs_commitment_tx.len(), 1);
+	check_added_monitors!(nodes[1], 1);
 
 	nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
 	reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
diff --git a/lightning/src/ln/reorg_tests.rs b/lightning/src/ln/reorg_tests.rs
index 69e0c9afa..c22458830 100644
--- a/lightning/src/ln/reorg_tests.rs
+++ b/lightning/src/ln/reorg_tests.rs
@@ -320,12 +320,7 @@ fn do_test_unconf_chan(reload_node: bool, reorg_after_reload: bool, use_funding_
 		let chan_0_monitor_serialized = get_monitor!(nodes[0], chan.2).encode();
 		reload_node!(nodes[0], *nodes[0].node.get_current_default_configuration(), &nodes_0_serialized, &[&chan_0_monitor_serialized], persister, new_chain_monitor, nodes_0_deserialized);
 
-		if !reorg_after_reload {
-			// If the channel is already closed when we reload the node, we'll broadcast a closing
-			// transaction via the ChannelMonitor which is missing a corresponding channel.
-			assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().len(), 1);
-			nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().clear();
-		}
+		assert!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().is_empty());
 	}
 
 	if reorg_after_reload {
-- 
2.39.5
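
The `ChannelManager::read` change above can be condensed into a standalone sketch. Everything below is a simplified, hypothetical stand-in: the real `OutPoint`, `ChannelMonitorUpdate`, and `BackgroundEvent` carry more fields, and `queue_closing_update` is an illustrative helper rather than an LDK function; the one concrete detail borrowed from LDK is that `CLOSED_CHANNEL_UPDATE_ID` is `u64::MAX`. The shape of the logic: instead of broadcasting during deserialization, a sentinel-id `ChannelForceClosed` update is queued as a `BackgroundEvent`, and an identical, already-queued event is skipped; that equality check is what motivates deriving `PartialEq`/`Eq` on `ChannelMonitorUpdate` unconditionally rather than only under test.

// All names below are illustrative stand-ins, not the lightning crate's
// actual definitions.
const CLOSED_CHANNEL_UPDATE_ID: u64 = u64::MAX;

#[derive(Clone, PartialEq, Eq, Debug)]
enum ChannelMonitorUpdateStep {
	ChannelForceClosed { should_broadcast: bool },
}

#[derive(Clone, PartialEq, Eq, Debug)]
struct ChannelMonitorUpdate {
	update_id: u64,
	updates: Vec<ChannelMonitorUpdateStep>,
}

#[derive(Clone, PartialEq, Eq, Debug)]
struct OutPoint {
	txid: [u8; 32],
	index: u16,
}

#[derive(Clone, PartialEq, Eq, Debug)]
enum BackgroundEvent {
	ClosingMonitorUpdate((OutPoint, ChannelMonitorUpdate)),
}

// Queue a force-close update for `funding_txo` unless an identical event is
// already pending, mirroring the dedup check performed while deserializing
// previously persisted background events.
fn queue_closing_update(pending: &mut Vec<BackgroundEvent>, funding_txo: OutPoint) {
	let monitor_update = ChannelMonitorUpdate {
		update_id: CLOSED_CHANNEL_UPDATE_ID,
		updates: vec![ChannelMonitorUpdateStep::ChannelForceClosed { should_broadcast: true }],
	};
	let event = BackgroundEvent::ClosingMonitorUpdate((funding_txo, monitor_update));
	if !pending.contains(&event) {
		pending.push(event);
	}
}

fn main() {
	let mut pending = Vec::new();
	let funding_txo = OutPoint { txid: [0; 32], index: 0 };
	// Queuing the same channel twice records only one event, just as the
	// deserialization path skips persisted events equal to ones queued while
	// force-closing stale channels.
	queue_closing_update(&mut pending, funding_txo.clone());
	queue_closing_update(&mut pending, funding_txo);
	assert_eq!(pending.len(), 1);
}

The broadcast itself now only happens once the queued events are processed, which is why the updated tests call `timer_tick_occurred()` before asserting that the holder commitment transaction was broadcast.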
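
Likewise, the channelmonitor.rs hunk loosens a sentinel-acceptance rule: per the updated comment, an update carrying the sentinel id must hold exactly one step, and alongside post-close preimage claims the monitor now also tolerates the redundant `ChannelForceClosed` updates the `ChannelManager` may queue. The sketch below is a hypothetical condensation of that rule; `Step` and `sentinel_update_allowed` are invented names, and the real code validates ordinary update ids against the monitor's `latest_update_id` rather than returning `true` outright.

const CLOSED_CHANNEL_UPDATE_ID: u64 = u64::MAX;

// Simplified stand-ins for the update steps relevant after force close.
#[derive(Debug)]
enum Step {
	PaymentPreimage,
	ChannelForceClosed { should_broadcast: bool },
}

// Sentinel updates bypass the monitor's usual update_id sequencing but must
// contain exactly one step of an allowed kind.
fn sentinel_update_allowed(update_id: u64, steps: &[Step]) -> bool {
	if update_id != CLOSED_CHANNEL_UPDATE_ID {
		return true; // ordinary updates are checked against the id sequence instead
	}
	steps.len() == 1
		&& matches!(steps[0], Step::PaymentPreimage | Step::ChannelForceClosed { .. })
}

fn main() {
	// A redundant force-close at the sentinel id is now accepted...
	assert!(sentinel_update_allowed(
		CLOSED_CHANNEL_UPDATE_ID,
		&[Step::ChannelForceClosed { should_broadcast: true }],
	));
	// ...but a sentinel update must still carry exactly one step.
	assert!(!sentinel_update_allowed(CLOSED_CHANNEL_UPDATE_ID, &[]));
}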