From a96e2fe144383ea6fd670153fb895ee07a3245ef Mon Sep 17 00:00:00 2001
From: Matt Corallo
Date: Wed, 30 Aug 2023 18:22:33 +0000
Subject: [PATCH] Rename `MonitorEvent::CommitmentTxConfirmed` to
 `HolderForceClosed`

`MonitorEvent::CommitmentTxConfirmed` has always been the result of us
force-closing the channel, not of the counterparty doing so, which made
the name a bit of a misnomer. Worse, the name carried over into the
channel's `ClosureReason` in the event API.

Here we simply rename the variant and use the proper `ClosureReason`.
---
 lightning/src/chain/channelmonitor.rs | 16 ++++++++--------
 lightning/src/ln/channelmanager.rs    |  4 ++--
 lightning/src/ln/functional_tests.rs  | 12 ++++++------
 lightning/src/ln/monitor_tests.rs     |  4 ++--
 lightning/src/ln/reorg_tests.rs       |  2 +-
 5 files changed, 19 insertions(+), 19 deletions(-)

diff --git a/lightning/src/chain/channelmonitor.rs b/lightning/src/chain/channelmonitor.rs
index 3e03c2fd5..3f9c83bb5 100644
--- a/lightning/src/chain/channelmonitor.rs
+++ b/lightning/src/chain/channelmonitor.rs
@@ -134,7 +134,7 @@ pub enum MonitorEvent {
 	HTLCEvent(HTLCUpdate),
 
 	/// A monitor event that the Channel's commitment transaction was confirmed.
-	CommitmentTxConfirmed(OutPoint),
+	HolderForceClosed(OutPoint),
 
 	/// Indicates a [`ChannelMonitor`] update has completed. See
 	/// [`ChannelMonitorUpdateStatus::InProgress`] for more information on how this is used.
@@ -160,7 +160,7 @@ impl_writeable_tlv_based_enum_upgradable!(MonitorEvent,
 	},
 ;
 	(2, HTLCEvent),
-	(4, CommitmentTxConfirmed),
+	(4, HolderForceClosed),
 	// 6 was `UpdateFailed` until LDK 0.0.117
 );
 
@@ -1031,7 +1031,7 @@ impl<Signer: WriteableEcdsaChannelSigner> Writeable for ChannelMonitorImpl<Signer> {
 		writer.write_all(&(self.pending_monitor_events.iter().filter(|ev| match ev {
 			MonitorEvent::HTLCEvent(_) => true,
-			MonitorEvent::CommitmentTxConfirmed(_) => true,
+			MonitorEvent::HolderForceClosed(_) => true,
 			_ => false,
 		}).count() as u64).to_be_bytes())?;
 		for event in self.pending_monitor_events.iter() {
@@ -1040,7 +1040,7 @@ impl<Signer: WriteableEcdsaChannelSigner> Writeable for ChannelMonitorImpl<Signer> {
 					0u8.write(writer)?;
 					upd.write(writer)?;
 				},
-				MonitorEvent::CommitmentTxConfirmed(_) => 1u8.write(writer)?,
+				MonitorEvent::HolderForceClosed(_) => 1u8.write(writer)?,
 				_ => {}, // Covered in the TLV writes below
 			}
 		}
@@ -2526,7 +2526,7 @@ impl<Signer: WriteableEcdsaChannelSigner> ChannelMonitorImpl<Signer> {
 			txs.push(tx);
 		}
 		broadcaster.broadcast_transactions(&txs);
-		self.pending_monitor_events.push(MonitorEvent::CommitmentTxConfirmed(self.funding_info.0));
+		self.pending_monitor_events.push(MonitorEvent::HolderForceClosed(self.funding_info.0));
 	}
 
 	pub fn update_monitor<B: Deref, F: Deref, L: Deref>(&mut self, updates: &ChannelMonitorUpdate, broadcaster: &B, fee_estimator: F, logger: &L) -> Result<(), ()>
@@ -2639,7 +2639,7 @@ impl<Signer: WriteableEcdsaChannelSigner> ChannelMonitorImpl<Signer> {
 				log_error!(logger, "    in channel monitor for channel {}!", &self.funding_info.0.to_channel_id());
 				log_error!(logger, "    Read the docs for ChannelMonitor::get_latest_holder_commitment_txn and take manual action!");
 			} else {
-				// If we generated a MonitorEvent::CommitmentTxConfirmed, the ChannelManager
+				// If we generated a MonitorEvent::HolderForceClosed, the ChannelManager
 				// will still give us a ChannelForceClosed event with !should_broadcast, but we
 				// shouldn't print the scary warning above.
 				log_info!(logger, "Channel off-chain state closed after we broadcasted our latest commitment transaction.");
@@ -3484,7 +3484,7 @@ impl<Signer: WriteableEcdsaChannelSigner> ChannelMonitorImpl<Signer> {
 			let funding_outp = HolderFundingOutput::build(self.funding_redeemscript.clone(), self.channel_value_satoshis, self.onchain_tx_handler.channel_type_features().clone());
 			let commitment_package = PackageTemplate::build_package(self.funding_info.0.txid.clone(), self.funding_info.0.index as u32, PackageSolvingData::HolderFundingOutput(funding_outp), self.best_block.height(), self.best_block.height());
 			claimable_outpoints.push(commitment_package);
-			self.pending_monitor_events.push(MonitorEvent::CommitmentTxConfirmed(self.funding_info.0));
+			self.pending_monitor_events.push(MonitorEvent::HolderForceClosed(self.funding_info.0));
 			// Although we aren't signing the transaction directly here, the transaction will be signed
 			// in the claim that is queued to OnchainTxHandler. We set holder_tx_signed here to reject
 			// new channel updates.
@@ -4248,7 +4248,7 @@ impl<'a, 'b, ES: EntropySource, SP: SignerProvider> ReadableArgs<(&'a ES, &'b SP
 		for _ in 0..pending_monitor_events_len {
 			let ev = match <u8 as Readable>::read(reader)? {
 				0 => MonitorEvent::HTLCEvent(Readable::read(reader)?),
-				1 => MonitorEvent::CommitmentTxConfirmed(funding_info.0),
+				1 => MonitorEvent::HolderForceClosed(funding_info.0),
 				_ => return Err(DecodeError::InvalidValue)
 			};
 			pending_monitor_events.as_mut().unwrap().push(ev);
diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs
index b068b7a74..458736312 100644
--- a/lightning/src/ln/channelmanager.rs
+++ b/lightning/src/ln/channelmanager.rs
@@ -6729,7 +6729,7 @@ where
 							self.fail_htlc_backwards_internal(&htlc_update.source, &htlc_update.payment_hash, &reason, receiver);
 						}
 					},
-					MonitorEvent::CommitmentTxConfirmed(funding_outpoint) => {
+					MonitorEvent::HolderForceClosed(funding_outpoint) => {
 						let counterparty_node_id_opt = match counterparty_node_id {
 							Some(cp_id) => Some(cp_id),
 							None => {
@@ -6753,7 +6753,7 @@ where
 									msg: update
 								});
 							}
-							self.issue_channel_close_events(&chan.context, ClosureReason::CommitmentTxConfirmed);
+							self.issue_channel_close_events(&chan.context, ClosureReason::HolderForceClosed);
 							pending_msg_events.push(events::MessageSendEvent::HandleError {
 								node_id: chan.context.get_counterparty_node_id(),
 								action: msgs::ErrorAction::SendErrorMessage {
diff --git a/lightning/src/ln/functional_tests.rs b/lightning/src/ln/functional_tests.rs
index cf27d6787..fa766f0ab 100644
--- a/lightning/src/ln/functional_tests.rs
+++ b/lightning/src/ln/functional_tests.rs
@@ -2408,6 +2408,7 @@ fn channel_monitor_network_test() {
 	}
 	check_added_monitors!(nodes[4], 1);
 	test_txn_broadcast(&nodes[4], &chan_4, None, HTLCType::SUCCESS);
+	check_closed_event!(nodes[4], 1, ClosureReason::HolderForceClosed, [nodes[3].node.get_our_node_id()], 100000);
 
 	mine_transaction(&nodes[4], &node_txn[0]);
 	check_preimage_claim(&nodes[4], &node_txn);
@@ -2420,8 +2421,7 @@ fn channel_monitor_network_test() {
 	assert_eq!(nodes[3].chain_monitor.chain_monitor.watch_channel(OutPoint { txid: chan_3.3.txid(), index: 0 }, chan_3_mon),
 		Ok(ChannelMonitorUpdateStatus::Completed));
-	check_closed_event!(nodes[3], 1, ClosureReason::CommitmentTxConfirmed, [nodes[4].node.get_our_node_id()], 100000);
-	check_closed_event!(nodes[4], 1, ClosureReason::CommitmentTxConfirmed, [nodes[3].node.get_our_node_id()], 100000);
+	check_closed_event!(nodes[3], 1, ClosureReason::HolderForceClosed, [nodes[4].node.get_our_node_id()], 100000);
 }
 
 #[test]
@@ -5660,7 +5660,7 @@ fn do_htlc_claim_local_commitment_only(use_dust: bool) {
 	test_txn_broadcast(&nodes[1], &chan, None, if use_dust { HTLCType::NONE } else { HTLCType::SUCCESS });
 	check_closed_broadcast!(nodes[1], true);
 	check_added_monitors!(nodes[1], 1);
-	check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);
+	check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed, [nodes[0].node.get_our_node_id()], 100000);
 }
 
 fn do_htlc_claim_current_remote_commitment_only(use_dust: bool) {
@@ -5691,7 +5691,7 @@ fn do_htlc_claim_current_remote_commitment_only(use_dust: bool) {
 	test_txn_broadcast(&nodes[0], &chan, None, HTLCType::NONE);
 	check_closed_broadcast!(nodes[0], true);
 	check_added_monitors!(nodes[0], 1);
-	check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
+	check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed, [nodes[1].node.get_our_node_id()], 100000);
 }
 
 fn do_htlc_claim_previous_remote_commitment_only(use_dust: bool, check_revoke_no_close: bool) {
@@ -5737,7 +5737,7 @@ fn do_htlc_claim_previous_remote_commitment_only(use_dust: bool, check_revoke_no
 		test_txn_broadcast(&nodes[0], &chan, None, HTLCType::NONE);
 		check_closed_broadcast!(nodes[0], true);
 		check_added_monitors!(nodes[0], 1);
-		check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
+		check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed, [nodes[1].node.get_our_node_id()], 100000);
 	} else {
 		expect_payment_failed!(nodes[0], our_payment_hash, true);
 	}
@@ -8603,7 +8603,7 @@ fn test_concurrent_monitor_claim() {
 	let height = HTLC_TIMEOUT_BROADCAST + 1;
 	connect_blocks(&nodes[0], height - nodes[0].best_block_info().1);
 	check_closed_broadcast(&nodes[0], 1, true);
-	check_closed_event!(&nodes[0], 1, ClosureReason::CommitmentTxConfirmed, false,
+	check_closed_event!(&nodes[0], 1, ClosureReason::HolderForceClosed, false,
 		[nodes[1].node.get_our_node_id()], 100000);
 	watchtower_alice.chain_monitor.block_connected(&create_dummy_block(BlockHash::all_zeros(), 42, vec![bob_state_y.clone()]), height);
 	check_added_monitors(&nodes[0], 1);
diff --git a/lightning/src/ln/monitor_tests.rs b/lightning/src/ln/monitor_tests.rs
index ece24794f..cb78cda71 100644
--- a/lightning/src/ln/monitor_tests.rs
+++ b/lightning/src/ln/monitor_tests.rs
@@ -1046,14 +1046,14 @@ fn do_test_revoked_counterparty_commitment_balances(confirm_htlc_spend_first: bo
 	assert!(failed_payments.is_empty());
 	if let Event::PendingHTLCsForwardable { .. } = events[0] {} else { panic!(); }
 	match &events[1] {
-		Event::ChannelClosed { reason: ClosureReason::CommitmentTxConfirmed, .. } => {},
+		Event::ChannelClosed { reason: ClosureReason::HolderForceClosed, .. } => {},
 		_ => panic!(),
 	}
 
 	connect_blocks(&nodes[1], htlc_cltv_timeout + 1 - 10);
 	check_closed_broadcast!(nodes[1], true);
 	check_added_monitors!(nodes[1], 1);
-	check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 1000000);
+	check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed, [nodes[0].node.get_our_node_id()], 1000000);
 
 	// Prior to channel closure, B considers the preimage HTLC as its own, and otherwise only
 	// lists the two on-chain timeout-able HTLCs as claimable balances.
diff --git a/lightning/src/ln/reorg_tests.rs b/lightning/src/ln/reorg_tests.rs
index 7c57b5530..cb3471763 100644
--- a/lightning/src/ln/reorg_tests.rs
+++ b/lightning/src/ln/reorg_tests.rs
@@ -462,7 +462,7 @@ fn test_set_outpoints_partial_claiming() {
 	// Connect blocks on node B
 	connect_blocks(&nodes[1], TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS + 1);
 	check_closed_broadcast!(nodes[1], true);
-	check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 1000000);
+	check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed, [nodes[0].node.get_our_node_id()], 1000000);
 	check_added_monitors!(nodes[1], 1);
 	// Verify node B broadcast 2 HTLC-timeout txn
 	let partial_claim_tx = {
-- 
2.39.5
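
The sketch below is not part of the patch; it only illustrates what the rename means for a downstream event handler. It assumes the `lightning::events` API as of roughly LDK 0.0.117, where `Event::ChannelClosed` carries a `ClosureReason` and both `HolderForceClosed` and `CommitmentTxConfirmed` are unit variants. After this change, a force-close driven by our own `ChannelMonitor` broadcasting the latest holder commitment surfaces as `HolderForceClosed`, while `CommitmentTxConfirmed` is left for a commitment transaction actually confirming on-chain.

// Sketch only, not part of the patch above. Assumes the `lightning::events`
// API around LDK 0.0.117 (both closure reasons are unit variants there).
use lightning::events::{ClosureReason, Event};

fn log_channel_closure(event: &Event) {
	if let Event::ChannelClosed { channel_id, reason, .. } = event {
		match reason {
			// With this patch, a monitor-driven force-close (we broadcast our own
			// latest commitment transaction) reports HolderForceClosed...
			ClosureReason::HolderForceClosed => {
				println!("channel {:?} closed: we broadcast our latest commitment", channel_id);
			},
			// ...while CommitmentTxConfirmed means a commitment transaction
			// actually confirmed on-chain.
			ClosureReason::CommitmentTxConfirmed => {
				println!("channel {:?} closed: a commitment transaction confirmed", channel_id);
			},
			other => println!("channel {:?} closed: {:?}", channel_id, other),
		}
	}
}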