Allow a user-specified error message when force-closing a channel
author	Lalitmohansharma1 <lalit_ms@ar.iitr.ac.in>
	Sun, 11 Feb 2024 13:38:53 +0000 (19:08 +0530)
committer	Lalitmohansharma1 <lalit_ms@ar.iitr.ac.in>
	Sat, 20 Apr 2024 13:42:59 +0000 (19:12 +0530)
In this commit I added an additional parameter, `error_message`, to
`force_close_sending_error`. This parameter allows users to configure
the error message that is sent to peers when force-closing a channel.
I have also updated the tests that exercise the updated functions.
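
For illustration, here is a minimal caller-side sketch of the updated API. It is hedged: the `channel_manager`, `channel_id`, and `counterparty_node_id` bindings are assumed to already exist in the caller's scope, and the message text is arbitrary.

    // Illustrative sketch only: `channel_manager`, `channel_id`, and
    // `counterparty_node_id` are assumed to already be in scope.
    let error_message = "Channel force-closed: operator requested unilateral close".to_string();
    match channel_manager.force_close_broadcasting_latest_txn(
        &channel_id, &counterparty_node_id, error_message,
    ) {
        Ok(()) => println!("Force-closed channel {}", channel_id),
        Err(e) => println!("Failed to force-close channel {}: {:?}", channel_id, e),
    }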

16 files changed:
fuzz/src/full_stack.rs
lightning-background-processor/src/lib.rs
lightning-persister/src/fs_store.rs
lightning-persister/src/test_utils.rs
lightning/src/ln/async_signer_tests.rs
lightning/src/ln/blinded_payment_tests.rs
lightning/src/ln/chanmon_update_fail_tests.rs
lightning/src/ln/channelmanager.rs
lightning/src/ln/functional_tests.rs
lightning/src/ln/monitor_tests.rs
lightning/src/ln/payment_tests.rs
lightning/src/ln/priv_short_conf_tests.rs
lightning/src/ln/reload_tests.rs
lightning/src/ln/reorg_tests.rs
lightning/src/ln/shutdown_tests.rs
lightning/src/util/persist.rs

index e128d91810a0ebdb9d548f99b232b50b4a6a7fed..d6519c3c5ee2e62e29c1362126e84f9ba0c02d2d 100644 (file)
@@ -216,6 +216,7 @@ struct MoneyLossDetector<'a> {
        height: usize,
        max_height: usize,
        blocks_connected: u32,
+       error_message: String,
 }
 impl<'a> MoneyLossDetector<'a> {
        pub fn new(peers: &'a RefCell<[bool; 256]>,
@@ -234,6 +235,7 @@ impl<'a> MoneyLossDetector<'a> {
                        height: 0,
                        max_height: 0,
                        blocks_connected: 0,
+                       error_message: "Channel force-closed".to_string(),
                }
        }
 
@@ -288,7 +290,7 @@ impl<'a> Drop for MoneyLossDetector<'a> {
                        }
 
                        // Force all channels onto the chain (and time out claim txn)
-                       self.manager.force_close_all_channels_broadcasting_latest_txn();
+                       self.manager.force_close_all_channels_broadcasting_latest_txn(self.error_message.to_string());
                }
        }
 }
@@ -731,9 +733,10 @@ pub fn do_test(mut data: &[u8], logger: &Arc<dyn Logger>) {
                        14 => {
                                let mut channels = channelmanager.list_channels();
                                let channel_id = get_slice!(1)[0] as usize;
+                               let error_message = "Channel force-closed";
                                if channel_id >= channels.len() { return; }
                                channels.sort_by(|a, b| { a.channel_id.cmp(&b.channel_id) });
-                               channelmanager.force_close_broadcasting_latest_txn(&channels[channel_id].channel_id, &channels[channel_id].counterparty.node_id).unwrap();
+                               channelmanager.force_close_broadcasting_latest_txn(&channels[channel_id].channel_id, &channels[channel_id].counterparty.node_id, error_message.to_string()).unwrap();
                        },
                        // 15, 16, 17, 18 is above
                        19 => {
index 3736bd603e5bd65977bfc874919b5f9c61ee48bc..e8d7f588174074617af3d94eb5dd5e5f5ec9cf91 100644 (file)
@@ -1416,7 +1416,8 @@ mod tests {
                }
 
                // Force-close the channel.
-               nodes[0].node.force_close_broadcasting_latest_txn(&ChannelId::v1_from_funding_outpoint(OutPoint { txid: tx.txid(), index: 0 }), &nodes[1].node.get_our_node_id()).unwrap();
+               let error_message = "Channel force-closed";
+               nodes[0].node.force_close_broadcasting_latest_txn(&ChannelId::v1_from_funding_outpoint(OutPoint { txid: tx.txid(), index: 0 }), &nodes[1].node.get_our_node_id(), error_message.to_string()).unwrap();
 
                // Check that the force-close updates are persisted.
                check_persisted_data!(nodes[0].node, filepath.clone());
@@ -1609,7 +1610,8 @@ mod tests {
                let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].no_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));
 
                // Force close the channel and check that the SpendableOutputs event was handled.
-               nodes[0].node.force_close_broadcasting_latest_txn(&nodes[0].node.list_channels()[0].channel_id, &nodes[1].node.get_our_node_id()).unwrap();
+               let error_message = "Channel force-closed";
+               nodes[0].node.force_close_broadcasting_latest_txn(&nodes[0].node.list_channels()[0].channel_id, &nodes[1].node.get_our_node_id(), error_message.to_string()).unwrap();
                let commitment_tx = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().pop().unwrap();
                confirm_transaction_depth(&mut nodes[0], &commitment_tx, BREAKDOWN_TIMEOUT as u32);
 
index 364a3ee706f5288fd61dc52df53d6c135799f1cd..88a464e2d4181a60512a35d4b52385ea5a9a4faa 100644 (file)
@@ -445,7 +445,8 @@ mod tests {
                let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
                let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
                let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
-               nodes[1].node.force_close_broadcasting_latest_txn(&chan.2, &nodes[0].node.get_our_node_id()).unwrap();
+               let error_message = "Channel force-closed";
+               nodes[1].node.force_close_broadcasting_latest_txn(&chan.2, &nodes[0].node.get_our_node_id(), error_message.to_string()).unwrap();
                check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed, [nodes[0].node.get_our_node_id()], 100000);
                let mut added_monitors = nodes[1].chain_monitor.added_monitors.lock().unwrap();
                let update_map = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap();
@@ -484,7 +485,8 @@ mod tests {
                let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
                let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
                let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
-               nodes[1].node.force_close_broadcasting_latest_txn(&chan.2, &nodes[0].node.get_our_node_id()).unwrap();
+               let error_message = "Channel force-closed";
+               nodes[1].node.force_close_broadcasting_latest_txn(&chan.2, &nodes[0].node.get_our_node_id(), error_message.to_string()).unwrap();
                check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed, [nodes[0].node.get_our_node_id()], 100000);
                let mut added_monitors = nodes[1].chain_monitor.added_monitors.lock().unwrap();
                let update_map = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap();
index 360fa3492bff7196682e447f94ae52745e1e0678..42569c7a07959314b8c0a064e69d16da7e33ef01 100644 (file)
@@ -104,7 +104,8 @@ pub(crate) fn do_test_store<K: KVStore>(store_0: &K, store_1: &K) {
 
        // Force close because cooperative close doesn't result in any persisted
        // updates.
-       nodes[0].node.force_close_broadcasting_latest_txn(&nodes[0].node.list_channels()[0].channel_id, &nodes[1].node.get_our_node_id()).unwrap();
+       let error_message = "Channel force-closed";
+       nodes[0].node.force_close_broadcasting_latest_txn(&nodes[0].node.list_channels()[0].channel_id, &nodes[1].node.get_our_node_id(), error_message.to_string()).unwrap();
        check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed, [nodes[1].node.get_our_node_id()], 100000);
        check_closed_broadcast!(nodes[0], true);
        check_added_monitors!(nodes[0], 1);
index 613df570d4e3bf40051d25e59a5d6fa45823dbdb..d600372807e3f15fb68bdf0916cafd98cd335e50 100644 (file)
@@ -363,12 +363,13 @@ fn do_test_async_holder_signatures(anchors: bool, remote_commitment: bool) {
        // Route an HTLC and set the signer as unavailable.
        let (_, _, chan_id, funding_tx) = create_announced_chan_between_nodes(&nodes, 0, 1);
        route_payment(&nodes[0], &[&nodes[1]], 1_000_000);
+       let error_message = "Channel force-closed";
 
        nodes[0].set_channel_signer_available(&nodes[1].node.get_our_node_id(), &chan_id, false);
 
        if remote_commitment {
                // Make the counterparty broadcast its latest commitment.
-               nodes[1].node.force_close_broadcasting_latest_txn(&chan_id, &nodes[0].node.get_our_node_id()).unwrap();
+               nodes[1].node.force_close_broadcasting_latest_txn(&chan_id, &nodes[0].node.get_our_node_id(), error_message.to_string()).unwrap();
                check_added_monitors(&nodes[1], 1);
                check_closed_broadcast(&nodes[1], 1, true);
                check_closed_event(&nodes[1], 1, ClosureReason::HolderForceClosed, false, &[nodes[0].node.get_our_node_id()], 100_000);
index eb31be9eecf334ead7ece1d1d6ecd7b587534f61..b3f6a610d5dc832a8707e5a831c5807cf99c8702 100644 (file)
@@ -457,6 +457,7 @@ fn do_forward_fail_in_process_pending_htlc_fwds(check: ProcessPendingHTLCsCheck,
                (chan.0.contents, chan.2)
        };
 
+       let error_message = "Channel force-closed";
        let amt_msat = 5000;
        let (_, payment_hash, payment_secret) = get_payment_preimage_hash(&nodes[2], Some(amt_msat), None);
        let route_params = get_blinded_route_parameters(amt_msat, payment_secret, 1, 1_0000_0000,
@@ -489,7 +490,7 @@ fn do_forward_fail_in_process_pending_htlc_fwds(check: ProcessPendingHTLCsCheck,
                                ProcessPendingHTLCsCheck::FwdChannelClosed => {
                                        // Force close the next-hop channel so when we go to forward in process_pending_htlc_forwards,
                                        // the intro node will error backwards.
-                                       $curr_node.node.force_close_broadcasting_latest_txn(&$failed_chan_id, &$next_node.node.get_our_node_id()).unwrap();
+                                       $curr_node.node.force_close_broadcasting_latest_txn(&$failed_chan_id, &$next_node.node.get_our_node_id(), error_message.to_string()).unwrap();
                                        let events = $curr_node.node.get_and_clear_pending_events();
                                        match events[0] {
                                                crate::events::Event::PendingHTLCsForwardable { .. } => {},
index 2e95f5c63ff7e173bb5a107f0719fb89ca3e3301..a165c3b69d59b50d9ec9a8256cc907954386de47 100644 (file)
@@ -206,7 +206,8 @@ fn do_test_simple_monitor_temporary_update_fail(disconnect: bool) {
        }
 
        // ...and make sure we can force-close a frozen channel
-       nodes[0].node.force_close_broadcasting_latest_txn(&channel_id, &nodes[1].node.get_our_node_id()).unwrap();
+       let error_message = "Channel force-closed";
+       nodes[0].node.force_close_broadcasting_latest_txn(&channel_id, &nodes[1].node.get_our_node_id(), error_message.to_string()).unwrap();
        check_added_monitors!(nodes[0], 1);
        check_closed_broadcast!(nodes[0], true);
 
@@ -3221,17 +3222,18 @@ fn do_test_durable_preimages_on_closed_channel(close_chans_before_reload: bool,
        let _ = get_revoke_commit_msgs!(nodes[1], nodes[2].node.get_our_node_id());
 
        let mon_bc = get_monitor!(nodes[1], chan_id_bc).encode();
+       let error_message = "Channel force-closed";
 
        if close_chans_before_reload {
                if !close_only_a {
                        chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
-                       nodes[1].node.force_close_broadcasting_latest_txn(&chan_id_bc, &nodes[2].node.get_our_node_id()).unwrap();
+                       nodes[1].node.force_close_broadcasting_latest_txn(&chan_id_bc, &nodes[2].node.get_our_node_id(), error_message.to_string()).unwrap();
                        check_closed_broadcast(&nodes[1], 1, true);
                        check_closed_event(&nodes[1], 1, ClosureReason::HolderForceClosed, false, &[nodes[2].node.get_our_node_id()], 100000);
                }
 
                chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
-               nodes[1].node.force_close_broadcasting_latest_txn(&chan_id_ab, &nodes[0].node.get_our_node_id()).unwrap();
+               nodes[1].node.force_close_broadcasting_latest_txn(&chan_id_ab, &nodes[0].node.get_our_node_id(), error_message.to_string()).unwrap();
                check_closed_broadcast(&nodes[1], 1, true);
                check_closed_event(&nodes[1], 1, ClosureReason::HolderForceClosed, false, &[nodes[0].node.get_our_node_id()], 100000);
        }
@@ -3252,8 +3254,9 @@ fn do_test_durable_preimages_on_closed_channel(close_chans_before_reload: bool,
                        assert_eq!(bs_close_txn.len(), 3);
                }
        }
+       let error_message = "Channel force-closed";
 
-       nodes[0].node.force_close_broadcasting_latest_txn(&chan_id_ab, &nodes[1].node.get_our_node_id()).unwrap();
+       nodes[0].node.force_close_broadcasting_latest_txn(&chan_id_ab, &nodes[1].node.get_our_node_id(), error_message.to_string()).unwrap();
        check_closed_event(&nodes[0], 1, ClosureReason::HolderForceClosed, false, &[nodes[1].node.get_our_node_id()], 100000);
        let as_closing_tx = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
        assert_eq!(as_closing_tx.len(), 1);
@@ -3390,10 +3393,11 @@ fn do_test_reload_mon_update_completion_actions(close_during_reload: bool) {
        let manager_b = nodes[1].node.encode();
        reload_node!(nodes[1], &manager_b, &[&mon_ab, &mon_bc], persister, new_chain_monitor, nodes_1_deserialized);
 
+       let error_message = "Channel force-closed";
        if close_during_reload {
                // Test that we still free the B<->C channel if the A<->B channel closed while we reloaded
                // (as learned about during the on-reload block connection).
-               nodes[0].node.force_close_broadcasting_latest_txn(&chan_id_ab, &nodes[1].node.get_our_node_id()).unwrap();
+               nodes[0].node.force_close_broadcasting_latest_txn(&chan_id_ab, &nodes[1].node.get_our_node_id(), error_message.to_string()).unwrap();
                check_added_monitors!(nodes[0], 1);
                check_closed_broadcast!(nodes[0], true);
                check_closed_event(&nodes[0], 1, ClosureReason::HolderForceClosed, false, &[nodes[1].node.get_our_node_id()], 100_000);
index abbab5ff0a70d8f0d5b4b7bf58b3bf4702647566..0dd2bc41de0f83bec4b2c15e16b8e5700c2aec13 100644 (file)
@@ -1347,11 +1347,12 @@ where
 /// #
 /// # fn example<T: AChannelManager>(channel_manager: T) {
 /// # let channel_manager = channel_manager.get_cm();
+/// # let error_message = "Channel force-closed";
 /// channel_manager.process_pending_events(&|event| match event {
 ///     Event::OpenChannelRequest { temporary_channel_id, counterparty_node_id, ..  } => {
 ///         if !is_trusted(counterparty_node_id) {
 ///             match channel_manager.force_close_without_broadcasting_txn(
-///                 &temporary_channel_id, &counterparty_node_id
+///                 &temporary_channel_id, &counterparty_node_id, error_message.to_string()
 ///             ) {
 ///                 Ok(()) => println!("Rejecting channel {}", temporary_channel_id),
 ///                 Err(e) => println!("Error rejecting channel {}: {:?}", temporary_channel_id, e),
@@ -3682,8 +3683,11 @@ where
                Ok(counterparty_node_id)
        }
 
-       fn force_close_sending_error(&self, channel_id: &ChannelId, counterparty_node_id: &PublicKey, broadcast: bool) -> Result<(), APIError> {
+       fn force_close_sending_error(&self, channel_id: &ChannelId, counterparty_node_id: &PublicKey, broadcast: bool, error_message: String)
+       -> Result<(), APIError> {
                let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
+               log_debug!(self.logger,
+                       "Force-closing channel. The error message sent to the peer: {}", error_message);
                match self.force_close_channel_with_peer(channel_id, counterparty_node_id, None, broadcast) {
                        Ok(counterparty_node_id) => {
                                let per_peer_state = self.per_peer_state.read().unwrap();
@@ -3693,7 +3697,7 @@ where
                                                events::MessageSendEvent::HandleError {
                                                        node_id: counterparty_node_id,
                                                        action: msgs::ErrorAction::DisconnectPeer {
-                                                               msg: Some(msgs::ErrorMessage { channel_id: *channel_id, data: "Channel force-closed".to_owned() })
+                                                               msg: Some(msgs::ErrorMessage { channel_id: *channel_id, data: error_message})
                                                        },
                                                }
                                        );
@@ -3704,39 +3708,53 @@ where
                }
        }
 
-       /// Force closes a channel, immediately broadcasting the latest local transaction(s) and
-       /// rejecting new HTLCs on the given channel. Fails if `channel_id` is unknown to
-       /// the manager, or if the `counterparty_node_id` isn't the counterparty of the corresponding
-       /// channel.
-       pub fn force_close_broadcasting_latest_txn(&self, channel_id: &ChannelId, counterparty_node_id: &PublicKey)
+       /// Force closes a channel, immediately broadcasting the latest local transaction(s),
+       /// rejecting new HTLCs.
+       ///
+       /// The provided `error_message` is sent to connected peers for closing
+       /// channels and should be a human-readable description of what went wrong.
+       ///
+       /// Fails if `channel_id` is unknown to the manager, or if the `counterparty_node_id`
+       /// isn't the counterparty of the corresponding channel.
+       pub fn force_close_broadcasting_latest_txn(&self, channel_id: &ChannelId, counterparty_node_id: &PublicKey, error_message: String)
        -> Result<(), APIError> {
-               self.force_close_sending_error(channel_id, counterparty_node_id, true)
+               self.force_close_sending_error(channel_id, counterparty_node_id, true, error_message)
        }
 
        /// Force closes a channel, rejecting new HTLCs on the given channel but skips broadcasting
-       /// the latest local transaction(s). Fails if `channel_id` is unknown to the manager, or if the
-       /// `counterparty_node_id` isn't the counterparty of the corresponding channel.
+       /// the latest local transaction(s).
        ///
+       /// The provided `error_message` is sent to connected peers for closing channels and should
+       /// be a human-readable description of what went wrong.
+       ///
+       /// Fails if `channel_id` is unknown to the manager, or if the
+       /// `counterparty_node_id` isn't the counterparty of the corresponding channel.
        /// You can always broadcast the latest local transaction(s) via
        /// [`ChannelMonitor::broadcast_latest_holder_commitment_txn`].
-       pub fn force_close_without_broadcasting_txn(&self, channel_id: &ChannelId, counterparty_node_id: &PublicKey)
+       pub fn force_close_without_broadcasting_txn(&self, channel_id: &ChannelId, counterparty_node_id: &PublicKey, error_message: String)
        -> Result<(), APIError> {
-               self.force_close_sending_error(channel_id, counterparty_node_id, false)
+               self.force_close_sending_error(channel_id, counterparty_node_id, false, error_message)
        }
 
        /// Force close all channels, immediately broadcasting the latest local commitment transaction
        /// for each to the chain and rejecting new HTLCs on each.
-       pub fn force_close_all_channels_broadcasting_latest_txn(&self) {
+       ///
+       /// The provided `error_message` is sent to connected peers for closing channels and should
+       /// be a human-readable description of what went wrong.
+       pub fn force_close_all_channels_broadcasting_latest_txn(&self, error_message: String) {
                for chan in self.list_channels() {
-                       let _ = self.force_close_broadcasting_latest_txn(&chan.channel_id, &chan.counterparty.node_id);
+                       let _ = self.force_close_broadcasting_latest_txn(&chan.channel_id, &chan.counterparty.node_id, error_message.clone());
                }
        }
 
        /// Force close all channels rejecting new HTLCs on each but without broadcasting the latest
        /// local transaction(s).
-       pub fn force_close_all_channels_without_broadcasting_txn(&self) {
+       ///
+       /// The provided `error_message` is sent to connected peers for closing channels and
+       /// should be a human-readable description of what went wrong.
+       pub fn force_close_all_channels_without_broadcasting_txn(&self, error_message: String) {
                for chan in self.list_channels() {
-                       let _ = self.force_close_without_broadcasting_txn(&chan.channel_id, &chan.counterparty.node_id);
+                       let _ = self.force_close_without_broadcasting_txn(&chan.channel_id, &chan.counterparty.node_id, error_message.clone());
                }
        }
 
@@ -12892,8 +12910,8 @@ mod tests {
 
                nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
                nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
-
-               nodes[0].node.force_close_broadcasting_latest_txn(&chan.2, &nodes[1].node.get_our_node_id()).unwrap();
+               let error_message = "Channel force-closed";
+               nodes[0].node.force_close_broadcasting_latest_txn(&chan.2, &nodes[1].node.get_our_node_id(), error_message.to_string()).unwrap();
                check_closed_broadcast!(nodes[0], true);
                check_added_monitors!(nodes[0], 1);
                check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed, [nodes[1].node.get_our_node_id()], 100000);
@@ -13110,6 +13128,7 @@ mod tests {
                let channel_id = ChannelId::from_bytes([4; 32]);
                let unkown_public_key = PublicKey::from_secret_key(&Secp256k1::signing_only(), &SecretKey::from_slice(&[42; 32]).unwrap());
                let intercept_id = InterceptId([0; 32]);
+               let error_message = "Channel force-closed";
 
                // Test the API functions.
                check_not_connected_to_peer_error(nodes[0].node.create_channel(unkown_public_key, 1_000_000, 500_000_000, 42, None, None), unkown_public_key);
@@ -13118,9 +13137,9 @@ mod tests {
 
                check_unkown_peer_error(nodes[0].node.close_channel(&channel_id, &unkown_public_key), unkown_public_key);
 
-               check_unkown_peer_error(nodes[0].node.force_close_broadcasting_latest_txn(&channel_id, &unkown_public_key), unkown_public_key);
+               check_unkown_peer_error(nodes[0].node.force_close_broadcasting_latest_txn(&channel_id, &unkown_public_key, error_message.to_string()), unkown_public_key);
 
-               check_unkown_peer_error(nodes[0].node.force_close_without_broadcasting_txn(&channel_id, &unkown_public_key), unkown_public_key);
+               check_unkown_peer_error(nodes[0].node.force_close_without_broadcasting_txn(&channel_id, &unkown_public_key, error_message.to_string()), unkown_public_key);
 
                check_unkown_peer_error(nodes[0].node.forward_intercepted_htlc(intercept_id, &channel_id, unkown_public_key, 1_000_000), unkown_public_key);
 
@@ -13142,15 +13161,16 @@ mod tests {
 
                // Dummy values
                let channel_id = ChannelId::from_bytes([4; 32]);
+               let error_message = "Channel force-closed";
 
                // Test the API functions.
                check_api_misuse_error(nodes[0].node.accept_inbound_channel(&channel_id, &counterparty_node_id, 42));
 
                check_channel_unavailable_error(nodes[0].node.close_channel(&channel_id, &counterparty_node_id), channel_id, counterparty_node_id);
 
-               check_channel_unavailable_error(nodes[0].node.force_close_broadcasting_latest_txn(&channel_id, &counterparty_node_id), channel_id, counterparty_node_id);
+               check_channel_unavailable_error(nodes[0].node.force_close_broadcasting_latest_txn(&channel_id, &counterparty_node_id, error_message.to_string()), channel_id, counterparty_node_id);
 
-               check_channel_unavailable_error(nodes[0].node.force_close_without_broadcasting_txn(&channel_id, &counterparty_node_id), channel_id, counterparty_node_id);
+               check_channel_unavailable_error(nodes[0].node.force_close_without_broadcasting_txn(&channel_id, &counterparty_node_id, error_message.to_string()), channel_id, counterparty_node_id);
 
                check_channel_unavailable_error(nodes[0].node.forward_intercepted_htlc(InterceptId([0; 32]), &channel_id, counterparty_node_id, 1_000_000), channel_id, counterparty_node_id);
 
@@ -13504,6 +13524,7 @@ mod tests {
                anchors_config.manually_accept_inbound_channels = true;
                let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(anchors_config.clone()), Some(anchors_config.clone())]);
                let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+               let error_message = "Channel force-closed";
 
                nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, 0, 0, None, None).unwrap();
                let open_channel_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
@@ -13513,7 +13534,7 @@ mod tests {
                let events = nodes[1].node.get_and_clear_pending_events();
                match events[0] {
                        Event::OpenChannelRequest { temporary_channel_id, .. } => {
-                               nodes[1].node.force_close_broadcasting_latest_txn(&temporary_channel_id, &nodes[0].node.get_our_node_id()).unwrap();
+                               nodes[1].node.force_close_broadcasting_latest_txn(&temporary_channel_id, &nodes[0].node.get_our_node_id(), error_message.to_string()).unwrap();
                        }
                        _ => panic!("Unexpected event"),
                }
@@ -13621,12 +13642,13 @@ mod tests {
                let user_config = test_default_channel_config();
                let node_chanmgr = create_node_chanmgrs(2, &node_cfg, &[Some(user_config), Some(user_config)]);
                let nodes = create_network(2, &node_cfg, &node_chanmgr);
+               let error_message = "Channel force-closed";
 
                // Open a channel, immediately disconnect each other, and broadcast Alice's latest state.
                let (_, _, chan_id, funding_tx) = create_announced_chan_between_nodes(&nodes, 0, 1);
                nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
                nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
-               nodes[0].node.force_close_broadcasting_latest_txn(&chan_id, &nodes[1].node.get_our_node_id()).unwrap();
+               nodes[0].node.force_close_broadcasting_latest_txn(&chan_id, &nodes[1].node.get_our_node_id(), error_message.to_string()).unwrap();
                check_closed_broadcast(&nodes[0], 1, true);
                check_added_monitors(&nodes[0], 1);
                check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed, [nodes[1].node.get_our_node_id()], 100000);
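
For the remaining variants introduced in channelmanager.rs above, a short hedged sketch (again assuming `channel_manager`, `channel_id`, and `counterparty_node_id` bindings are in scope; the message strings are arbitrary):

    // Illustrative sketch only.
    // Force-close every channel, broadcasting each latest local commitment transaction.
    channel_manager.force_close_all_channels_broadcasting_latest_txn(
        "Node is shutting down".to_string());

    // Force-close a single channel without broadcasting; the latest local commitment
    // can still be broadcast later via ChannelMonitor::broadcast_latest_holder_commitment_txn.
    let _ = channel_manager.force_close_without_broadcasting_txn(
        &channel_id, &counterparty_node_id, "Refusing to broadcast without review".to_string());
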
index 8dd3f1fc9124f9ed72b105b14dff7b0ee9e858af..7f06a2bb56142c3e61203abc2254fb2adc5ef4d2 100644 (file)
@@ -2267,7 +2267,8 @@ fn channel_monitor_network_test() {
        send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2], &nodes[3], &nodes[4])[..], 8000000);
 
        // Simple case with no pending HTLCs:
-       nodes[1].node.force_close_broadcasting_latest_txn(&chan_1.2, &nodes[0].node.get_our_node_id()).unwrap();
+       let error_message = "Channel force-closed";
+       nodes[1].node.force_close_broadcasting_latest_txn(&chan_1.2, &nodes[0].node.get_our_node_id(), error_message.to_string()).unwrap();
        check_added_monitors!(nodes[1], 1);
        check_closed_broadcast!(nodes[1], true);
        check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed, [nodes[0].node.get_our_node_id()], 100000);
@@ -2293,7 +2294,8 @@ fn channel_monitor_network_test() {
 
        // Simple case of one pending HTLC to HTLC-Timeout (note that the HTLC-Timeout is not
        // broadcasted until we reach the timelock time).
-       nodes[1].node.force_close_broadcasting_latest_txn(&chan_2.2, &nodes[2].node.get_our_node_id()).unwrap();
+       let error_message = "Channel force-closed";
+       nodes[1].node.force_close_broadcasting_latest_txn(&chan_2.2, &nodes[2].node.get_our_node_id(), error_message.to_string()).unwrap();
        check_closed_broadcast!(nodes[1], true);
        check_added_monitors!(nodes[1], 1);
        {
@@ -2333,7 +2335,8 @@ fn channel_monitor_network_test() {
 
        // nodes[3] gets the preimage, but nodes[2] already disconnected, resulting in a nodes[2]
        // HTLC-Timeout and a nodes[3] claim against it (+ its own announces)
-       nodes[2].node.force_close_broadcasting_latest_txn(&chan_3.2, &nodes[3].node.get_our_node_id()).unwrap();
+       let error_message = "Channel force-closed";
+       nodes[2].node.force_close_broadcasting_latest_txn(&chan_3.2, &nodes[3].node.get_our_node_id(), error_message.to_string()).unwrap();
        check_added_monitors!(nodes[2], 1);
        check_closed_broadcast!(nodes[2], true);
        let node2_commitment_txid;
@@ -3559,9 +3562,9 @@ fn test_htlc_ignore_latest_remote_commitment() {
                return;
        }
        let funding_tx = create_announced_chan_between_nodes(&nodes, 0, 1).3;
-
+       let error_message = "Channel force-closed";
        route_payment(&nodes[0], &[&nodes[1]], 10000000);
-       nodes[0].node.force_close_broadcasting_latest_txn(&nodes[0].node.list_channels()[0].channel_id, &nodes[1].node.get_our_node_id()).unwrap();
+       nodes[0].node.force_close_broadcasting_latest_txn(&nodes[0].node.list_channels()[0].channel_id, &nodes[1].node.get_our_node_id(), error_message.to_string()).unwrap();
        connect_blocks(&nodes[0], TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS + 1);
        check_closed_broadcast!(nodes[0], true);
        check_added_monitors!(nodes[0], 1);
@@ -3624,8 +3627,8 @@ fn test_force_close_fail_back() {
        // nodes[2] now has the latest commitment transaction, but hasn't revoked its previous
        // state or updated nodes[1]' state. Now force-close and broadcast that commitment/HTLC
        // transaction and ensure nodes[1] doesn't fail-backwards (this was originally a bug!).
-
-       nodes[2].node.force_close_broadcasting_latest_txn(&payment_event.commitment_msg.channel_id, &nodes[1].node.get_our_node_id()).unwrap();
+       let error_message = "Channel force-closed";
+       nodes[2].node.force_close_broadcasting_latest_txn(&payment_event.commitment_msg.channel_id, &nodes[1].node.get_our_node_id(), error_message.to_string()).unwrap();
        check_closed_broadcast!(nodes[2], true);
        check_added_monitors!(nodes[2], 1);
        check_closed_event!(nodes[2], 1, ClosureReason::HolderForceClosed, [nodes[1].node.get_our_node_id()], 100000);
@@ -4503,7 +4506,8 @@ fn test_claim_sizeable_push_msat() {
        let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
 
        let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100_000, 98_000_000);
-       nodes[1].node.force_close_broadcasting_latest_txn(&chan.2, &nodes[0].node.get_our_node_id()).unwrap();
+       let error_message = "Channel force-closed";
+       nodes[1].node.force_close_broadcasting_latest_txn(&chan.2, &nodes[0].node.get_our_node_id(), error_message.to_string()).unwrap();
        check_closed_broadcast!(nodes[1], true);
        check_added_monitors!(nodes[1], 1);
        check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed, [nodes[0].node.get_our_node_id()], 100000);
@@ -4530,9 +4534,10 @@ fn test_claim_on_remote_sizeable_push_msat() {
        let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
        let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
        let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+       let error_message = "Channel force-closed";
 
        let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100_000, 98_000_000);
-       nodes[0].node.force_close_broadcasting_latest_txn(&chan.2, &nodes[1].node.get_our_node_id()).unwrap();
+       nodes[0].node.force_close_broadcasting_latest_txn(&chan.2, &nodes[1].node.get_our_node_id(), error_message.to_string()).unwrap();
        check_closed_broadcast!(nodes[0], true);
        check_added_monitors!(nodes[0], 1);
        check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed, [nodes[1].node.get_our_node_id()], 100000);
@@ -8040,8 +8045,8 @@ fn test_manually_accept_inbound_channel_request() {
                }
                _ => panic!("Unexpected event"),
        }
-
-       nodes[1].node.force_close_broadcasting_latest_txn(&temp_channel_id, &nodes[0].node.get_our_node_id()).unwrap();
+       let error_message = "Channel force-closed";
+       nodes[1].node.force_close_broadcasting_latest_txn(&temp_channel_id, &nodes[0].node.get_our_node_id(), error_message.to_string()).unwrap();
 
        let close_msg_ev = nodes[1].node.get_and_clear_pending_msg_events();
        assert_eq!(close_msg_ev.len(), 1);
@@ -8072,11 +8077,11 @@ fn test_manually_reject_inbound_channel_request() {
        // Assert that `nodes[1]` has no `MessageSendEvent::SendAcceptChannel` in `msg_events` before
        // rejecting the inbound channel request.
        assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
-
+       let error_message = "Channel force-closed";
        let events = nodes[1].node.get_and_clear_pending_events();
        match events[0] {
                Event::OpenChannelRequest { temporary_channel_id, .. } => {
-                       nodes[1].node.force_close_broadcasting_latest_txn(&temporary_channel_id, &nodes[0].node.get_our_node_id()).unwrap();
+                       nodes[1].node.force_close_broadcasting_latest_txn(&temporary_channel_id, &nodes[0].node.get_our_node_id(), error_message.to_string()).unwrap();
                }
                _ => panic!("Unexpected event"),
        }
@@ -8799,7 +8804,8 @@ fn do_test_onchain_htlc_settlement_after_close(broadcast_alice: bool, go_onchain
                force_closing_node = 1;
                counterparty_node = 0;
        }
-       nodes[force_closing_node].node.force_close_broadcasting_latest_txn(&chan_ab.2, &nodes[counterparty_node].node.get_our_node_id()).unwrap();
+       let error_message = "Channel force-closed";
+       nodes[force_closing_node].node.force_close_broadcasting_latest_txn(&chan_ab.2, &nodes[counterparty_node].node.get_our_node_id(), error_message.to_string()).unwrap();
        check_closed_broadcast!(nodes[force_closing_node], true);
        check_added_monitors!(nodes[force_closing_node], 1);
        check_closed_event!(nodes[force_closing_node], 1, ClosureReason::HolderForceClosed, [nodes[counterparty_node].node.get_our_node_id()], 100000);
@@ -9532,8 +9538,8 @@ fn do_test_tx_confirmed_skipping_blocks_immediate_broadcast(test_height_before_t
        let (_, payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1_000_000);
        nodes[1].node.peer_disconnected(&nodes[2].node.get_our_node_id());
        nodes[2].node.peer_disconnected(&nodes[1].node.get_our_node_id());
-
-       nodes[1].node.force_close_broadcasting_latest_txn(&channel_id, &nodes[2].node.get_our_node_id()).unwrap();
+       let error_message = "Channel force-closed";
+       nodes[1].node.force_close_broadcasting_latest_txn(&channel_id, &nodes[2].node.get_our_node_id(), error_message.to_string()).unwrap();
        check_closed_broadcast!(nodes[1], true);
        check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed, [nodes[2].node.get_our_node_id()], 100000);
        check_added_monitors!(nodes[1], 1);
@@ -10793,8 +10799,8 @@ fn test_close_in_funding_batch() {
        let funding_txo_2 = OutPoint { txid: tx.txid(), index: 1 };
        let channel_id_1 = ChannelId::v1_from_funding_outpoint(funding_txo_1);
        let channel_id_2 = ChannelId::v1_from_funding_outpoint(funding_txo_2);
-
-       nodes[0].node.force_close_broadcasting_latest_txn(&channel_id_1, &nodes[1].node.get_our_node_id()).unwrap();
+       let error_message = "Channel force-closed";
+       nodes[0].node.force_close_broadcasting_latest_txn(&channel_id_1, &nodes[1].node.get_our_node_id(), error_message.to_string()).unwrap();
 
        // The monitor should become closed.
        check_added_monitors(&nodes[0], 1);
@@ -10882,7 +10888,8 @@ fn test_batch_funding_close_after_funding_signed() {
        let funding_txo_2 = OutPoint { txid: tx.txid(), index: 1 };
        let channel_id_1 = ChannelId::v1_from_funding_outpoint(funding_txo_1);
        let channel_id_2 = ChannelId::v1_from_funding_outpoint(funding_txo_2);
-       nodes[0].node.force_close_broadcasting_latest_txn(&channel_id_1, &nodes[1].node.get_our_node_id()).unwrap();
+       let error_message = "Channel force-closed";
+       nodes[0].node.force_close_broadcasting_latest_txn(&channel_id_1, &nodes[1].node.get_our_node_id(), error_message.to_string()).unwrap();
        check_added_monitors(&nodes[0], 2);
        {
                let mut monitor_updates = nodes[0].chain_monitor.monitor_updates.lock().unwrap();
@@ -10951,8 +10958,8 @@ fn do_test_funding_and_commitment_tx_confirm_same_block(confirm_remote_commitmen
        } else {
                (&nodes[0], &nodes[1])
        };
-
-       closing_node.node.force_close_broadcasting_latest_txn(&chan_id, &other_node.node.get_our_node_id()).unwrap();
+       let error_message = "Channel force-closed";
+       closing_node.node.force_close_broadcasting_latest_txn(&chan_id, &other_node.node.get_our_node_id(), error_message.to_string()).unwrap();
        let mut msg_events = closing_node.node.get_and_clear_pending_msg_events();
        assert_eq!(msg_events.len(), 1);
        match msg_events.pop().unwrap() {
index d5f0dc153fc1a1ba9c544c4cfeb552708f860e04..27bfa17930a5a598f4bff09662be069de356900a 100644 (file)
@@ -724,8 +724,9 @@ fn do_test_balances_on_local_commitment_htlcs(anchors: bool) {
 
        // First confirm the commitment transaction on nodes[0], which should leave us with three
        // claimable balances.
+       let error_message = "Channel force-closed";
        let node_a_commitment_claimable = nodes[0].best_block_info().1 + BREAKDOWN_TIMEOUT as u32;
-       nodes[0].node.force_close_broadcasting_latest_txn(&chan_id, &nodes[1].node.get_our_node_id()).unwrap();
+       nodes[0].node.force_close_broadcasting_latest_txn(&chan_id, &nodes[1].node.get_our_node_id(), error_message.to_string()).unwrap();
        check_added_monitors!(nodes[0], 1);
        check_closed_broadcast!(nodes[0], true);
        check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed, [nodes[1].node.get_our_node_id()], 1000000);
@@ -1982,8 +1983,8 @@ fn do_test_restored_packages_retry(check_old_monitor_retries_after_upgrade: bool
        // ensures that the HTLC timeout package is held until we reach its expiration height.
        let (_, _, chan_id, funding_tx) = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100_000, 50_000_000);
        route_payment(&nodes[0], &[&nodes[1]], 10_000_000);
-
-       nodes[0].node.force_close_broadcasting_latest_txn(&chan_id, &nodes[1].node.get_our_node_id()).unwrap();
+       let error_message = "Channel force-closed";
+       nodes[0].node.force_close_broadcasting_latest_txn(&chan_id, &nodes[1].node.get_our_node_id(), error_message.to_string()).unwrap();
        check_added_monitors(&nodes[0], 1);
        check_closed_broadcast(&nodes[0], 1, true);
        check_closed_event!(&nodes[0], 1, ClosureReason::HolderForceClosed, false,
@@ -2655,7 +2656,8 @@ fn do_test_anchors_monitor_fixes_counterparty_payment_script_on_reload(confirm_c
 
        // Confirm the counterparty's commitment and reload the monitor (either before or after) such
        // that we arrive at the correct `counterparty_payment_script` after the reload.
-       nodes[0].node.force_close_broadcasting_latest_txn(&chan_id, &nodes[1].node.get_our_node_id()).unwrap();
+       let error_message = "Channel force-closed";
+       nodes[0].node.force_close_broadcasting_latest_txn(&chan_id, &nodes[1].node.get_our_node_id(), error_message.to_string()).unwrap();
        check_added_monitors(&nodes[0], 1);
        check_closed_broadcast(&nodes[0], 1, true);
        check_closed_event!(&nodes[0], 1, ClosureReason::HolderForceClosed, false,
index a75120797cafacb1dd86f07baf7afd2842844af2..d93f55931a8f50ef480a7bcd4b7238569bb69e49 100644 (file)
@@ -1049,11 +1049,12 @@ fn do_test_dup_htlc_onchain_fails_on_reload(persist_manager_post_event: bool, co
        let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
 
        let (_, _, chan_id, funding_tx) = create_announced_chan_between_nodes(&nodes, 0, 1);
+       let error_message = "Channel force-closed";
 
        // Route a payment, but force-close the channel before the HTLC fulfill message arrives at
        // nodes[0].
        let (payment_preimage, payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1]], 10_000_000);
-       nodes[0].node.force_close_broadcasting_latest_txn(&nodes[0].node.list_channels()[0].channel_id, &nodes[1].node.get_our_node_id()).unwrap();
+       nodes[0].node.force_close_broadcasting_latest_txn(&nodes[0].node.list_channels()[0].channel_id, &nodes[1].node.get_our_node_id(), error_message.to_string()).unwrap();
        check_closed_broadcast!(nodes[0], true);
        check_added_monitors!(nodes[0], 1);
        check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed, [nodes[1].node.get_our_node_id()], 100000);
@@ -3592,6 +3593,7 @@ fn do_claim_from_closed_chan(fail_payment: bool) {
        // height.
        connect_blocks(&nodes[3], final_cltv - HTLC_FAIL_BACK_BUFFER - nodes[3].best_block_info().1
                - if fail_payment { 0 } else { 2 });
+       let error_message = "Channel force-closed";
        if fail_payment {
                // We fail the HTLC on the A->B->D path first as it expires 4 blocks earlier. We go ahead
                // and expire both immediately, though, by connecting another 4 blocks.
@@ -3601,7 +3603,7 @@ fn do_claim_from_closed_chan(fail_payment: bool) {
                expect_pending_htlcs_forwardable_and_htlc_handling_failed!(&nodes[3], [reason]);
                pass_failed_payment_back(&nodes[0], &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]], false, payment_hash, PaymentFailureReason::RecipientRejected);
        } else {
-               nodes[1].node.force_close_broadcasting_latest_txn(&chan_bd, &nodes[3].node.get_our_node_id()).unwrap();
+               nodes[1].node.force_close_broadcasting_latest_txn(&chan_bd, &nodes[3].node.get_our_node_id(), error_message.to_string()).unwrap();
                check_closed_event!(&nodes[1], 1, ClosureReason::HolderForceClosed, false,
                        [nodes[3].node.get_our_node_id()], 1000000);
                check_closed_broadcast(&nodes[1], 1, true);
index 6fd8623d317376b5b6d156869c2a5949bca7e33d..c9b0b7d4d7966aa83f1f6e827a9c827c0e88f1af 100644 (file)
@@ -753,6 +753,7 @@ fn test_0conf_close_no_early_chan_update() {
 
        let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, Some(chan_config)]);
        let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+       let error_message = "Channel force-closed";
 
        // This is the default but we force it on anyway
        chan_config.channel_handshake_config.announced_channel = true;
@@ -761,7 +762,7 @@ fn test_0conf_close_no_early_chan_update() {
        // We can use the channel immediately, but won't generate a channel_update until we get confs
        send_payment(&nodes[0], &[&nodes[1]], 100_000);
 
-       nodes[0].node.force_close_all_channels_broadcasting_latest_txn();
+       nodes[0].node.force_close_all_channels_broadcasting_latest_txn(error_message.to_string());
        check_added_monitors!(nodes[0], 1);
        check_closed_event!(&nodes[0], 1, ClosureReason::HolderForceClosed, [nodes[1].node.get_our_node_id()], 100000);
        let _ = get_err_msg(&nodes[0], &nodes[1].node.get_our_node_id());
index 8b25f7701be5a2a48dad594c5bc48442c49e3253..a490b145e4b79652e09caf282a46037c7d3175a8 100644 (file)
@@ -627,9 +627,10 @@ fn do_test_data_loss_protect(reconnect_panicing: bool, substantially_old: bool,
                        std::mem::forget(nodes);
                }
        } else {
+               let error_message = "Channel force-closed";
                assert!(!not_stale, "We only care about the stale case when not testing panicking");
 
-               nodes[0].node.force_close_without_broadcasting_txn(&chan.2, &nodes[1].node.get_our_node_id()).unwrap();
+               nodes[0].node.force_close_without_broadcasting_txn(&chan.2, &nodes[1].node.get_our_node_id(), error_message.to_string()).unwrap();
                check_added_monitors!(nodes[0], 1);
                check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed, [nodes[1].node.get_our_node_id()], 1000000);
                {
@@ -1024,8 +1025,9 @@ fn do_forwarded_payment_no_manager_persistence(use_cs_commitment: bool, claim_ht
        assert!(nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap().is_empty());
 
        let _ = nodes[2].node.get_and_clear_pending_msg_events();
+       let error_message = "Channel force-closed";
 
-       nodes[2].node.force_close_broadcasting_latest_txn(&chan_id_2, &nodes[1].node.get_our_node_id()).unwrap();
+       nodes[2].node.force_close_broadcasting_latest_txn(&chan_id_2, &nodes[1].node.get_our_node_id(), error_message.to_string()).unwrap();
        let cs_commitment_tx = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
        assert_eq!(cs_commitment_tx.len(), if claim_htlc { 2 } else { 1 });
 
index 62c82b01f59d7a36edd81cefb1cd45d407788d85..f5670f5c9d8ec8d1ad99216df612472dcaca0240 100644 (file)
@@ -635,15 +635,16 @@ fn test_htlc_preimage_claim_holder_commitment_after_counterparty_commitment_reor
 
        // Route an HTLC which we will claim onchain with the preimage.
        let (payment_preimage, payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000);
+       let error_message = "Channel force-closed";
 
        // Force close with the latest counterparty commitment, confirm it, and reorg it with the latest
        // holder commitment.
-       nodes[0].node.force_close_broadcasting_latest_txn(&chan_id, &nodes[1].node.get_our_node_id()).unwrap();
+       nodes[0].node.force_close_broadcasting_latest_txn(&chan_id, &nodes[1].node.get_our_node_id(), error_message.to_string()).unwrap();
        check_closed_broadcast(&nodes[0], 1, true);
        check_added_monitors(&nodes[0], 1);
        check_closed_event(&nodes[0], 1, ClosureReason::HolderForceClosed, false, &[nodes[1].node.get_our_node_id()], 100000);
 
-       nodes[1].node.force_close_broadcasting_latest_txn(&chan_id, &nodes[0].node.get_our_node_id()).unwrap();
+       nodes[1].node.force_close_broadcasting_latest_txn(&chan_id, &nodes[0].node.get_our_node_id(), error_message.to_string()).unwrap();
        check_closed_broadcast(&nodes[1], 1, true);
        check_added_monitors(&nodes[1], 1);
        check_closed_event(&nodes[1], 1, ClosureReason::HolderForceClosed, false, &[nodes[0].node.get_our_node_id()], 100000);
@@ -718,9 +719,10 @@ fn test_htlc_preimage_claim_prev_counterparty_commitment_after_current_counterpa
        // commitment is still valid (unrevoked).
        nodes[1].node().handle_update_fee(&nodes[0].node.get_our_node_id(), &update_fee);
        let _last_revoke_and_ack = commitment_signed_dance!(nodes[1], nodes[0], commit_sig, false, true, false, true);
+       let error_message = "Channel force-closed";
 
        // Force close with the latest commitment, confirm it, and reorg it with the previous commitment.
-       nodes[0].node.force_close_broadcasting_latest_txn(&chan_id, &nodes[1].node.get_our_node_id()).unwrap();
+       nodes[0].node.force_close_broadcasting_latest_txn(&chan_id, &nodes[1].node.get_our_node_id(), error_message.to_string()).unwrap();
        check_closed_broadcast(&nodes[0], 1, true);
        check_added_monitors(&nodes[0], 1);
        check_closed_event(&nodes[0], 1, ClosureReason::HolderForceClosed, false, &[nodes[1].node.get_our_node_id()], 100000);
@@ -783,6 +785,7 @@ fn do_test_retries_own_commitment_broadcast_after_reorg(anchors: bool, revoked_c
 
        // Route a payment so we have an HTLC to claim as well.
        let _ = route_payment(&nodes[0], &[&nodes[1]], 1_000_000);
+       let error_message = "Channel force-closed";
 
        if revoked_counterparty_commitment {
                // Trigger a fee update such that we advance the state. We will have B broadcast its state
@@ -825,7 +828,7 @@ fn do_test_retries_own_commitment_broadcast_after_reorg(anchors: bool, revoked_c
        };
 
        // B will also broadcast its own commitment.
-       nodes[1].node.force_close_broadcasting_latest_txn(&chan_id, &nodes[0].node.get_our_node_id()).unwrap();
+       nodes[1].node.force_close_broadcasting_latest_txn(&chan_id, &nodes[0].node.get_our_node_id(), error_message.to_string()).unwrap();
        check_closed_broadcast(&nodes[1], 1, true);
        check_added_monitors(&nodes[1], 1);
        check_closed_event(&nodes[1], 1, ClosureReason::HolderForceClosed, false, &[nodes[0].node.get_our_node_id()], 100_000);
index 7f004948ffeb5cb10790673e5fa36680950bef23..f35b2c85cef60e1c63eaa1d2ea409dcbf67e9f85 100644 (file)
@@ -299,11 +299,12 @@ fn expect_channel_shutdown_state_with_force_closure() {
        let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
        let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
        let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
+       let error_message = "Channel force-closed";
 
        expect_channel_shutdown_state!(nodes[0], chan_1.2, ChannelShutdownState::NotShuttingDown);
        expect_channel_shutdown_state!(nodes[1], chan_1.2, ChannelShutdownState::NotShuttingDown);
 
-       nodes[1].node.force_close_broadcasting_latest_txn(&chan_1.2, &nodes[0].node.get_our_node_id()).unwrap();
+       nodes[1].node.force_close_broadcasting_latest_txn(&chan_1.2, &nodes[0].node.get_our_node_id(), error_message.to_string()).unwrap();
        check_closed_broadcast!(nodes[1], true);
        check_added_monitors!(nodes[1], 1);
 
index 6fd0048daf7ddeec9e3f754c0f368e44572891d4..83b15af3cf9ea0c35d4b295bbf7111375065b37d 100644 (file)
@@ -1006,7 +1006,9 @@ mod tests {
 
                // Force close because cooperative close doesn't result in any persisted
                // updates.
-               nodes[0].node.force_close_broadcasting_latest_txn(&nodes[0].node.list_channels()[0].channel_id, &nodes[1].node.get_our_node_id()).unwrap();
+
+               let error_message = "Channel force-closed";
+               nodes[0].node.force_close_broadcasting_latest_txn(&nodes[0].node.list_channels()[0].channel_id, &nodes[1].node.get_our_node_id(), error_message.to_string()).unwrap();
 
                check_closed_event(&nodes[0], 1, ClosureReason::HolderForceClosed, false, &[nodes[1].node.get_our_node_id()], 100000);
                check_closed_broadcast!(nodes[0], true);
@@ -1044,7 +1046,8 @@ mod tests {
                let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
                let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
                let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
-               nodes[1].node.force_close_broadcasting_latest_txn(&chan.2, &nodes[0].node.get_our_node_id()).unwrap();
+               let error_message = "Channel force-closed";
+               nodes[1].node.force_close_broadcasting_latest_txn(&chan.2, &nodes[0].node.get_our_node_id(), error_message.to_string()).unwrap();
                check_closed_event(&nodes[1], 1, ClosureReason::HolderForceClosed, false, &[nodes[0].node.get_our_node_id()], 100000);
                {
                        let mut added_monitors = nodes[1].chain_monitor.added_monitors.lock().unwrap();
@@ -1162,7 +1165,8 @@ mod tests {
                        .is_err());
 
                // Force close.
-               nodes[0].node.force_close_broadcasting_latest_txn(&nodes[0].node.list_channels()[0].channel_id, &nodes[1].node.get_our_node_id()).unwrap();
+               let error_message = "Channel force-closed";
+               nodes[0].node.force_close_broadcasting_latest_txn(&nodes[0].node.list_channels()[0].channel_id, &nodes[1].node.get_our_node_id(), error_message.to_string()).unwrap();
                check_closed_event(&nodes[0], 1, ClosureReason::HolderForceClosed, false, &[nodes[1].node.get_our_node_id()], 100000);
                check_closed_broadcast!(nodes[0], true);
                check_added_monitors!(nodes[0], 1);