Add ChannelClosed generation at cooperative/force-close/error processing
author Antoine Riard <dev@ariard.me>
Tue, 21 Sep 2021 16:25:38 +0000 (12:25 -0400)
committer Antoine Riard <dev@ariard.me>
Tue, 21 Sep 2021 19:46:42 +0000 (15:46 -0400)
When we detect that a channel `is_shutdown()`, or we call `force_shutdown()`
on it, we notify the user with an Event::ChannelClosed reporting the
channel id and the closure reason.
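
For integrators, the practical effect is a new event variant to handle in the
application's event loop. A minimal sketch follows; the handler function itself is
illustrative and not part of this patch, while Event::ChannelClosed and every
ClosureReason variant matched on appear in the diff below:

    use lightning::util::events::{ClosureReason, Event};

    // Illustrative handler: match the new variant and report why the channel went away.
    fn handle_event(event: &Event) {
        match event {
            Event::ChannelClosed { channel_id, reason } => match reason {
                ClosureReason::CooperativeClosure =>
                    println!("channel {:?}: closed cooperatively", channel_id),
                ClosureReason::HolderForceClosed =>
                    println!("channel {:?}: we force-closed", channel_id),
                ClosureReason::CounterpartyForceClosed { peer_msg } =>
                    println!("channel {:?}: peer force-closed: {}", channel_id, peer_msg),
                ClosureReason::CommitmentTxConfirmed =>
                    println!("channel {:?}: commitment tx confirmed on-chain", channel_id),
                ClosureReason::ProcessingError { err } =>
                    println!("channel {:?}: processing error: {}", channel_id, err),
                ClosureReason::DisconnectedPeer =>
                    println!("channel {:?}: peer disconnected before funding locked", channel_id),
                ClosureReason::OutdatedChannelManager =>
                    println!("channel {:?}: stale ChannelManager detected at restart", channel_id),
            },
            _ => { /* other events are unchanged by this patch */ }
        }
    }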

lightning-background-processor/src/lib.rs
lightning-persister/src/lib.rs
lightning/src/ln/chanmon_update_fail_tests.rs
lightning/src/ln/channelmanager.rs
lightning/src/ln/functional_test_utils.rs
lightning/src/ln/functional_tests.rs
lightning/src/ln/monitor_tests.rs
lightning/src/ln/reorg_tests.rs
lightning/src/ln/shutdown_tests.rs

index 4e6fb6f02dfdf7a2f43f21d9f176f5f528f270d3..c7d55ae61b7ecdc9ebb586654efd76a0dd650397 100644 (file)
@@ -614,6 +614,7 @@ mod tests {
                        .expect("SpendableOutputs not handled within deadline");
                match event {
                        Event::SpendableOutputs { .. } => {},
+                       Event::ChannelClosed { .. } => {},
                        _ => panic!("Unexpected event: {:?}", event),
                }
 
index d68a06d71b4fe58ec1af2bcb1117df8f1dddd7e5..e0a8fbb9aab28d40f1d9ab9f01a02aa4823e8707 100644 (file)
@@ -182,11 +182,11 @@ mod tests {
        use bitcoin::Txid;
        use lightning::chain::channelmonitor::{Persist, ChannelMonitorUpdateErr};
        use lightning::chain::transaction::OutPoint;
-       use lightning::{check_closed_broadcast, check_added_monitors};
+       use lightning::{check_closed_broadcast, check_closed_event, check_added_monitors};
        use lightning::ln::features::InitFeatures;
        use lightning::ln::functional_test_utils::*;
        use lightning::ln::msgs::ErrorAction;
-       use lightning::util::events::{MessageSendEventsProvider, MessageSendEvent};
+       use lightning::util::events::{ClosureReason, Event, MessageSendEventsProvider, MessageSendEvent};
        use lightning::util::test_utils;
        use std::fs;
        #[cfg(target_os = "windows")]
@@ -259,6 +259,7 @@ mod tests {
                // Force close because cooperative close doesn't result in any persisted
                // updates.
                nodes[0].node.force_close_channel(&nodes[0].node.list_channels()[0].channel_id).unwrap();
+               check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed);
                check_closed_broadcast!(nodes[0], true);
                check_added_monitors!(nodes[0], 1);
 
@@ -268,6 +269,7 @@ mod tests {
                let header = BlockHeader { version: 0x20000000, prev_blockhash: nodes[0].best_block_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
                connect_block(&nodes[1], &Block { header, txdata: vec![node_txn[0].clone(), node_txn[0].clone()]});
                check_closed_broadcast!(nodes[1], true);
+               check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
                check_added_monitors!(nodes[1], 1);
 
                // Make sure everything is persisted as expected after close.
@@ -291,6 +293,7 @@ mod tests {
                let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
                let chan = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
                nodes[1].node.force_close_channel(&chan.2).unwrap();
+               check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed);
                let mut added_monitors = nodes[1].chain_monitor.added_monitors.lock().unwrap();
 
                // Set the persister's directory to read-only, which should result in
index 9a5d2eaa90525e1ee205e83c5f736db35c699ece..36f226823945b3a4cc5f0e07485788198b7719d8 100644 (file)
@@ -28,7 +28,7 @@ use ln::msgs::{ChannelMessageHandler, ErrorAction, RoutingMessageHandler};
 use routing::router::get_route;
 use util::config::UserConfig;
 use util::enforcing_trait_impls::EnforcingSigner;
-use util::events::{Event, MessageSendEvent, MessageSendEventsProvider, PaymentPurpose};
+use util::events::{Event, MessageSendEvent, MessageSendEventsProvider, PaymentPurpose, ClosureReason};
 use util::errors::APIError;
 use util::ser::{ReadableArgs, Writeable};
 use util::test_utils::TestBroadcaster;
@@ -81,6 +81,7 @@ fn do_test_simple_monitor_permanent_update_fail(persister_fail: bool) {
        // PaymentFailed event
 
        assert_eq!(nodes[0].node.list_channels().len(), 0);
+       check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: "ChannelMonitor storage failure".to_string() });
 }
 
 #[test]
@@ -269,6 +270,7 @@ fn do_test_simple_monitor_temporary_update_fail(disconnect: bool, persister_fail
        // PaymentFailed event
 
        assert_eq!(nodes[0].node.list_channels().len(), 0);
+       check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed);
 }
 
 #[test]
@@ -1985,6 +1987,8 @@ fn do_during_funding_monitor_fail(confirm_a_first: bool, restore_b_before_conf:
 
        send_payment(&nodes[0], &[&nodes[1]], 8000000);
        close_channel(&nodes[0], &nodes[1], &channel_id, funding_tx, true);
+       check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure);
+       check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure);
 }
 
 #[test]
@@ -2610,6 +2614,8 @@ fn test_temporary_error_during_shutdown() {
        assert_eq!(txn_a, txn_b);
        assert_eq!(txn_a.len(), 1);
        check_spends!(txn_a[0], funding_tx);
+       check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure);
+       check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure);
 }
 
 #[test]
@@ -2630,6 +2636,7 @@ fn test_permanent_error_during_sending_shutdown() {
        assert!(nodes[0].node.close_channel(&channel_id).is_ok());
        check_closed_broadcast!(nodes[0], true);
        check_added_monitors!(nodes[0], 2);
+       check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: "ChannelMonitor storage failure".to_string() });
 }
 
 #[test]
@@ -2652,6 +2659,7 @@ fn test_permanent_error_during_handling_shutdown() {
        nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &InitFeatures::known(), &shutdown);
        check_closed_broadcast!(nodes[1], true);
        check_added_monitors!(nodes[1], 2);
+       check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: "ChannelMonitor storage failure".to_string() });
 }
 
 #[test]
index f1d4ffdb0dbb746d214d119e2976d866353106b9..ebffe5c0af988a6b180d5909f90d256bdad37aac 100644 (file)
@@ -52,7 +52,7 @@ use ln::onion_utils;
 use ln::msgs::{ChannelMessageHandler, DecodeError, LightningError, OptionalField};
 use chain::keysinterface::{Sign, KeysInterface, KeysManager, InMemorySigner};
 use util::config::UserConfig;
-use util::events::{EventHandler, EventsProvider, MessageSendEvent, MessageSendEventsProvider};
+use util::events::{EventHandler, EventsProvider, MessageSendEvent, MessageSendEventsProvider, ClosureReason};
 use util::{byte_utils, events};
 use util::ser::{Readable, ReadableArgs, MaybeReadable, Writeable, Writer};
 use util::chacha20::{ChaCha20, ChaChaReader};
@@ -835,6 +835,9 @@ macro_rules! handle_error {
                                                        msg: update
                                                });
                                        }
+                                       if let Some(channel_id) = chan_id {
+                                               $self.pending_events.lock().unwrap().push(events::Event::ChannelClosed { channel_id, reason: ClosureReason::ProcessingError { err: err.err.clone() } });
+                                       }
                                }
 
                                log_error!($self.logger, "{}", err.err);
@@ -1368,6 +1371,12 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                                                msg: channel_update
                                                        });
                                                }
+                                               if let Ok(mut pending_events_lock) = self.pending_events.lock() {
+                                                       pending_events_lock.push(events::Event::ChannelClosed {
+                                                               channel_id: *channel_id,
+                                                               reason: ClosureReason::HolderForceClosed
+                                                       });
+                                               }
                                        }
                                        break Ok(());
                                },
@@ -1443,7 +1452,9 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                }
        }
 
-       fn force_close_channel_with_peer(&self, channel_id: &[u8; 32], peer_node_id: Option<&PublicKey>) -> Result<PublicKey, APIError> {
+       /// `peer_node_id` should be set when we receive a message from a peer, but not set when the
+       /// user force-closes the channel themselves; the distinction is re-exposed as the `ChannelClosed` reason.
+       fn force_close_channel_with_peer(&self, channel_id: &[u8; 32], peer_node_id: Option<&PublicKey>, peer_msg: Option<&String>) -> Result<PublicKey, APIError> {
                let mut chan = {
                        let mut channel_state_lock = self.channel_state.lock().unwrap();
                        let channel_state = &mut *channel_state_lock;
@@ -1456,6 +1467,14 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                if let Some(short_id) = chan.get().get_short_channel_id() {
                                        channel_state.short_to_id.remove(&short_id);
                                }
+                               let mut pending_events_lock = self.pending_events.lock().unwrap();
+                               if peer_node_id.is_some() {
+                                       if let Some(peer_msg) = peer_msg {
+                                               pending_events_lock.push(events::Event::ChannelClosed { channel_id: *channel_id, reason: ClosureReason::CounterpartyForceClosed { peer_msg: peer_msg.to_string() } });
+                                       }
+                               } else {
+                                       pending_events_lock.push(events::Event::ChannelClosed { channel_id: *channel_id, reason: ClosureReason::HolderForceClosed });
+                               }
                                chan.remove_entry().1
                        } else {
                                return Err(APIError::ChannelUnavailable{err: "No such channel".to_owned()});
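
The branch just added chooses the reason from the call context; a standalone sketch of
that mapping (the free function is illustrative only and generic over the key type to
stay self-contained, while the ClosureReason variants come from this patch):

    use lightning::util::events::ClosureReason;

    // Mirrors force_close_channel_with_peer: an error message received from the peer is
    // surfaced as CounterpartyForceClosed, a user-initiated close becomes
    // HolderForceClosed, and a peer-initiated close without a message pushes no event.
    fn closure_reason<P>(peer_node_id: Option<&P>, peer_msg: Option<&String>) -> Option<ClosureReason> {
        match (peer_node_id, peer_msg) {
            (Some(_), Some(msg)) => Some(ClosureReason::CounterpartyForceClosed { peer_msg: msg.clone() }),
            (Some(_), None) => None,
            (None, _) => Some(ClosureReason::HolderForceClosed),
        }
    }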
@@ -1477,7 +1496,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
        /// the chain and rejecting new HTLCs on the given channel. Fails if channel_id is unknown to the manager.
        pub fn force_close_channel(&self, channel_id: &[u8; 32]) -> Result<(), APIError> {
                let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
-               match self.force_close_channel_with_peer(channel_id, None) {
+               match self.force_close_channel_with_peer(channel_id, None, None) {
                        Ok(counterparty_node_id) => {
                                self.channel_state.lock().unwrap().pending_msg_events.push(
                                        events::MessageSendEvent::HandleError {
@@ -2421,6 +2440,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                                                                        if let Some(short_id) = channel.get_short_channel_id() {
                                                                                                channel_state.short_to_id.remove(&short_id);
                                                                                        }
+                                                                                       // ChannelClosed event is generated by handle_error for us.
                                                                                        Err(MsgHandleErrInternal::from_finish_shutdown(msg, channel_id, channel.force_shutdown(true), self.get_channel_update_for_broadcast(&channel).ok()))
                                                                                },
                                                                                ChannelError::CloseDelayBroadcast(_) => { panic!("Wait is only generated on receipt of channel_reestablish, which is handled by try_chan_entry, we don't bother to support it here"); }
@@ -3550,6 +3570,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                        msg: update
                                });
                        }
+                       self.pending_events.lock().unwrap().push(events::Event::ChannelClosed { channel_id: msg.channel_id, reason: ClosureReason::CooperativeClosure });
                }
                Ok(())
        }
@@ -3961,6 +3982,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                                                msg: update
                                                        });
                                                }
+                                               self.pending_events.lock().unwrap().push(events::Event::ChannelClosed { channel_id: chan.channel_id(), reason: ClosureReason::CommitmentTxConfirmed });
                                                pending_msg_events.push(events::MessageSendEvent::HandleError {
                                                        node_id: chan.get_counterparty_node_id(),
                                                        action: msgs::ErrorAction::SendErrorMessage {
@@ -4022,6 +4044,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                        Err(e) => {
                                                let (close_channel, res) = convert_chan_err!(self, e, short_to_id, chan, channel_id);
                                                handle_errors.push((chan.get_counterparty_node_id(), Err(res)));
+                                               // ChannelClosed event is generated by handle_error for us
                                                !close_channel
                                        }
                                }
@@ -4075,6 +4098,13 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                                                });
                                                        }
 
+                                                       if let Ok(mut pending_events_lock) = self.pending_events.lock() {
+                                                               pending_events_lock.push(events::Event::ChannelClosed {
+                                                                       channel_id: *channel_id,
+                                                                       reason: ClosureReason::CooperativeClosure
+                                                               });
+                                                       }
+
                                                        log_info!(self.logger, "Broadcasting {}", log_tx!(tx));
                                                        self.tx_broadcaster.broadcast_transaction(&tx);
                                                        false
@@ -4495,6 +4525,7 @@ where
                                                        msg: update
                                                });
                                        }
+                                       self.pending_events.lock().unwrap().push(events::Event::ChannelClosed { channel_id: channel.channel_id(), reason: ClosureReason::CommitmentTxConfirmed });
                                        pending_msg_events.push(events::MessageSendEvent::HandleError {
                                                node_id: channel.get_counterparty_node_id(),
                                                action: msgs::ErrorAction::SendErrorMessage { msg: e },
@@ -4685,6 +4716,7 @@ impl<Signer: Sign, M: Deref , T: Deref , K: Deref , F: Deref , L: Deref >
                                                                msg: update
                                                        });
                                                }
+                                               self.pending_events.lock().unwrap().push(events::Event::ChannelClosed { channel_id: chan.channel_id(), reason: ClosureReason::DisconnectedPeer });
                                                false
                                        } else {
                                                true
@@ -4699,6 +4731,7 @@ impl<Signer: Sign, M: Deref , T: Deref , K: Deref , F: Deref , L: Deref >
                                                        if let Some(short_id) = chan.get_short_channel_id() {
                                                                short_to_id.remove(&short_id);
                                                        }
+                                                       self.pending_events.lock().unwrap().push(events::Event::ChannelClosed { channel_id: chan.channel_id(), reason: ClosureReason::DisconnectedPeer });
                                                        return false;
                                                } else {
                                                        no_channels_remain = false;
@@ -4789,12 +4822,12 @@ impl<Signer: Sign, M: Deref , T: Deref , K: Deref , F: Deref , L: Deref >
                        for chan in self.list_channels() {
                                if chan.counterparty.node_id == *counterparty_node_id {
                                        // Untrusted messages from peer, we throw away the error if id points to a non-existent channel
-                                       let _ = self.force_close_channel_with_peer(&chan.channel_id, Some(counterparty_node_id));
+                                       let _ = self.force_close_channel_with_peer(&chan.channel_id, Some(counterparty_node_id), Some(&msg.data));
                                }
                        }
                } else {
                        // Untrusted messages from peer, we throw away the error if id points to a non-existent channel
-                       let _ = self.force_close_channel_with_peer(&msg.channel_id, Some(counterparty_node_id));
+                       let _ = self.force_close_channel_with_peer(&msg.channel_id, Some(counterparty_node_id), Some(&msg.data));
                }
        }
 }
@@ -5295,6 +5328,7 @@ impl<'a, Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
                let mut funding_txo_set = HashSet::with_capacity(cmp::min(channel_count as usize, 128));
                let mut by_id = HashMap::with_capacity(cmp::min(channel_count as usize, 128));
                let mut short_to_id = HashMap::with_capacity(cmp::min(channel_count as usize, 128));
+               let mut channel_closures = Vec::new();
                for _ in 0..channel_count {
                        let mut channel: Channel<Signer> = Channel::read(reader, &args.keys_manager)?;
                        let funding_txo = channel.get_funding_txo().ok_or(DecodeError::InvalidValue)?;
@@ -5325,6 +5359,10 @@ impl<'a, Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
                                        let (_, mut new_failed_htlcs) = channel.force_shutdown(true);
                                        failed_htlcs.append(&mut new_failed_htlcs);
                                        monitor.broadcast_latest_holder_commitment_txn(&args.tx_broadcaster, &args.logger);
+                                       channel_closures.push(events::Event::ChannelClosed {
+                                               channel_id: channel.channel_id(),
+                                               reason: ClosureReason::OutdatedChannelManager
+                                       });
                                } else {
                                        if let Some(short_channel_id) = channel.get_short_channel_id() {
                                                short_to_id.insert(short_channel_id, channel.channel_id());
@@ -5432,6 +5470,10 @@ impl<'a, Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
                let mut secp_ctx = Secp256k1::new();
                secp_ctx.seeded_randomize(&args.keys_manager.get_secure_random_bytes());
 
+               if !channel_closures.is_empty() {
+                       pending_events_read.append(&mut channel_closures);
+               }
+
                let channel_manager = ChannelManager {
                        genesis_hash,
                        fee_estimator: args.fee_estimator,
index dd1fedd9733c30b99002771ba7dffd9ccb426cbf..5690c834e73860d37729934c38c507bb648b3299 100644 (file)
@@ -743,16 +743,16 @@ macro_rules! get_closing_signed_broadcast {
 #[macro_export]
 macro_rules! check_closed_broadcast {
        ($node: expr, $with_error_msg: expr) => {{
-               let events = $node.node.get_and_clear_pending_msg_events();
-               assert_eq!(events.len(), if $with_error_msg { 2 } else { 1 });
-               match events[0] {
+               let msg_events = $node.node.get_and_clear_pending_msg_events();
+               assert_eq!(msg_events.len(), if $with_error_msg { 2 } else { 1 });
+               match msg_events[0] {
                        MessageSendEvent::BroadcastChannelUpdate { ref msg } => {
                                assert_eq!(msg.contents.flags & 2, 2);
                        },
                        _ => panic!("Unexpected event"),
                }
                if $with_error_msg {
-                       match events[1] {
+                       match msg_events[1] {
                                MessageSendEvent::HandleError { action: ErrorAction::SendErrorMessage { ref msg }, node_id: _ } => {
                                        // TODO: Check node_id
                                        Some(msg.clone())
@@ -763,6 +763,24 @@ macro_rules! check_closed_broadcast {
        }}
 }
 
+/// Check that a `ChannelClosed` event was generated with the expected closure reason
+#[macro_export]
+macro_rules! check_closed_event {
+       ($node: expr, $events: expr, $reason: expr) => {{
+               let events = $node.node.get_and_clear_pending_events();
+               assert_eq!(events.len(), $events);
+               let expected_reason = $reason;
+               for event in events {
+                       match event {
+                               Event::ChannelClosed { ref reason, .. } => {
+                                       assert_eq!(*reason, expected_reason);
+                               },
+                               _ => panic!("Unexpected event"),
+                       }
+               }
+       }}
+}
+
 pub fn close_channel<'a, 'b, 'c>(outbound_node: &Node<'a, 'b, 'c>, inbound_node: &Node<'a, 'b, 'c>, channel_id: &[u8; 32], funding_tx: Transaction, close_inbound_first: bool) -> (msgs::ChannelUpdate, msgs::ChannelUpdate, Transaction) {
        let (node_a, broadcaster_a, struct_a) = if close_inbound_first { (&inbound_node.node, &inbound_node.tx_broadcaster, inbound_node) } else { (&outbound_node.node, &outbound_node.tx_broadcaster, outbound_node) };
        let (node_b, broadcaster_b, struct_b) = if close_inbound_first { (&outbound_node.node, &outbound_node.tx_broadcaster, outbound_node) } else { (&inbound_node.node, &inbound_node.tx_broadcaster, inbound_node) };
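
A representative call site for the new macro, mirroring the test hunks below (nodes,
chan and the companion macros come from the standard functional-test harness):

    // Force-close, expect exactly one ChannelClosed event carrying the
    // holder-force-closed reason, then run the usual broadcast/monitor checks.
    nodes[0].node.force_close_channel(&chan.2).unwrap();
    check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed);
    check_closed_broadcast!(nodes[0], true);
    check_added_monitors!(nodes[0], 1);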
@@ -986,6 +1004,20 @@ macro_rules! expect_pending_htlcs_forwardable {
        }}
 }
 
+#[cfg(test)]
+macro_rules! expect_pending_htlcs_forwardable_from_events {
+       ($node: expr, $events: expr, $ignore: expr) => {{
+               assert_eq!($events.len(), 1);
+               match $events[0] {
+                       Event::PendingHTLCsForwardable { .. } => { },
+                       _ => panic!("Unexpected event"),
+               };
+               if $ignore {
+                       $node.node.process_pending_htlc_forwards();
+               }
+       }}
+}
+
 #[cfg(any(test, feature = "unstable"))]
 macro_rules! expect_payment_received {
        ($node: expr, $expected_payment_hash: expr, $expected_payment_secret: expr, $expected_recv_value: expr) => {
index 85eef456c21103354b9dd90a1dc16947555bc9c6..a7714796af4f07b46fd47e7e355da1db26cb685a 100644 (file)
@@ -30,7 +30,7 @@ use ln::msgs;
 use ln::msgs::{ChannelMessageHandler, RoutingMessageHandler, ErrorAction};
 use util::enforcing_trait_impls::EnforcingSigner;
 use util::{byte_utils, test_utils};
-use util::events::{Event, MessageSendEvent, MessageSendEventsProvider, PaymentPurpose};
+use util::events::{Event, MessageSendEvent, MessageSendEventsProvider, PaymentPurpose, ClosureReason};
 use util::errors::APIError;
 use util::ser::{Writeable, ReadableArgs};
 use util::config::UserConfig;
@@ -638,6 +638,7 @@ fn test_update_fee_that_funder_cannot_afford() {
        nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Funding remote cannot afford proposed new fee".to_string(), 1);
        check_added_monitors!(nodes[1], 1);
        check_closed_broadcast!(nodes[1], true);
+       check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: String::from("Funding remote cannot afford proposed new fee") });
 }
 
 #[test]
@@ -738,6 +739,8 @@ fn test_update_fee_with_fundee_update_add_htlc() {
        send_payment(&nodes[1], &vec!(&nodes[0])[..], 800000);
        send_payment(&nodes[0], &vec!(&nodes[1])[..], 800000);
        close_channel(&nodes[0], &nodes[1], &chan.2, chan.3, true);
+       check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure);
+       check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure);
 }
 
 #[test]
@@ -850,6 +853,8 @@ fn test_update_fee() {
        assert_eq!(get_feerate!(nodes[0], channel_id), feerate + 30);
        assert_eq!(get_feerate!(nodes[1], channel_id), feerate + 30);
        close_channel(&nodes[0], &nodes[1], &chan.2, chan.3, true);
+       check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure);
+       check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure);
 }
 
 #[test]
@@ -977,10 +982,20 @@ fn fake_network_test() {
 
        // Close down the channels...
        close_channel(&nodes[0], &nodes[1], &chan_1.2, chan_1.3, true);
+       check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure);
+       check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure);
        close_channel(&nodes[1], &nodes[2], &chan_2.2, chan_2.3, false);
+       check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure);
+       check_closed_event!(nodes[2], 1, ClosureReason::CooperativeClosure);
        close_channel(&nodes[2], &nodes[3], &chan_3.2, chan_3.3, true);
+       check_closed_event!(nodes[2], 1, ClosureReason::CooperativeClosure);
+       check_closed_event!(nodes[3], 1, ClosureReason::CooperativeClosure);
        close_channel(&nodes[1], &nodes[3], &chan_4.2, chan_4.3, false);
+       check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure);
+       check_closed_event!(nodes[3], 1, ClosureReason::CooperativeClosure);
        close_channel(&nodes[1], &nodes[3], &chan_5.2, chan_5.3, false);
+       check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure);
+       check_closed_event!(nodes[3], 1, ClosureReason::CooperativeClosure);
 }
 
 #[test]
@@ -1176,6 +1191,7 @@ fn test_duplicate_htlc_different_direction_onchain() {
 
        mine_transaction(&nodes[0], &remote_txn[0]);
        check_added_monitors!(nodes[0], 1);
+       check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
        connect_blocks(&nodes[0], TEST_FINAL_CLTV - 1); // Confirm blocks until the HTLC expires
 
        // Check we only broadcast 1 timeout tx
@@ -1457,6 +1473,7 @@ fn test_chan_reserve_violation_inbound_htlc_outbound_channel() {
        let err_msg = check_closed_broadcast!(nodes[0], true).unwrap();
        assert_eq!(err_msg.data, "Cannot accept HTLC that would put our balance under counterparty-announced channel reserve value");
        check_added_monitors!(nodes[0], 1);
+       check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: "Cannot accept HTLC that would put our balance under counterparty-announced channel reserve value".to_string() });
 }
 
 #[test]
@@ -1583,6 +1600,7 @@ fn test_chan_reserve_violation_inbound_htlc_inbound_chan() {
        let err_msg = check_closed_broadcast!(nodes[1], true).unwrap();
        assert_eq!(err_msg.data, "Remote HTLC add would put them under remote reserve value");
        check_added_monitors!(nodes[1], 1);
+       check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: "Remote HTLC add would put them under remote reserve value".to_string() });
 }
 
 #[test]
@@ -2039,6 +2057,8 @@ fn channel_monitor_network_test() {
        check_closed_broadcast!(nodes[0], true);
        assert_eq!(nodes[0].node.list_channels().len(), 0);
        assert_eq!(nodes[1].node.list_channels().len(), 1);
+       check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
+       check_closed_event!(nodes[1], 1, ClosureReason::DisconnectedPeer);
 
        // One pending HTLC is discarded by the force-close:
        let payment_preimage_1 = route_payment(&nodes[1], &vec!(&nodes[2], &nodes[3])[..], 3000000).0;
@@ -2059,6 +2079,8 @@ fn channel_monitor_network_test() {
        check_closed_broadcast!(nodes[2], true);
        assert_eq!(nodes[1].node.list_channels().len(), 0);
        assert_eq!(nodes[2].node.list_channels().len(), 1);
+       check_closed_event!(nodes[1], 1, ClosureReason::DisconnectedPeer);
+       check_closed_event!(nodes[2], 1, ClosureReason::CommitmentTxConfirmed);
 
        macro_rules! claim_funds {
                ($node: expr, $prev_node: expr, $preimage: expr) => {
@@ -2101,6 +2123,8 @@ fn channel_monitor_network_test() {
        check_closed_broadcast!(nodes[3], true);
        assert_eq!(nodes[2].node.list_channels().len(), 0);
        assert_eq!(nodes[3].node.list_channels().len(), 1);
+       check_closed_event!(nodes[2], 1, ClosureReason::DisconnectedPeer);
+       check_closed_event!(nodes[3], 1, ClosureReason::CommitmentTxConfirmed);
 
        // Drop the ChannelMonitor for the previous channel to avoid it broadcasting transactions and
        // confusing us in the following tests.
@@ -2172,6 +2196,8 @@ fn channel_monitor_network_test() {
        assert_eq!(nodes[4].node.list_channels().len(), 0);
 
        nodes[3].chain_monitor.chain_monitor.monitors.write().unwrap().insert(OutPoint { txid: chan_3.3.txid(), index: 0 }, chan_3_mon);
+       check_closed_event!(nodes[3], 1, ClosureReason::CommitmentTxConfirmed);
+       check_closed_event!(nodes[4], 1, ClosureReason::CommitmentTxConfirmed);
 }
 
 #[test]
@@ -2221,6 +2247,7 @@ fn test_justice_tx() {
                        node_txn.truncate(1);
                }
                check_added_monitors!(nodes[1], 1);
+               check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
                test_txn_broadcast(&nodes[1], &chan_5, None, HTLCType::NONE);
 
                mine_transaction(&nodes[0], &revoked_local_txn[0]);
@@ -2228,6 +2255,7 @@ fn test_justice_tx() {
                // Verify broadcast of revoked HTLC-timeout
                let node_txn = test_txn_broadcast(&nodes[0], &chan_5, Some(revoked_local_txn[0].clone()), HTLCType::TIMEOUT);
                check_added_monitors!(nodes[0], 1);
+               check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
                // Broadcast revoked HTLC-timeout on node 1
                mine_transaction(&nodes[1], &node_txn[1]);
                test_revoked_htlc_claim_txn_broadcast(&nodes[1], node_txn[1].clone(), revoked_local_txn[0].clone());
@@ -2269,9 +2297,11 @@ fn test_justice_tx() {
                test_txn_broadcast(&nodes[0], &chan_6, None, HTLCType::NONE);
 
                mine_transaction(&nodes[1], &revoked_local_txn[0]);
+               check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
                let node_txn = test_txn_broadcast(&nodes[1], &chan_6, Some(revoked_local_txn[0].clone()), HTLCType::SUCCESS);
                check_added_monitors!(nodes[1], 1);
                mine_transaction(&nodes[0], &node_txn[1]);
+               check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
                test_revoked_htlc_claim_txn_broadcast(&nodes[0], node_txn[1].clone(), revoked_local_txn[0].clone());
        }
        get_announce_close_broadcast_events(&nodes, 0, 1);
@@ -2299,6 +2329,7 @@ fn revoked_output_claim() {
        // Inform nodes[1] that nodes[0] broadcast a stale tx
        mine_transaction(&nodes[1], &revoked_local_txn[0]);
        check_added_monitors!(nodes[1], 1);
+       check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
        let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
        assert_eq!(node_txn.len(), 2); // ChannelMonitor: justice tx against revoked to_local output, ChannelManager: local commitment tx
 
@@ -2308,7 +2339,8 @@ fn revoked_output_claim() {
        // Inform nodes[0] that a watchtower cheated on its behalf, so it will force-close the chan
        mine_transaction(&nodes[0], &revoked_local_txn[0]);
        get_announce_close_broadcast_events(&nodes, 0, 1);
-       check_added_monitors!(nodes[0], 1)
+       check_added_monitors!(nodes[0], 1);
+       check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
 }
 
 #[test]
@@ -2345,8 +2377,10 @@ fn claim_htlc_outputs_shared_tx() {
        {
                mine_transaction(&nodes[0], &revoked_local_txn[0]);
                check_added_monitors!(nodes[0], 1);
+               check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
                mine_transaction(&nodes[1], &revoked_local_txn[0]);
                check_added_monitors!(nodes[1], 1);
+               check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
                connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
                expect_payment_failed!(nodes[1], payment_hash_2, true);
 
@@ -2403,7 +2437,13 @@ fn claim_htlc_outputs_single_tx() {
                check_added_monitors!(nodes[0], 1);
                confirm_transaction_at(&nodes[1], &revoked_local_txn[0], 100);
                check_added_monitors!(nodes[1], 1);
-               expect_pending_htlcs_forwardable_ignore!(nodes[0]);
+               check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
+               let mut events = nodes[0].node.get_and_clear_pending_events();
+               expect_pending_htlcs_forwardable_from_events!(nodes[0], events[0..1], true);
+               match events[1] {
+                       Event::ChannelClosed { reason: ClosureReason::CommitmentTxConfirmed, .. } => {}
+                       _ => panic!("Unexpected event"),
+               }
 
                connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
                expect_payment_failed!(nodes[1], payment_hash_2, true);
@@ -2501,6 +2541,7 @@ fn test_htlc_on_chain_success() {
        mine_transaction(&nodes[2], &commitment_tx[0]);
        check_closed_broadcast!(nodes[2], true);
        check_added_monitors!(nodes[2], 1);
+       check_closed_event!(nodes[2], 1, ClosureReason::CommitmentTxConfirmed);
        let node_txn = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); // ChannelManager : 3 (commitment tx, 2*htlc-success tx), ChannelMonitor : 2 (2 * HTLC-Success tx)
        assert_eq!(node_txn.len(), 5);
        assert_eq!(node_txn[0], node_txn[3]);
@@ -2526,11 +2567,15 @@ fn test_htlc_on_chain_success() {
                added_monitors.clear();
        }
        let forwarded_events = nodes[1].node.get_and_clear_pending_events();
-       assert_eq!(forwarded_events.len(), 2);
-       if let Event::PaymentForwarded { fee_earned_msat: Some(1000), claim_from_onchain_tx: true } = forwarded_events[0] {
-               } else { panic!(); }
+       assert_eq!(forwarded_events.len(), 3);
+       match forwarded_events[0] {
+               Event::ChannelClosed { reason: ClosureReason::CommitmentTxConfirmed, .. } => {}
+               _ => panic!("Unexpected event"),
+       }
        if let Event::PaymentForwarded { fee_earned_msat: Some(1000), claim_from_onchain_tx: true } = forwarded_events[1] {
                } else { panic!(); }
+       if let Event::PaymentForwarded { fee_earned_msat: Some(1000), claim_from_onchain_tx: true } = forwarded_events[2] {
+               } else { panic!(); }
        let events = nodes[1].node.get_and_clear_pending_msg_events();
        {
                let mut added_monitors = nodes[1].chain_monitor.added_monitors.lock().unwrap();
@@ -2597,6 +2642,7 @@ fn test_htlc_on_chain_success() {
        mine_transaction(&nodes[1], &node_a_commitment_tx[0]);
        check_closed_broadcast!(nodes[1], true);
        check_added_monitors!(nodes[1], 1);
+       check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
        let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
        assert_eq!(node_txn.len(), 6); // ChannelManager : 3 (commitment tx + HTLC-Sucess * 2), ChannelMonitor : 3 (HTLC-Success, 2* RBF bumps of above HTLC txn)
        let commitment_spend =
@@ -2632,7 +2678,7 @@ fn test_htlc_on_chain_success() {
        check_closed_broadcast!(nodes[0], true);
        check_added_monitors!(nodes[0], 1);
        let events = nodes[0].node.get_and_clear_pending_events();
-       assert_eq!(events.len(), 2);
+       assert_eq!(events.len(), 3);
        let mut first_claimed = false;
        for event in events {
                match event {
@@ -2644,6 +2690,7 @@ fn test_htlc_on_chain_success() {
                                        assert_eq!(payment_preimage, our_payment_preimage_2);
                                }
                        },
+                       Event::ChannelClosed { reason: ClosureReason::CommitmentTxConfirmed, .. } => {},
                        _ => panic!("Unexpected event"),
                }
        }
@@ -2700,6 +2747,7 @@ fn do_test_htlc_on_chain_timeout(connect_style: ConnectStyle) {
        mine_transaction(&nodes[2], &commitment_tx[0]);
        check_closed_broadcast!(nodes[2], true);
        check_added_monitors!(nodes[2], 1);
+       check_closed_event!(nodes[2], 1, ClosureReason::CommitmentTxConfirmed);
        let node_txn = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); // ChannelManager : 1 (commitment tx)
        assert_eq!(node_txn.len(), 1);
        check_spends!(node_txn[0], chan_2.3);
@@ -2709,6 +2757,7 @@ fn do_test_htlc_on_chain_timeout(connect_style: ConnectStyle) {
        // Verify that B's ChannelManager is able to detect that HTLC is timeout by its own tx and react backward in consequence
        connect_blocks(&nodes[1], 200 - nodes[2].best_block_info().1);
        mine_transaction(&nodes[1], &commitment_tx[0]);
+       check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
        let timeout_tx;
        {
                let mut node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
@@ -2776,6 +2825,7 @@ fn do_test_htlc_on_chain_timeout(connect_style: ConnectStyle) {
 
        check_closed_broadcast!(nodes[0], true);
        check_added_monitors!(nodes[0], 1);
+       check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
        let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); // ChannelManager : 1 commitment tx, ChannelMonitor : 1 timeout tx
        assert_eq!(node_txn.len(), 2);
        check_spends!(node_txn[0], chan_1.3);
@@ -2814,6 +2864,7 @@ fn test_simple_commitment_revoked_fail_backward() {
        let (_, payment_hash, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 3000000);
 
        mine_transaction(&nodes[1], &revoked_local_txn[0]);
+       check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
        connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
        check_added_monitors!(nodes[1], 1);
        check_closed_broadcast!(nodes[1], true);
@@ -2963,15 +3014,19 @@ fn do_test_commitment_revoked_fail_backward_exhaustive(deliver_bs_raa: bool, use
        connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
 
        let events = nodes[1].node.get_and_clear_pending_events();
-       assert_eq!(events.len(), if deliver_bs_raa { 1 } else { 2 });
+       assert_eq!(events.len(), if deliver_bs_raa { 2 } else { 3 });
        match events[0] {
+               Event::ChannelClosed { reason: ClosureReason::CommitmentTxConfirmed, .. } => { },
+               _ => panic!("Unexpected event"),
+       }
+       match events[1] {
                Event::PaymentFailed { ref payment_hash, .. } => {
                        assert_eq!(*payment_hash, fourth_payment_hash);
                },
                _ => panic!("Unexpected event"),
        }
        if !deliver_bs_raa {
-               match events[1] {
+               match events[2] {
                        Event::PendingHTLCsForwardable { .. } => { },
                        _ => panic!("Unexpected event"),
                };
@@ -3131,9 +3186,21 @@ fn fail_backward_pending_htlc_upon_channel_failure() {
                };
                nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &update_add_htlc);
        }
-
+       let events = nodes[0].node.get_and_clear_pending_events();
+       assert_eq!(events.len(), 2);
        // Check that Alice fails backward the pending HTLC from the second payment.
-       expect_payment_failed!(nodes[0], failed_payment_hash, true);
+       match events[0] {
+               Event::PaymentFailed { payment_hash, .. } => {
+                       assert_eq!(payment_hash, failed_payment_hash);
+               },
+               _ => panic!("Unexpected event"),
+       }
+       match events[1] {
+               Event::ChannelClosed { reason: ClosureReason::ProcessingError { ref err }, .. } => {
+                       assert_eq!(err, "Remote side tried to send a 0-msat HTLC");
+               },
+               _ => panic!("Unexpected event {:?}", events[1]),
+       }
        check_closed_broadcast!(nodes[0], true);
        check_added_monitors!(nodes[0], 1);
 }
@@ -3153,6 +3220,7 @@ fn test_htlc_ignore_latest_remote_commitment() {
        connect_blocks(&nodes[0], TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS + 1);
        check_closed_broadcast!(nodes[0], true);
        check_added_monitors!(nodes[0], 1);
+       check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed);
 
        let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
        assert_eq!(node_txn.len(), 3);
@@ -3162,6 +3230,7 @@ fn test_htlc_ignore_latest_remote_commitment() {
        connect_block(&nodes[1], &Block { header, txdata: vec![node_txn[0].clone(), node_txn[1].clone()]});
        check_closed_broadcast!(nodes[1], true);
        check_added_monitors!(nodes[1], 1);
+       check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
 
        // Duplicate the connect_block call since this may happen due to other listeners
        // registering new transactions
@@ -3216,6 +3285,7 @@ fn test_force_close_fail_back() {
        nodes[2].node.force_close_channel(&payment_event.commitment_msg.channel_id).unwrap();
        check_closed_broadcast!(nodes[2], true);
        check_added_monitors!(nodes[2], 1);
+       check_closed_event!(nodes[2], 1, ClosureReason::HolderForceClosed);
        let tx = {
                let mut node_txn = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap();
                // Note that we don't bother broadcasting the HTLC-Success transaction here as we don't
@@ -3230,6 +3300,7 @@ fn test_force_close_fail_back() {
        // Note no UpdateHTLCs event here from nodes[1] to nodes[0]!
        check_closed_broadcast!(nodes[1], true);
        check_added_monitors!(nodes[1], 1);
+       check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
 
        // Now check that if we add the preimage to ChannelMonitor it broadcasts our HTLC-Success..
        {
@@ -4143,6 +4214,7 @@ fn test_dup_htlc_onchain_fails_on_reload() {
        nodes[0].node.force_close_channel(&nodes[0].node.list_channels()[0].channel_id).unwrap();
        check_closed_broadcast!(nodes[0], true);
        check_added_monitors!(nodes[0], 1);
+       check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed);
 
        nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
        nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
@@ -4160,6 +4232,7 @@ fn test_dup_htlc_onchain_fails_on_reload() {
        connect_block(&nodes[1], &Block { header, txdata: vec![node_txn[1].clone(), node_txn[2].clone()]});
        check_closed_broadcast!(nodes[1], true);
        check_added_monitors!(nodes[1], 1);
+       check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
        let claim_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
 
        header.prev_blockhash = nodes[0].best_block_hash();
@@ -4504,6 +4577,7 @@ fn test_manager_serialize_deserialize_inconsistent_monitor() {
                check_added_monitors!(nodes[0], 1);
        }
        nodes[0].node = &nodes_0_deserialized;
+       check_closed_event!(nodes[0], 1, ClosureReason::OutdatedChannelManager);
 
        // nodes[1] and nodes[2] have no lost state with nodes[0]...
        reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
@@ -4567,6 +4641,7 @@ fn test_claim_sizeable_push_msat() {
        nodes[1].node.force_close_channel(&chan.2).unwrap();
        check_closed_broadcast!(nodes[1], true);
        check_added_monitors!(nodes[1], 1);
+       check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed);
        let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
        assert_eq!(node_txn.len(), 1);
        check_spends!(node_txn[0], chan.3);
@@ -4595,6 +4670,7 @@ fn test_claim_on_remote_sizeable_push_msat() {
        nodes[0].node.force_close_channel(&chan.2).unwrap();
        check_closed_broadcast!(nodes[0], true);
        check_added_monitors!(nodes[0], 1);
+       check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed);
 
        let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
        assert_eq!(node_txn.len(), 1);
@@ -4604,6 +4680,7 @@ fn test_claim_on_remote_sizeable_push_msat() {
        mine_transaction(&nodes[1], &node_txn[0]);
        check_closed_broadcast!(nodes[1], true);
        check_added_monitors!(nodes[1], 1);
+       check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
        connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
 
        let spend_txn = check_spendable_outputs!(nodes[1], node_cfgs[1].keys_manager);
@@ -4631,6 +4708,7 @@ fn test_claim_on_remote_revoked_sizeable_push_msat() {
        mine_transaction(&nodes[1], &revoked_local_txn[0]);
        check_closed_broadcast!(nodes[1], true);
        check_added_monitors!(nodes[1], 1);
+       check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
 
        let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
        mine_transaction(&nodes[1], &node_txn[0]);
@@ -4683,6 +4761,7 @@ fn test_static_spendable_outputs_preimage_tx() {
        check_spends!(node_txn[2], node_txn[1]);
 
        mine_transaction(&nodes[1], &node_txn[0]);
+       check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
        connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
 
        let spend_txn = check_spendable_outputs!(nodes[1], node_cfgs[1].keys_manager);
@@ -4727,6 +4806,7 @@ fn test_static_spendable_outputs_timeout_tx() {
        assert_eq!(node_txn[1].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
 
        mine_transaction(&nodes[1], &node_txn[1]);
+       check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
        connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
        expect_payment_failed!(nodes[1], our_payment_hash, true);
 
@@ -4757,6 +4837,7 @@ fn test_static_spendable_outputs_justice_tx_revoked_commitment_tx() {
        mine_transaction(&nodes[1], &revoked_local_txn[0]);
        check_closed_broadcast!(nodes[1], true);
        check_added_monitors!(nodes[1], 1);
+       check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
 
        let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
        assert_eq!(node_txn.len(), 2);
@@ -4793,6 +4874,7 @@ fn test_static_spendable_outputs_justice_tx_revoked_htlc_timeout_tx() {
        mine_transaction(&nodes[0], &revoked_local_txn[0]);
        check_closed_broadcast!(nodes[0], true);
        check_added_monitors!(nodes[0], 1);
+       check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
        connect_blocks(&nodes[0], TEST_FINAL_CLTV - 1); // Confirm blocks until the HTLC expires
 
        let revoked_htlc_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
@@ -4808,6 +4890,7 @@ fn test_static_spendable_outputs_justice_tx_revoked_htlc_timeout_tx() {
        connect_block(&nodes[1], &Block { header, txdata: vec![revoked_local_txn[0].clone(), revoked_htlc_txn[1].clone()] });
        check_closed_broadcast!(nodes[1], true);
        check_added_monitors!(nodes[1], 1);
+       check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
 
        let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
        assert_eq!(node_txn.len(), 3); // ChannelMonitor: bogus justice tx, justice tx on revoked outputs, ChannelManager: local commitment tx
@@ -4864,6 +4947,7 @@ fn test_static_spendable_outputs_justice_tx_revoked_htlc_success_tx() {
        mine_transaction(&nodes[1], &revoked_local_txn[0]);
        check_closed_broadcast!(nodes[1], true);
        check_added_monitors!(nodes[1], 1);
+       check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
        let revoked_htlc_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
 
        assert_eq!(revoked_htlc_txn.len(), 2);
@@ -4880,6 +4964,7 @@ fn test_static_spendable_outputs_justice_tx_revoked_htlc_success_tx() {
        connect_block(&nodes[0], &Block { header, txdata: vec![revoked_local_txn[0].clone(), revoked_htlc_txn[0].clone()] });
        check_closed_broadcast!(nodes[0], true);
        check_added_monitors!(nodes[0], 1);
+       check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
 
        let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
        assert_eq!(node_txn.len(), 3); // ChannelMonitor: justice tx on revoked commitment, justice tx on revoked HTLC-success, ChannelManager: local commitment tx
@@ -4960,6 +5045,7 @@ fn test_onchain_to_onchain_claim() {
        mine_transaction(&nodes[2], &commitment_tx[0]);
        check_closed_broadcast!(nodes[2], true);
        check_added_monitors!(nodes[2], 1);
+       check_closed_event!(nodes[2], 1, ClosureReason::CommitmentTxConfirmed);
 
        let c_txn = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); // ChannelManager : 2 (commitment tx, HTLC-Success tx), ChannelMonitor : 1 (HTLC-Success tx)
        assert_eq!(c_txn.len(), 3);
@@ -4976,7 +5062,19 @@ fn test_onchain_to_onchain_claim() {
        let header = BlockHeader { version: 0x20000000, prev_blockhash: nodes[1].best_block_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42};
        connect_block(&nodes[1], &Block { header, txdata: vec![c_txn[1].clone(), c_txn[2].clone()]});
        check_added_monitors!(nodes[1], 1);
-       expect_payment_forwarded!(nodes[1], Some(1000), true);
+       let events = nodes[1].node.get_and_clear_pending_events();
+       assert_eq!(events.len(), 2);
+       match events[0] {
+               Event::ChannelClosed { reason: ClosureReason::CommitmentTxConfirmed, .. } => {}
+               _ => panic!("Unexpected event"),
+       }
+       match events[1] {
+               Event::PaymentForwarded { fee_earned_msat, claim_from_onchain_tx } => {
+                       assert_eq!(fee_earned_msat, Some(1000));
+                       assert_eq!(claim_from_onchain_tx, true);
+               },
+               _ => panic!("Unexpected event"),
+       }
        {
                let mut b_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
                // ChannelMonitor: claim tx
@@ -4984,9 +5082,9 @@ fn test_onchain_to_onchain_claim() {
                check_spends!(b_txn[0], chan_2.3); // B local commitment tx, issued by ChannelManager
                b_txn.clear();
        }
+       check_added_monitors!(nodes[1], 1);
        let msg_events = nodes[1].node.get_and_clear_pending_msg_events();
        assert_eq!(msg_events.len(), 3);
-       check_added_monitors!(nodes[1], 1);
        match msg_events[0] {
                MessageSendEvent::BroadcastChannelUpdate { .. } => {},
                _ => panic!("Unexpected event"),
@@ -5008,6 +5106,7 @@ fn test_onchain_to_onchain_claim() {
        // Broadcast A's commitment tx on B's chain to see if we are able to claim inbound HTLC with our HTLC-Success tx
        let commitment_tx = get_local_commitment_txn!(nodes[0], chan_1.2);
        mine_transaction(&nodes[1], &commitment_tx[0]);
+       check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
        let b_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
        // ChannelMonitor: HTLC-Success tx, ChannelManager: local commitment tx + HTLC-Success tx
        assert_eq!(b_txn.len(), 3);
@@ -5065,6 +5164,7 @@ fn test_duplicate_payment_hash_one_failure_one_success() {
        mine_transaction(&nodes[1], &commitment_txn[0]);
        check_closed_broadcast!(nodes[1], true);
        check_added_monitors!(nodes[1], 1);
+       check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
        connect_blocks(&nodes[1], TEST_FINAL_CLTV - 40 + MIN_CLTV_EXPIRY_DELTA as u32 - 1); // Confirm blocks until the HTLC expires
 
        let htlc_timeout_tx;
@@ -5091,6 +5191,7 @@ fn test_duplicate_payment_hash_one_failure_one_success() {
        nodes[2].node.claim_funds(our_payment_preimage);
        mine_transaction(&nodes[2], &commitment_txn[0]);
        check_added_monitors!(nodes[2], 2);
+       check_closed_event!(nodes[2], 1, ClosureReason::CommitmentTxConfirmed);
        let events = nodes[2].node.get_and_clear_pending_msg_events();
        match events[0] {
                MessageSendEvent::UpdateHTLCs { .. } => {},
@@ -5178,6 +5279,7 @@ fn test_dynamic_spendable_outputs_local_htlc_success_tx() {
        check_added_monitors!(nodes[1], 1);
        mine_transaction(&nodes[1], &local_txn[0]);
        check_added_monitors!(nodes[1], 1);
+       check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
        let events = nodes[1].node.get_and_clear_pending_msg_events();
        match events[0] {
                MessageSendEvent::UpdateHTLCs { .. } => {},
@@ -5348,9 +5450,26 @@ fn do_test_fail_backwards_unrevoked_remote_announce(deliver_last_raa: bool, anno
        } else {
                mine_transaction(&nodes[2], &ds_prev_commitment_tx[0]);
        }
+       let events = nodes[2].node.get_and_clear_pending_events();
+       let close_event = if deliver_last_raa {
+               assert_eq!(events.len(), 2);
+               events[1].clone()
+       } else {
+               assert_eq!(events.len(), 1);
+               events[0].clone()
+       };
+       match close_event {
+               Event::ChannelClosed { reason: ClosureReason::CommitmentTxConfirmed, .. } => {}
+               _ => panic!("Unexpected event"),
+       }
+
        connect_blocks(&nodes[2], ANTI_REORG_DELAY - 1);
        check_closed_broadcast!(nodes[2], true);
-       expect_pending_htlcs_forwardable!(nodes[2]);
+       if deliver_last_raa {
+               expect_pending_htlcs_forwardable_from_events!(nodes[2], events[0..1], true);
+       } else {
+               expect_pending_htlcs_forwardable!(nodes[2]);
+       }
        check_added_monitors!(nodes[2], 3);
 
        let cs_msgs = nodes[2].node.get_and_clear_pending_msg_events();
@@ -5487,6 +5606,7 @@ fn test_dynamic_spendable_outputs_local_htlc_timeout_tx() {
        mine_transaction(&nodes[0], &local_txn[0]);
        check_closed_broadcast!(nodes[0], true);
        check_added_monitors!(nodes[0], 1);
+       check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
        connect_blocks(&nodes[0], TEST_FINAL_CLTV - 1); // Confirm blocks until the HTLC expires
 
        let htlc_timeout = {
@@ -5570,6 +5690,7 @@ fn test_key_derivation_params() {
        connect_blocks(&nodes[0], TEST_FINAL_CLTV - 1); // Confirm blocks until the HTLC expires
        check_closed_broadcast!(nodes[0], true);
        check_added_monitors!(nodes[0], 1);
+       check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
 
        let htlc_timeout = {
                let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
@@ -5610,6 +5731,7 @@ fn test_static_output_closing_tx() {
        let closing_tx = close_channel(&nodes[0], &nodes[1], &chan.2, chan.3, true).2;
 
        mine_transaction(&nodes[0], &closing_tx);
+       check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure);
        connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1);
 
        let spend_txn = check_spendable_outputs!(nodes[0], node_cfgs[0].keys_manager);
@@ -5617,6 +5739,7 @@ fn test_static_output_closing_tx() {
        check_spends!(spend_txn[0], closing_tx);
 
        mine_transaction(&nodes[1], &closing_tx);
+       check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure);
        connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
 
        let spend_txn = check_spendable_outputs!(nodes[1], node_cfgs[1].keys_manager);
@@ -5667,6 +5790,7 @@ fn do_htlc_claim_local_commitment_only(use_dust: bool) {
        test_txn_broadcast(&nodes[1], &chan, None, if use_dust { HTLCType::NONE } else { HTLCType::SUCCESS });
        check_closed_broadcast!(nodes[1], true);
        check_added_monitors!(nodes[1], 1);
+       check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
 }
 
 fn do_htlc_claim_current_remote_commitment_only(use_dust: bool) {
@@ -5699,6 +5823,7 @@ fn do_htlc_claim_current_remote_commitment_only(use_dust: bool) {
        test_txn_broadcast(&nodes[0], &chan, None, HTLCType::NONE);
        check_closed_broadcast!(nodes[0], true);
        check_added_monitors!(nodes[0], 1);
+       check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
 }
 
 fn do_htlc_claim_previous_remote_commitment_only(use_dust: bool, check_revoke_no_close: bool) {
@@ -5747,6 +5872,7 @@ fn do_htlc_claim_previous_remote_commitment_only(use_dust: bool, check_revoke_no
                test_txn_broadcast(&nodes[0], &chan, None, HTLCType::NONE);
                check_closed_broadcast!(nodes[0], true);
                check_added_monitors!(nodes[0], 1);
+               check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
        } else {
                expect_payment_failed!(nodes[0], our_payment_hash, true);
        }
@@ -6286,6 +6412,7 @@ fn test_update_add_htlc_bolt2_receiver_zero_value_msat() {
        nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Remote side tried to send a 0-msat HTLC".to_string(), 1);
        check_closed_broadcast!(nodes[1], true).unwrap();
        check_added_monitors!(nodes[1], 1);
+       check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: "Remote side tried to send a 0-msat HTLC".to_string() });
 }
 
 #[test]
@@ -6413,6 +6540,7 @@ fn test_update_add_htlc_bolt2_receiver_check_amount_received_more_than_min() {
        let err_msg = check_closed_broadcast!(nodes[1], true).unwrap();
        assert!(regex::Regex::new(r"Remote side tried to send less than our minimum HTLC value\. Lower limit: \(\d+\)\. Actual: \(\d+\)").unwrap().is_match(err_msg.data.as_str()));
        check_added_monitors!(nodes[1], 1);
+       check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: err_msg.data });
 }
 
 #[test]
@@ -6449,6 +6577,7 @@ fn test_update_add_htlc_bolt2_receiver_sender_can_afford_amount_sent() {
        let err_msg = check_closed_broadcast!(nodes[1], true).unwrap();
        assert_eq!(err_msg.data, "Remote HTLC add would put them under remote reserve value");
        check_added_monitors!(nodes[1], 1);
+       check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: err_msg.data });
 }
 
 #[test]
@@ -6493,6 +6622,7 @@ fn test_update_add_htlc_bolt2_receiver_check_max_htlc_limit() {
        let err_msg = check_closed_broadcast!(nodes[1], true).unwrap();
        assert!(regex::Regex::new(r"Remote tried to push more than our max accepted HTLCs \(\d+\)").unwrap().is_match(err_msg.data.as_str()));
        check_added_monitors!(nodes[1], 1);
+       check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: err_msg.data });
 }
 
 #[test]
@@ -6518,6 +6648,7 @@ fn test_update_add_htlc_bolt2_receiver_check_max_in_flight_msat() {
        let err_msg = check_closed_broadcast!(nodes[1], true).unwrap();
        assert!(regex::Regex::new("Remote HTLC add would put them over our max HTLC value").unwrap().is_match(err_msg.data.as_str()));
        check_added_monitors!(nodes[1], 1);
+       check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: err_msg.data });
 }
 
 #[test]
@@ -6543,6 +6674,7 @@ fn test_update_add_htlc_bolt2_receiver_check_cltv_expiry() {
        let err_msg = check_closed_broadcast!(nodes[1], true).unwrap();
        assert_eq!(err_msg.data,"Remote provided CLTV expiry in seconds instead of block height");
        check_added_monitors!(nodes[1], 1);
+       check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: err_msg.data });
 }
 
 #[test]
@@ -6592,6 +6724,7 @@ fn test_update_add_htlc_bolt2_receiver_check_repeated_id_ignore() {
        let err_msg = check_closed_broadcast!(nodes[1], true).unwrap();
        assert!(regex::Regex::new(r"Remote skipped HTLC ID \(skipped ID: \d+\)").unwrap().is_match(err_msg.data.as_str()));
        check_added_monitors!(nodes[1], 1);
+       check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: err_msg.data });
 }
 
 #[test]
@@ -6625,6 +6758,7 @@ fn test_update_fulfill_htlc_bolt2_update_fulfill_htlc_before_commitment() {
        let err_msg = check_closed_broadcast!(nodes[0], true).unwrap();
        assert!(regex::Regex::new(r"Remote tried to fulfill/fail HTLC \(\d+\) before it had been committed").unwrap().is_match(err_msg.data.as_str()));
        check_added_monitors!(nodes[0], 1);
+       check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: err_msg.data });
 }
 
 #[test]
@@ -6658,6 +6792,7 @@ fn test_update_fulfill_htlc_bolt2_update_fail_htlc_before_commitment() {
        let err_msg = check_closed_broadcast!(nodes[0], true).unwrap();
        assert!(regex::Regex::new(r"Remote tried to fulfill/fail HTLC \(\d+\) before it had been committed").unwrap().is_match(err_msg.data.as_str()));
        check_added_monitors!(nodes[0], 1);
+       check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: err_msg.data });
 }
 
 #[test]
@@ -6691,6 +6826,7 @@ fn test_update_fulfill_htlc_bolt2_update_fail_malformed_htlc_before_commitment()
        let err_msg = check_closed_broadcast!(nodes[0], true).unwrap();
        assert!(regex::Regex::new(r"Remote tried to fulfill/fail HTLC \(\d+\) before it had been committed").unwrap().is_match(err_msg.data.as_str()));
        check_added_monitors!(nodes[0], 1);
+       check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: err_msg.data });
 }
 
 #[test]
@@ -6732,6 +6868,7 @@ fn test_update_fulfill_htlc_bolt2_incorrect_htlc_id() {
        let err_msg = check_closed_broadcast!(nodes[0], true).unwrap();
        assert_eq!(err_msg.data, "Remote tried to fulfill/fail an HTLC we couldn't find");
        check_added_monitors!(nodes[0], 1);
+       check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: err_msg.data });
 }
 
 #[test]
@@ -6773,6 +6910,7 @@ fn test_update_fulfill_htlc_bolt2_wrong_preimage() {
        let err_msg = check_closed_broadcast!(nodes[0], true).unwrap();
        assert!(regex::Regex::new(r"Remote tried to fulfill HTLC \(\d+\) with an incorrect preimage").unwrap().is_match(err_msg.data.as_str()));
        check_added_monitors!(nodes[0], 1);
+       check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: err_msg.data });
 }
 
 #[test]
@@ -6821,6 +6959,7 @@ fn test_update_fulfill_htlc_bolt2_missing_badonion_bit_for_malformed_htlc_messag
        let err_msg = check_closed_broadcast!(nodes[0], true).unwrap();
        assert_eq!(err_msg.data, "Got update_fail_malformed_htlc with BADONION not set");
        check_added_monitors!(nodes[0], 1);
+       check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: err_msg.data });
 }
 
 #[test]
@@ -6963,6 +7102,7 @@ fn do_test_failure_delay_dust_htlc_local_commitment(announce_latest: bool) {
 
        check_closed_broadcast!(nodes[0], true);
        check_added_monitors!(nodes[0], 1);
+       check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
 
        assert_eq!(nodes[0].node.get_and_clear_pending_events().len(), 0);
        connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1);
@@ -7023,6 +7163,7 @@ fn do_test_sweep_outbound_htlc_failure_update(revoked: bool, local: bool) {
        if local {
                // We fail dust-HTLC 1 by broadcast of local commitment tx
                mine_transaction(&nodes[0], &as_commitment_tx[0]);
+               check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
                connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1);
                expect_payment_failed!(nodes[0], dust_hash, true);
 
@@ -7042,6 +7183,7 @@ fn do_test_sweep_outbound_htlc_failure_update(revoked: bool, local: bool) {
                mine_transaction(&nodes[0], &bs_commitment_tx[0]);
                check_closed_broadcast!(nodes[0], true);
                check_added_monitors!(nodes[0], 1);
+               check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
                assert_eq!(nodes[0].node.get_and_clear_pending_events().len(), 0);
                connect_blocks(&nodes[0], TEST_FINAL_CLTV - 1); // Confirm blocks until the HTLC expires
                timeout_tx.push(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap()[1].clone());
@@ -7123,14 +7265,17 @@ fn test_user_configurable_csv_delay() {
        let mut accept_channel = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
        accept_channel.to_self_delay = 200;
        nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), InitFeatures::known(), &accept_channel);
+       let reason_msg;
        if let MessageSendEvent::HandleError { ref action, .. } = nodes[0].node.get_and_clear_pending_msg_events()[0] {
                match action {
                        &ErrorAction::SendErrorMessage { ref msg } => {
                                assert!(regex::Regex::new(r"They wanted our payments to be delayed by a needlessly long period\. Upper limit: \d+\. Actual: \d+").unwrap().is_match(msg.data.as_str()));
+                               reason_msg = msg.data.clone();
                        },
-                       _ => { assert!(false); }
+                       _ => { panic!(); }
                }
-       } else { assert!(false); }
+       } else { panic!(); }
+       check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: reason_msg });
 
        // We test msg.to_self_delay <= config.their_to_self_delay is enforced in Channel::new_from_req()
        nodes[1].node.create_channel(nodes[0].node.get_our_node_id(), 1000000, 1000000, 42, None).unwrap();
@@ -7243,10 +7388,10 @@ fn test_data_loss_protect() {
 
        // Check that we close the channel upon detecting that A has fallen behind
        nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &reestablish_1[0]);
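+       // nodes[1] detects the stale reestablish and closes with a ProcessingError; nodes[0]'s
+       // own fallen-behind closure reason is checked further below.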
+       check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: "Peer attempted to reestablish channel with a very old local commitment transaction".to_string() });
        assert_eq!(check_closed_broadcast!(nodes[1], true).unwrap().data, "Peer attempted to reestablish channel with a very old local commitment transaction");
        check_added_monitors!(nodes[1], 1);
 
-
        // Check A is able to claim to_remote output
        let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
        assert_eq!(node_txn.len(), 1);
@@ -7254,6 +7399,7 @@ fn test_data_loss_protect() {
        assert_eq!(node_txn[0].output.len(), 2);
        mine_transaction(&nodes[0], &node_txn[0]);
        connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1);
+       check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: "We have fallen behind - we have received proof that if we broadcast remote is going to claim our funds - we can\'t do any automated broadcasting".to_string() });
        let spend_txn = check_spendable_outputs!(nodes[0], node_cfgs[0].keys_manager);
        assert_eq!(spend_txn.len(), 1);
        check_spends!(spend_txn[0], node_txn[0]);
@@ -7696,6 +7842,7 @@ fn test_bump_penalty_txn_on_revoked_htlcs() {
        connect_block(&nodes[1], &Block { header, txdata: vec![revoked_local_txn[0].clone()] });
        check_closed_broadcast!(nodes[1], true);
        check_added_monitors!(nodes[1], 1);
+       check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
        connect_blocks(&nodes[1], 49); // Confirm blocks until the HTLC expires (note CLTV was explicitly 50 above)
 
        let revoked_htlc_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
@@ -7717,7 +7864,12 @@ fn test_bump_penalty_txn_on_revoked_htlcs() {
        connect_block(&nodes[0], &Block { header: header_11, txdata: vec![revoked_local_txn[0].clone()] });
        let header_129 = BlockHeader { version: 0x20000000, prev_blockhash: header_11.block_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
        connect_block(&nodes[0], &Block { header: header_129, txdata: vec![revoked_htlc_txn[0].clone(), revoked_htlc_txn[2].clone()] });
-       expect_pending_htlcs_forwardable_ignore!(nodes[0]);
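+       // Connecting the revoked HTLC transactions queues both a PendingHTLCsForwardable and a
+       // ChannelClosed event, so fetch both rather than using the _ignore! variant.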
+       let events = nodes[0].node.get_and_clear_pending_events();
+       expect_pending_htlcs_forwardable_from_events!(nodes[0], events[0..1], true);
+       match events[1] {
+               Event::ChannelClosed { reason: ClosureReason::CommitmentTxConfirmed, .. } => {}
+               _ => panic!("Unexpected event"),
+       }
        let first;
        let feerate_1;
        let penalty_txn;
@@ -7963,6 +8115,7 @@ fn test_counterparty_raa_skip_no_crash() {
                &msgs::RevokeAndACK { channel_id, per_commitment_secret, next_per_commitment_point });
        assert_eq!(check_closed_broadcast!(nodes[1], true).unwrap().data, "Received an unexpected revoke_and_ack");
        check_added_monitors!(nodes[1], 1);
+       check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: "Received an unexpected revoke_and_ack".to_string() });
 }
 
 #[test]
@@ -7995,6 +8148,7 @@ fn test_bump_txn_sanitize_tracking_maps() {
        mine_transaction(&nodes[0], &revoked_local_txn[0]);
        check_closed_broadcast!(nodes[0], true);
        check_added_monitors!(nodes[0], 1);
+       check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
        let penalty_txn = {
                let mut node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
                assert_eq!(node_txn.len(), 4); // ChannelMonitor: justice txn * 3, ChannelManager: local commitment tx
@@ -8478,6 +8632,7 @@ fn test_pre_lockin_no_chan_closed_update() {
        let channel_id = ::chain::transaction::OutPoint { txid: funding_created_msg.funding_txid, index: funding_created_msg.funding_output_index }.to_channel_id();
        nodes[0].node.handle_error(&nodes[1].node.get_our_node_id(), &msgs::ErrorMessage { channel_id, data: "Hi".to_owned() });
        assert!(nodes[0].chain_monitor.added_monitors.lock().unwrap().is_empty());
+       check_closed_event!(nodes[0], 1, ClosureReason::CounterpartyForceClosed { peer_msg: "Hi".to_string() });
 }
 
 #[test]
@@ -8512,6 +8667,7 @@ fn test_htlc_no_detection() {
        chain::Listen::block_connected(&nodes[0].chain_monitor.chain_monitor, &Block { header, txdata: vec![local_txn[0].clone()] }, nodes[0].best_block_info().1 + 1);
        check_closed_broadcast!(nodes[0], true);
        check_added_monitors!(nodes[0], 1);
+       check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
        connect_blocks(&nodes[0], TEST_FINAL_CLTV - 1);
 
        let htlc_timeout = {
@@ -8571,6 +8727,7 @@ fn do_test_onchain_htlc_settlement_after_close(broadcast_alice: bool, go_onchain
        nodes[force_closing_node].node.force_close_channel(&chan_ab.2).unwrap();
        check_closed_broadcast!(nodes[force_closing_node], true);
        check_added_monitors!(nodes[force_closing_node], 1);
+       check_closed_event!(nodes[force_closing_node], 1, ClosureReason::HolderForceClosed);
        if go_onchain_before_fulfill {
                let txn_to_broadcast = match broadcast_alice {
                        true => alice_txn.clone(),
@@ -8582,6 +8739,7 @@ fn do_test_onchain_htlc_settlement_after_close(broadcast_alice: bool, go_onchain
                if broadcast_alice {
                        check_closed_broadcast!(nodes[1], true);
                        check_added_monitors!(nodes[1], 1);
+                       check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
                }
                assert_eq!(bob_txn.len(), 1);
                check_spends!(bob_txn[0], chan_ab.3);
@@ -8662,6 +8820,7 @@ fn do_test_onchain_htlc_settlement_after_close(broadcast_alice: bool, go_onchain
                if broadcast_alice {
                        check_closed_broadcast!(nodes[1], true);
                        check_added_monitors!(nodes[1], 1);
+                       check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
                }
                let mut bob_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
                if broadcast_alice {
@@ -8877,6 +9036,7 @@ fn test_error_chans_closed() {
        nodes[0].node.handle_error(&nodes[1].node.get_our_node_id(), &msgs::ErrorMessage { channel_id: chan_2.2, data: "ERR".to_owned() });
        check_added_monitors!(nodes[0], 1);
        check_closed_broadcast!(nodes[0], false);
+       check_closed_event!(nodes[0], 1, ClosureReason::CounterpartyForceClosed { peer_msg: "ERR".to_string() });
        assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0).len(), 1);
        assert_eq!(nodes[0].node.list_usable_channels().len(), 2);
        assert!(nodes[0].node.list_usable_channels()[0].channel_id == chan_1.2 || nodes[0].node.list_usable_channels()[1].channel_id == chan_1.2);
@@ -8886,6 +9046,7 @@ fn test_error_chans_closed() {
        let _chan_4 = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 10001, InitFeatures::known(), InitFeatures::known());
        nodes[0].node.handle_error(&nodes[1].node.get_our_node_id(), &msgs::ErrorMessage { channel_id: [0; 32], data: "ERR".to_owned() });
        check_added_monitors!(nodes[0], 2);
+       check_closed_event!(nodes[0], 2, ClosureReason::CounterpartyForceClosed { peer_msg: "ERR".to_string() });
        let events = nodes[0].node.get_and_clear_pending_msg_events();
        assert_eq!(events.len(), 2);
        match events[0] {
@@ -8950,6 +9111,7 @@ fn test_invalid_funding_tx() {
        nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().clear();
 
        confirm_transaction_at(&nodes[1], &tx, 1);
+       check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
        check_added_monitors!(nodes[1], 1);
        let events_2 = nodes[1].node.get_and_clear_pending_msg_events();
        assert_eq!(events_2.len(), 1);
@@ -8993,6 +9155,7 @@ fn do_test_tx_confirmed_skipping_blocks_immediate_broadcast(test_height_before_t
 
        nodes[1].node.force_close_channel(&channel_id).unwrap();
        check_closed_broadcast!(nodes[1], true);
+       check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed);
        check_added_monitors!(nodes[1], 1);
        let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
        assert_eq!(node_txn.len(), 1);
index c621f4eba22327b36602526c70d234f5b6426b72..08cd2306de865970d70afa8bc72b85b4d0909ac6 100644 (file)
@@ -15,7 +15,7 @@ use ln::{channel, PaymentPreimage, PaymentHash};
 use ln::channelmanager::BREAKDOWN_TIMEOUT;
 use ln::features::InitFeatures;
 use ln::msgs::{ChannelMessageHandler, ErrorAction};
-use util::events::{Event, MessageSendEvent, MessageSendEventsProvider};
+use util::events::{Event, MessageSendEvent, MessageSendEventsProvider, ClosureReason};
 use routing::network_graph::NetworkUpdate;
 use routing::router::get_route;
 
@@ -74,6 +74,7 @@ fn chanmon_fail_from_stale_commitment() {
        mine_transaction(&nodes[1], &bs_txn[0]);
        check_added_monitors!(nodes[1], 1);
        check_closed_broadcast!(nodes[1], true);
+       check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
        assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
 
        connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
@@ -175,6 +176,8 @@ fn chanmon_claim_value_coop_close() {
                        Builder::new().push_opcode(opcodes::all::OP_RETURN).into_script(), 253, &Secp256k1::new()).unwrap();
                check_spends!(spend_tx, shutdown_tx[0]);
        }
+       check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure);
+       check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure);
 }
 
 fn sorted_vec<T: Ord>(mut v: Vec<T>) -> Vec<T> {
@@ -314,9 +317,11 @@ fn do_test_claim_value_force_close(prev_commitment_tx: bool) {
        assert!(nodes[0].node.list_channels().is_empty());
        check_closed_broadcast!(nodes[0], true);
        check_added_monitors!(nodes[0], 1);
+       check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
        assert!(nodes[1].node.list_channels().is_empty());
        check_closed_broadcast!(nodes[1], true);
        check_added_monitors!(nodes[1], 1);
+       check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
        assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
        assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
 
index 0db30b8178cdc6bb8dc6113168d93d56ecb7f762..aca67c8f9f0ebe40980777f0c24f14799b742660 100644 (file)
@@ -17,7 +17,7 @@ use ln::features::InitFeatures;
 use ln::msgs::{ChannelMessageHandler, ErrorAction};
 use routing::network_graph::NetworkUpdate;
 use util::enforcing_trait_impls::EnforcingSigner;
-use util::events::{Event, MessageSendEvent, MessageSendEventsProvider};
+use util::events::{Event, MessageSendEvent, MessageSendEventsProvider, ClosureReason};
 use util::test_utils;
 use util::ser::{ReadableArgs, Writeable};
 
@@ -81,6 +81,7 @@ fn do_test_onchain_htlc_reorg(local_commitment: bool, claim: bool) {
                connect_block(&nodes[2], &Block { header, txdata: node_1_commitment_txn.clone() });
                check_added_monitors!(nodes[2], 1);
                check_closed_broadcast!(nodes[2], true); // We should get a BroadcastChannelUpdate (and *only* a BroadcastChannelUpdate)
+               check_closed_event!(nodes[2], 1, ClosureReason::CommitmentTxConfirmed);
                let node_2_commitment_txn = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap();
                assert_eq!(node_2_commitment_txn.len(), 3); // ChannelMonitor: 1 offered HTLC-Claim, ChannelManager: 1 local commitment tx, 1 Received HTLC-Claim
                assert_eq!(node_2_commitment_txn[1].output.len(), 2); // to-remote and Received HTLC (to-self is dust)
@@ -122,6 +123,7 @@ fn do_test_onchain_htlc_reorg(local_commitment: bool, claim: bool) {
        };
        check_added_monitors!(nodes[1], 1);
        check_closed_broadcast!(nodes[1], true); // We should get a BroadcastChannelUpdate (and *only* a BroadcastChannelUpdate)
+       check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
        // Connect ANTI_REORG_DELAY - 2 blocks, giving us a confirmation count of ANTI_REORG_DELAY - 1.
        connect_blocks(&nodes[1], ANTI_REORG_DELAY - 2);
        check_added_monitors!(nodes[1], 0);
@@ -298,6 +300,12 @@ fn do_test_unconf_chan(reload_node: bool, reorg_after_reload: bool, use_funding_
        *nodes[0].chain_monitor.expect_channel_force_closed.lock().unwrap() = Some((chan.2, true));
        nodes[0].node.test_process_background_events(); // Required to free the pending background monitor update
        check_added_monitors!(nodes[0], 1);
+       check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
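+       // nodes[1]'s force-close error reports how many confirmations remain, which depends on
+       // how the reorg was delivered: full-block listening with the funding still confirmed
+       // leaves 2 confs, otherwise 0.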
+       if connect_style == ConnectStyle::FullBlockViaListen && !use_funding_unconfirmed {
+               check_closed_event!(nodes[1], 1, ClosureReason::CounterpartyForceClosed { peer_msg: "Funding transaction was un-confirmed. Locked at 6 confs, now have 2 confs.".to_string() });
+       } else {
+               check_closed_event!(nodes[1], 1, ClosureReason::CounterpartyForceClosed { peer_msg: "Funding transaction was un-confirmed. Locked at 6 confs, now have 0 confs.".to_string() });
+       }
        assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().len(), 1);
        nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().clear();
 
@@ -369,6 +377,7 @@ fn test_set_outpoints_partial_claiming() {
        // Connect blocks on node A commitment transaction
        mine_transaction(&nodes[0], &remote_txn[0]);
        check_closed_broadcast!(nodes[0], true);
+       check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
        check_added_monitors!(nodes[0], 1);
        // Verify node A broadcast tx claiming both HTLCs
        {
@@ -386,6 +395,7 @@ fn test_set_outpoints_partial_claiming() {
        // Connect blocks on node B
        connect_blocks(&nodes[1], 135);
        check_closed_broadcast!(nodes[1], true);
+       check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
        check_added_monitors!(nodes[1], 1);
        // Verify node B broadcast 2 HTLC-timeout txn
        let partial_claim_tx = {
@@ -460,9 +470,11 @@ fn do_test_to_remote_after_local_detection(style: ConnectStyle) {
        assert!(nodes[0].node.list_channels().is_empty());
        check_closed_broadcast!(nodes[0], true);
        check_added_monitors!(nodes[0], 1);
+       check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
        assert!(nodes[1].node.list_channels().is_empty());
        check_closed_broadcast!(nodes[1], true);
        check_added_monitors!(nodes[1], 1);
+       check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
 
        // Drop transactions broadcasted in response to the first commitment transaction (we have good
        // test coverage of these things already elsewhere).
index 26d39fbad9df08a4d54b507a9ad4126dbdd9db8c..d13b7d2df12fece9a5d78092a21caa73e816451b 100644 (file)
@@ -21,7 +21,7 @@ use ln::msgs::{ChannelMessageHandler, ErrorAction};
 use ln::script::ShutdownScript;
 use util::test_utils;
 use util::test_utils::OnGetShutdownScriptpubkey;
-use util::events::{Event, MessageSendEvent, MessageSendEventsProvider};
+use util::events::{Event, MessageSendEvent, MessageSendEventsProvider, ClosureReason};
 use util::errors::APIError;
 use util::config::UserConfig;
 
@@ -67,6 +67,8 @@ fn pre_funding_lock_shutdown_test() {
 
        assert!(nodes[0].node.list_channels().is_empty());
        assert!(nodes[1].node.list_channels().is_empty());
+       check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure);
+       check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure);
 }
 
 #[test]
@@ -139,6 +141,8 @@ fn updates_shutdown_wait() {
        nodes[1].node.handle_closing_signed(&nodes[0].node.get_our_node_id(), &node_0_2nd_closing_signed.unwrap());
        let (_, node_1_none) = get_closing_signed_broadcast!(nodes[1].node, nodes[0].node.get_our_node_id());
        assert!(node_1_none.is_none());
+       check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure);
+       check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure);
 
        assert!(nodes[0].node.list_channels().is_empty());
 
@@ -147,6 +151,8 @@ fn updates_shutdown_wait() {
        close_channel(&nodes[1], &nodes[2], &chan_2.2, chan_2.3, true);
        assert!(nodes[1].node.list_channels().is_empty());
        assert!(nodes[2].node.list_channels().is_empty());
+       check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure);
+       check_closed_event!(nodes[2], 1, ClosureReason::CooperativeClosure);
 }
 
 #[test]
@@ -221,6 +227,9 @@ fn htlc_fail_async_shutdown() {
        close_channel(&nodes[1], &nodes[2], &chan_2.2, chan_2.3, true);
        assert!(nodes[1].node.list_channels().is_empty());
        assert!(nodes[2].node.list_channels().is_empty());
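+       // nodes[1] was a party to both cooperative closes, hence its two ChannelClosed events.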
+       check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure);
+       check_closed_event!(nodes[1], 2, ClosureReason::CooperativeClosure);
+       check_closed_event!(nodes[2], 1, ClosureReason::CooperativeClosure);
 }
 
 fn do_test_shutdown_rebroadcast(recv_count: u8) {
@@ -359,6 +368,7 @@ fn do_test_shutdown_rebroadcast(recv_count: u8) {
                nodes[1].node.handle_closing_signed(&nodes[0].node.get_our_node_id(), &node_0_2nd_closing_signed.unwrap());
                let (_, node_1_none) = get_closing_signed_broadcast!(nodes[1].node, nodes[0].node.get_our_node_id());
                assert!(node_1_none.is_none());
+               check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure);
        } else {
                // If one node, however, received + responded with an identical closing_signed we end
                // up erroring and node[0] will try to broadcast its own latest commitment transaction.
@@ -387,6 +397,7 @@ fn do_test_shutdown_rebroadcast(recv_count: u8) {
                // closing_signed so we do it ourselves
                check_closed_broadcast!(nodes[1], false);
                check_added_monitors!(nodes[1], 1);
+               check_closed_event!(nodes[1], 1, ClosureReason::CounterpartyForceClosed { peer_msg: "Failed to find corresponding channel".to_string() });
        }
 
        assert!(nodes[0].node.list_channels().is_empty());
@@ -396,6 +407,9 @@ fn do_test_shutdown_rebroadcast(recv_count: u8) {
        close_channel(&nodes[1], &nodes[2], &chan_2.2, chan_2.3, true);
        assert!(nodes[1].node.list_channels().is_empty());
        assert!(nodes[2].node.list_channels().is_empty());
+       check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure);
+       check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure);
+       check_closed_event!(nodes[2], 1, ClosureReason::CooperativeClosure);
 }
 
 #[test]
@@ -428,7 +442,8 @@ fn test_upfront_shutdown_script() {
        node_0_shutdown.scriptpubkey = Builder::new().push_opcode(opcodes::all::OP_RETURN).into_script().to_p2sh();
        // Test that we enforce the upfront scriptpubkey: providing a different one at closing gets the peer disconnected
        nodes[2].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &InitFeatures::known(), &node_0_shutdown);
-    assert!(regex::Regex::new(r"Got shutdown request with a scriptpubkey \([A-Fa-f0-9]+\) which did not match their previous scriptpubkey.").unwrap().is_match(check_closed_broadcast!(nodes[2], true).unwrap().data.as_str()));
+       assert!(regex::Regex::new(r"Got shutdown request with a scriptpubkey \([A-Fa-f0-9]+\) which did not match their previous scriptpubkey.").unwrap().is_match(check_closed_broadcast!(nodes[2], true).unwrap().data.as_str()));
+       check_closed_event!(nodes[2], 1, ClosureReason::ProcessingError { err: "Got shutdown request with a scriptpubkey (a91441c98a140039816273e50db317422c11c2bfcc8887) which did not match their previous scriptpubkey.".to_string() });
        check_added_monitors!(nodes[2], 1);
 
        // We test that in case of peer committing upfront to a script, if it doesn't change at closing, we sign
@@ -538,6 +553,7 @@ fn test_unsupported_anysegwit_upfront_shutdown_script() {
                },
                _ => panic!("Unexpected event"),
        }
+       check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: "Peer is signaling upfront_shutdown but has provided an unacceptable scriptpubkey format: Script(OP_PUSHNUM_16 OP_PUSHBYTES_2 0028)".to_string() });
 }
 
 #[test]
@@ -685,6 +701,7 @@ fn test_unsupported_anysegwit_shutdown_script() {
                _ => panic!("Unexpected event"),
        }
        check_added_monitors!(nodes[0], 1);
+       check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: "Got a nonstandard scriptpubkey (60020028) from remote peer".to_string() });
 }
 
 #[test]
@@ -720,6 +737,7 @@ fn test_invalid_shutdown_script() {
                _ => panic!("Unexpected event"),
        }
        check_added_monitors!(nodes[0], 1);
+       check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: "Got a nonstandard scriptpubkey (00020000) from remote peer".to_string() });
 }
 
 #[derive(PartialEq)]
@@ -785,7 +803,9 @@ fn do_test_closing_signed_reinit_timeout(timeout_step: TimeoutStep) {
                let node_0_2nd_closing_signed = get_closing_signed_broadcast!(nodes[0].node, nodes[1].node.get_our_node_id());
                if timeout_step == TimeoutStep::NoTimeout {
                        nodes[1].node.handle_closing_signed(&nodes[0].node.get_our_node_id(), &node_0_2nd_closing_signed.1.unwrap());
+                       check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure);
                }
+               check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure);
        }
 
        if timeout_step != TimeoutStep::NoTimeout {
@@ -808,6 +828,7 @@ fn do_test_closing_signed_reinit_timeout(timeout_step: TimeoutStep) {
                         txn[0].output[0].script_pubkey.is_v0_p2wsh()));
                check_closed_broadcast!(nodes[1], true);
                check_added_monitors!(nodes[1], 1);
+               check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: "closing_signed negotiation failed to finish within two timer ticks".to_string() });
        } else {
                assert!(txn[0].output[0].script_pubkey.is_v0_p2wpkh());
                assert!(txn[0].output[1].script_pubkey.is_v0_p2wpkh());
@@ -867,6 +888,8 @@ fn do_simple_legacy_shutdown_test(high_initiator_fee: bool) {
        nodes[0].node.handle_closing_signed(&nodes[1].node.get_our_node_id(), &node_1_closing_signed.unwrap());
        let (_, node_0_none) = get_closing_signed_broadcast!(nodes[0].node, nodes[1].node.get_our_node_id());
        assert!(node_0_none.is_none());
+       check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure);
+       check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure);
 }
 
 #[test]
@@ -920,4 +943,6 @@ fn simple_target_feerate_shutdown() {
        nodes[0].node.handle_closing_signed(&nodes[1].node.get_our_node_id(), &node_1_closing_signed);
        let (_, node_0_none) = get_closing_signed_broadcast!(nodes[0].node, nodes[1].node.get_our_node_id());
        assert!(node_0_none.is_none());
+       check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure);
+       check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure);
 }