Merge pull request #254 from TheBlueMatt/2018-11-channelerror
diff --git a/src/ln/channelmanager.rs b/src/ln/channelmanager.rs
index 2fe0ec3837c72107ac26cf6128e53587318945b6..d547ef3f06734389a6afd9026fcbaa35f09068e5 100644
@@ -404,6 +404,38 @@ pub struct ChannelDetails {
        pub user_id: u64,
 }
 
+macro_rules! handle_error {
+       ($self: ident, $internal: expr, $their_node_id: expr) => {
+               match $internal {
+                       Ok(msg) => Ok(msg),
+                       Err(MsgHandleErrInternal { err, needs_channel_force_close }) => {
+                               if needs_channel_force_close {
+                                       match &err.action {
+                                               &Some(msgs::ErrorAction::DisconnectPeer { msg: Some(ref msg) }) => {
+                                                       if msg.channel_id == [0; 32] {
+                                                               $self.peer_disconnected(&$their_node_id, true);
+                                                       } else {
+                                                               $self.force_close_channel(&msg.channel_id);
+                                                       }
+                                               },
+                                               &Some(msgs::ErrorAction::DisconnectPeer { msg: None }) => {},
+                                               &Some(msgs::ErrorAction::IgnoreError) => {},
+                                               &Some(msgs::ErrorAction::SendErrorMessage { ref msg }) => {
+                                                       if msg.channel_id == [0; 32] {
+                                                               $self.peer_disconnected(&$their_node_id, true);
+                                                       } else {
+                                                               $self.force_close_channel(&msg.channel_id);
+                                                       }
+                                               },
+                                               &None => {},
+                                       }
+                               }
+                               Err(err)
+                       },
+               }
+       }
+}
+
 impl ChannelManager {
        /// Constructs a new ChannelManager to hold several channels and route between them.
        ///
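The relocated `handle_error!` macro above was moved ahead of `impl ChannelManager` so it is in scope for the methods that now invoke it (Rust macros must be defined before use). As a rough illustration of how a handler entry point would wrap its result with it — a sketch only, not taken from this patch; `internal_update_fee` is a hypothetical stand-in for whichever internal handler is being wrapped:

```rust
// Hypothetical wrapper, assuming an internal handler returning
// Result<(), MsgHandleErrInternal>: handle_error! force-closes the channel
// named in the error message (or disconnects the peer when the id is the
// all-zero "all channels" id) before surfacing the error to the caller.
fn handle_update_fee(&self, their_node_id: &PublicKey, msg: &msgs::UpdateFee) -> Result<(), HandleError> {
	let _ = self.total_consistency_lock.read().unwrap();
	handle_error!(self, self.internal_update_fee(their_node_id, msg), their_node_id)
}
```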
@@ -1203,7 +1235,17 @@ impl ChannelManager {
                                route: route.clone(),
                                session_priv: session_priv.clone(),
                                first_hop_htlc_msat: htlc_msat,
-                       }, onion_packet).map_err(|he| APIError::ChannelUnavailable{err: he.err})?
+                       }, onion_packet).map_err(|he|
+                               match he {
+                                       ChannelError::Close(err) => {
+                                               // TODO: We need to close the channel here, but for that to be safe we have
+                                               // to do all channel closure inside the channel_state lock which is a
+                                               // somewhat-larger refactor, so we leave that for later.
+                                               APIError::ChannelUnavailable { err }
+                                       },
+                                       ChannelError::Ignore(err) => APIError::ChannelUnavailable { err },
+                               }
+                       )?
                };
                match res {
                        Some((update_add, commitment_signed, chan_monitor)) => {
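The `match he { ... }` above relies on the two-variant `ChannelError` from `ln::channel`. For reference, a sketch of the shape this code assumes (the real definition lives in channel.rs):

```rust
// Sketch of the error type matched above (assumed shape): Ignore means the
// operation failed but the channel can stay open; Close means the caller
// must force-close the channel.
pub enum ChannelError {
	Ignore(&'static str),
	Close(&'static str),
}
```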
@@ -1243,24 +1285,30 @@ impl ChannelManager {
                let _ = self.total_consistency_lock.read().unwrap();
 
                let (chan, msg, chan_monitor) = {
-                       let mut channel_state = self.channel_state.lock().unwrap();
-                       match channel_state.by_id.remove(temporary_channel_id) {
-                               Some(mut chan) => {
-                                       match chan.get_outbound_funding_created(funding_txo) {
-                                               Ok(funding_msg) => {
-                                                       (chan, funding_msg.0, funding_msg.1)
-                                               },
-                                               Err(e) => {
-                                                       log_error!(self, "Got bad signatures: {}!", e.err);
-                                                       channel_state.pending_msg_events.push(events::MessageSendEvent::HandleError {
-                                                               node_id: chan.get_their_node_id(),
-                                                               action: e.action,
-                                                       });
-                                                       return;
-                                               },
-                                       }
+                       let (res, chan) = {
+                               let mut channel_state = self.channel_state.lock().unwrap();
+                               match channel_state.by_id.remove(temporary_channel_id) {
+                                       Some(mut chan) => {
+                                               (chan.get_outbound_funding_created(funding_txo)
+                                                       .map_err(|e| MsgHandleErrInternal::from_chan_maybe_close(e, chan.channel_id()))
+                                               , chan)
+                                       },
+                                       None => return
+                               }
+                       };
+                       match handle_error!(self, res, chan.get_their_node_id()) {
+                               Ok(funding_msg) => {
+                                       (chan, funding_msg.0, funding_msg.1)
+                               },
+                               Err(e) => {
+                                       log_error!(self, "Got bad signatures: {}!", e.err);
+                                       let mut channel_state = self.channel_state.lock().unwrap();
+                                       channel_state.pending_msg_events.push(events::MessageSendEvent::HandleError {
+                                               node_id: chan.get_their_node_id(),
+                                               action: e.action,
+                                       });
+                                       return;
                                },
-                               None => return
                        }
                };
                // Because we have exclusive ownership of the channel here we can release the channel_state
@@ -1372,9 +1420,7 @@ impl ChannelManager {
                                                let (commitment_msg, monitor) = match forward_chan.send_commitment() {
                                                        Ok(res) => res,
                                                        Err(e) => {
-                                                               if let &Some(msgs::ErrorAction::DisconnectPeer{msg: Some(ref _err_msg)}) = &e.action {
-                                                               } else if let &Some(msgs::ErrorAction::SendErrorMessage{msg: ref _err_msg}) = &e.action {
-                                                               } else {
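+								// Per send_commitment()'s stated return-value requirements, any error here
+								// must be a ChannelError::Close; an Ignore means that contract was violated.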
+                                                               if let ChannelError::Ignore(_) = e {
                                                                        panic!("Stated return value requirements in send_commitment() were not met");
                                                                }
                                                                //TODO: Handle...this is bad!
@@ -1745,7 +1791,7 @@ impl ChannelManager {
                                                        (chan.remove(), funding_msg, monitor_update)
                                                },
                                                Err(e) => {
-                                                       return Err(e).map_err(|e| MsgHandleErrInternal::from_maybe_close(e))
+                                                       return Err(e).map_err(|e| MsgHandleErrInternal::from_chan_maybe_close(e, msg.temporary_channel_id))
                                                }
                                        }
                                },
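These call sites replace `from_maybe_close` with `from_chan_maybe_close`, which additionally takes the channel id. A sketch of the helper's assumed shape, inferred from how it is used here (the actual definition is in a part of this file not shown in the diff):

```rust
impl MsgHandleErrInternal {
	// Assumed sketch: tag the outgoing error with the relevant channel id and
	// request a force-close only for the Close variant.
	fn from_chan_maybe_close(err: ChannelError, channel_id: [u8; 32]) -> Self {
		match err {
			ChannelError::Ignore(msg) => MsgHandleErrInternal {
				err: HandleError { err: msg, action: Some(msgs::ErrorAction::IgnoreError) },
				needs_channel_force_close: false,
			},
			ChannelError::Close(msg) => MsgHandleErrInternal {
				err: HandleError {
					err: msg,
					action: Some(msgs::ErrorAction::SendErrorMessage {
						msg: msgs::ErrorMessage { channel_id, data: msg.to_string() },
					}),
				},
				needs_channel_force_close: true,
			},
		}
	}
}
```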
@@ -1783,7 +1829,7 @@ impl ChannelManager {
                                                //TODO: here and below MsgHandleErrInternal, #153 case
                                                return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id));
                                        }
-                                       let chan_monitor = chan.funding_signed(&msg).map_err(|e| MsgHandleErrInternal::from_maybe_close(e))?;
+                                       let chan_monitor = chan.funding_signed(&msg).map_err(|e| MsgHandleErrInternal::from_chan_maybe_close(e, msg.channel_id))?;
                                        if let Err(_e) = self.monitor.add_update_monitor(chan_monitor.get_funding_txo().unwrap(), chan_monitor) {
                                                unimplemented!();
                                        }
@@ -2213,7 +2259,8 @@ impl ChannelManager {
                                        //TODO: here and below MsgHandleErrInternal, #153 case
                                        return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id));
                                }
-                               let (revoke_and_ack, commitment_signed, closing_signed, chan_monitor) = chan.commitment_signed(&msg, &*self.fee_estimator).map_err(|e| MsgHandleErrInternal::from_maybe_close(e))?;
+                               let (revoke_and_ack, commitment_signed, closing_signed, chan_monitor) = chan.commitment_signed(&msg, &*self.fee_estimator)
+                                       .map_err(|e| MsgHandleErrInternal::from_chan_maybe_close(e, msg.channel_id))?;
                                if let Err(_e) = self.monitor.add_update_monitor(chan_monitor.get_funding_txo().unwrap(), chan_monitor) {
                                        unimplemented!();
                                }
@@ -2289,7 +2336,8 @@ impl ChannelManager {
                                                //TODO: here and below MsgHandleErrInternal, #153 case
                                                return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id));
                                        }
-                                       let (commitment_update, pending_forwards, pending_failures, closing_signed, chan_monitor) = chan.revoke_and_ack(&msg, &*self.fee_estimator).map_err(|e| MsgHandleErrInternal::from_maybe_close(e))?;
+                                       let (commitment_update, pending_forwards, pending_failures, closing_signed, chan_monitor) = chan.revoke_and_ack(&msg, &*self.fee_estimator)
+                                                       .map_err(|e| MsgHandleErrInternal::from_chan_maybe_close(e, msg.channel_id))?;
                                        if let Err(_e) = self.monitor.add_update_monitor(chan_monitor.get_funding_txo().unwrap(), chan_monitor) {
                                                unimplemented!();
                                        }
@@ -2455,7 +2503,16 @@ impl ChannelManager {
                                if !chan.is_live() {
                                        return Err(APIError::ChannelUnavailable{err: "Channel is either not yet fully established or peer is currently disconnected"});
                                }
-                               if let Some((update_fee, commitment_signed, chan_monitor)) = chan.send_update_fee_and_commit(feerate_per_kw).map_err(|e| APIError::APIMisuseError{err: e.err})? {
+                               if let Some((update_fee, commitment_signed, chan_monitor)) = chan.send_update_fee_and_commit(feerate_per_kw)
+                                               .map_err(|e| match e {
+                                                       ChannelError::Ignore(err) => APIError::APIMisuseError{err},
+                                                       ChannelError::Close(err) => {
+                                                               // TODO: We need to close the channel here, but for that to be safe we have
+                                                               // to do all channel closure inside the channel_state lock which is a
+                                                               // somewhat-larger refactor, so we leave that for later.
+                                                               APIError::APIMisuseError{err}
+                                                       },
+                                               })? {
                                        if let Err(_e) = self.monitor.add_update_monitor(chan_monitor.get_funding_txo().unwrap(), chan_monitor) {
                                                unimplemented!();
                                        }
@@ -2608,38 +2665,6 @@ impl ChainListener for ChannelManager {
        }
 }
 
-macro_rules! handle_error {
-       ($self: ident, $internal: expr, $their_node_id: expr) => {
-               match $internal {
-                       Ok(msg) => Ok(msg),
-                       Err(MsgHandleErrInternal { err, needs_channel_force_close }) => {
-                               if needs_channel_force_close {
-                                       match &err.action {
-                                               &Some(msgs::ErrorAction::DisconnectPeer { msg: Some(ref msg) }) => {
-                                                       if msg.channel_id == [0; 32] {
-                                                               $self.peer_disconnected(&$their_node_id, true);
-                                                       } else {
-                                                               $self.force_close_channel(&msg.channel_id);
-                                                       }
-                                               },
-                                               &Some(msgs::ErrorAction::DisconnectPeer { msg: None }) => {},
-                                               &Some(msgs::ErrorAction::IgnoreError) => {},
-                                               &Some(msgs::ErrorAction::SendErrorMessage { ref msg }) => {
-                                                       if msg.channel_id == [0; 32] {
-                                                               $self.peer_disconnected(&$their_node_id, true);
-                                                       } else {
-                                                               $self.force_close_channel(&msg.channel_id);
-                                                       }
-                                               },
-                                               &None => {},
-                                       }
-                               }
-                               Err(err)
-                       },
-               }
-       }
-}
-
 impl ChannelMessageHandler for ChannelManager {
        //TODO: Handle errors and close channel (or so)
        fn handle_open_channel(&self, their_node_id: &PublicKey, msg: &msgs::OpenChannel) -> Result<(), HandleError> {
@@ -3211,6 +3236,7 @@ mod tests {
        use chain::chaininterface::{ChainListener, ChainWatchInterface};
        use chain::keysinterface::{KeysInterface, SpendableOutputDescriptor};
        use chain::keysinterface;
+       use ln::channel::{COMMITMENT_TX_BASE_WEIGHT, COMMITMENT_TX_WEIGHT_PER_HTLC};
        use ln::channelmanager::{ChannelManager,ChannelManagerReadArgs,OnionKeys,PaymentFailReason,RAACommitmentOrder};
        use ln::channelmonitor::{ChannelMonitor, ChannelMonitorUpdateErr, CLTV_CLAIM_BUFFER, HTLC_FAIL_TIMEOUT_BLOCKS, ManyChannelMonitor};
        use ln::router::{Route, RouteHop, Router};
@@ -3503,6 +3529,17 @@ mod tests {
                }
        }
 
+       macro_rules! get_feerate {
+               ($node: expr, $channel_id: expr) => {
+                       {
+                               let chan_lock = $node.node.channel_state.lock().unwrap();
+                               let chan = chan_lock.by_id.get(&$channel_id).unwrap();
+                               chan.get_feerate()
+                       }
+               }
+       }
+
        fn create_chan_between_nodes_with_value_init(node_a: &Node, node_b: &Node, channel_value: u64, push_msat: u64) -> Transaction {
                node_a.node.create_channel(node_b.node.get_our_node_id(), channel_value, push_msat, 42).unwrap();
                node_b.node.handle_open_channel(&node_a.node.get_our_node_id(), &get_event_msg!(node_a, MessageSendEvent::SendOpenChannel, node_b.node.get_our_node_id())).unwrap();
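With the channel id now a macro parameter, the deduplicated `get_feerate!` can query any channel from any test, as the updated call sites below do:

```rust
let feerate = get_feerate!(nodes[0], channel_id);
```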
@@ -3672,7 +3709,7 @@ mod tests {
                }
        }
 
-       fn close_channel(outbound_node: &Node, inbound_node: &Node, channel_id: &[u8; 32], funding_tx: Transaction, close_inbound_first: bool) -> (msgs::ChannelUpdate, msgs::ChannelUpdate) {
+       fn close_channel(outbound_node: &Node, inbound_node: &Node, channel_id: &[u8; 32], funding_tx: Transaction, close_inbound_first: bool) -> (msgs::ChannelUpdate, msgs::ChannelUpdate, Transaction) {
                let (node_a, broadcaster_a, struct_a) = if close_inbound_first { (&inbound_node.node, &inbound_node.tx_broadcaster, inbound_node) } else { (&outbound_node.node, &outbound_node.tx_broadcaster, outbound_node) };
                let (node_b, broadcaster_b) = if close_inbound_first { (&outbound_node.node, &outbound_node.tx_broadcaster) } else { (&inbound_node.node, &inbound_node.tx_broadcaster) };
                let (tx_a, tx_b);
@@ -3735,7 +3772,7 @@ mod tests {
                assert_eq!(tx_a, tx_b);
                check_spends!(tx_a, funding_tx);
 
-               (as_update, bs_update)
+               (as_update, bs_update, tx_a)
        }
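Callers that only need the negotiated closing transaction can take the new third tuple element directly, as `test_static_output_closing_tx` below does:

```rust
let closing_tx = close_channel(&nodes[0], &nodes[1], &chan.2, chan.3, true).2;
```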
 
        struct SendEvent {
@@ -4136,14 +4173,6 @@ mod tests {
                let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
                let channel_id = chan.2;
 
-               macro_rules! get_feerate {
-                       ($node: expr) => {{
-                               let chan_lock = $node.node.channel_state.lock().unwrap();
-                               let chan = chan_lock.by_id.get(&channel_id).unwrap();
-                               chan.get_feerate()
-                       }}
-               }
-
                // balancing
                send_payment(&nodes[0], &vec!(&nodes[1])[..], 8000000);
 
@@ -4165,7 +4194,7 @@ mod tests {
                // (6) RAA is delivered                  ->
 
                // First nodes[0] generates an update_fee
-               nodes[0].node.update_fee(channel_id, get_feerate!(nodes[0]) + 20).unwrap();
+               nodes[0].node.update_fee(channel_id, get_feerate!(nodes[0], channel_id) + 20).unwrap();
                check_added_monitors!(nodes[0], 1);
 
                let events_0 = nodes[0].node.get_and_clear_pending_msg_events();
@@ -4254,19 +4283,11 @@ mod tests {
                let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
                let channel_id = chan.2;
 
-               macro_rules! get_feerate {
-                       ($node: expr) => {{
-                               let chan_lock = $node.node.channel_state.lock().unwrap();
-                               let chan = chan_lock.by_id.get(&channel_id).unwrap();
-                               chan.get_feerate()
-                       }}
-               }
-
                // balancing
                send_payment(&nodes[0], &vec!(&nodes[1])[..], 8000000);
 
                // First nodes[0] generates an update_fee
-               nodes[0].node.update_fee(channel_id, get_feerate!(nodes[0]) + 20).unwrap();
+               nodes[0].node.update_fee(channel_id, get_feerate!(nodes[0], channel_id) + 20).unwrap();
                check_added_monitors!(nodes[0], 1);
 
                let events_0 = nodes[0].node.get_and_clear_pending_msg_events();
@@ -4312,14 +4333,6 @@ mod tests {
                let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
                let channel_id = chan.2;
 
-               macro_rules! get_feerate {
-                       ($node: expr) => {{
-                               let chan_lock = $node.node.channel_state.lock().unwrap();
-                               let chan = chan_lock.by_id.get(&channel_id).unwrap();
-                               chan.get_feerate()
-                       }}
-               }
-
                // A                                        B
                // update_fee/commitment_signed          ->
                //                                       .- send (1) RAA and (2) commitment_signed
@@ -4340,7 +4353,7 @@ mod tests {
                // revoke_and_ack                        ->
 
                // First nodes[0] generates an update_fee
-               let initial_feerate = get_feerate!(nodes[0]);
+               let initial_feerate = get_feerate!(nodes[0], channel_id);
                nodes[0].node.update_fee(channel_id, initial_feerate + 20).unwrap();
                check_added_monitors!(nodes[0], 1);
 
@@ -4424,16 +4437,8 @@ mod tests {
                let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
                let channel_id = chan.2;
 
-               macro_rules! get_feerate {
-                       ($node: expr) => {{
-                               let chan_lock = $node.node.channel_state.lock().unwrap();
-                               let chan = chan_lock.by_id.get(&channel_id).unwrap();
-                               chan.get_feerate()
-                       }}
-               }
-
-               let feerate = get_feerate!(nodes[0]);
-               nodes[0].node.update_fee(channel_id, feerate+20).unwrap();
+               let feerate = get_feerate!(nodes[0], channel_id);
+               nodes[0].node.update_fee(channel_id, feerate+25).unwrap();
                check_added_monitors!(nodes[0], 1);
 
                let events_0 = nodes[0].node.get_and_clear_pending_msg_events();
@@ -4464,24 +4469,69 @@ mod tests {
                check_added_monitors!(nodes[1], 1);
        }
 
+       #[test]
+       fn test_update_fee_that_funder_cannot_afford() {
+               let nodes = create_network(2);
+               let channel_value = 1888;
+               let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, channel_value, 700000);
+               let channel_id = chan.2;
+
+               let feerate = 260;
+               nodes[0].node.update_fee(channel_id, feerate).unwrap();
+               check_added_monitors!(nodes[0], 1);
+               let update_msg = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
+
+               nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), &update_msg.update_fee.unwrap()).unwrap();
+
+               commitment_signed_dance!(nodes[1], nodes[0], update_msg.commitment_signed, false);
+
+		//Confirm that the new fee based on the last local commitment txn is what we expect given the feerate of 260 set above.
+		//That feerate yields exactly the fee the funder can afford (188 sat fee + 1000 sat channel reserve); see the worked arithmetic after this test.
+               {
+                       let chan_lock = nodes[1].node.channel_state.lock().unwrap();
+                       let chan = chan_lock.by_id.get(&channel_id).unwrap();
+
+			//We made sure neither party's funds are below the dust limit, so subtract the 2 non-HTLC outputs from the output count to get the HTLC count
+                       let num_htlcs = chan.last_local_commitment_txn[0].output.len() - 2;
+                       let total_fee: u64 = feerate * (COMMITMENT_TX_BASE_WEIGHT + (num_htlcs as u64) * COMMITMENT_TX_WEIGHT_PER_HTLC) / 1000;
+                       let mut actual_fee = chan.last_local_commitment_txn[0].output.iter().fold(0, |acc, output| acc + output.value);
+                       actual_fee = channel_value - actual_fee;
+                       assert_eq!(total_fee, actual_fee);
+               } //drop the mutex
+
+		//Add 2 to the previous fee rate so that the final fee increases by exactly 1 (with no HTLCs the fee is essentially
+		//feerate * 724 / 1000, so a feerate bump of 2 adds ~1.448 sat of fee, which rounds back down to 1)
+               nodes[0].node.update_fee(channel_id, feerate+2).unwrap();
+               check_added_monitors!(nodes[0], 1);
+
+               let update2_msg = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
+
+               nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), &update2_msg.update_fee.unwrap()).unwrap();
+
+		//While producing the commitment_signed response after handling the received update_fee request, the check that
+		//the funder (who sent the update_fee) can afford the new fee (funder_balance >= fee + channel_reserve)
+		//should produce an error.
+               let err = nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &update2_msg.commitment_signed).unwrap_err();
+
+               assert!(match err.err {
+                       "Funding remote cannot afford proposed new fee" => true,
+                       _ => false,
+               });
+
+               //clear the message we could not handle
+               nodes[1].node.get_and_clear_pending_msg_events();
+       }
+
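A worked check of the arithmetic asserted in `test_update_fee_that_funder_cannot_afford`, assuming the era's `COMMITMENT_TX_BASE_WEIGHT = 724` from `ln::channel` (`COMMITMENT_TX_WEIGHT_PER_HTLC` is irrelevant here since no HTLCs are pending):

```rust
fn main() {
	let channel_value: u64 = 1888; // sat
	let push_sat: u64 = 700;       // 700_000 msat pushed to the fundee at open
	let reserve: u64 = 1000;       // reserve the funder must keep

	// No HTLCs are pending, so the commitment weight is just the base weight.
	let feerate: u64 = 260;
	let fee = feerate * 724 / 1000;
	assert_eq!(fee, 188); // 188_240 / 1000, rounded down

	// The funder's balance is exactly fee + reserve, so feerate 260 is the
	// highest rate it can afford.
	assert_eq!(channel_value - push_sat, fee + reserve);

	// Bumping the feerate by 2 raises the fee by 1 (262 * 724 / 1000 = 189),
	// which the funder can no longer afford, triggering the new check.
	assert_eq!((feerate + 2) * 724 / 1000, fee + 1);
}
```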
        #[test]
        fn test_update_fee_with_fundee_update_add_htlc() {
                let mut nodes = create_network(2);
                let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
                let channel_id = chan.2;
 
-               macro_rules! get_feerate {
-                       ($node: expr) => {{
-                               let chan_lock = $node.node.channel_state.lock().unwrap();
-                               let chan = chan_lock.by_id.get(&channel_id).unwrap();
-                               chan.get_feerate()
-                       }}
-               }
-
                // balancing
                send_payment(&nodes[0], &vec!(&nodes[1])[..], 8000000);
 
-               let feerate = get_feerate!(nodes[0]);
+               let feerate = get_feerate!(nodes[0], channel_id);
                nodes[0].node.update_fee(channel_id, feerate+20).unwrap();
                check_added_monitors!(nodes[0], 1);
 
@@ -4579,14 +4629,6 @@ mod tests {
                let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
                let channel_id = chan.2;
 
-               macro_rules! get_feerate {
-                       ($node: expr) => {{
-                               let chan_lock = $node.node.channel_state.lock().unwrap();
-                               let chan = chan_lock.by_id.get(&channel_id).unwrap();
-                               chan.get_feerate()
-                       }}
-               }
-
                // A                                        B
                // (1) update_fee/commitment_signed      ->
                //                                       <- (2) revoke_and_ack
@@ -4602,7 +4644,7 @@ mod tests {
                // revoke_and_ack                        ->
 
                // Create and deliver (1)...
-               let feerate = get_feerate!(nodes[0]);
+               let feerate = get_feerate!(nodes[0], channel_id);
                nodes[0].node.update_fee(channel_id, feerate+20).unwrap();
                check_added_monitors!(nodes[0], 1);
 
@@ -4676,8 +4718,8 @@ mod tests {
                check_added_monitors!(nodes[1], 1);
                assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
 
-               assert_eq!(get_feerate!(nodes[0]), feerate + 30);
-               assert_eq!(get_feerate!(nodes[1]), feerate + 30);
+               assert_eq!(get_feerate!(nodes[0], channel_id), feerate + 30);
+               assert_eq!(get_feerate!(nodes[1], channel_id), feerate + 30);
                close_channel(&nodes[0], &nodes[1], &chan.2, chan.3, true);
        }
 
@@ -6214,7 +6256,7 @@ mod tests {
 
        /// pending_htlc_adds includes both the holding cell and in-flight update_add_htlcs, whereas
        /// for claims/fails they are separated out.
-       fn reconnect_nodes(node_a: &Node, node_b: &Node, pre_all_htlcs: bool, pending_htlc_adds: (i64, i64), pending_htlc_claims: (usize, usize), pending_cell_htlc_claims: (usize, usize), pending_cell_htlc_fails: (usize, usize), pending_raa: (bool, bool)) {
+       fn reconnect_nodes(node_a: &Node, node_b: &Node, send_funding_locked: (bool, bool), pending_htlc_adds: (i64, i64), pending_htlc_claims: (usize, usize), pending_cell_htlc_claims: (usize, usize), pending_cell_htlc_fails: (usize, usize), pending_raa: (bool, bool)) {
                node_a.node.peer_connected(&node_b.node.get_our_node_id());
                let reestablish_1 = get_chan_reestablish_msgs!(node_a, node_b);
                node_b.node.peer_connected(&node_a.node.get_our_node_id());
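The `pre_all_htlcs: bool` flag becomes a per-node pair, `send_funding_locked: (bool, bool)`: `.0` controls whether a funding_locked is delivered to node_a on reconnect, `.1` whether one is delivered to node_b. This lets tests model asymmetric reconnects, e.g. (as in the funding_locked test updated below):

```rust
// Deliver funding_locked only to node_b on this reconnect:
reconnect_nodes(&nodes[0], &nodes[1], (false, true), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
```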
@@ -6247,7 +6289,7 @@ mod tests {
                        (pending_htlc_adds.1 == 0 && pending_htlc_claims.1 == 0 && pending_cell_htlc_claims.1 == 0 && pending_cell_htlc_fails.1 == 0));
 
                for chan_msgs in resp_1.drain(..) {
-                       if pre_all_htlcs {
+                       if send_funding_locked.0 {
                                node_a.node.handle_funding_locked(&node_b.node.get_our_node_id(), &chan_msgs.0.unwrap()).unwrap();
                                let announcement_event = node_a.node.get_and_clear_pending_msg_events();
                                if !announcement_event.is_empty() {
@@ -6304,7 +6346,7 @@ mod tests {
                }
 
                for chan_msgs in resp_2.drain(..) {
-                       if pre_all_htlcs {
+                       if send_funding_locked.1 {
                                node_b.node.handle_funding_locked(&node_a.node.get_our_node_id(), &chan_msgs.0.unwrap()).unwrap();
                                let announcement_event = node_b.node.get_and_clear_pending_msg_events();
                                if !announcement_event.is_empty() {
@@ -6368,7 +6410,7 @@ mod tests {
 
                nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
                nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
-               reconnect_nodes(&nodes[0], &nodes[1], true, (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
+               reconnect_nodes(&nodes[0], &nodes[1], (true, true), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
 
                let payment_preimage_1 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 1000000).0;
                let payment_hash_2 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 1000000).1;
@@ -6377,7 +6419,7 @@ mod tests {
 
                nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
                nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
-               reconnect_nodes(&nodes[0], &nodes[1], false, (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
+               reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
 
                let payment_preimage_3 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 1000000).0;
                let payment_preimage_4 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 1000000).0;
@@ -6390,7 +6432,7 @@ mod tests {
                claim_payment_along_route(&nodes[0], &vec!(&nodes[1], &nodes[2]), true, payment_preimage_3);
                fail_payment_along_route(&nodes[0], &[&nodes[1], &nodes[2]], true, payment_hash_5);
 
-               reconnect_nodes(&nodes[0], &nodes[1], false, (0, 0), (0, 0), (1, 0), (1, 0), (false, false));
+               reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (1, 0), (1, 0), (false, false));
                {
                        let events = nodes[0].node.get_and_clear_pending_events();
                        assert_eq!(events.len(), 2);
@@ -6471,19 +6513,19 @@ mod tests {
                if messages_delivered < 3 {
                        // Even if the funding_locked messages get exchanged, as long as nothing further was
                        // received on either side, both sides will need to resend them.
-                       reconnect_nodes(&nodes[0], &nodes[1], true, (0, 1), (0, 0), (0, 0), (0, 0), (false, false));
+                       reconnect_nodes(&nodes[0], &nodes[1], (true, true), (0, 1), (0, 0), (0, 0), (0, 0), (false, false));
                } else if messages_delivered == 3 {
                        // nodes[0] still wants its RAA + commitment_signed
-                       reconnect_nodes(&nodes[0], &nodes[1], false, (-1, 0), (0, 0), (0, 0), (0, 0), (true, false));
+                       reconnect_nodes(&nodes[0], &nodes[1], (false, false), (-1, 0), (0, 0), (0, 0), (0, 0), (true, false));
                } else if messages_delivered == 4 {
                        // nodes[0] still wants its commitment_signed
-                       reconnect_nodes(&nodes[0], &nodes[1], false, (-1, 0), (0, 0), (0, 0), (0, 0), (false, false));
+                       reconnect_nodes(&nodes[0], &nodes[1], (false, false), (-1, 0), (0, 0), (0, 0), (0, 0), (false, false));
                } else if messages_delivered == 5 {
                        // nodes[1] still wants its final RAA
-                       reconnect_nodes(&nodes[0], &nodes[1], false, (0, 0), (0, 0), (0, 0), (0, 0), (false, true));
+                       reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (false, true));
                } else if messages_delivered == 6 {
                        // Everything was delivered...
-                       reconnect_nodes(&nodes[0], &nodes[1], false, (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
+                       reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
                }
 
                let events_1 = nodes[1].node.get_and_clear_pending_events();
@@ -6495,7 +6537,7 @@ mod tests {
 
                nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
                nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
-               reconnect_nodes(&nodes[0], &nodes[1], false, (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
+               reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
 
                nodes[1].node.channel_state.lock().unwrap().next_forward = Instant::now();
                nodes[1].node.process_pending_htlc_forwards();
@@ -6569,7 +6611,7 @@ mod tests {
                nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
                nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
                if messages_delivered < 2 {
-                       reconnect_nodes(&nodes[0], &nodes[1], false, (0, 0), (1, 0), (0, 0), (0, 0), (false, false));
+                       reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (1, 0), (0, 0), (0, 0), (false, false));
                        //TODO: Deduplicate PaymentSent events, then enable this if:
                        //if messages_delivered < 1 {
                                let events_4 = nodes[0].node.get_and_clear_pending_events();
@@ -6583,21 +6625,21 @@ mod tests {
                        //}
                } else if messages_delivered == 2 {
                        // nodes[0] still wants its RAA + commitment_signed
-                       reconnect_nodes(&nodes[0], &nodes[1], false, (0, -1), (0, 0), (0, 0), (0, 0), (false, true));
+                       reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, -1), (0, 0), (0, 0), (0, 0), (false, true));
                } else if messages_delivered == 3 {
                        // nodes[0] still wants its commitment_signed
-                       reconnect_nodes(&nodes[0], &nodes[1], false, (0, -1), (0, 0), (0, 0), (0, 0), (false, false));
+                       reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, -1), (0, 0), (0, 0), (0, 0), (false, false));
                } else if messages_delivered == 4 {
                        // nodes[1] still wants its final RAA
-                       reconnect_nodes(&nodes[0], &nodes[1], false, (0, 0), (0, 0), (0, 0), (0, 0), (true, false));
+                       reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (true, false));
                } else if messages_delivered == 5 {
                        // Everything was delivered...
-                       reconnect_nodes(&nodes[0], &nodes[1], false, (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
+                       reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
                }
 
                nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
                nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
-               reconnect_nodes(&nodes[0], &nodes[1], false, (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
+               reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
 
                // Channel should still work fine...
                let payment_preimage_2 = send_along_route(&nodes[0], route, &[&nodes[1]], 1000000).0;
@@ -6638,20 +6680,28 @@ mod tests {
                        _ => panic!("Unexpected event"),
                }
 
+               reconnect_nodes(&nodes[0], &nodes[1], (false, true), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
+
+               nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
+               nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+
                confirm_transaction(&nodes[1].chain_monitor, &tx, tx.version);
                let events_2 = nodes[1].node.get_and_clear_pending_msg_events();
-               assert_eq!(events_2.len(), 1);
+               assert_eq!(events_2.len(), 2);
                match events_2[0] {
                        MessageSendEvent::SendFundingLocked { ref node_id, msg: _ } => {
                                assert_eq!(*node_id, nodes[0].node.get_our_node_id());
                        },
                        _ => panic!("Unexpected event"),
                }
+               match events_2[1] {
+                       MessageSendEvent::SendAnnouncementSignatures { ref node_id, msg: _ } => {
+                               assert_eq!(*node_id, nodes[0].node.get_our_node_id());
+                       },
+                       _ => panic!("Unexpected event"),
+               }
 
-               reconnect_nodes(&nodes[0], &nodes[1], true, (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
-               nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
-               nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
-               reconnect_nodes(&nodes[0], &nodes[1], true, (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
+               reconnect_nodes(&nodes[0], &nodes[1], (true, true), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
 
                // TODO: We shouldn't need to manually pass list_usable_chanels here once we support
                // rebroadcasting announcement_signatures upon reconnect.
@@ -6854,7 +6904,7 @@ mod tests {
                if disconnect {
                        nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
                        nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
-                       reconnect_nodes(&nodes[0], &nodes[1], true, (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
+                       reconnect_nodes(&nodes[0], &nodes[1], (true, true), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
                }
 
                *nodes[0].chan_monitor.update_ret.lock().unwrap() = Ok(());
@@ -6895,7 +6945,7 @@ mod tests {
                if disconnect {
                        nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
                        nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
-                       reconnect_nodes(&nodes[0], &nodes[1], false, (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
+                       reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
                }
 
                // ...and make sure we can force-close a TemporaryFailure channel with a PermanentFailure
@@ -7455,7 +7505,7 @@ mod tests {
                nodes[0].node = Arc::new(nodes_0_deserialized);
                check_added_monitors!(nodes[0], 1);
 
-               reconnect_nodes(&nodes[0], &nodes[1], false, (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
+               reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
 
                fail_payment(&nodes[0], &[&nodes[1]], our_payment_hash);
                claim_payment(&nodes[0], &[&nodes[1]], our_payment_preimage);
@@ -7525,8 +7575,8 @@ mod tests {
                nodes[0].node = Arc::new(nodes_0_deserialized);
 
                // nodes[1] and nodes[2] have no lost state with nodes[0]...
-               reconnect_nodes(&nodes[0], &nodes[1], false, (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
-               reconnect_nodes(&nodes[0], &nodes[2], false, (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
+               reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
+               reconnect_nodes(&nodes[0], &nodes[2], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
                //... and we can even still claim the payment!
                claim_payment(&nodes[2], &[&nodes[0], &nodes[1]], our_payment_preimage);
 
@@ -7813,4 +7863,191 @@ mod tests {
                let spend_tx = check_static_output!(events, nodes, 0, 0, 1, 1);
                check_spends!(spend_tx, node_txn[0].clone());
        }
+
+       #[test]
+       fn test_static_spendable_outputs_justice_tx_revoked_htlc_timeout_tx() {
+               let nodes = create_network(2);
+
+               // Create some initial channels
+               let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
+
+               let payment_preimage = route_payment(&nodes[0], &vec!(&nodes[1])[..], 3000000).0;
+               let revoked_local_txn = nodes[0].node.channel_state.lock().unwrap().by_id.get(&chan_1.2).unwrap().last_local_commitment_txn.clone();
+               assert_eq!(revoked_local_txn[0].input.len(), 1);
+               assert_eq!(revoked_local_txn[0].input[0].previous_output.txid, chan_1.3.txid());
+
+               claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage);
+
+               let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
+               // A will generate HTLC-Timeout from revoked commitment tx
+               nodes[0].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![revoked_local_txn[0].clone()] }, 1);
+               let events = nodes[0].node.get_and_clear_pending_msg_events();
+               match events[0] {
+                       MessageSendEvent::BroadcastChannelUpdate { .. } => {},
+                       _ => panic!("Unexpected event"),
+               }
+               let revoked_htlc_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
+               assert_eq!(revoked_htlc_txn.len(), 2);
+               assert_eq!(revoked_htlc_txn[0].input.len(), 1);
+               assert_eq!(revoked_htlc_txn[0].input[0].witness.last().unwrap().len(), 133);
+               check_spends!(revoked_htlc_txn[0], revoked_local_txn[0].clone());
+
+               // B will generate justice tx from A's revoked commitment/HTLC tx
+               nodes[1].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![revoked_local_txn[0].clone(), revoked_htlc_txn[0].clone()] }, 1);
+               let events = nodes[1].node.get_and_clear_pending_msg_events();
+               match events[0] {
+                       MessageSendEvent::BroadcastChannelUpdate { .. } => {},
+                       _ => panic!("Unexpected event"),
+               }
+
+               let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
+               assert_eq!(node_txn.len(), 4);
+               assert_eq!(node_txn[3].input.len(), 1);
+               check_spends!(node_txn[3], revoked_htlc_txn[0].clone());
+
+               let events = nodes[1].chan_monitor.simple_monitor.get_and_clear_pending_events();
+               // Check B's ChannelMonitor was able to generate the right spendable output descriptor
+               let spend_tx = check_static_output!(events, nodes, 1, 1, 1, 1);
+               check_spends!(spend_tx, node_txn[3].clone());
+       }
+
+       #[test]
+       fn test_static_spendable_outputs_justice_tx_revoked_htlc_success_tx() {
+               let nodes = create_network(2);
+
+               // Create some initial channels
+               let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
+
+               let payment_preimage = route_payment(&nodes[0], &vec!(&nodes[1])[..], 3000000).0;
+               let revoked_local_txn = nodes[1].node.channel_state.lock().unwrap().by_id.get(&chan_1.2).unwrap().last_local_commitment_txn.clone();
+               assert_eq!(revoked_local_txn[0].input.len(), 1);
+               assert_eq!(revoked_local_txn[0].input[0].previous_output.txid, chan_1.3.txid());
+
+               claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage);
+
+               let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
+               // B will generate HTLC-Success from revoked commitment tx
+               nodes[1].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![revoked_local_txn[0].clone()] }, 1);
+               let events = nodes[1].node.get_and_clear_pending_msg_events();
+               match events[0] {
+                       MessageSendEvent::BroadcastChannelUpdate { .. } => {},
+                       _ => panic!("Unexpected event"),
+               }
+               let revoked_htlc_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
+
+               assert_eq!(revoked_htlc_txn.len(), 2);
+               assert_eq!(revoked_htlc_txn[0].input.len(), 1);
+               assert_eq!(revoked_htlc_txn[0].input[0].witness.last().unwrap().len(), 138);
+               check_spends!(revoked_htlc_txn[0], revoked_local_txn[0].clone());
+
+               // A will generate justice tx from B's revoked commitment/HTLC tx
+               nodes[0].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![revoked_local_txn[0].clone(), revoked_htlc_txn[0].clone()] }, 1);
+               let events = nodes[0].node.get_and_clear_pending_msg_events();
+               match events[0] {
+                       MessageSendEvent::BroadcastChannelUpdate { .. } => {},
+                       _ => panic!("Unexpected event"),
+               }
+
+               let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
+               assert_eq!(node_txn.len(), 4);
+               assert_eq!(node_txn[3].input.len(), 1);
+               check_spends!(node_txn[3], revoked_htlc_txn[0].clone());
+
+               let events = nodes[0].chan_monitor.simple_monitor.get_and_clear_pending_events();
+               // Check A's ChannelMonitor was able to generate the right spendable output descriptor
+               let spend_tx = check_static_output!(events, nodes, 1, 2, 1, 0);
+               check_spends!(spend_tx, node_txn[3].clone());
+       }
+
+       #[test]
+       fn test_dynamic_spendable_outputs_local_htlc_success_tx() {
+               let nodes = create_network(2);
+
+               // Create some initial channels
+               let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
+
+               let payment_preimage = route_payment(&nodes[0], &vec!(&nodes[1])[..], 9000000).0;
+               let local_txn = nodes[1].node.channel_state.lock().unwrap().by_id.get(&chan_1.2).unwrap().last_local_commitment_txn.clone();
+               assert_eq!(local_txn[0].input.len(), 1);
+               check_spends!(local_txn[0], chan_1.3.clone());
+
+               // Give B knowledge of preimage to be able to generate a local HTLC-Success Tx
+               nodes[1].node.claim_funds(payment_preimage);
+               check_added_monitors!(nodes[1], 1);
+               let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
+               nodes[1].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![local_txn[0].clone()] }, 1);
+               let events = nodes[1].node.get_and_clear_pending_msg_events();
+               match events[0] {
+                       MessageSendEvent::UpdateHTLCs { .. } => {},
+                       _ => panic!("Unexpected event"),
+               }
+               match events[1] {
+                       MessageSendEvent::BroadcastChannelUpdate { .. } => {},
+			_ => panic!("Unexpected event"),
+               }
+               let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
+               assert_eq!(node_txn[0].input.len(), 1);
+               assert_eq!(node_txn[0].input[0].witness.last().unwrap().len(), 138);
+               check_spends!(node_txn[0], local_txn[0].clone());
+
+               // Verify that B is able to spend its own HTLC-Success tx thanks to spendable output event given back by its ChannelMonitor
+               let spend_txn = check_dynamic_output_p2wsh!(nodes[1]);
+               assert_eq!(spend_txn.len(), 1);
+               check_spends!(spend_txn[0], node_txn[0].clone());
+       }
+
+       #[test]
+       fn test_dynamic_spendable_outputs_local_htlc_timeout_tx() {
+               let nodes = create_network(2);
+
+               // Create some initial channels
+               let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
+
+               route_payment(&nodes[0], &vec!(&nodes[1])[..], 9000000).0;
+               let local_txn = nodes[0].node.channel_state.lock().unwrap().by_id.get(&chan_1.2).unwrap().last_local_commitment_txn.clone();
+               assert_eq!(local_txn[0].input.len(), 1);
+               check_spends!(local_txn[0], chan_1.3.clone());
+
+               // Timeout HTLC on A's chain and so it can generate a HTLC-Timeout tx
+               let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
+               nodes[0].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![local_txn[0].clone()] }, 200);
+               let events = nodes[0].node.get_and_clear_pending_msg_events();
+               match events[0] {
+                       MessageSendEvent::BroadcastChannelUpdate { .. } => {},
+			_ => panic!("Unexpected event"),
+               }
+               let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
+               assert_eq!(node_txn[0].input.len(), 1);
+               assert_eq!(node_txn[0].input[0].witness.last().unwrap().len(), 133);
+               check_spends!(node_txn[0], local_txn[0].clone());
+
+               // Verify that A is able to spend its own HTLC-Timeout tx thanks to spendable output event given back by its ChannelMonitor
+               let spend_txn = check_dynamic_output_p2wsh!(nodes[0]);
+               assert_eq!(spend_txn.len(), 4);
+               assert_eq!(spend_txn[0], spend_txn[2]);
+               assert_eq!(spend_txn[1], spend_txn[3]);
+               check_spends!(spend_txn[0], local_txn[0].clone());
+               check_spends!(spend_txn[1], node_txn[0].clone());
+       }
+
+       #[test]
+       fn test_static_output_closing_tx() {
+               let nodes = create_network(2);
+
+               let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
+
+               send_payment(&nodes[0], &vec!(&nodes[1])[..], 8000000);
+               let closing_tx = close_channel(&nodes[0], &nodes[1], &chan.2, chan.3, true).2;
+
+               let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
+               nodes[0].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![closing_tx.clone()] }, 1);
+               let events = nodes[0].chan_monitor.simple_monitor.get_and_clear_pending_events();
+               let spend_tx = check_static_output!(events, nodes, 0, 0, 2, 0);
+               check_spends!(spend_tx, closing_tx.clone());
+
+               nodes[1].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![closing_tx.clone()] }, 1);
+               let events = nodes[1].chan_monitor.simple_monitor.get_and_clear_pending_events();
+               let spend_tx = check_static_output!(events, nodes, 0, 0, 2, 1);
+               check_spends!(spend_tx, closing_tx);
+       }
 }