Merge pull request #374 from dongcarl/2019-08-channel-open-sanity
author: Matt Corallo <649246+TheBlueMatt@users.noreply.github.com>
Fri, 9 Aug 2019 16:53:33 +0000 (16:53 +0000)
committer: GitHub <noreply@github.com>
Fri, 9 Aug 2019 16:53:33 +0000 (16:53 +0000)
tests: Add sanity tests for ChannelOpens

1  2 
src/ln/channel.rs
src/ln/functional_tests.rs

diff --combined src/ln/channel.rs
index f523471210abaf4650700d42814f75d8c75112f4,ac569e02889421b749ae50c8bb665a81b3424906..04a65b361e3cf8cfdbbf3fab118ecc8985ed6b53
@@@ -16,7 -16,7 +16,7 @@@ use secp256k1::{Secp256k1,Signature}
  use secp256k1;
  
  use ln::msgs;
 -use ln::msgs::{DecodeError, OptionalField, LocalFeatures};
 +use ln::msgs::{DecodeError, OptionalField, LocalFeatures, DataLossProtect};
  use ln::channelmonitor::ChannelMonitor;
  use ln::channelmanager::{PendingHTLCStatus, HTLCSource, HTLCFailReason, HTLCFailureMsg, PendingForwardHTLCInfo, RAACommitmentOrder, PaymentPreimage, PaymentHash, BREAKDOWN_TIMEOUT, MAX_LOCAL_BREAKDOWN_TIMEOUT};
  use ln::chan_utils::{TxCreationKeys,HTLCOutputInCommitment,HTLC_SUCCESS_TX_WEIGHT,HTLC_TIMEOUT_TX_WEIGHT};
@@@ -32,7 -32,7 +32,7 @@@ use util::config::{UserConfig,ChannelCo
  
  use std;
  use std::default::Default;
 -use std::{cmp,mem};
 +use std::{cmp,mem,fmt};
  use std::sync::{Arc};
  
  #[cfg(test)]
@@@ -366,23 -366,10 +366,23 @@@ pub const OFFERED_HTLC_SCRIPT_WEIGHT: u
  /// Used to return a simple Error back to ChannelManager. Will get converted to a
  /// msgs::ErrorAction::SendErrorMessage or msgs::ErrorAction::IgnoreError as appropriate with our
  /// channel_id in ChannelManager.
 -#[derive(Debug)]
  pub(super) enum ChannelError {
        Ignore(&'static str),
        Close(&'static str),
 +      CloseDelayBroadcast {
 +              msg: &'static str,
 +              update: Option<ChannelMonitor>
 +      },
 +}
 +
 +impl fmt::Debug for ChannelError {
 +      fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
 +              match self {
 +                      &ChannelError::Ignore(e) => write!(f, "Ignore : {}", e),
 +                      &ChannelError::Close(e) => write!(f, "Close : {}", e),
 +                      &ChannelError::CloseDelayBroadcast { msg, .. } => write!(f, "CloseDelayBroadcast : {}", msg)
 +              }
 +      }
  }
  
  macro_rules! secp_check {
@@@ -403,7 -390,7 +403,7 @@@ impl Channel 
        /// Returns a minimum channel reserve value **they** need to maintain
        ///
        /// Guaranteed to return a value no larger than channel_value_satoshis
-       fn get_our_channel_reserve_satoshis(channel_value_satoshis: u64) -> u64 {
+       pub(crate) fn get_our_channel_reserve_satoshis(channel_value_satoshis: u64) -> u64 {
                let (q, _) = channel_value_satoshis.overflowing_div(100);
                cmp::min(channel_value_satoshis, cmp::max(q, 1000)) //TODO
        }
                if inbound_htlc_count + 1 > OUR_MAX_HTLCS as u32 {
                        return Err(ChannelError::Close("Remote tried to push more than our max accepted HTLCs"));
                }
 -              //TODO: Spec is unclear if this is per-direction or in total (I assume per direction):
                // Check our_max_htlc_value_in_flight_msat
                if htlc_inbound_value_msat + msg.amount_msat > Channel::get_our_max_htlc_value_in_flight_msat(self.channel_value_satoshis) {
 -                      return Err(ChannelError::Close("Remote HTLC add would put them over their max HTLC value in flight"));
 +                      return Err(ChannelError::Close("Remote HTLC add would put them over our max HTLC value"));
                }
                // Check our_channel_reserve_satoshis (we're getting paid, so they have to at least meet
                // the reserve_satoshis we told them to always have as direct payment so that they lose
                        return Err(ChannelError::Close("Peer sent a garbage channel_reestablish"));
                }
  
 +              if msg.next_remote_commitment_number > 0 {
 +                      match msg.data_loss_protect {
 +                              OptionalField::Present(ref data_loss) => {
 +                                      if chan_utils::build_commitment_secret(self.local_keys.commitment_seed, INITIAL_COMMITMENT_NUMBER - msg.next_remote_commitment_number + 1) != data_loss.your_last_per_commitment_secret {
 +                                              return Err(ChannelError::Close("Peer sent a garbage channel_reestablish with secret key not matching the commitment height provided"));
 +                                      }
 +                                      if msg.next_remote_commitment_number > INITIAL_COMMITMENT_NUMBER - self.cur_local_commitment_transaction_number {
 +                                              self.channel_monitor.provide_rescue_remote_commitment_tx_info(data_loss.my_current_per_commitment_point);
 +                                              return Err(ChannelError::CloseDelayBroadcast { msg: "We have fallen behind - we have received proof that if we broadcast remote is going to claim our funds - we can't do any automated broadcasting", update: Some(self.channel_monitor.clone())
 +                                      });
 +                                      }
 +                              },
 +                              OptionalField::Absent => {}
 +                      }
 +              }
 +
                // Go ahead and unmark PeerDisconnected as various calls we may make check for it (and all
                // remaining cases either succeed or ErrorMessage-fail).
                self.channel_state &= !(ChannelState::PeerDisconnected as u32);
                                // now!
                                match self.free_holding_cell_htlcs() {
                                        Err(ChannelError::Close(msg)) => return Err(ChannelError::Close(msg)),
 -                                      Err(ChannelError::Ignore(_)) => panic!("Got non-channel-failing result from free_holding_cell_htlcs"),
 +                                      Err(ChannelError::Ignore(_)) | Err(ChannelError::CloseDelayBroadcast { .. }) => panic!("Got non-channel-failing result from free_holding_cell_htlcs"),
                                        Ok(Some((commitment_update, channel_monitor))) => return Ok((resend_funding_locked, required_revoke, Some(commitment_update), Some(channel_monitor), self.resend_order.clone(), shutdown_msg)),
                                        Ok(None) => return Ok((resend_funding_locked, required_revoke, None, None, self.resend_order.clone(), shutdown_msg)),
                                }
        pub fn get_channel_reestablish(&self) -> msgs::ChannelReestablish {
                assert_eq!(self.channel_state & ChannelState::PeerDisconnected as u32, ChannelState::PeerDisconnected as u32);
                assert_ne!(self.cur_remote_commitment_transaction_number, INITIAL_COMMITMENT_NUMBER);
 +              let data_loss_protect = if self.cur_remote_commitment_transaction_number + 1 < INITIAL_COMMITMENT_NUMBER {
 +                      let remote_last_secret = self.channel_monitor.get_secret(self.cur_remote_commitment_transaction_number + 2).unwrap();
 +                      log_trace!(self, "Enough info to generate a Data Loss Protect with per_commitment_secret {}", log_bytes!(remote_last_secret));
 +                      OptionalField::Present(DataLossProtect {
 +                              your_last_per_commitment_secret: remote_last_secret,
 +                              my_current_per_commitment_point: PublicKey::from_secret_key(&self.secp_ctx, &self.build_local_commitment_secret(self.cur_local_commitment_transaction_number + 1))
 +                      })
 +              } else {
 +                      log_debug!(self, "We don't seen yet any revoked secret, if this channnel has already been updated it means we are fallen-behind, you should wait for other peer closing");
 +                      OptionalField::Present(DataLossProtect {
 +                              your_last_per_commitment_secret: [0;32],
 +                              my_current_per_commitment_point: PublicKey::from_secret_key(&self.secp_ctx, &self.build_local_commitment_secret(self.cur_local_commitment_transaction_number))
 +                      })
 +              };
                msgs::ChannelReestablish {
                        channel_id: self.channel_id(),
                        // The protocol has two different commitment number concepts - the "commitment
                        // dropped this channel on disconnect as it hasn't yet reached FundingSent so we can't
                        // overflow here.
                        next_remote_commitment_number: INITIAL_COMMITMENT_NUMBER - self.cur_remote_commitment_transaction_number - 1,
 -                      data_loss_protect: OptionalField::Absent,
 +                      data_loss_protect,
                }
        }
  
                if outbound_htlc_count + 1 > self.their_max_accepted_htlcs as u32 {
                        return Err(ChannelError::Ignore("Cannot push more than their max accepted HTLCs"));
                }
 -              //TODO: Spec is unclear if this is per-direction or in total (I assume per direction):
                // Check their_max_htlc_value_in_flight_msat
                if htlc_outbound_value_msat + amount_msat > self.their_max_htlc_value_in_flight_msat {
 -                      return Err(ChannelError::Ignore("Cannot send value that would put us over the max HTLC value in flight"));
 +                      return Err(ChannelError::Ignore("Cannot send value that would put us over the max HTLC value in flight our peer will accept"));
                }
  
                // Check self.their_channel_reserve_satoshis (the amount we must keep as
                // reserve for them to have something to claim if we misbehave)
                if self.value_to_self_msat < self.their_channel_reserve_satoshis * 1000 + amount_msat + htlc_outbound_value_msat {
 -                      return Err(ChannelError::Ignore("Cannot send value that would put us over the reserve value"));
 +                      return Err(ChannelError::Ignore("Cannot send value that would put us over their reserve value"));
                }
  
                //TODO: Check cltv_expiry? Do this in channel manager?
index 5446c80aad821d8960703f58ca4fd902cd4ff345,8dae7b45bf8911e7d77291d38c28b2e599b96e69..819fc23ba138b61d99cf36e95191b5efed54b8a6
@@@ -3,9 -3,8 +3,9 @@@
  //! claim outputs on-chain.
  
  use chain::transaction::OutPoint;
 -use chain::chaininterface::{ChainListener, ChainWatchInterface};
 +use chain::chaininterface::{ChainListener, ChainWatchInterface, ChainWatchInterfaceUtil};
  use chain::keysinterface::{KeysInterface, SpendableOutputDescriptor, KeysManager};
 +use chain::keysinterface;
  use ln::channel::{COMMITMENT_TX_BASE_WEIGHT, COMMITMENT_TX_WEIGHT_PER_HTLC};
  use ln::channelmanager::{ChannelManager,ChannelManagerReadArgs,HTLCForwardInfo,RAACommitmentOrder, PaymentPreimage, PaymentHash, BREAKDOWN_TIMEOUT};
  use ln::channelmonitor::{ChannelMonitor, CLTV_CLAIM_BUFFER, LATENCY_GRACE_PERIOD_BLOCKS, ManyChannelMonitor, ANTI_REORG_DELAY};
@@@ -19,7 -18,6 +19,7 @@@ use util::events::{Event, EventsProvide
  use util::errors::APIError;
  use util::ser::{Writeable, ReadableArgs};
  use util::config::UserConfig;
 +use util::logger::Logger;
  
  use bitcoin::util::hash::BitcoinHash;
  use bitcoin_hashes::sha256d::Hash as Sha256dHash;
@@@ -41,7 -39,7 +41,7 @@@ use secp256k1::key::{PublicKey,SecretKe
  
  use std::collections::{BTreeSet, HashMap, HashSet};
  use std::default::Default;
 -use std::sync::Arc;
 +use std::sync::{Arc, Mutex};
  use std::sync::atomic::Ordering;
  use std::mem;
  
@@@ -49,6 -47,58 +49,58 @@@ use rand::{thread_rng, Rng}
  
  use ln::functional_test_utils::*;
  
+ #[test]
+ fn test_insane_channel_opens() {
+       // Stand up a network of 2 nodes
+       let nodes = create_network(2, &[None, None]);
+       // Instantiate channel parameters where we push the maximum msats given our
+       // funding satoshis
+       let channel_value_sat = 31337; // same as funding satoshis
+       let channel_reserve_satoshis = Channel::get_our_channel_reserve_satoshis(channel_value_sat);
+       let push_msat = (channel_value_sat - channel_reserve_satoshis) * 1000;
+       // Have node0 initiate a channel to node1 with aforementioned parameters
+       nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), channel_value_sat, push_msat, 42).unwrap();
+       // Extract the channel open message from node0 to node1
+       let open_channel_message = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
+       // Test helper that asserts we get the correct error string given a mutator
+       // that supposedly makes the channel open message insane
+       let insane_open_helper = |expected_error_str, message_mutator: fn(msgs::OpenChannel) -> msgs::OpenChannel| {
+               match nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), LocalFeatures::new(), &message_mutator(open_channel_message.clone())) {
+                       Err(msgs::HandleError{ err: error_str, action: Some(msgs::ErrorAction::SendErrorMessage {..})}) => {
+                               assert_eq!(error_str, expected_error_str, "unexpected HandleError string (expected `{}`, actual `{}`)", expected_error_str, error_str)
+                       },
+                       Err(msgs::HandleError{..}) => {panic!("unexpected HandleError action")},
+                       _ => panic!("insane OpenChannel message was somehow Ok"),
+               }
+       };
+       use ln::channel::MAX_FUNDING_SATOSHIS;
+       use ln::channelmanager::MAX_LOCAL_BREAKDOWN_TIMEOUT;
+       // Test all mutations that would make the channel open message insane
+       insane_open_helper("funding value > 2^24", |mut msg| { msg.funding_satoshis = MAX_FUNDING_SATOSHIS; msg });
+       insane_open_helper("Bogus channel_reserve_satoshis", |mut msg| { msg.channel_reserve_satoshis = msg.funding_satoshis + 1; msg });
+       insane_open_helper("push_msat larger than funding value", |mut msg| { msg.push_msat = (msg.funding_satoshis - msg.channel_reserve_satoshis) * 1000 + 1; msg });
+       insane_open_helper("Peer never wants payout outputs?", |mut msg| { msg.dust_limit_satoshis = msg.funding_satoshis + 1 ; msg });
+       insane_open_helper("Bogus; channel reserve is less than dust limit", |mut msg| { msg.dust_limit_satoshis = msg.channel_reserve_satoshis + 1; msg });
+       insane_open_helper("Minimum htlc value is full channel value", |mut msg| { msg.htlc_minimum_msat = (msg.funding_satoshis - msg.channel_reserve_satoshis) * 1000; msg });
+       insane_open_helper("They wanted our payments to be delayed by a needlessly long period", |mut msg| { msg.to_self_delay = MAX_LOCAL_BREAKDOWN_TIMEOUT + 1; msg });
+       insane_open_helper("0 max_accpted_htlcs makes for a useless channel", |mut msg| { msg.max_accepted_htlcs = 0; msg });
+       insane_open_helper("max_accpted_htlcs > 483", |mut msg| { msg.max_accepted_htlcs = 484; msg });
+ }
  #[test]
  fn test_async_inbound_update_fee() {
        let mut nodes = create_network(2, &[None, None]);
@@@ -1236,7 -1286,7 +1288,7 @@@ fn do_channel_reserve_test(test_recv: b
                assert!(route.hops.iter().rev().skip(1).all(|h| h.fee_msat == feemsat));
                let err = nodes[0].node.send_payment(route, our_payment_hash).err().unwrap();
                match err {
 -                      APIError::ChannelUnavailable{err} => assert_eq!(err, "Cannot send value that would put us over the max HTLC value in flight"),
 +                      APIError::ChannelUnavailable{err} => assert_eq!(err, "Cannot send value that would put us over the max HTLC value in flight our peer will accept"),
                        _ => panic!("Unknown error variants"),
                }
        }
                let (route, our_payment_hash, _) = get_route_and_payment_hash!(recv_value + 1);
                let err = nodes[0].node.send_payment(route.clone(), our_payment_hash).err().unwrap();
                match err {
 -                      APIError::ChannelUnavailable{err} => assert_eq!(err, "Cannot send value that would put us over the reserve value"),
 +                      APIError::ChannelUnavailable{err} => assert_eq!(err, "Cannot send value that would put us over their reserve value"),
                        _ => panic!("Unknown error variants"),
                }
        }
        {
                let (route, our_payment_hash, _) = get_route_and_payment_hash!(recv_value_2 + 1);
                match nodes[0].node.send_payment(route, our_payment_hash).err().unwrap() {
 -                      APIError::ChannelUnavailable{err} => assert_eq!(err, "Cannot send value that would put us over the reserve value"),
 +                      APIError::ChannelUnavailable{err} => assert_eq!(err, "Cannot send value that would put us over their reserve value"),
                        _ => panic!("Unknown error variants"),
                }
        }
        {
                let (route, our_payment_hash, _) = get_route_and_payment_hash!(recv_value_22+1);
                match nodes[0].node.send_payment(route, our_payment_hash).err().unwrap() {
 -                      APIError::ChannelUnavailable{err} => assert_eq!(err, "Cannot send value that would put us over the reserve value"),
 +                      APIError::ChannelUnavailable{err} => assert_eq!(err, "Cannot send value that would put us over their reserve value"),
                        _ => panic!("Unknown error variants"),
                }
        }
@@@ -5019,7 -5069,7 +5071,7 @@@ fn test_update_add_htlc_bolt2_sender_ex
        let err = nodes[0].node.send_payment(route, our_payment_hash);
  
        if let Err(APIError::ChannelUnavailable{err}) = err {
 -              assert_eq!(err, "Cannot send value that would put us over the max HTLC value in flight");
 +              assert_eq!(err, "Cannot send value that would put us over the max HTLC value in flight our peer will accept");
        } else {
                assert!(false);
        }
@@@ -5143,7 -5193,7 +5195,7 @@@ fn test_update_add_htlc_bolt2_receiver_
        let err = nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);
  
        if let Err(msgs::HandleError{err, action: Some(msgs::ErrorAction::SendErrorMessage {..})}) = err {
 -              assert_eq!(err,"Remote HTLC add would put them over their max HTLC value in flight");
 +              assert_eq!(err,"Remote HTLC add would put them over our max HTLC value");
        } else {
                assert!(false);
        }
@@@ -5947,113 -5997,3 +5999,113 @@@ fn test_user_configurable_csv_delay() 
                }
        } else { assert!(false); }
  }
 +
 +#[test]
 +fn test_data_loss_protect() {
 +      // We want to be sure that :
 +      // * we don't broadcast our Local Commitment Tx in case of fallen behind
 +      // * we close channel in case of detecting other being fallen behind
 +      // * we are able to claim our own outputs thanks to remote my_current_per_commitment_point
 +      let mut nodes = create_network(2, &[None, None]);
 +
 +      let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 1000000, LocalFeatures::new(), LocalFeatures::new());
 +
 +      // Cache node A state before any channel update
 +      let previous_node_state = nodes[0].node.encode();
 +      let mut previous_chan_monitor_state = test_utils::TestVecWriter(Vec::new());
 +      nodes[0].chan_monitor.simple_monitor.monitors.lock().unwrap().iter().next().unwrap().1.write_for_disk(&mut previous_chan_monitor_state).unwrap();
 +
 +      send_payment(&nodes[0], &vec!(&nodes[1])[..], 8000000);
 +      send_payment(&nodes[0], &vec!(&nodes[1])[..], 8000000);
 +
 +      nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
 +      nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
 +
 +      // Restore node A from previous state
 +      let logger: Arc<Logger> = Arc::new(test_utils::TestLogger::with_id(format!("node {}", 0)));
 +      let chan_monitor = <(Sha256dHash, ChannelMonitor)>::read(&mut ::std::io::Cursor::new(previous_chan_monitor_state.0), Arc::clone(&logger)).unwrap().1;
 +      let chain_monitor = Arc::new(ChainWatchInterfaceUtil::new(Network::Testnet, Arc::clone(&logger)));
 +      let tx_broadcaster = Arc::new(test_utils::TestBroadcaster{txn_broadcasted: Mutex::new(Vec::new())});
 +      let feeest = Arc::new(test_utils::TestFeeEstimator { sat_per_kw: 253 });
 +      let monitor = Arc::new(test_utils::TestChannelMonitor::new(chain_monitor.clone(), tx_broadcaster.clone(), logger.clone(), feeest.clone()));
 +      let mut channel_monitors = HashMap::new();
 +      channel_monitors.insert(OutPoint { txid: chan.3.txid(), index: 0 }, &chan_monitor);
 +      let node_state_0 = <(Sha256dHash, ChannelManager)>::read(&mut ::std::io::Cursor::new(previous_node_state), ChannelManagerReadArgs {
 +              keys_manager: Arc::new(keysinterface::KeysManager::new(&nodes[0].node_seed, Network::Testnet, Arc::clone(&logger), 42, 21)),
 +              fee_estimator: feeest.clone(),
 +              monitor: monitor.clone(),
 +              chain_monitor: chain_monitor.clone(),
 +              logger: Arc::clone(&logger),
 +              tx_broadcaster,
 +              default_config: UserConfig::new(),
 +              channel_monitors: &channel_monitors
 +      }).unwrap().1;
 +      nodes[0].node = Arc::new(node_state_0);
 +      monitor.add_update_monitor(OutPoint { txid: chan.3.txid(), index: 0 }, chan_monitor.clone()).is_ok();
 +      nodes[0].chan_monitor = monitor;
 +      nodes[0].chain_monitor = chain_monitor;
 +      check_added_monitors!(nodes[0], 1);
 +
 +      nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id());
 +      nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id());
 +
 +      let reestablish_0 = get_chan_reestablish_msgs!(nodes[1], nodes[0]);
 +
 +      // Check we update monitor following learning of per_commitment_point from B
 +      if let Err(err) = nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &reestablish_0[0])  {
 +              if let Some(error) = err.action {
 +                      match error {
 +                              ErrorAction::SendErrorMessage { msg } => {
 +                                      assert_eq!(msg.data, "We have fallen behind - we have received proof that if we broadcast remote is going to claim our funds - we can't do any automated broadcasting");
 +                              },
 +                              _ => panic!("Unexpected event!"),
 +                      }
 +              } else { assert!(false); }
 +      } else { assert!(false); }
 +      check_added_monitors!(nodes[0], 1);
 +
 +      {
 +              let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
 +              assert_eq!(node_txn.len(), 0);
 +      }
 +
 +      let mut reestablish_1 = Vec::with_capacity(1);
 +      for msg in nodes[0].node.get_and_clear_pending_msg_events() {
 +              if let MessageSendEvent::SendChannelReestablish { ref node_id, ref msg } = msg {
 +                      assert_eq!(*node_id, nodes[1].node.get_our_node_id());
 +                      reestablish_1.push(msg.clone());
 +              } else if let MessageSendEvent::BroadcastChannelUpdate { .. } = msg {
 +              } else {
 +                      panic!("Unexpected event")
 +              }
 +      }
 +
 +      // Check we close channel detecting A is fallen-behind
 +      if let Err(err) = nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &reestablish_1[0]) {
 +              if let Some(error) = err.action {
 +                      match error {
 +                              ErrorAction::SendErrorMessage { msg } => {
 +                                      assert_eq!(msg.data, "Peer attempted to reestablish channel with a very old local commitment transaction"); },
 +                              _ => panic!("Unexpected event!"),
 +                      }
 +              } else { assert!(false); }
 +      } else { assert!(false); }
 +
 +      let events = nodes[1].node.get_and_clear_pending_msg_events();
 +      assert_eq!(events.len(), 1);
 +      match events[0] {
 +              MessageSendEvent::BroadcastChannelUpdate { .. } => {},
 +              _ => panic!("Unexpected event"),
 +      }
 +
 +      // Check A is able to claim to_remote output
 +      let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
 +      assert_eq!(node_txn.len(), 1);
 +      check_spends!(node_txn[0], chan.3.clone());
 +      assert_eq!(node_txn[0].output.len(), 2);
 +      let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42};
 +      nodes[0].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![node_txn[0].clone()]}, 1);
 +      let spend_txn = check_spendable_outputs!(nodes[0], 1);
 +      assert_eq!(spend_txn.len(), 1);
 +      check_spends!(spend_txn[0], node_txn[0].clone());
 +}