git.bitcoin.ninja Git - rust-lightning/commitdiff
Merge branch 'master' of github.com:rust-bitcoin/rust-lightning into use-workspaces
author RJ Rybarczyk <rj@rybar.tech>
Mon, 18 Nov 2019 23:51:21 +0000 (23:51 +0000)
committer RJ Rybarczyk <rj@rybar.tech>
Mon, 18 Nov 2019 23:51:21 +0000 (23:51 +0000)
lightning/fuzz/fuzz_targets/chanmon_fail_consistency.rs
lightning/fuzz/fuzz_targets/full_stack_target.rs
lightning/src/ln/chanmon_update_fail_tests.rs
lightning/src/ln/channelmanager.rs
lightning/src/ln/functional_test_utils.rs
lightning/src/ln/functional_tests.rs

index 91b639eacc0674ab499c305b6a461e5e6e98b64f,0000000000000000000000000000000000000000..74b1e5d14bf8333d47be5d18d63b744c55e2d480
mode 100644,000000..100644
--- /dev/null
+++ b/lightning/fuzz/fuzz_targets/chanmon_fail_consistency.rs
@@@ -1,783 -1,0 +1,783 @@@
 +//! Test that monitor update failures don't get our channel state out of sync.
 +//! One of the biggest concerns with the monitor update failure handling code is that messages
 +//! resent after monitor updating is restored are delivered out-of-order, resulting in
 +//! commitment_signed messages having "invalid signatures".
 +//! To test this we stand up a network of three nodes and read bytes from the fuzz input to denote
 +//! actions such as sending payments, handling events, or changing monitor update return values on
 +//! a per-node basis. This should allow it to find any cases where the ordering of actions results
 +//! in us getting out of sync with ourselves, and, assuming at least one of our receive- or
 +//! send-side handlers is correct, with other peers. We consider it a failure if any action
 +//! results in a channel being force-closed.
 +
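For orientation, the byte-as-command scheme the doc comment describes can be sketched without any lightning types. The command ranges below mirror the `match get_slice!(1)[0]` dispatch at the bottom of `do_test`; the handler bodies are placeholders, not the crate's API:

```rust
// Self-contained sketch of the fuzz-interpreter pattern this target uses:
// each input byte selects one action to apply to the three-node network.
fn interpret(data: &[u8]) {
    let mut pos = 0;
    while pos < data.len() {
        let cmd = data[pos];
        pos += 1;
        match cmd {
            0x00..=0x05 => { /* force a node's monitor updates to fail, or let them succeed */ }
            0x06..=0x08 => { /* restore a node's channel monitor */ }
            0x09..=0x0e => { /* send a one- or two-hop payment */ }
            0x0f..=0x12 => { /* disconnect or reconnect a peer pair */ }
            0x13..=0x1e => { /* deliver queued messages / process events on a node */ }
            0x1f..=0x21 => { /* drop a node and reload it from its last serialization */ }
            _ => return, // any other byte ends the test case
        }
    }
}
```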
 +//Uncomment this for libfuzzer builds:
 +//#![no_main]
 +
 +extern crate bitcoin;
 +extern crate bitcoin_hashes;
 +extern crate lightning;
 +extern crate secp256k1;
 +
 +use bitcoin::BitcoinHash;
 +use bitcoin::blockdata::block::BlockHeader;
 +use bitcoin::blockdata::transaction::{Transaction, TxOut};
 +use bitcoin::blockdata::script::{Builder, Script};
 +use bitcoin::blockdata::opcodes;
 +use bitcoin::network::constants::Network;
 +
 +use bitcoin_hashes::Hash as TraitImport;
 +use bitcoin_hashes::hash160::Hash as Hash160;
 +use bitcoin_hashes::sha256::Hash as Sha256;
 +use bitcoin_hashes::sha256d::Hash as Sha256d;
 +
 +use lightning::chain::chaininterface;
 +use lightning::chain::transaction::OutPoint;
 +use lightning::chain::chaininterface::{BroadcasterInterface,ConfirmationTarget,ChainListener,FeeEstimator,ChainWatchInterfaceUtil};
 +use lightning::chain::keysinterface::{ChannelKeys, KeysInterface};
 +use lightning::ln::channelmonitor;
 +use lightning::ln::channelmonitor::{ChannelMonitor, ChannelMonitorUpdateErr, HTLCUpdate};
 +use lightning::ln::channelmanager::{ChannelManager, PaymentHash, PaymentPreimage, ChannelManagerReadArgs};
 +use lightning::ln::router::{Route, RouteHop};
 +use lightning::ln::msgs::{CommitmentUpdate, ChannelMessageHandler, ErrorAction, LightningError, UpdateAddHTLC, LocalFeatures};
 +use lightning::util::events;
 +use lightning::util::logger::Logger;
 +use lightning::util::config::UserConfig;
 +use lightning::util::events::{EventsProvider, MessageSendEventsProvider};
 +use lightning::util::ser::{Readable, ReadableArgs, Writeable, Writer};
 +
 +mod utils;
 +use utils::test_logger;
 +
 +use secp256k1::key::{PublicKey,SecretKey};
 +use secp256k1::Secp256k1;
 +
 +use std::mem;
 +use std::cmp::Ordering;
 +use std::collections::{HashSet, hash_map, HashMap};
 +use std::sync::{Arc,Mutex};
 +use std::sync::atomic;
 +use std::io::Cursor;
 +
 +struct FuzzEstimator {}
 +impl FeeEstimator for FuzzEstimator {
 +      fn get_est_sat_per_1000_weight(&self, _: ConfirmationTarget) -> u64 {
 +              253
 +      }
 +}
 +
 +pub struct TestBroadcaster {}
 +impl BroadcasterInterface for TestBroadcaster {
 +      fn broadcast_transaction(&self, _tx: &Transaction) { }
 +}
 +
 +pub struct VecWriter(pub Vec<u8>);
 +impl Writer for VecWriter {
 +      fn write_all(&mut self, buf: &[u8]) -> Result<(), ::std::io::Error> {
 +              self.0.extend_from_slice(buf);
 +              Ok(())
 +      }
 +      fn size_hint(&mut self, size: usize) {
 +              self.0.reserve_exact(size);
 +      }
 +}
 +
 +static mut IN_RESTORE: bool = false;
 +pub struct TestChannelMonitor {
 +      pub simple_monitor: Arc<channelmonitor::SimpleManyChannelMonitor<OutPoint>>,
 +      pub update_ret: Mutex<Result<(), channelmonitor::ChannelMonitorUpdateErr>>,
 +      pub latest_good_update: Mutex<HashMap<OutPoint, Vec<u8>>>,
 +      pub latest_update_good: Mutex<HashMap<OutPoint, bool>>,
 +      pub latest_updates_good_at_last_ser: Mutex<HashMap<OutPoint, bool>>,
 +      pub should_update_manager: atomic::AtomicBool,
 +}
 +impl TestChannelMonitor {
 +      pub fn new(chain_monitor: Arc<chaininterface::ChainWatchInterface>, broadcaster: Arc<chaininterface::BroadcasterInterface>, logger: Arc<Logger>, feeest: Arc<chaininterface::FeeEstimator>) -> Self {
 +              Self {
 +                      simple_monitor: channelmonitor::SimpleManyChannelMonitor::new(chain_monitor, broadcaster, logger, feeest),
 +                      update_ret: Mutex::new(Ok(())),
 +                      latest_good_update: Mutex::new(HashMap::new()),
 +                      latest_update_good: Mutex::new(HashMap::new()),
 +                      latest_updates_good_at_last_ser: Mutex::new(HashMap::new()),
 +                      should_update_manager: atomic::AtomicBool::new(false),
 +              }
 +      }
 +}
 +impl channelmonitor::ManyChannelMonitor for TestChannelMonitor {
 +      fn add_update_monitor(&self, funding_txo: OutPoint, monitor: channelmonitor::ChannelMonitor) -> Result<(), channelmonitor::ChannelMonitorUpdateErr> {
 +              let ret = self.update_ret.lock().unwrap().clone();
 +              if let Ok(()) = ret {
 +                      let mut ser = VecWriter(Vec::new());
 +                      monitor.write_for_disk(&mut ser).unwrap();
 +                      self.latest_good_update.lock().unwrap().insert(funding_txo, ser.0);
 +                      match self.latest_update_good.lock().unwrap().entry(funding_txo) {
 +                              hash_map::Entry::Vacant(mut e) => { e.insert(true); },
 +                              hash_map::Entry::Occupied(mut e) => {
 +                                      if !e.get() && unsafe { IN_RESTORE } {
 +                                              // Technically we can't consider an update to be "good" unless we're doing
 +                                              // it in response to a test_restore_channel_monitor as the channel may
 +                                              // still be waiting on such a call, so only set us to good if we're in the
 +                                              // middle of a restore call.
 +                                              e.insert(true);
 +                                      }
 +                              },
 +                      }
 +                      self.should_update_manager.store(true, atomic::Ordering::Relaxed);
 +              } else {
 +                      self.latest_update_good.lock().unwrap().insert(funding_txo, false);
 +              }
 +              assert!(self.simple_monitor.add_update_monitor(funding_txo, monitor).is_ok());
 +              ret
 +      }
 +
 +      fn fetch_pending_htlc_updated(&self) -> Vec<HTLCUpdate> {
 +              return self.simple_monitor.fetch_pending_htlc_updated();
 +      }
 +}
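TestChannelMonitor is a fault-injection wrapper: the wrapped SimpleManyChannelMonitor always processes the update so its state stays consistent, while the caller sees whichever result the test injected through `update_ret`. The shape of that pattern, reduced to std-only Rust with a hypothetical `Persist` trait (not a rust-lightning type):

```rust
use std::sync::Mutex;

// Hypothetical stand-in for the ManyChannelMonitor trait.
trait Persist {
    fn persist(&self, data: &[u8]) -> Result<(), ()>;
}

struct RealPersister;
impl Persist for RealPersister {
    fn persist(&self, _data: &[u8]) -> Result<(), ()> { Ok(()) }
}

// Wrapper that always forwards to the inner implementation but reports
// whatever result the test injected, mirroring `update_ret` above.
struct FaultInjector<P: Persist> {
    inner: P,
    forced_result: Mutex<Result<(), ()>>,
}

impl<P: Persist> Persist for FaultInjector<P> {
    fn persist(&self, data: &[u8]) -> Result<(), ()> {
        // The real work always happens, so internal state stays consistent...
        self.inner.persist(data).unwrap();
        // ...but the caller observes the injected result.
        *self.forced_result.lock().unwrap()
    }
}
```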
 +
 +struct KeyProvider {
 +      node_id: u8,
 +      session_id: atomic::AtomicU8,
 +      channel_id: atomic::AtomicU8,
 +}
 +impl KeysInterface for KeyProvider {
 +      fn get_node_secret(&self) -> SecretKey {
 +              SecretKey::from_slice(&[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, self.node_id]).unwrap()
 +      }
 +
 +      fn get_destination_script(&self) -> Script {
 +              let secp_ctx = Secp256k1::signing_only();
 +              let channel_monitor_claim_key = SecretKey::from_slice(&[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, self.node_id]).unwrap();
 +              let our_channel_monitor_claim_key_hash = Hash160::hash(&PublicKey::from_secret_key(&secp_ctx, &channel_monitor_claim_key).serialize());
 +              Builder::new().push_opcode(opcodes::all::OP_PUSHBYTES_0).push_slice(&our_channel_monitor_claim_key_hash[..]).into_script()
 +      }
 +
 +      fn get_shutdown_pubkey(&self) -> PublicKey {
 +              let secp_ctx = Secp256k1::signing_only();
 +              PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, self.node_id]).unwrap())
 +      }
 +
 +      fn get_channel_keys(&self, _inbound: bool) -> ChannelKeys {
 +              ChannelKeys {
 +                      funding_key:               SecretKey::from_slice(&[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 4, self.node_id]).unwrap(),
 +                      revocation_base_key:       SecretKey::from_slice(&[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 5, self.node_id]).unwrap(),
 +                      payment_base_key:          SecretKey::from_slice(&[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 6, self.node_id]).unwrap(),
 +                      delayed_payment_base_key:  SecretKey::from_slice(&[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 7, self.node_id]).unwrap(),
 +                      htlc_base_key:             SecretKey::from_slice(&[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 8, self.node_id]).unwrap(),
 +                      commitment_seed: [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 9, self.node_id],
 +              }
 +      }
 +
 +      fn get_session_key(&self) -> SecretKey {
 +              let id = self.session_id.fetch_add(1, atomic::Ordering::Relaxed);
 +              SecretKey::from_slice(&[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, id, 10, self.node_id]).unwrap()
 +      }
 +
 +      fn get_channel_id(&self) -> [u8; 32] {
 +              let id = self.channel_id.fetch_add(1, atomic::Ordering::Relaxed);
 +              [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, id, 11, self.node_id]
 +      }
 +}
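Every secret above is a constant 32-byte array whose trailing bytes encode an optional counter, a purpose tag, and the node id, so key material is deterministic across fuzz runs and distinct across nodes and purposes. The layout as one helper (tag values read off the arrays above):

```rust
// Sketch of the deterministic key layout used by KeyProvider: 32 bytes,
// zero except for the trailing (counter, tag, node_id) triple. Any such
// non-zero array is a valid secp256k1 secret key.
fn key_material(counter: u8, tag: u8, node_id: u8) -> [u8; 32] {
    let mut k = [0u8; 32];
    k[29] = counter; // 0 for fixed keys; incremented for session/channel ids
    k[30] = tag;     // 1 = node secret, 4 = funding key, 10 = session key, ...
    k[31] = node_id; // distinguishes the three fuzzed nodes
    k
}
```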
 +
 +#[inline]
 +pub fn do_test(data: &[u8]) {
 +      let fee_est = Arc::new(FuzzEstimator{});
 +      let broadcast = Arc::new(TestBroadcaster{});
 +
 +      macro_rules! make_node {
 +              ($node_id: expr) => { {
 +                      let logger: Arc<Logger> = Arc::new(test_logger::TestLogger::new($node_id.to_string()));
 +                      let watch = Arc::new(ChainWatchInterfaceUtil::new(Network::Bitcoin, Arc::clone(&logger)));
 +                      let monitor = Arc::new(TestChannelMonitor::new(watch.clone(), broadcast.clone(), logger.clone(), fee_est.clone()));
 +
 +                      let keys_manager = Arc::new(KeyProvider { node_id: $node_id, session_id: atomic::AtomicU8::new(0), channel_id: atomic::AtomicU8::new(0) });
 +                      let mut config = UserConfig::new();
 +                      config.channel_options.fee_proportional_millionths = 0;
 +                      config.channel_options.announced_channel = true;
 +                      config.peer_channel_config_limits.min_dust_limit_satoshis = 0;
 +                      (ChannelManager::new(Network::Bitcoin, fee_est.clone(), monitor.clone(), watch.clone(), broadcast.clone(), Arc::clone(&logger), keys_manager.clone(), config, 0).unwrap(),
 +                      monitor)
 +              } }
 +      }
 +
 +      macro_rules! reload_node {
 +              ($ser: expr, $node_id: expr, $old_monitors: expr) => { {
 +                      let logger: Arc<Logger> = Arc::new(test_logger::TestLogger::new($node_id.to_string()));
 +                      let watch = Arc::new(ChainWatchInterfaceUtil::new(Network::Bitcoin, Arc::clone(&logger)));
 +                      let monitor = Arc::new(TestChannelMonitor::new(watch.clone(), broadcast.clone(), logger.clone(), fee_est.clone()));
 +
 +                      let keys_manager = Arc::new(KeyProvider { node_id: $node_id, session_id: atomic::AtomicU8::new(0), channel_id: atomic::AtomicU8::new(0) });
 +                      let mut config = UserConfig::new();
 +                      config.channel_options.fee_proportional_millionths = 0;
 +                      config.channel_options.announced_channel = true;
 +                      config.peer_channel_config_limits.min_dust_limit_satoshis = 0;
 +
 +                      let mut monitors = HashMap::new();
 +                      let mut old_monitors = $old_monitors.latest_good_update.lock().unwrap();
 +                      for (outpoint, monitor_ser) in old_monitors.drain() {
 +                              monitors.insert(outpoint, <(Sha256d, ChannelMonitor)>::read(&mut Cursor::new(&monitor_ser), Arc::clone(&logger)).expect("Failed to read monitor").1);
 +                              monitor.latest_good_update.lock().unwrap().insert(outpoint, monitor_ser);
 +                      }
 +                      let mut monitor_refs = HashMap::new();
 +                      for (outpoint, monitor) in monitors.iter() {
 +                              monitor_refs.insert(*outpoint, monitor);
 +                      }
 +
 +                      let read_args = ChannelManagerReadArgs {
 +                              keys_manager,
 +                              fee_estimator: fee_est.clone(),
 +                              monitor: monitor.clone(),
 +                              chain_monitor: watch,
 +                              tx_broadcaster: broadcast.clone(),
 +                              logger,
 +                              default_config: config,
 +                              channel_monitors: &monitor_refs,
 +                      };
 +
 +                      let res = (<(Sha256d, ChannelManager)>::read(&mut Cursor::new(&$ser.0), read_args).expect("Failed to read manager").1, monitor);
 +                      for (_, was_good) in $old_monitors.latest_updates_good_at_last_ser.lock().unwrap().iter() {
 +                              if !was_good {
 +                                      // If the last time we updated a monitor we didn't successfully update (and we
 +                                      // have since updated our serialized copy of the ChannelManager) we may
 +                                      // force-close the channel on our counterparty because we know we're missing
 +                                      // something. Thus, we just return here since we can't continue to test.
 +                                      return;
 +                              }
 +                      }
 +                      res
 +              } }
 +      }
 +
 +
 +      let mut channel_txn = Vec::new();
 +      macro_rules! make_channel {
 +              ($source: expr, $dest: expr, $chan_id: expr) => { {
 +                      $source.create_channel($dest.get_our_node_id(), 10000000, 42, 0).unwrap();
 +                      let open_channel = {
 +                              let events = $source.get_and_clear_pending_msg_events();
 +                              assert_eq!(events.len(), 1);
 +                              if let events::MessageSendEvent::SendOpenChannel { ref msg, .. } = events[0] {
 +                                      msg.clone()
 +                              } else { panic!("Wrong event type"); }
 +                      };
 +
 +                      $dest.handle_open_channel(&$source.get_our_node_id(), LocalFeatures::new(), &open_channel).unwrap();
 +                      let accept_channel = {
 +                              let events = $dest.get_and_clear_pending_msg_events();
 +                              assert_eq!(events.len(), 1);
 +                              if let events::MessageSendEvent::SendAcceptChannel { ref msg, .. } = events[0] {
 +                                      msg.clone()
 +                              } else { panic!("Wrong event type"); }
 +                      };
 +
 +                      $source.handle_accept_channel(&$dest.get_our_node_id(), LocalFeatures::new(), &accept_channel).unwrap();
 +                      {
 +                              let events = $source.get_and_clear_pending_events();
 +                              assert_eq!(events.len(), 1);
 +                              if let events::Event::FundingGenerationReady { ref temporary_channel_id, ref channel_value_satoshis, ref output_script, .. } = events[0] {
 +                                      let tx = Transaction { version: $chan_id, lock_time: 0, input: Vec::new(), output: vec![TxOut {
 +                                              value: *channel_value_satoshis, script_pubkey: output_script.clone(),
 +                                      }]};
 +                                      let funding_output = OutPoint::new(tx.txid(), 0);
 +                                      $source.funding_transaction_generated(&temporary_channel_id, funding_output);
 +                                      channel_txn.push(tx);
 +                              } else { panic!("Wrong event type"); }
 +                      }
 +
 +                      let funding_created = {
 +                              let events = $source.get_and_clear_pending_msg_events();
 +                              assert_eq!(events.len(), 1);
 +                              if let events::MessageSendEvent::SendFundingCreated { ref msg, .. } = events[0] {
 +                                      msg.clone()
 +                              } else { panic!("Wrong event type"); }
 +                      };
 +                      $dest.handle_funding_created(&$source.get_our_node_id(), &funding_created).unwrap();
 +
 +                      let funding_signed = {
 +                              let events = $dest.get_and_clear_pending_msg_events();
 +                              assert_eq!(events.len(), 1);
 +                              if let events::MessageSendEvent::SendFundingSigned { ref msg, .. } = events[0] {
 +                                      msg.clone()
 +                              } else { panic!("Wrong event type"); }
 +                      };
 +                      $source.handle_funding_signed(&$dest.get_our_node_id(), &funding_signed).unwrap();
 +
 +                      {
 +                              let events = $source.get_and_clear_pending_events();
 +                              assert_eq!(events.len(), 1);
 +                              if let events::Event::FundingBroadcastSafe { .. } = events[0] {
 +                              } else { panic!("Wrong event type"); }
 +                      }
 +              } }
 +      }
 +
 +      macro_rules! confirm_txn {
 +              ($node: expr) => { {
 +                      let mut header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
 +                      let mut txn = Vec::with_capacity(channel_txn.len());
 +                      let mut posn = Vec::with_capacity(channel_txn.len());
 +                      for i in 0..channel_txn.len() {
 +                              txn.push(&channel_txn[i]);
 +                              posn.push(i as u32 + 1);
 +                      }
 +                      $node.block_connected(&header, 1, &txn, &posn);
 +                      for i in 2..100 {
 +                              header = BlockHeader { version: 0x20000000, prev_blockhash: header.bitcoin_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
 +                              $node.block_connected(&header, i, &Vec::new(), &[0; 0]);
 +                      }
 +              } }
 +      }
 +
 +      macro_rules! lock_fundings {
 +              ($nodes: expr) => { {
 +                      let mut node_events = Vec::new();
 +                      for node in $nodes.iter() {
 +                              node_events.push(node.get_and_clear_pending_msg_events());
 +                      }
 +                      for (idx, node_event) in node_events.iter().enumerate() {
 +                              for event in node_event {
 +                                      if let events::MessageSendEvent::SendFundingLocked { ref node_id, ref msg } = event {
 +                                              for node in $nodes.iter() {
 +                                                      if node.get_our_node_id() == *node_id {
 +                                                              node.handle_funding_locked(&$nodes[idx].get_our_node_id(), msg).unwrap();
 +                                                      }
 +                                              }
 +                                      } else { panic!("Wrong event type"); }
 +                              }
 +                      }
 +
 +                      for node in $nodes.iter() {
 +                              let events = node.get_and_clear_pending_msg_events();
 +                              for event in events {
 +                                      if let events::MessageSendEvent::SendAnnouncementSignatures { .. } = event {
 +                                      } else { panic!("Wrong event type"); }
 +                              }
 +                      }
 +              } }
 +      }
 +
 +      // 3 nodes is enough to hit all the possible cases, notably unknown-source-unknown-dest
 +      // forwarding.
 +      let (mut node_a, mut monitor_a) = make_node!(0);
 +      let (mut node_b, mut monitor_b) = make_node!(1);
 +      let (mut node_c, mut monitor_c) = make_node!(2);
 +
 +      let mut nodes = [node_a, node_b, node_c];
 +
 +      make_channel!(nodes[0], nodes[1], 0);
 +      make_channel!(nodes[1], nodes[2], 1);
 +
 +      for node in nodes.iter() {
 +              confirm_txn!(node);
 +      }
 +
 +      lock_fundings!(nodes);
 +
 +      let chan_a = nodes[0].list_usable_channels()[0].short_channel_id.unwrap();
 +      let chan_b = nodes[2].list_usable_channels()[0].short_channel_id.unwrap();
 +
 +      let mut payment_id = 0;
 +
 +      let mut chan_a_disconnected = false;
 +      let mut chan_b_disconnected = false;
 +      let mut ba_events = Vec::new();
 +      let mut bc_events = Vec::new();
 +
 +      let mut node_a_ser = VecWriter(Vec::new());
 +      nodes[0].write(&mut node_a_ser).unwrap();
 +      let mut node_b_ser = VecWriter(Vec::new());
 +      nodes[1].write(&mut node_b_ser).unwrap();
 +      let mut node_c_ser = VecWriter(Vec::new());
 +      nodes[2].write(&mut node_c_ser).unwrap();
 +
 +      macro_rules! test_err {
 +              ($res: expr) => {
 +                      match $res {
 +                              Ok(()) => {},
 +                              Err(LightningError { action: ErrorAction::IgnoreError, .. }) => { },
 +                              _ => { $res.unwrap() },
 +                      }
 +              }
 +      }
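test_err! tolerates exactly one error class, ErrorAction::IgnoreError, which handlers legitimately return while a monitor update is pending; anything else is unwrapped so the fuzzer sees a panic. The same filter as a plain function over a simplified, hypothetical error type:

```rust
// Simplified stand-in for LightningError/ErrorAction: "ignore" results are
// expected under injected monitor failures; any other error is a bug.
#[derive(Debug)]
enum Action { IgnoreError, Other }

fn check<T>(res: Result<T, Action>) {
    match res {
        Ok(_) => {},
        Err(Action::IgnoreError) => {}, // expected while an update is pending
        Err(e) => panic!("unexpected handler error: {:?}", e),
    }
}
```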
 +
 +      macro_rules! test_return {
 +              () => { {
 +                      assert_eq!(nodes[0].list_channels().len(), 1);
 +                      assert_eq!(nodes[1].list_channels().len(), 2);
 +                      assert_eq!(nodes[2].list_channels().len(), 1);
 +                      return;
 +              } }
 +      }
 +
 +      let mut read_pos = 0;
 +      macro_rules! get_slice {
 +              ($len: expr) => {
 +                      {
 +                              let slice_len = $len as usize;
 +                              if data.len() < read_pos + slice_len {
 +                                      test_return!();
 +                              }
 +                              read_pos += slice_len;
 +                              &data[read_pos - slice_len..read_pos]
 +                      }
 +              }
 +      }
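get_slice! is the input cursor: it yields the next `$len` bytes or, once the input is exhausted, runs test_return! to assert the expected channel counts and end the case. The consumption logic as a plain struct, for reference:

```rust
// Equivalent of the get_slice! macro as a struct: hand out the next `len`
// bytes, or None so the caller can wind the test down cleanly.
struct FuzzCursor<'a> { data: &'a [u8], pos: usize }

impl<'a> FuzzCursor<'a> {
    fn get_slice(&mut self, len: usize) -> Option<&'a [u8]> {
        if self.data.len() < self.pos + len { return None; }
        self.pos += len;
        Some(&self.data[self.pos - len..self.pos])
    }
}
```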
 +
 +      loop {
 +              macro_rules! send_payment {
 +                      ($source: expr, $dest: expr) => { {
 +                              let payment_hash = Sha256::hash(&[payment_id; 1]);
 +                              payment_id = payment_id.wrapping_add(1);
 +                              if let Err(_) = $source.send_payment(Route {
 +                                      hops: vec![RouteHop {
 +                                              pubkey: $dest.0.get_our_node_id(),
 +                                              short_channel_id: $dest.1,
 +                                              fee_msat: 5000000,
 +                                              cltv_expiry_delta: 200,
 +                                      }],
 +                              }, PaymentHash(payment_hash.into_inner())) {
 +                                      // Probably ran out of funds
 +                                      test_return!();
 +                              }
 +                      } };
 +                      ($source: expr, $middle: expr, $dest: expr) => { {
 +                              let payment_hash = Sha256::hash(&[payment_id; 1]);
 +                              payment_id = payment_id.wrapping_add(1);
 +                              if let Err(_) = $source.send_payment(Route {
 +                                      hops: vec![RouteHop {
 +                                              pubkey: $middle.0.get_our_node_id(),
 +                                              short_channel_id: $middle.1,
 +                                              fee_msat: 50000,
 +                                              cltv_expiry_delta: 100,
 +                                      },RouteHop {
 +                                              pubkey: $dest.0.get_our_node_id(),
 +                                              short_channel_id: $dest.1,
 +                                              fee_msat: 5000000,
 +                                              cltv_expiry_delta: 200,
 +                                      }],
 +                              }, PaymentHash(payment_hash.into_inner())) {
 +                                      // Probably ran out of funds
 +                                      test_return!();
 +                              }
 +                      } }
 +              }
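Note the fee arithmetic in these routes: each RouteHop's `fee_msat` is the amount due at that hop, and for the final hop it is the payment amount itself, so the sender commits the sum over all hops. A worked total for the two-hop case, assuming that convention (which matches the `Route` docs of this era):

```rust
// What the source node commits for the two-hop route above.
struct Hop { fee_msat: u64 }

fn total_sent_msat(hops: &[Hop]) -> u64 {
    // The last hop's fee_msat is the payment amount; earlier hops' are fees.
    hops.iter().map(|h| h.fee_msat).sum()
}

// For the 0x0d/0x0e commands: 50_000 (middle-hop fee) + 5_000_000 (payment
// amount) = 5_050_000 msat leaves the source.
```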
 +
 +              macro_rules! process_msg_events {
 +                      ($node: expr, $corrupt_forward: expr) => { {
 +                              let events = if $node == 1 {
 +                                      let mut new_events = Vec::new();
 +                                      mem::swap(&mut new_events, &mut ba_events);
 +                                      new_events.extend_from_slice(&bc_events[..]);
 +                                      bc_events.clear();
 +                                      new_events
 +                              } else { Vec::new() };
 +                              for event in events.iter().chain(nodes[$node].get_and_clear_pending_msg_events().iter()) {
 +                                      match event {
 +                                              events::MessageSendEvent::UpdateHTLCs { ref node_id, updates: CommitmentUpdate { ref update_add_htlcs, ref update_fail_htlcs, ref update_fulfill_htlcs, ref update_fail_malformed_htlcs, ref update_fee, ref commitment_signed } } => {
 +                                                      for dest in nodes.iter() {
 +                                                              if dest.get_our_node_id() == *node_id {
 +                                                                      assert!(update_fee.is_none());
 +                                                                      for update_add in update_add_htlcs {
 +                                                                              if !$corrupt_forward {
 +                                                                                      test_err!(dest.handle_update_add_htlc(&nodes[$node].get_our_node_id(), &update_add));
 +                                                                              } else {
 +                                                                                      // Corrupt the update_add_htlc message so that its HMAC
 +                                                                                      // check will fail and we generate an
 +                                                                                      // update_fail_malformed_htlc instead of an
 +                                                                                      // update_fail_htlc as we do when we reject a payment.
 +                                                                                      let mut msg_ser = update_add.encode();
 +                                                                                      msg_ser[1000] ^= 0xff;
 +                                                                                      let new_msg = UpdateAddHTLC::read(&mut Cursor::new(&msg_ser)).unwrap();
 +                                                                                      test_err!(dest.handle_update_add_htlc(&nodes[$node].get_our_node_id(), &new_msg));
 +                                                                              }
 +                                                                      }
 +                                                                      for update_fulfill in update_fulfill_htlcs {
 +                                                                              test_err!(dest.handle_update_fulfill_htlc(&nodes[$node].get_our_node_id(), &update_fulfill));
 +                                                                      }
 +                                                                      for update_fail in update_fail_htlcs {
 +                                                                              test_err!(dest.handle_update_fail_htlc(&nodes[$node].get_our_node_id(), &update_fail));
 +                                                                      }
 +                                                                      for update_fail_malformed in update_fail_malformed_htlcs {
 +                                                                              test_err!(dest.handle_update_fail_malformed_htlc(&nodes[$node].get_our_node_id(), &update_fail_malformed));
 +                                                                      }
 +                                                                      test_err!(dest.handle_commitment_signed(&nodes[$node].get_our_node_id(), &commitment_signed));
 +                                                              }
 +                                                      }
 +                                              },
 +                                              events::MessageSendEvent::SendRevokeAndACK { ref node_id, ref msg } => {
 +                                                      for dest in nodes.iter() {
 +                                                              if dest.get_our_node_id() == *node_id {
 +                                                                      test_err!(dest.handle_revoke_and_ack(&nodes[$node].get_our_node_id(), msg));
 +                                                              }
 +                                                      }
 +                                              },
 +                                              events::MessageSendEvent::SendChannelReestablish { ref node_id, ref msg } => {
 +                                                      for dest in nodes.iter() {
 +                                                              if dest.get_our_node_id() == *node_id {
 +                                                                      test_err!(dest.handle_channel_reestablish(&nodes[$node].get_our_node_id(), msg));
 +                                                              }
 +                                                      }
 +                                              },
 +                                              events::MessageSendEvent::SendFundingLocked { .. } => {
 +                                                      // Can be generated as a reestablish response
 +                                              },
 +                                              events::MessageSendEvent::PaymentFailureNetworkUpdate { .. } => {
 +                                                      // Can be generated due to a payment forward being rejected due to a
 +                                                      // channel having previously failed a monitor update
 +                                              },
 +                                              _ => panic!("Unhandled message event"),
 +                                      }
 +                              }
 +                      } }
 +              }
 +
 +              macro_rules! drain_msg_events_on_disconnect {
 +                      ($counterparty_id: expr) => { {
 +                              if $counterparty_id == 0 {
 +                                      for event in nodes[0].get_and_clear_pending_msg_events() {
 +                                              match event {
 +                                                      events::MessageSendEvent::UpdateHTLCs { .. } => {},
 +                                                      events::MessageSendEvent::SendRevokeAndACK { .. } => {},
 +                                                      events::MessageSendEvent::SendChannelReestablish { .. } => {},
 +                                                      events::MessageSendEvent::SendFundingLocked { .. } => {},
 +                                                      events::MessageSendEvent::PaymentFailureNetworkUpdate { .. } => {},
 +                                                      _ => panic!("Unhandled message event"),
 +                                              }
 +                                      }
 +                                      ba_events.clear();
 +                              } else {
 +                                      for event in nodes[2].get_and_clear_pending_msg_events() {
 +                                              match event {
 +                                                      events::MessageSendEvent::UpdateHTLCs { .. } => {},
 +                                                      events::MessageSendEvent::SendRevokeAndACK { .. } => {},
 +                                                      events::MessageSendEvent::SendChannelReestablish { .. } => {},
 +                                                      events::MessageSendEvent::SendFundingLocked { .. } => {},
 +                                                      events::MessageSendEvent::PaymentFailureNetworkUpdate { .. } => {},
 +                                                      _ => panic!("Unhandled message event"),
 +                                              }
 +                                      }
 +                                      bc_events.clear();
 +                              }
 +                              let mut events = nodes[1].get_and_clear_pending_msg_events();
 +                              let drop_node_id = if $counterparty_id == 0 { nodes[0].get_our_node_id() } else { nodes[2].get_our_node_id() };
 +                              let msg_sink = if $counterparty_id == 0 { &mut bc_events } else { &mut ba_events };
 +                              for event in events.drain(..) {
 +                                      let push = match event {
 +                                              events::MessageSendEvent::UpdateHTLCs { ref node_id, .. } => {
 +                                                      *node_id != drop_node_id
 +                                              },
 +                                              events::MessageSendEvent::SendRevokeAndACK { ref node_id, .. } => {
 +                                                      *node_id != drop_node_id
 +                                              },
 +                                              events::MessageSendEvent::SendChannelReestablish { ref node_id, .. } => {
 +                                                      *node_id != drop_node_id
 +                                              },
 +                                              events::MessageSendEvent::SendFundingLocked { .. } => false,
 +                                              events::MessageSendEvent::PaymentFailureNetworkUpdate { .. } => false,
 +                                              _ => panic!("Unhandled message event"),
 +                                      };
 +                                      if push { msg_sink.push(event); }
 +                              }
 +                      } }
 +              }
 +
 +              macro_rules! process_events {
 +                      ($node: expr, $fail: expr) => { {
 +                              // In case we get 256 payments we may have a hash collision, resulting in the
 +                              // second claim/fail call not finding the duplicate-hash HTLC, so we have to
 +                              // deduplicate the calls here.
 +                              let mut claim_set = HashSet::new();
 +                              let mut events = nodes[$node].get_and_clear_pending_events();
 +                              // Sort events so that PendingHTLCsForwardable get processed last. This avoids a
 +                              // case where we first process a PendingHTLCsForwardable, then claim/fail on a
 +                              // PaymentReceived, claiming/failing two HTLCs, but leaving a just-generated
 +                              // PaymentReceived event for the second HTLC in our pending_events (and breaking
 +                              // our claim_set deduplication).
 +                              events.sort_by(|a, b| {
 +                                      if let events::Event::PaymentReceived { .. } = a {
 +                                              if let events::Event::PendingHTLCsForwardable { .. } = b {
 +                                                      Ordering::Less
 +                                              } else { Ordering::Equal }
 +                                      } else if let events::Event::PendingHTLCsForwardable { .. } = a {
 +                                              if let events::Event::PaymentReceived { .. } = b {
 +                                                      Ordering::Greater
 +                                              } else { Ordering::Equal }
 +                                      } else { Ordering::Equal }
 +                              });
 +                              for event in events.drain(..) {
 +                                      match event {
 +                                              events::Event::PaymentReceived { payment_hash, .. } => {
 +                                                      if claim_set.insert(payment_hash.0) {
 +                                                              if $fail {
 +                                                                      assert!(nodes[$node].fail_htlc_backwards(&payment_hash));
 +                                                              } else {
-                                                                       assert!(nodes[$node].claim_funds(PaymentPreimage(payment_hash.0)));
++                                                                      assert!(nodes[$node].claim_funds(PaymentPreimage(payment_hash.0), 5_000_000));
 +                                                              }
 +                                                      }
 +                                              },
 +                                              events::Event::PaymentSent { .. } => {},
 +                                              events::Event::PaymentFailed { .. } => {},
 +                                              events::Event::PendingHTLCsForwardable { .. } => {
 +                                                      nodes[$node].process_pending_htlc_forwards();
 +                                              },
 +                                              _ => panic!("Unhandled event"),
 +                                      }
 +                              }
 +                      } }
 +              }
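The comparator above only defines an order between PaymentReceived and PendingHTLCsForwardable, reporting Equal for every other pair, so the stable sort leaves all other events in delivery order. A minimal demonstration with a toy event enum:

```rust
use std::cmp::Ordering;

#[derive(Debug, PartialEq)]
enum Ev { PaymentReceived, PendingHTLCsForwardable }

fn main() {
    let mut events = vec![Ev::PendingHTLCsForwardable, Ev::PaymentReceived];
    // Same rule as above: claim/fail every delivered PaymentReceived before
    // processing any PendingHTLCsForwardable.
    events.sort_by(|a, b| match (a, b) {
        (Ev::PaymentReceived, Ev::PendingHTLCsForwardable) => Ordering::Less,
        (Ev::PendingHTLCsForwardable, Ev::PaymentReceived) => Ordering::Greater,
        _ => Ordering::Equal,
    });
    assert_eq!(events, vec![Ev::PaymentReceived, Ev::PendingHTLCsForwardable]);
}
```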
 +
 +              match get_slice!(1)[0] {
 +                      0x00 => *monitor_a.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure),
 +                      0x01 => *monitor_b.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure),
 +                      0x02 => *monitor_c.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure),
 +                      0x03 => *monitor_a.update_ret.lock().unwrap() = Ok(()),
 +                      0x04 => *monitor_b.update_ret.lock().unwrap() = Ok(()),
 +                      0x05 => *monitor_c.update_ret.lock().unwrap() = Ok(()),
 +                      0x06 => { unsafe { IN_RESTORE = true }; nodes[0].test_restore_channel_monitor(); unsafe { IN_RESTORE = false }; },
 +                      0x07 => { unsafe { IN_RESTORE = true }; nodes[1].test_restore_channel_monitor(); unsafe { IN_RESTORE = false }; },
 +                      0x08 => { unsafe { IN_RESTORE = true }; nodes[2].test_restore_channel_monitor(); unsafe { IN_RESTORE = false }; },
 +                      0x09 => send_payment!(nodes[0], (&nodes[1], chan_a)),
 +                      0x0a => send_payment!(nodes[1], (&nodes[0], chan_a)),
 +                      0x0b => send_payment!(nodes[1], (&nodes[2], chan_b)),
 +                      0x0c => send_payment!(nodes[2], (&nodes[1], chan_b)),
 +                      0x0d => send_payment!(nodes[0], (&nodes[1], chan_a), (&nodes[2], chan_b)),
 +                      0x0e => send_payment!(nodes[2], (&nodes[1], chan_b), (&nodes[0], chan_a)),
 +                      0x0f => {
 +                              if !chan_a_disconnected {
 +                                      nodes[0].peer_disconnected(&nodes[1].get_our_node_id(), false);
 +                                      nodes[1].peer_disconnected(&nodes[0].get_our_node_id(), false);
 +                                      chan_a_disconnected = true;
 +                                      drain_msg_events_on_disconnect!(0);
 +                              }
 +                      },
 +                      0x10 => {
 +                              if !chan_b_disconnected {
 +                                      nodes[1].peer_disconnected(&nodes[2].get_our_node_id(), false);
 +                                      nodes[2].peer_disconnected(&nodes[1].get_our_node_id(), false);
 +                                      chan_b_disconnected = true;
 +                                      drain_msg_events_on_disconnect!(2);
 +                              }
 +                      },
 +                      0x11 => {
 +                              if chan_a_disconnected {
 +                                      nodes[0].peer_connected(&nodes[1].get_our_node_id());
 +                                      nodes[1].peer_connected(&nodes[0].get_our_node_id());
 +                                      chan_a_disconnected = false;
 +                              }
 +                      },
 +                      0x12 => {
 +                              if chan_b_disconnected {
 +                                      nodes[1].peer_connected(&nodes[2].get_our_node_id());
 +                                      nodes[2].peer_connected(&nodes[1].get_our_node_id());
 +                                      chan_b_disconnected = false;
 +                              }
 +                      },
 +                      0x13 => process_msg_events!(0, true),
 +                      0x14 => process_msg_events!(0, false),
 +                      0x15 => process_events!(0, true),
 +                      0x16 => process_events!(0, false),
 +                      0x17 => process_msg_events!(1, true),
 +                      0x18 => process_msg_events!(1, false),
 +                      0x19 => process_events!(1, true),
 +                      0x1a => process_events!(1, false),
 +                      0x1b => process_msg_events!(2, true),
 +                      0x1c => process_msg_events!(2, false),
 +                      0x1d => process_events!(2, true),
 +                      0x1e => process_events!(2, false),
 +                      0x1f => {
 +                              if !chan_a_disconnected {
 +                                      nodes[1].peer_disconnected(&nodes[0].get_our_node_id(), false);
 +                                      chan_a_disconnected = true;
 +                                      drain_msg_events_on_disconnect!(0);
 +                              }
 +                              let (new_node_a, new_monitor_a) = reload_node!(node_a_ser, 0, monitor_a);
 +                              node_a = Arc::new(new_node_a);
 +                              nodes[0] = node_a.clone();
 +                              monitor_a = new_monitor_a;
 +                      },
 +                      0x20 => {
 +                              if !chan_a_disconnected {
 +                                      nodes[0].peer_disconnected(&nodes[1].get_our_node_id(), false);
 +                                      chan_a_disconnected = true;
 +                                      nodes[0].get_and_clear_pending_msg_events();
 +                                      ba_events.clear();
 +                              }
 +                              if !chan_b_disconnected {
 +                                      nodes[2].peer_disconnected(&nodes[1].get_our_node_id(), false);
 +                                      chan_b_disconnected = true;
 +                                      nodes[2].get_and_clear_pending_msg_events();
 +                                      bc_events.clear();
 +                              }
 +                              let (new_node_b, new_monitor_b) = reload_node!(node_b_ser, 1, monitor_b);
 +                              node_b = Arc::new(new_node_b);
 +                              nodes[1] = node_b.clone();
 +                              monitor_b = new_monitor_b;
 +                      },
 +                      0x21 => {
 +                              if !chan_b_disconnected {
 +                                      nodes[1].peer_disconnected(&nodes[2].get_our_node_id(), false);
 +                                      chan_b_disconnected = true;
 +                                      drain_msg_events_on_disconnect!(2);
 +                              }
 +                              let (new_node_c, new_monitor_c) = reload_node!(node_c_ser, 2, monitor_c);
 +                              node_c = Arc::new(new_node_c);
 +                              nodes[2] = node_c.clone();
 +                              monitor_c = new_monitor_c;
 +                      },
 +                      _ => test_return!(),
 +              }
 +
 +              if monitor_a.should_update_manager.load(atomic::Ordering::Relaxed) {
 +                      node_a_ser.0.clear();
 +                      nodes[0].write(&mut node_a_ser).unwrap();
 +                      monitor_a.should_update_manager.store(false, atomic::Ordering::Relaxed);
 +                      *monitor_a.latest_updates_good_at_last_ser.lock().unwrap() = monitor_a.latest_update_good.lock().unwrap().clone();
 +              }
 +              if monitor_b.should_update_manager.load(atomic::Ordering::Relaxed) {
 +                      node_b_ser.0.clear();
 +                      nodes[1].write(&mut node_b_ser).unwrap();
 +                      monitor_b.should_update_manager.store(false, atomic::Ordering::Relaxed);
 +                      *monitor_b.latest_updates_good_at_last_ser.lock().unwrap() = monitor_b.latest_update_good.lock().unwrap().clone();
 +              }
 +              if monitor_c.should_update_manager.load(atomic::Ordering::Relaxed) {
 +                      node_c_ser.0.clear();
 +                      nodes[2].write(&mut node_c_ser).unwrap();
 +                      monitor_c.should_update_manager.store(false, atomic::Ordering::Relaxed);
 +                      *monitor_c.latest_updates_good_at_last_ser.lock().unwrap() = monitor_c.latest_update_good.lock().unwrap().clone();
 +              }
 +      }
 +}
 +
 +#[cfg(feature = "afl")]
 +#[macro_use] extern crate afl;
 +#[cfg(feature = "afl")]
 +fn main() {
 +      fuzz!(|data| {
 +              do_test(data);
 +      });
 +}
 +
 +#[cfg(feature = "honggfuzz")]
 +#[macro_use] extern crate honggfuzz;
 +#[cfg(feature = "honggfuzz")]
 +fn main() {
 +      loop {
 +              fuzz!(|data| {
 +                      do_test(data);
 +              });
 +      }
 +}
 +
 +#[cfg(feature = "libfuzzer_fuzz")]
 +#[macro_use] extern crate libfuzzer_sys;
 +#[cfg(feature = "libfuzzer_fuzz")]
 +fuzz_target!(|data: &[u8]| {
 +      do_test(data);
 +});
 +
 +extern crate hex;
 +#[cfg(test)]
 +mod tests {
 +      #[test]
 +      fn duplicate_crash() {
 +              super::do_test(&::hex::decode("00").unwrap());
 +      }
 +}
index 29220f46846a5d8063ff350cb0943d35d798d37c,0000000000000000000000000000000000000000..41ab473fd61c4877412b1dcf36acfde09f5bb3d2
mode 100644,000000..100644
--- /dev/null
+++ b/lightning/fuzz/fuzz_targets/full_stack_target.rs
@@@ -1,890 -1,0 +1,891 @@@
-       let mut payments_received: Vec<PaymentHash> = Vec::new();
 +//! Test that no series of bytes received over the wire/connections created/payments sent can
 +//! result in a crash. We do this by standing up a node and then reading bytes from input to denote
 +//! actions such as creating new inbound/outbound connections, bytes to be read from a connection,
 +//! or payments to send/ways to handle events generated.
 +//! This test has been very useful, though due to its complexity good starting inputs are critical.
 +
 +//Uncomment this for libfuzzer builds:
 +//#![no_main]
 +
 +extern crate bitcoin;
 +extern crate bitcoin_hashes;
 +extern crate lightning;
 +extern crate secp256k1;
 +
 +use bitcoin::blockdata::block::BlockHeader;
 +use bitcoin::blockdata::transaction::{Transaction, TxOut};
 +use bitcoin::blockdata::script::{Builder, Script};
 +use bitcoin::blockdata::opcodes;
 +use bitcoin::consensus::encode::deserialize;
 +use bitcoin::network::constants::Network;
 +use bitcoin::util::hash::BitcoinHash;
 +
 +use bitcoin_hashes::Hash as TraitImport;
 +use bitcoin_hashes::HashEngine as TraitImportEngine;
 +use bitcoin_hashes::sha256::Hash as Sha256;
 +use bitcoin_hashes::hash160::Hash as Hash160;
 +use bitcoin_hashes::sha256d::Hash as Sha256dHash;
 +
 +use lightning::chain::chaininterface::{BroadcasterInterface,ConfirmationTarget,ChainListener,FeeEstimator,ChainWatchInterfaceUtil};
 +use lightning::chain::transaction::OutPoint;
 +use lightning::chain::keysinterface::{ChannelKeys, KeysInterface};
 +use lightning::ln::channelmonitor;
 +use lightning::ln::channelmanager::{ChannelManager, PaymentHash, PaymentPreimage};
 +use lightning::ln::peer_handler::{MessageHandler,PeerManager,SocketDescriptor};
 +use lightning::ln::router::Router;
 +use lightning::util::events::{EventsProvider,Event};
 +use lightning::util::logger::Logger;
 +use lightning::util::config::UserConfig;
 +
 +mod utils;
 +
 +use utils::test_logger;
 +
 +use secp256k1::key::{PublicKey,SecretKey};
 +use secp256k1::Secp256k1;
 +
 +use std::cell::RefCell;
 +use std::collections::{HashMap, hash_map};
 +use std::cmp;
 +use std::hash::Hash;
 +use std::sync::Arc;
 +use std::sync::atomic::{AtomicU64,AtomicUsize,Ordering};
 +
 +#[inline]
 +pub fn slice_to_be16(v: &[u8]) -> u16 {
 +      ((v[0] as u16) << 8*1) |
 +      ((v[1] as u16) << 8*0)
 +}
 +
 +#[inline]
 +pub fn slice_to_be24(v: &[u8]) -> u32 {
 +      ((v[0] as u32) << 8*2) |
 +      ((v[1] as u32) << 8*1) |
 +      ((v[2] as u32) << 8*0)
 +}
 +
 +#[inline]
 +pub fn slice_to_be32(v: &[u8]) -> u32 {
 +      ((v[0] as u32) << 8*3) |
 +      ((v[1] as u32) << 8*2) |
 +      ((v[2] as u32) << 8*1) |
 +      ((v[3] as u32) << 8*0)
 +}
 +
 +#[inline]
 +pub fn be64_to_array(u: u64) -> [u8; 8] {
 +      let mut v = [0; 8];
 +      v[0] = ((u >> 8*7) & 0xff) as u8;
 +      v[1] = ((u >> 8*6) & 0xff) as u8;
 +      v[2] = ((u >> 8*5) & 0xff) as u8;
 +      v[3] = ((u >> 8*4) & 0xff) as u8;
 +      v[4] = ((u >> 8*3) & 0xff) as u8;
 +      v[5] = ((u >> 8*2) & 0xff) as u8;
 +      v[6] = ((u >> 8*1) & 0xff) as u8;
 +      v[7] = ((u >> 8*0) & 0xff) as u8;
 +      v
 +}
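These hand-rolled byte-order helpers can be cross-checked against the std conversions (stable since Rust 1.32). A sanity test, assuming the three functions above are in scope:

```rust
#[test]
fn be_helpers_agree_with_std() {
    assert_eq!(slice_to_be16(&[0x12, 0x34]), u16::from_be_bytes([0x12, 0x34]));
    assert_eq!(slice_to_be32(&[0xde, 0xad, 0xbe, 0xef]),
               u32::from_be_bytes([0xde, 0xad, 0xbe, 0xef]));
    assert_eq!(be64_to_array(0x0102_0304_0506_0708),
               0x0102_0304_0506_0708u64.to_be_bytes());
}
```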
 +
 +struct InputData {
 +      data: Vec<u8>,
 +      read_pos: AtomicUsize,
 +}
 +impl InputData {
 +      fn get_slice(&self, len: usize) -> Option<&[u8]> {
 +              let old_pos = self.read_pos.fetch_add(len, Ordering::AcqRel);
 +              if self.data.len() < old_pos + len {
 +                      return None;
 +              }
 +              Some(&self.data[old_pos..old_pos + len])
 +      }
 +}
 +
 +struct FuzzEstimator {
 +      input: Arc<InputData>,
 +}
 +impl FeeEstimator for FuzzEstimator {
 +      fn get_est_sat_per_1000_weight(&self, _: ConfirmationTarget) -> u64 {
 +              //TODO: We should actually be testing feerates much higher than 64k...
 +              match self.input.get_slice(2) {
 +                      Some(slice) => cmp::max(slice_to_be16(slice) as u64, 253),
 +                      None => 0
 +              }
 +      }
 +}
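The 253 floor is rust-lightning's minimum feerate: fees here are quoted in satoshis per 1000 weight units, and Bitcoin Core's default 1000 sat/kvB minimum relay feerate converts to 250 sat/kWU (1 vbyte = 4 weight units), with 253 leaving headroom for rounding. The unit conversion, for reference:

```rust
// Feerate units used by get_est_sat_per_1000_weight: sat per 1000 weight
// units (sat/kWU). 1 vbyte = 4 weight units, so sat/kvB divides by 4.
fn kvb_to_kwu(sat_per_kvb: u64) -> u64 {
    sat_per_kvb / 4
}

fn main() {
    // Bitcoin Core's default minimum relay feerate, in these units:
    assert_eq!(kvb_to_kwu(1000), 250); // the 253 above adds rounding headroom
}
```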
 +
 +struct TestBroadcaster {}
 +impl BroadcasterInterface for TestBroadcaster {
 +      fn broadcast_transaction(&self, _tx: &Transaction) {}
 +}
 +
 +#[derive(Clone)]
 +struct Peer<'a> {
 +      id: u8,
 +      peers_connected: &'a RefCell<[bool; 256]>,
 +}
 +impl<'a> SocketDescriptor for Peer<'a> {
 +      fn send_data(&mut self, data: &[u8], _resume_read: bool) -> usize {
 +              data.len()
 +      }
 +      fn disconnect_socket(&mut self) {
 +              assert!(self.peers_connected.borrow()[self.id as usize]);
 +              self.peers_connected.borrow_mut()[self.id as usize] = false;
 +      }
 +}
 +impl<'a> PartialEq for Peer<'a> {
 +      fn eq(&self, other: &Self) -> bool {
 +              self.id == other.id
 +      }
 +}
 +impl<'a> Eq for Peer<'a> {}
 +impl<'a> Hash for Peer<'a> {
 +      fn hash<H : std::hash::Hasher>(&self, h: &mut H) {
 +              self.id.hash(h)
 +      }
 +}
 +
 +struct MoneyLossDetector<'a> {
 +      manager: Arc<ChannelManager>,
 +      monitor: Arc<channelmonitor::SimpleManyChannelMonitor<OutPoint>>,
 +      handler: PeerManager<Peer<'a>>,
 +
 +      peers: &'a RefCell<[bool; 256]>,
 +      funding_txn: Vec<Transaction>,
 +      txids_confirmed: HashMap<Sha256dHash, usize>,
 +      header_hashes: Vec<Sha256dHash>,
 +      height: usize,
 +      max_height: usize,
 +      blocks_connected: u32,
 +}
 +impl<'a> MoneyLossDetector<'a> {
 +      pub fn new(peers: &'a RefCell<[bool; 256]>, manager: Arc<ChannelManager>, monitor: Arc<channelmonitor::SimpleManyChannelMonitor<OutPoint>>, handler: PeerManager<Peer<'a>>) -> Self {
 +              MoneyLossDetector {
 +                      manager,
 +                      monitor,
 +                      handler,
 +
 +                      peers,
 +                      funding_txn: Vec::new(),
 +                      txids_confirmed: HashMap::new(),
 +                      header_hashes: vec![Default::default()],
 +                      height: 0,
 +                      max_height: 0,
 +                      blocks_connected: 0,
 +              }
 +      }
 +
 +      fn connect_block(&mut self, all_txn: &[Transaction]) {
 +              let mut txn = Vec::with_capacity(all_txn.len());
 +              let mut txn_idxs = Vec::with_capacity(all_txn.len());
 +              for (idx, tx) in all_txn.iter().enumerate() {
 +                      let txid = tx.txid();
 +                      match self.txids_confirmed.entry(txid) {
 +                              hash_map::Entry::Vacant(e) => {
 +                                      e.insert(self.height);
 +                                      txn.push(tx);
 +                                      txn_idxs.push(idx as u32 + 1);
 +                              },
 +                              _ => {},
 +                      }
 +              }
 +
 +              let header = BlockHeader { version: 0x20000000, prev_blockhash: self.header_hashes[self.height], merkle_root: Default::default(), time: self.blocks_connected, bits: 42, nonce: 42 };
 +              self.height += 1;
 +              self.blocks_connected += 1;
 +              self.manager.block_connected(&header, self.height as u32, &txn[..], &txn_idxs[..]);
 +              (*self.monitor).block_connected(&header, self.height as u32, &txn[..], &txn_idxs[..]);
 +              if self.header_hashes.len() > self.height {
 +                      self.header_hashes[self.height] = header.bitcoin_hash();
 +              } else {
 +                      assert_eq!(self.header_hashes.len(), self.height);
 +                      self.header_hashes.push(header.bitcoin_hash());
 +              }
 +              self.max_height = cmp::max(self.height, self.max_height);
 +      }
 +
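 +      // Undoes the most recent connect_block, but only within 6 blocks of the highest
 +      // height seen so far, presumably to bound how deep a fuzz-driven reorg can go.
 +      // Txids confirmed at the removed height are forgotten so the same transaction
 +      // can be confirmed again later.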
 +      fn disconnect_block(&mut self) {
 +              if self.height > 0 && (self.max_height < 6 || self.height >= self.max_height - 6) {
 +                      self.height -= 1;
 +                      let header = BlockHeader { version: 0x20000000, prev_blockhash: self.header_hashes[self.height], merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
 +                      self.manager.block_disconnected(&header, self.height as u32);
 +                      self.monitor.block_disconnected(&header, self.height as u32);
 +                      let removal_height = self.height;
 +                      self.txids_confirmed.retain(|_, height| {
 +                              removal_height != *height
 +                      });
 +              }
 +      }
 +}
 +
 +impl<'a> Drop for MoneyLossDetector<'a> {
 +      fn drop(&mut self) {
 +              if !::std::thread::panicking() {
 +                      // Disconnect all peers
 +                      for (idx, peer) in self.peers.borrow().iter().enumerate() {
 +                              if *peer {
 +                                      self.handler.disconnect_event(&Peer{id: idx as u8, peers_connected: &self.peers});
 +                              }
 +                      }
 +
 +                      // Force all channels onto the chain (and time out claim txn)
 +                      self.manager.force_close_all_channels();
 +              }
 +      }
 +}
 +
 +struct KeyProvider {
 +      node_secret: SecretKey,
 +      counter: AtomicU64,
 +}
 +impl KeysInterface for KeyProvider {
 +      fn get_node_secret(&self) -> SecretKey {
 +              self.node_secret.clone()
 +      }
 +
 +      fn get_destination_script(&self) -> Script {
 +              let secp_ctx = Secp256k1::signing_only();
 +              let channel_monitor_claim_key = SecretKey::from_slice(&hex::decode("0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap()[..]).unwrap();
 +              let our_channel_monitor_claim_key_hash = <Hash160 as bitcoin_hashes::Hash>::hash(&PublicKey::from_secret_key(&secp_ctx, &channel_monitor_claim_key).serialize());
 +              Builder::new().push_opcode(opcodes::all::OP_PUSHBYTES_0).push_slice(&our_channel_monitor_claim_key_hash[..]).into_script()
 +      }
 +
 +      fn get_shutdown_pubkey(&self) -> PublicKey {
 +              let secp_ctx = Secp256k1::signing_only();
 +              PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1]).unwrap())
 +      }
 +
 +      fn get_channel_keys(&self, inbound: bool) -> ChannelKeys {
 +              let ctr = self.counter.fetch_add(1, Ordering::Relaxed) as u8;
 +              if inbound {
 +                      ChannelKeys {
 +                              funding_key:               SecretKey::from_slice(&[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, ctr]).unwrap(),
 +                              revocation_base_key:       SecretKey::from_slice(&[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, ctr]).unwrap(),
 +                              payment_base_key:          SecretKey::from_slice(&[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, ctr]).unwrap(),
 +                              delayed_payment_base_key:  SecretKey::from_slice(&[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 4, ctr]).unwrap(),
 +                              htlc_base_key:             SecretKey::from_slice(&[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 5, ctr]).unwrap(),
 +                              commitment_seed: [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 6, ctr],
 +                      }
 +              } else {
 +                      ChannelKeys {
 +                              funding_key:               SecretKey::from_slice(&[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 7, ctr]).unwrap(),
 +                              revocation_base_key:       SecretKey::from_slice(&[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 8, ctr]).unwrap(),
 +                              payment_base_key:          SecretKey::from_slice(&[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 9, ctr]).unwrap(),
 +                              delayed_payment_base_key:  SecretKey::from_slice(&[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 10, ctr]).unwrap(),
 +                              htlc_base_key:             SecretKey::from_slice(&[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 11, ctr]).unwrap(),
 +                              commitment_seed: [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 12, ctr],
 +                      }
 +              }
 +      }
 +
 +      fn get_session_key(&self) -> SecretKey {
 +              let ctr = self.counter.fetch_add(1, Ordering::Relaxed) as u8;
 +              SecretKey::from_slice(&[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 13, ctr]).unwrap()
 +      }
 +
 +      fn get_channel_id(&self) -> [u8; 32] {
 +              let ctr = self.counter.fetch_add(1, Ordering::Relaxed);
 +              [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
 +              (ctr >> 8*7) as u8, (ctr >> 8*6) as u8, (ctr >> 8*5) as u8, (ctr >> 8*4) as u8, (ctr >> 8*3) as u8, (ctr >> 8*2) as u8, (ctr >> 8*1) as u8, 14, (ctr >> 8*0) as u8]
 +      }
 +}
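 +// A worked example of the deterministic key scheme above: every secret is all-zero
 +// except a role-tag byte and the low byte of the shared counter, so an outbound
 +// funding_key ends in [..., 7, ctr] while an inbound one ends in [..., 1, ctr].
 +// Likewise, for ctr = 1, get_channel_id returns 23 zero bytes, seven zero
 +// high-counter bytes, the tag 14, and finally 1.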
 +
 +#[inline]
 +pub fn do_test(data: &[u8], logger: &Arc<Logger>) {
 +      let input = Arc::new(InputData {
 +              data: data.to_vec(),
 +              read_pos: AtomicUsize::new(0),
 +      });
 +      let fee_est = Arc::new(FuzzEstimator {
 +              input: input.clone(),
 +      });
 +
 +      macro_rules! get_slice {
 +              ($len: expr) => {
 +                      match input.get_slice($len as usize) {
 +                              Some(slice) => slice,
 +                              None => return,
 +                      }
 +              }
 +      }
 +
 +      macro_rules! get_pubkey {
 +              () => {
 +                      match PublicKey::from_slice(get_slice!(33)) {
 +                              Ok(key) => key,
 +                              Err(_) => return,
 +                      }
 +              }
 +      }
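 +      // Both macros simply return from do_test once the fuzz input is exhausted or
 +      // malformed; running out of input is the normal way a fuzz case ends.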
 +
 +      let our_network_key = match SecretKey::from_slice(get_slice!(32)) {
 +              Ok(key) => key,
 +              Err(_) => return,
 +      };
 +
 +      let watch = Arc::new(ChainWatchInterfaceUtil::new(Network::Bitcoin, Arc::clone(&logger)));
 +      let broadcast = Arc::new(TestBroadcaster{});
 +      let monitor = channelmonitor::SimpleManyChannelMonitor::new(watch.clone(), broadcast.clone(), Arc::clone(&logger), fee_est.clone());
 +
 +      let keys_manager = Arc::new(KeyProvider { node_secret: our_network_key.clone(), counter: AtomicU64::new(0) });
 +      let mut config = UserConfig::new();
 +      config.channel_options.fee_proportional_millionths =  slice_to_be32(get_slice!(4));
 +      config.channel_options.announced_channel = get_slice!(1)[0] != 0;
 +      config.peer_channel_config_limits.min_dust_limit_satoshis = 0;
 +      let channelmanager = ChannelManager::new(Network::Bitcoin, fee_est.clone(), monitor.clone(), watch.clone(), broadcast.clone(), Arc::clone(&logger), keys_manager.clone(), config, 0).unwrap();
 +      let router = Arc::new(Router::new(PublicKey::from_secret_key(&Secp256k1::signing_only(), &keys_manager.get_node_secret()), watch.clone(), Arc::clone(&logger)));
 +
 +      let peers = RefCell::new([false; 256]);
 +      let mut loss_detector = MoneyLossDetector::new(&peers, channelmanager.clone(), monitor.clone(), PeerManager::new(MessageHandler {
 +              chan_handler: channelmanager.clone(),
 +              route_handler: router.clone(),
 +      }, our_network_key, &[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 15, 0], Arc::clone(&logger)));
 +
 +      let mut should_forward = false;
-       let mut payments_received: Vec<PaymentHash> = Vec::new();
++      let mut payments_received: Vec<(PaymentHash, u64)> = Vec::new();
 +      let mut payments_sent = 0;
 +      let mut pending_funding_generation: Vec<([u8; 32], u64, Script)> = Vec::new();
 +      let mut pending_funding_signatures = HashMap::new();
 +      let mut pending_funding_relay = Vec::new();
 +
 +      loop {
 +              match get_slice!(1)[0] {
 +                      0 => {
 +                              let mut new_id = 0;
 +                              for i in 1..256 {
 +                                      if !peers.borrow()[i-1] {
 +                                              new_id = i;
 +                                              break;
 +                                      }
 +                              }
 +                              if new_id == 0 { return; }
 +                              loss_detector.handler.new_outbound_connection(get_pubkey!(), Peer{id: (new_id - 1) as u8, peers_connected: &peers}).unwrap();
 +                              peers.borrow_mut()[new_id - 1] = true;
 +                      },
 +                      1 => {
 +                              let mut new_id = 0;
 +                              for i in 1..256 {
 +                                      if !peers.borrow()[i-1] {
 +                                              new_id = i;
 +                                              break;
 +                                      }
 +                              }
 +                              if new_id == 0 { return; }
 +                              loss_detector.handler.new_inbound_connection(Peer{id: (new_id - 1) as u8, peers_connected: &peers}).unwrap();
 +                              peers.borrow_mut()[new_id - 1] = true;
 +                      },
 +                      2 => {
 +                              let peer_id = get_slice!(1)[0];
 +                              if !peers.borrow()[peer_id as usize] { return; }
 +                              loss_detector.handler.disconnect_event(&Peer{id: peer_id, peers_connected: &peers});
 +                              peers.borrow_mut()[peer_id as usize] = false;
 +                      },
 +                      3 => {
 +                              let peer_id = get_slice!(1)[0];
 +                              if !peers.borrow()[peer_id as usize] { return; }
 +                              match loss_detector.handler.read_event(&mut Peer{id: peer_id, peers_connected: &peers}, get_slice!(get_slice!(1)[0]).to_vec()) {
 +                                      Ok(res) => assert!(!res),
 +                                      Err(_) => { peers.borrow_mut()[peer_id as usize] = false; }
 +                              }
 +                      },
 +                      4 => {
 +                              let value = slice_to_be24(get_slice!(3)) as u64;
 +                              let route = match router.get_route(&get_pubkey!(), None, &Vec::new(), value, 42) {
 +                                      Ok(route) => route,
 +                                      Err(_) => return,
 +                              };
 +                              let mut payment_hash = PaymentHash([0; 32]);
 +                              payment_hash.0[0..8].copy_from_slice(&be64_to_array(payments_sent));
 +                              let mut sha = Sha256::engine();
 +                              sha.input(&payment_hash.0[..]);
 +                              payment_hash.0 = Sha256::from_engine(sha).into_inner();
 +                              payments_sent += 1;
 +                              match channelmanager.send_payment(route, payment_hash) {
 +                                      Ok(_) => {},
 +                                      Err(_) => return,
 +                              }
 +                      },
 +                      5 => {
 +                              let peer_id = get_slice!(1)[0];
 +                              if !peers.borrow()[peer_id as usize] { return; }
 +                              let their_key = get_pubkey!();
 +                              let chan_value = slice_to_be24(get_slice!(3)) as u64;
 +                              let push_msat_value = slice_to_be24(get_slice!(3)) as u64;
 +                              if channelmanager.create_channel(their_key, chan_value, push_msat_value, 0).is_err() { return; }
 +                      },
 +                      6 => {
 +                              let mut channels = channelmanager.list_channels();
 +                              let channel_id = get_slice!(1)[0] as usize;
 +                              if channel_id >= channels.len() { return; }
 +                              channels.sort_by(|a, b| { a.channel_id.cmp(&b.channel_id) });
 +                              if channelmanager.close_channel(&channels[channel_id].channel_id).is_err() { return; }
 +                      },
 +                      7 => {
 +                              if should_forward {
 +                                      channelmanager.process_pending_htlc_forwards();
 +                                      should_forward = false;
 +                              }
 +                      },
 +                      8 => {
-                               for payment in payments_received.drain(..) {
++                              for (payment, amt) in payments_received.drain(..) {
 +                                      // In fuzz builds, SHA256 is stubbed out as the XOR of all input bytes placed in
 +                                      // the first byte, with 0s for the remaining bytes. Thus, if the remaining bytes
 +                                      // are not all 0s we cannot fulfill this HTLC, but if they are, we can take the
 +                                      // first byte and place it anywhere in our preimage.
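 +                                      // E.g. a payment_hash of [0x42, 0, ..., 0] is fulfilled by the all-zero
 +                                      // preimage with 0x42 as its first byte, since that preimage XORs back to 0x42.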
 +                                      if &payment.0[1..] != &[0; 31] {
 +                                              channelmanager.fail_htlc_backwards(&payment);
 +                                      } else {
 +                                              let mut payment_preimage = PaymentPreimage([0; 32]);
 +                                              payment_preimage.0[0] = payment.0[0];
-                                               channelmanager.claim_funds(payment_preimage);
++                                              channelmanager.claim_funds(payment_preimage, amt);
 +                                      }
 +                              }
 +                      },
 +                      9 => {
-                               for payment in payments_received.drain(..) {
++                              for (payment, _) in payments_received.drain(..) {
 +                                      channelmanager.fail_htlc_backwards(&payment);
 +                              }
 +                      },
 +                      10 => {
 +                              'outer_loop: for funding_generation in pending_funding_generation.drain(..) {
 +                                      let mut tx = Transaction { version: 0, lock_time: 0, input: Vec::new(), output: vec![TxOut {
 +                                                      value: funding_generation.1, script_pubkey: funding_generation.2,
 +                                              }] };
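 +                                      // The loops below grind tx.version until the resulting funding txid is neither
 +                                      // already confirmed nor colliding with an existing channel's id (which is
 +                                      // derived from the funding outpoint), giving up once version exceeds 0xff.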
 +                                      let funding_output = 'search_loop: loop {
 +                                              let funding_txid = tx.txid();
 +                                              if let None = loss_detector.txids_confirmed.get(&funding_txid) {
 +                                                      let outpoint = OutPoint::new(funding_txid, 0);
 +                                                      for chan in channelmanager.list_channels() {
 +                                                              if chan.channel_id == outpoint.to_channel_id() {
 +                                                                      tx.version += 1;
 +                                                                      continue 'search_loop;
 +                                                              }
 +                                                      }
 +                                                      break outpoint;
 +                                              }
 +                                              tx.version += 1;
 +                                              if tx.version > 0xff {
 +                                                      continue 'outer_loop;
 +                                              }
 +                                      };
 +                                      channelmanager.funding_transaction_generated(&funding_generation.0, funding_output.clone());
 +                                      pending_funding_signatures.insert(funding_output, tx);
 +                              }
 +                      },
 +                      11 => {
 +                              if !pending_funding_relay.is_empty() {
 +                                      loss_detector.connect_block(&pending_funding_relay[..]);
 +                                      for _ in 2..100 {
 +                                              loss_detector.connect_block(&[]);
 +                                      }
 +                              }
 +                              for tx in pending_funding_relay.drain(..) {
 +                                      loss_detector.funding_txn.push(tx);
 +                              }
 +                      },
 +                      12 => {
 +                              let txlen = slice_to_be16(get_slice!(2));
 +                              if txlen == 0 {
 +                                      loss_detector.connect_block(&[]);
 +                              } else {
 +                                      let txres: Result<Transaction, _> = deserialize(get_slice!(txlen));
 +                                      if let Ok(tx) = txres {
 +                                              loss_detector.connect_block(&[tx]);
 +                                      } else {
 +                                              return;
 +                                      }
 +                              }
 +                      },
 +                      13 => {
 +                              loss_detector.disconnect_block();
 +                      },
 +                      14 => {
 +                              let mut channels = channelmanager.list_channels();
 +                              let channel_id = get_slice!(1)[0] as usize;
 +                              if channel_id >= channels.len() { return; }
 +                              channels.sort_by(|a, b| { a.channel_id.cmp(&b.channel_id) });
 +                              channelmanager.force_close_channel(&channels[channel_id].channel_id);
 +                      },
 +                      _ => return,
 +              }
 +              loss_detector.handler.process_events();
 +              for event in loss_detector.manager.get_and_clear_pending_events() {
 +                      match event {
 +                              Event::FundingGenerationReady { temporary_channel_id, channel_value_satoshis, output_script, .. } => {
 +                                      pending_funding_generation.push((temporary_channel_id, channel_value_satoshis, output_script));
 +                              },
 +                              Event::FundingBroadcastSafe { funding_txo, .. } => {
 +                                      pending_funding_relay.push(pending_funding_signatures.remove(&funding_txo).unwrap());
 +                              },
-                               Event::PaymentReceived { payment_hash, .. } => {
-                                       payments_received.push(payment_hash);
++                              Event::PaymentReceived { payment_hash, amt } => {
++                                      //TODO: enhance by fetching random amounts from fuzz input?
++                                      payments_received.push((payment_hash, amt));
 +                              },
 +                              Event::PaymentSent {..} => {},
 +                              Event::PaymentFailed {..} => {},
 +                              Event::PendingHTLCsForwardable {..} => {
 +                                      should_forward = true;
 +                              },
 +                              Event::SpendableOutputs {..} => {},
 +                      }
 +              }
 +      }
 +}
 +
 +#[cfg(feature = "afl")]
 +#[macro_use] extern crate afl;
 +#[cfg(feature = "afl")]
 +fn main() {
 +      fuzz!(|data| {
 +              let logger: Arc<Logger> = Arc::new(test_logger::TestLogger::new("".to_owned()));
 +              do_test(data, &logger);
 +      });
 +}
 +
 +#[cfg(feature = "honggfuzz")]
 +#[macro_use] extern crate honggfuzz;
 +#[cfg(feature = "honggfuzz")]
 +fn main() {
 +      loop {
 +              fuzz!(|data| {
 +                      let logger: Arc<Logger> = Arc::new(test_logger::TestLogger::new("".to_owned()));
 +                      do_test(data, &logger);
 +              });
 +      }
 +}
 +
 +#[cfg(feature = "libfuzzer_fuzz")]
 +#[macro_use] extern crate libfuzzer_sys;
 +#[cfg(feature = "libfuzzer_fuzz")]
 +fuzz_target!(|data: &[u8]| {
 +      let logger: Arc<Logger> = Arc::new(test_logger::TestLogger::new("".to_owned()));
 +      do_test(data, &logger);
 +});
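 +// All three fuzzing entrypoints (AFL, honggfuzz, libfuzzer) feed their raw byte
 +// input into the same do_test above, so corpora can be shared between fuzzers.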
 +
 +extern crate hex;
 +#[cfg(test)]
 +mod tests {
 +      use utils::test_logger;
 +      use lightning::util::logger::{Logger, Record};
 +      use std::collections::HashMap;
 +      use std::sync::{Arc, Mutex};
 +
 +      #[test]
 +      fn duplicate_crash() {
 +              let logger: Arc<Logger> = Arc::new(test_logger::TestLogger::new("".to_owned()));
 +              super::do_test(&::hex::decode("00").unwrap(), &logger);
 +      }
 +
 +      struct TrackingLogger {
 +              /// (module, message) -> count
 +              pub lines: Mutex<HashMap<(String, String), usize>>,
 +      }
 +      impl Logger for TrackingLogger {
 +              fn log(&self, record: &Record) {
 +                      *self.lines.lock().unwrap().entry((record.module_path.to_string(), format!("{}", record.args))).or_insert(0) += 1;
 +                      println!("{:<5} [{} : {}, {}] {}", record.level.to_string(), record.module_path, record.file, record.line, record.args);
 +              }
 +      }
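 +      // The per-(module, message) counts let the test assert that each of the CHECK
 +      // messages described below was actually logged the expected number of times.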
 +
 +      #[test]
 +      fn test_no_existing_test_breakage() {
 +              // To avoid accidentally causing all existing fuzz test cases to be useless by making minor
 +              // changes (such as requesting feerate info in a new place), we run a pretty full
 +              // step-through with two peers and HTLC forwarding here. Obviously this is pretty finicky,
 +              // so this should be updated pretty liberally, but at least we'll know when changes occur.
 +              // If nothing else, this test serves as a pretty great initial full_stack_target seed.
 +
 +              // What each byte represents is broken down below, and then everything is concatenated into
 +              // one large test at the end (you want %s/ -.*//g %s/\n\| \|\t\|\///g).
 +
 +              // Following BOLT 8, Lightning messages on the wire are: 2-byte encrypted message length +
 +              // 16-byte MAC of the encrypted message length + encrypted Lightning message + 16-byte MAC
 +              // of the Lightning message.
 +              // I.e., the 2nd inbound read, of len 18: 0006 (encrypted message length) + 03000000000000000000000000000000 (MAC of the encrypted message length)
 +              // Then len 22: 0010 00000000 (encrypted Lightning message) + 03000000000000000000000000000000 (MAC of the Lightning message)
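 +              // In general a message of length L is thus delivered as an 18-byte read (2-byte
 +              // encrypted length + 16-byte MAC) followed by an (L + 16)-byte read (message +
 +              // MAC); hence the len-6 init message above arrives as reads of 18 and 22 bytes.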
 +
 +              // 0000000000000000000000000000000000000000000000000000000000000000 - our network key
 +              // 00000000 - fee_proportional_millionths
 +              // 01 - announced_channel (announce the channel publicly)
 +              //
 +              // 00 - new outbound connection with id 0
 +              // 030000000000000000000000000000000000000000000000000000000000000000 - peer's pubkey
 +              // 030032 - inbound read from peer id 0 of len 50
 +              // 00 030000000000000000000000000000000000000000000000000000000000000000 03000000000000000000000000000000 - noise act two (0||pubkey||mac)
 +              //
 +              // 030012 - inbound read from peer id 0 of len 18
 +              // 0006 03000000000000000000000000000000 - message header indicating message length 6
 +              // 030016 - inbound read from peer id 0 of len 22
 +              // 0010 00000000 03000000000000000000000000000000 - init message with no features (type 16) and mac
 +              //
 +              // 030012 - inbound read from peer id 0 of len 18
 +              // 0141 03000000000000000000000000000000 - message header indicating message length 321
 +              // 0300fe - inbound read from peer id 0 of len 254
 +              // 0020 7500000000000000000000000000000000000000000000000000000000000000 ff4f00f805273c1b203bb5ebf8436bfde57b3be8c2f5e95d9491dbb181909679 000000000000c350 0000000000000000 0000000000000222 ffffffffffffffff 0000000000000222 0000000000000000 000000fd 0006 01e3 030000000000000000000000000000000000000000000000000000000000000001 030000000000000000000000000000000000000000000000000000000000000002 030000000000000000000000000000000000000000000000000000000000000003 030000000000000000000000000000000000000000000000000000000000000004 - beginning of open_channel message
 +              // 030053 - inbound read from peer id 0 of len 83
 +              // 030000000000000000000000000000000000000000000000000000000000000005 030000000000000000000000000000000000000000000000000000000000000000 01 03000000000000000000000000000000 - rest of open_channel and mac
 +              //
 +              // 00fd00fd00fd - Three feerate requests (all returning min feerate, which our open_channel also uses) (gonna be ingested by FuzzEstimator)
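 +              // (each feerate request consumes two bytes of the shared fuzz input through
 +              // FuzzEstimator; 00fd is big-endian 253, i.e. the clamped minimum feerate)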
 +              // - client should now respond with accept_channel (CHECK 1: type 33 to peer 03000000)
 +              //
 +              // 030012 - inbound read from peer id 0 of len 18
 +              // 0084 03000000000000000000000000000000 - message header indicating message length 132
 +              // 030094 - inbound read from peer id 0 of len 148
 +              // 0022 ff4f00f805273c1b203bb5ebf8436bfde57b3be8c2f5e95d9491dbb181909679 3d00000000000000000000000000000000000000000000000000000000000000 0000 5c000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001 03000000000000000000000000000000 - funding_created and mac
 +              // - client should now respond with funding_signed (CHECK 2: type 35 to peer 03000000)
 +              //
 +              // 0c005e - connect a block with one transaction of len 94
 +              // 020000000100000000000000000000000000000000000000000000000000000000000000000000000000ffffffff0150c3000000000000220020ae0000000000000000000000000000000000000000000000000000000000000000000000 - the funding transaction
 +              // 0c0000 - connect a block with no transactions
 +              // 0c0000 - connect a block with no transactions
 +              // 0c0000 - connect a block with no transactions
 +              // 0c0000 - connect a block with no transactions
 +              // 0c0000 - connect a block with no transactions
 +              // 0c0000 - connect a block with no transactions
 +              // 0c0000 - connect a block with no transactions
 +              // 0c0000 - connect a block with no transactions
 +              // 0c0000 - connect a block with no transactions
 +              // 0c0000 - connect a block with no transactions
 +              // 0c0000 - connect a block with no transactions
 +              // 0c0000 - connect a block with no transactions
 +              // - by now client should have sent a funding_locked (CHECK 3: SendFundingLocked to 03000000 for chan 3d000000)
 +              //
 +              // 030012 - inbound read from peer id 0 of len 18
 +              // 0043 03000000000000000000000000000000 - message header indicating message length 67
 +              // 030053 - inbound read from peer id 0 of len 83
 +              // 0024 3d00000000000000000000000000000000000000000000000000000000000000 030100000000000000000000000000000000000000000000000000000000000000 03000000000000000000000000000000 - funding_locked and mac
 +              //
 +              // 01 - new inbound connection with id 1
 +              // 030132 - inbound read from peer id 1 of len 50
 +              // 0003000000000000000000000000000000000000000000000000000000000000000003000000000000000000000000000000 - inbound noise act 1
 +              // 030142 - inbound read from peer id 1 of len 66
 +              // 000302000000000000000000000000000000000000000000000000000000000000000300000000000000000000000000000003000000000000000000000000000000 - inbound noise act 3
 +              //
 +              // 030112 - inbound read from peer id 1 of len 18
 +              // 0006 01000000000000000000000000000000 - message header indicating message length 6
 +              // 030116 - inbound read from peer id 1 of len 22
 +              // 0010 00000000 01000000000000000000000000000000 - init message with no features (type 16)
 +              //
 +              // 05 01 030200000000000000000000000000000000000000000000000000000000000000 00c350 0003e8 - create outbound channel to peer 1 for 50k sat
 +              // 00fd00fd00fd - Three feerate requests (all returning min feerate) (gonna be ingested by FuzzEstimator)
 +              //
 +              // 030112 - inbound read from peer id 1 of len 18
 +              // 0110 01000000000000000000000000000000 - message header indicating message length 272
 +              // 0301ff - inbound read from peer id 1 of len 255
 +              // 0021 0000000000000000000000000000000000000000000000000000000000000e02 000000000000001a 00000000004c4b40 00000000000003e8 00000000000003e8 00000002 03f0 0005 030000000000000000000000000000000000000000000000000000000000000100 030000000000000000000000000000000000000000000000000000000000000200 030000000000000000000000000000000000000000000000000000000000000300 030000000000000000000000000000000000000000000000000000000000000400 030000000000000000000000000000000000000000000000000000000000000500 03000000000000000000000000000000 - beginning of accept_channel
 +              // 030121 - inbound read from peer id 1 of len 33
 +              // 0000000000000000000000000000000000 01000000000000000000000000000000 - rest of accept_channel and mac
 +              //
 +              // 0a - create the funding transaction (client should send funding_created now)
 +              //
 +              // 030112 - inbound read from peer id 1 of len 18
 +              // 0062 01000000000000000000000000000000 - message header indicating message length 98
 +              // 030172 - inbound read from peer id 1 of len 114
 +              // 0023 3900000000000000000000000000000000000000000000000000000000000000 f0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000100 01000000000000000000000000000000 - funding_signed message and mac
 +              //
 +              // 0b - broadcast funding transaction
 +              // - by now client should have sent a funding_locked (CHECK 4: SendFundingLocked to 03020000 for chan 3f000000)
 +              //
 +              // 030112 - inbound read from peer id 1 of len 18
 +              // 0043 01000000000000000000000000000000 - message header indicating message length 67
 +              // 030153 - inbound read from peer id 1 of len 83
 +              // 0024 3900000000000000000000000000000000000000000000000000000000000000 030100000000000000000000000000000000000000000000000000000000000000 01000000000000000000000000000000 - funding_locked and mac
 +              //
 +              // 030012 - inbound read from peer id 0 of len 18
 +              // 05ac 03000000000000000000000000000000 - message header indicating message length 1452
 +              // 0300ff - inbound read from peer id 0 of len 255
 +              // 0080 3d00000000000000000000000000000000000000000000000000000000000000 0000000000000000 0000000000003e80 ff00000000000000000000000000000000000000000000000000000000000000 00000121 00 030000000000000000000000000000000000000000000000000000000000000555 0000000e000001000000000000000003e8000000010000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000 ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff - beginning of update_add_htlc from 0 to 1 via client
 +              // 0300ff - inbound read from peer id 0 of len 255
 +              // ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
 +              // 0300ff - inbound read from peer id 0 of len 255
 +              // ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
 +              // 0300ff - inbound read from peer id 0 of len 255
 +              // ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
 +              // 0300ff - inbound read from peer id 0 of len 255
 +              // ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
 +              // 0300c1 - inbound read from peer id 0 of len 193
 +              // ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff ef00000000000000000000000000000000000000000000000000000000000000 03000000000000000000000000000000 - end of update_add_htlc from 0 to 1 via client and mac
 +              //
 +              // 00fd - A feerate request (returning min feerate, which our open_channel also uses) (gonna be ingested by FuzzEstimator)
 +              //
 +              // 030012 - inbound read from peer id 0 of len 18
 +              // 0064 03000000000000000000000000000000 - message header indicating message length 100
 +              // 030074 - inbound read from peer id 0 of len 116
 +              // 0084 3d00000000000000000000000000000000000000000000000000000000000000 4d000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001 0000 03000000000000000000000000000000 - commitment_signed and mac
 +              // - client should now respond with revoke_and_ack and commitment_signed (CHECK 5/6: types 133 and 132 to peer 03000000)
 +              //
 +              // 030012 - inbound read from peer id 0 of len 18
 +              // 0063 03000000000000000000000000000000 - message header indicating message length 99
 +              // 030073 - inbound read from peer id 0 of len 115
 +              // 0085 3d00000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 030200000000000000000000000000000000000000000000000000000000000000 03000000000000000000000000000000 - revoke_and_ack and mac
 +              //
 +              // 07 - process the now-pending HTLC forward
 +              // - client now sends id 1 update_add_htlc and commitment_signed (CHECK 7: SendHTLCs event for node 03020000 with 1 HTLCs for channel 3f000000)
 +              //
 +              // - we respond with commitment_signed then revoke_and_ack (a weird, but valid, order)
 +              // 030112 - inbound read from peer id 1 of len 18
 +              // 0064 01000000000000000000000000000000 - message header indicating message length 100
 +              // 030174 - inbound read from peer id 1 of len 116
 +              // 0084 3900000000000000000000000000000000000000000000000000000000000000 f1000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000100 0000 01000000000000000000000000000000 - commitment_signed and mac
 +              //
 +              // 030112 - inbound read from peer id 1 of len 18
 +              // 0063 01000000000000000000000000000000 - message header indicating message length 99
 +              // 030173 - inbound read from peer id 1 of len 115
 +              // 0085 3900000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 030200000000000000000000000000000000000000000000000000000000000000 01000000000000000000000000000000 - revoke_and_ack and mac
 +              //
 +              // 030112 - inbound read from peer id 1 of len 18
 +              // 004a 01000000000000000000000000000000 - message header indicating message length 74
 +              // 03015a - inbound read from peer id 1 of len 90
 +              // 0082 3900000000000000000000000000000000000000000000000000000000000000 0000000000000000 ff00888888888888888888888888888888888888888888888888888888888888 01000000000000000000000000000000 - update_fulfill_htlc and mac
 +              // - client should immediately claim the pending HTLC from peer 0 (CHECK 8: SendFulfillHTLCs for node 03000000 with preimage ff00888888 for channel 3d000000)
 +              //
 +              // 030112 - inbound read from peer id 1 of len 18
 +              // 0064 01000000000000000000000000000000 - message header indicating message length 100
 +              // 030174 - inbound read from peer id 1 of len 116
 +              // 0084 3900000000000000000000000000000000000000000000000000000000000000 fd000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000100 0000 01000000000000000000000000000000 - commitment_signed and mac
 +              //
 +              // 030112 - inbound read from peer id 1 of len 18
 +              // 0063 01000000000000000000000000000000 - message header indicating message length 99
 +              // 030173 - inbound read from peer id 1 of len 115
 +              // 0085 3900000000000000000000000000000000000000000000000000000000000000 0100000000000000000000000000000000000000000000000000000000000000 030300000000000000000000000000000000000000000000000000000000000000 01000000000000000000000000000000 - revoke_and_ack and mac
 +              //
 +              // - before responding to the commitment_signed generated above, send a new HTLC
 +              // 030012 - inbound read from peer id 0 of len 18
 +              // 05ac 03000000000000000000000000000000 - message header indicating message length 1452
 +              // 0300ff - inbound read from peer id 0 of len 255
 +              // 0080 3d00000000000000000000000000000000000000000000000000000000000000 0000000000000001 0000000000003e80 ff00000000000000000000000000000000000000000000000000000000000000 00000121 00 030000000000000000000000000000000000000000000000000000000000000555 0000000e000001000000000000000003e8000000010000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000 ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff - beginning of update_add_htlc from 0 to 1 via client
 +              // 0300ff - inbound read from peer id 0 of len 255
 +              // ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
 +              // 0300ff - inbound read from peer id 0 of len 255
 +              // ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
 +              // 0300ff - inbound read from peer id 0 of len 255
 +              // ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
 +              // 0300ff - inbound read from peer id 0 of len 255
 +              // ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
 +              // 0300c1 - inbound read from peer id 0 of len 193
 +              // ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff ef00000000000000000000000000000000000000000000000000000000000000 03000000000000000000000000000000 - end of update_add_htlc from 0 to 1 via client and mac
 +              //
 +              // 00fd - A feerate request (returning min feerate, which our open_channel also uses) (gonna be ingested by FuzzEstimator)
 +              //
 +              // - now respond to the update_fulfill_htlc+commitment_signed messages the client sent to peer 0
 +              // 030012 - inbound read from peer id 0 of len 18
 +              // 0063 03000000000000000000000000000000 - message header indicating message length 99
 +              // 030073 - inbound read from peer id 0 of len 115
 +              // 0085 3d00000000000000000000000000000000000000000000000000000000000000 0100000000000000000000000000000000000000000000000000000000000000 030300000000000000000000000000000000000000000000000000000000000000 03000000000000000000000000000000 - revoke_and_ack and mac
 +              // - client should now respond with revoke_and_ack and commitment_signed (CHECK 5/6 duplicates)
 +              //
 +              // 030012 - inbound read from peer id 0 of len 18
 +              // 0064 03000000000000000000000000000000 - message header indicating message length 100
 +              // 030074 - inbound read from peer id 0 of len 116
 +              // 0084 3d00000000000000000000000000000000000000000000000000000000000000 be000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001 0000 03000000000000000000000000000000 - commitment_signed and mac
 +              //
 +              // 030012 - inbound read from peer id 0 of len 18
 +              // 0063 03000000000000000000000000000000 - message header indicating message length 99
 +              // 030073 - inbound read from peer id 0 of len 115
 +              // 0085 3d00000000000000000000000000000000000000000000000000000000000000 0200000000000000000000000000000000000000000000000000000000000000 030400000000000000000000000000000000000000000000000000000000000000 03000000000000000000000000000000 - revoke_and_ack and mac
 +              //
 +              // 07 - process the now-pending HTLC forward
 +              // - client now sends id 1 update_add_htlc and commitment_signed (CHECK 7 duplicate)
 +              // - we respond with revoke_and_ack, then commitment_signed, then update_fail_htlc
 +              //
 +              // 030112 - inbound read from peer id 1 of len 18
 +              // 0064 01000000000000000000000000000000 - message header indicating message length 100
 +              // 030174 - inbound read from peer id 1 of len 116
 +              // 0084 3900000000000000000000000000000000000000000000000000000000000000 fc000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000100 0000 01000000000000000000000000000000 - commitment_signed and mac
 +              //
 +              // 030112 - inbound read from peer id 1 of len 18
 +              // 0063 01000000000000000000000000000000 - message header indicating message length 99
 +              // 030173 - inbound read from peer id 1 of len 115
 +              // 0085 3900000000000000000000000000000000000000000000000000000000000000 0200000000000000000000000000000000000000000000000000000000000000 030400000000000000000000000000000000000000000000000000000000000000 01000000000000000000000000000000 - revoke_and_ack and mac
 +              //
 +              // 030112 - inbound read from peer id 1 of len 18
 +              // 002c 01000000000000000000000000000000 - message header indicating message length 44
 +              // 03013c - inbound read from peer id 1 of len 60
 +              // 0083 3900000000000000000000000000000000000000000000000000000000000000 0000000000000001 0000 01000000000000000000000000000000 - update_fail_htlc and mac
 +              //
 +              // 030112 - inbound read from peer id 1 of len 18
 +              // 0064 01000000000000000000000000000000 - message header indicating message length 100
 +              // 030174 - inbound read from peer id 1 of len 116
 +              // 0084 3900000000000000000000000000000000000000000000000000000000000000 fb000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000100 0000 01000000000000000000000000000000 - commitment_signed and mac
 +              //
 +              // 030112 - inbound read from peer id 1 of len 18
 +              // 0063 01000000000000000000000000000000 - message header indicating message length 99
 +              // 030173 - inbound read from peer id 1 of len 115
 +              // 0085 3900000000000000000000000000000000000000000000000000000000000000 0300000000000000000000000000000000000000000000000000000000000000 030500000000000000000000000000000000000000000000000000000000000000 01000000000000000000000000000000 - revoke_and_ack and mac
 +              //
 +              // 07 - process the now-pending HTLC forward
 +              // - client now sends id 0 update_fail_htlc and commitment_signed (CHECK 9)
 +              // - now respond to the update_fail_htlc+commitment_signed messages the client sent to peer 0
 +              //
 +              // 030012 - inbound read from peer id 0 of len 18
 +              // 0063 03000000000000000000000000000000 - message header indicating message length 99
 +              // 030073 - inbound read from peer id 0 of len 115
 +              // 0085 3d00000000000000000000000000000000000000000000000000000000000000 0300000000000000000000000000000000000000000000000000000000000000 030500000000000000000000000000000000000000000000000000000000000000 03000000000000000000000000000000 - revoke_and_ack and mac
 +              //
 +              // 030012 - inbound read from peer id 0 of len 18
 +              // 0064 03000000000000000000000000000000 - message header indicating message length 100
 +              // 030074 - inbound read from peer id 0 of len 116
 +              // 0084 3d00000000000000000000000000000000000000000000000000000000000000 4f000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001 0000 03000000000000000000000000000000 - commitment_signed and mac
 +              // - client should now respond with revoke_and_ack (CHECK 5 duplicate)
 +              //
 +              // 030012 - inbound read from peer id 0 of len 18
 +              // 05ac 03000000000000000000000000000000 - message header indicating message length 1452
 +              // 0300ff - inbound read from peer id 0 of len 255
 +              // 0080 3d00000000000000000000000000000000000000000000000000000000000000 0000000000000002 00000000000b0838 ff00000000000000000000000000000000000000000000000000000000000000 00000121 00 030000000000000000000000000000000000000000000000000000000000000555 0000000e0000010000000000000003e800000000010000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000 ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff - beginning of update_add_htlc from 0 to 1 via client
 +              // 0300ff - inbound read from peer id 0 of len 255
 +              // ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
 +              // 0300ff - inbound read from peer id 0 of len 255
 +              // ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
 +              // 0300ff - inbound read from peer id 0 of len 255
 +              // ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
 +              // 0300ff - inbound read from peer id 0 of len 255
 +              // ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
 +              // 0300c1 - inbound read from peer id 0 of len 193
 +              // ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff ef00000000000000000000000000000000000000000000000000000000000000 03000000000000000000000000000000 - end of update_add_htlc from 0 to 1 via client and mac
 +              //
 +              // 00fd - A feerate request (returning min feerate, which our open_channel also uses) (gonna be ingested by FuzzEstimator)
 +              //
 +              // 030012 - inbound read from peer id 0 of len 18
 +              // 00a4 03000000000000000000000000000000 - message header indicating message length 164
 +              // 0300b4 - inbound read from peer id 0 of len 180
 +              // 0084 3d00000000000000000000000000000000000000000000000000000000000000 07000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001 0001 c8000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000007f00000000000000 03000000000000000000000000000000 - commitment_signed and mac
 +              // - client should now respond with revoke_and_ack and commitment_signed (CHECK 5/6 duplicates)
 +              //
 +              // 030012 - inbound read from peer id 0 of len 18
 +              // 0063 03000000000000000000000000000000 - message header indicating message length 99
 +              // 030073 - inbound read from peer id 0 of len 115
 +              // 0085 3d00000000000000000000000000000000000000000000000000000000000000 0400000000000000000000000000000000000000000000000000000000000000 030600000000000000000000000000000000000000000000000000000000000000 03000000000000000000000000000000 - revoke_and_ack and mac
 +              //
 +              // 07 - process the now-pending HTLC forward
 +              // - client now sends id 1 update_add_htlc and commitment_signed (CHECK 7 duplicate)
 +              //
 +              // 0c007d - connect a block with one transaction of len 125
 +              // 0200000001390000000000000000000000000000000000000000000000000000000000000000000000000000008002000100000000000022002090000000000000000000000000000000000000000000000000000000000000006cc10000000000001600145c0000000000000000000000000000000000000005000020 - the commitment transaction for channel 3f00000000000000000000000000000000000000000000000000000000000000
 +              // 00fd - a feerate request (returning the min feerate, which our open_channel also uses; to be ingested by the FuzzEstimator)
 +              // 00fd - a feerate request (returning the min feerate, which our open_channel also uses; to be ingested by the FuzzEstimator)
 +              // 0c005e - connect a block with one transaction of len 94
 +              // 0200000001fd00000000000000000000000000000000000000000000000000000000000000000000000000000000014f00000000000000220020f60000000000000000000000000000000000000000000000000000000000000000000000 - the funding transaction
 +              // 0c0000 - connect a block with no transactions
 +              // 0c0000 - connect a block with no transactions
 +              // 0c0000 - connect a block with no transactions
 +              // 0c0000 - connect a block with no transactions
 +              // 0c0000 - connect a block with no transactions
 +              //
 +              // 07 - process the now-pending HTLC forward
 +              // - client now fails the HTLC backwards as it was unable to extract the payment preimage (CHECK 9 duplicate and CHECK 10)
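 +              //
 +              // Editor's sketch (illustrative only, assuming the framing read off the
 +              // annotations above: 0x03 <peer id> <1-byte len> <bytes> is an inbound read,
 +              // 0x0c <2-byte big-endian len> <tx> connects a block, and 0x07 processes
 +              // pending HTLC forwards). A hypothetical encoder for the inbound-read command:
 +              //
 +              //      fn encode_inbound_read(peer_id: u8, data: &[u8]) -> Vec<u8> {
 +              //              assert!(data.len() <= 0xff);
 +              //              let mut cmd = vec![0x03, peer_id, data.len() as u8];
 +              //              cmd.extend_from_slice(data);
 +              //              cmd
 +              //      }
 +              //
 +              // e.g. an 18-byte inbound read from peer 0 is framed as 030012 followed by the
 +              // 18 message bytes, matching the 030012 prefixes in the annotations above.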
 +
 +              let logger = Arc::new(TrackingLogger { lines: Mutex::new(HashMap::new()) });
 +              super::do_test(&::hex::decode("00000000000000000000000000000000000000000000000000000000000000000000000001000300000000000000000000000000000000000000000000000000000000000000000300320003000000000000000000000000000000000000000000000000000000000000000003000000000000000000000000000000030012000603000000000000000000000000000000030016001000000000030000000000000000000000000000000300120141030000000000000000000000000000000300fe00207500000000000000000000000000000000000000000000000000000000000000ff4f00f805273c1b203bb5ebf8436bfde57b3be8c2f5e95d9491dbb181909679000000000000c35000000000000000000000000000000222ffffffffffffffff00000000000002220000000000000000000000fd000601e3030000000000000000000000000000000000000000000000000000000000000001030000000000000000000000000000000000000000000000000000000000000002030000000000000000000000000000000000000000000000000000000000000003030000000000000000000000000000000000000000000000000000000000000004030053030000000000000000000000000000000000000000000000000000000000000005030000000000000000000000000000000000000000000000000000000000000000010300000000000000000000000000000000fd00fd00fd0300120084030000000000000000000000000000000300940022ff4f00f805273c1b203bb5ebf8436bfde57b3be8c2f5e95d9491dbb1819096793d0000000000000000000000000000000000000000000000000000000000000000005c000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001030000000000000000000000000000000c005e020000000100000000000000000000000000000000000000000000000000000000000000000000000000ffffffff0150c3000000000000220020ae00000000000000000000000000000000000000000000000000000000000000000000000c00000c00000c00000c00000c00000c00000c00000c00000c00000c00000c00000c000003001200430300000000000000000000000000000003005300243d000000000000000000000000000000000000000000000000000000000000000301000000000000000000000000000000000000000000000000000000000000000300000000000000000000000000000001030132000300000000000000000000000000000000000000000000000000000000000000000300000000000000000000000000000003014200030200000000000000000000000000000000000000000000000000000000000000030000000000000000000000000000000300000000000000000000000000000003011200060100000000000000000000000000000003011600100000000001000000000000000000000000000000050103020000000000000000000000000000000000000000000000000000000000000000c3500003e800fd00fd00fd0301120110010000000000000000000000000000000301ff00210000000000000000000000000000000000000000000000000000000000000e02000000000000001a00000000004c4b4000000000000003e800000000000003e80000000203f00005030000000000000000000000000000000000000000000000000000000000000100030000000000000000000000000000000000000000000000000000000000000200030000000000000000000000000000000000000000000000000000000000000300030000000000000000000000000000000000000000000000000000000000000400030000000000000000000000000000000000000000000000000000000000000500030000000000000000000000000000000301210000000000000000000000000000000000010000000000000000000000000000000a03011200620100000000000000000000000000000003017200233900000000000000000000000000000000000000000000000000000000000000f0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000100010000000000000000000000000000000b030112004301000000000000000000000000000000030153002439000000000000000000000000000000000000000000000000000000000000000301000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000003001205ac030000000000000000000000000000000300ff00803d0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003e80ff0000000000000000000000000000000000000000000000000000000000000000000121000300000000000000000000000000000000000000000000000000000000000005550000000e000001000000000000000003e8000000010000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0300ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0300ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0300ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0300ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0300c1ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffef000000000000000000000000000000000000000000000000000000000000000300000000000000000000000000000000fd03001200640300000000000000000000000000000003007400843d000000000000000000000000000000000000000000000000000000000000004d00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000100000300000000000000000000000000000003001200630300000000000000000000000000000003007300853d00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003020000000000000000000000000000000000000000000000000000000000000003000000000000000000000000000000070301120064010000000000000000000000000000000003017400843900000000000000000000000000000000000000000000000000000000000000f100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000010000000100000000000000000000000000000003011200630100000000000000000000000000000003017300853900000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003020000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000030112004a0100000000000000000000000000000003015a008239000000000000000000000000000000000000000000000000000000000000000000000000000000ff008888888888888888888888888888888888888888888888888888888888880100000000000000000000000000000003011200640100000000000000000000000000000003017400843900000000000000000000000000000000000000000000000000000000000000fd0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001000000010000000000000000000000000000000301120063010000000000000000000000000000000301730085390000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000303000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000003001205ac030000000000000000000000000000000300ff00803d0000000000000000000000000000000000000000000000000000000000000000000000000000010000000000003e80ff0000000000000000000000000000000000000000000000000000000000000000000121000300000000000000000000000000000000000000000000000000000000000005550000000e000001000000000000000003e8000000010000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0300ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0300ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0300ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0300ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0300c1ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffef000000000000000000000000000000000000000000000000000000000000000300000000000000000000000000000000fd03001200630300000000000000000000000000000003007300853d0000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000303000000000000000000000000000000000000000000000000000000000000000300000000000000000000000000000003001200640300000000000000000000000000000003007400843d00000000000000000000000000000000000000000000000000000000000000be00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000100000300000000000000000000000000000003001200630300000000000000000000000000000003007300853d000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000030400000000000000000000000000000000000000000000000000000000000000030000000000000000000000000000000703011200640100000000000000000000000000000003017400843900000000000000000000000000000000000000000000000000000000000000fc00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000010000000100000000000000000000000000000003011200630100000000000000000000000000000003017300853900000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000003040000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000030112002c0100000000000000000000000000000003013c00833900000000000000000000000000000000000000000000000000000000000000000000000000000100000100000000000000000000000000000003011200640100000000000000000000000000000003017400843900000000000000000000000000000000000000000000000000000000000000fb000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000100000001000000000000000000000000000000030112006301000000000000000000000000000000030173008539000000000000000000000000000000000000000000000000000000000000000300000000000000000000000000000000000000000000000000000000000000030500000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000703001200630300000000000000000000000000000003007300853d0000000000000000000000000000000000000000000000000000000000000003000000000000000000000000000000000000000000000000000000000000000305000000000000000000000000000000000000000000000000000000000000000300000000000000000000000000000003001200640300000000000000000000000000000003007400843d000000000000000000000000000000000000000000000000000000000000004f00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000100000300000000000000000000000000000003001205ac030000000000000000000000000000000300ff00803d00000000000000000000000000000000000000000000000000000000000000000000000000000200000000000b0838ff0000000000000000000000000000000000000000000000000000000000000000000121000300000000000000000000000000000000000000000000000000000000000005550000000e0000010000000000000003e800000000010000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0300ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0300ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0300ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0300ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0300c1ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffef000000000000000000000000000000000000000000000000000000000000000300000000000000000000000000000000fd03001200a4030000000000000000000000000000000300b400843d00000000000000000000000000000000000000000000000000000000000000070000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000010001c8000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000007f000000000000000300000000000000000000000000000003001200630300000000000000000000000000000003007300853d00000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000003060000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000070c007d0200000001390000000000000000000000000000000000000000000000000000000000000000000000000000008002000100000000000022002090000000000000000000000000000000000000000000000000000000000000006cc10000000000001600145c000000000000000000000000000000000000000500002000fd00fd0c005e0200000001fd00000000000000000000000000000000000000000000000000000000000000000000000000000000014f00000000000000220020f600000000000000000000000000000000000000000000000000000000000000000000000c00000c00000c00000c00000c000007").unwrap(), &(Arc::clone(&logger) as Arc<Logger>));
 +
 +              let log_entries = logger.lines.lock().unwrap();
 +              assert_eq!(log_entries.get(&("lightning::ln::peer_handler".to_string(), "Handling SendAcceptChannel event in peer_handler for node 030000000000000000000000000000000000000000000000000000000000000000 for channel ff4f00f805273c1b203bb5ebf8436bfde57b3be8c2f5e95d9491dbb181909679".to_string())), Some(&1)); // 1
 +              assert_eq!(log_entries.get(&("lightning::ln::peer_handler".to_string(), "Handling SendFundingSigned event in peer_handler for node 030000000000000000000000000000000000000000000000000000000000000000 for channel 3d00000000000000000000000000000000000000000000000000000000000000".to_string())), Some(&1)); // 2
 +              assert_eq!(log_entries.get(&("lightning::ln::peer_handler".to_string(), "Handling SendFundingLocked event in peer_handler for node 030000000000000000000000000000000000000000000000000000000000000000 for channel 3d00000000000000000000000000000000000000000000000000000000000000".to_string())), Some(&1)); // 3
 +              assert_eq!(log_entries.get(&("lightning::ln::peer_handler".to_string(), "Handling SendFundingLocked event in peer_handler for node 030200000000000000000000000000000000000000000000000000000000000000 for channel 3900000000000000000000000000000000000000000000000000000000000000".to_string())), Some(&1)); // 4
 +              assert_eq!(log_entries.get(&("lightning::ln::peer_handler".to_string(), "Handling SendRevokeAndACK event in peer_handler for node 030000000000000000000000000000000000000000000000000000000000000000 for channel 3d00000000000000000000000000000000000000000000000000000000000000".to_string())), Some(&4)); // 5
 +              assert_eq!(log_entries.get(&("lightning::ln::peer_handler".to_string(), "Handling UpdateHTLCs event in peer_handler for node 030000000000000000000000000000000000000000000000000000000000000000 with 0 adds, 0 fulfills, 0 fails for channel 3d00000000000000000000000000000000000000000000000000000000000000".to_string())), Some(&3)); // 6
 +              assert_eq!(log_entries.get(&("lightning::ln::peer_handler".to_string(), "Handling UpdateHTLCs event in peer_handler for node 030200000000000000000000000000000000000000000000000000000000000000 with 1 adds, 0 fulfills, 0 fails for channel 3900000000000000000000000000000000000000000000000000000000000000".to_string())), Some(&3)); // 7
 +              assert_eq!(log_entries.get(&("lightning::ln::peer_handler".to_string(), "Handling UpdateHTLCs event in peer_handler for node 030000000000000000000000000000000000000000000000000000000000000000 with 0 adds, 1 fulfills, 0 fails for channel 3d00000000000000000000000000000000000000000000000000000000000000".to_string())), Some(&1)); // 8
 +              assert_eq!(log_entries.get(&("lightning::ln::peer_handler".to_string(), "Handling UpdateHTLCs event in peer_handler for node 030000000000000000000000000000000000000000000000000000000000000000 with 0 adds, 0 fulfills, 1 fails for channel 3d00000000000000000000000000000000000000000000000000000000000000".to_string())), Some(&2)); // 9
 +              assert_eq!(log_entries.get(&("lightning::ln::channelmonitor".to_string(), "Input spending remote commitment tx (00000000000000000000000000000000000000000000000000000000000000fd:0) in 0000000000000000000000000000000000000000000000000000000000000044 resolves outbound HTLC with payment hash ff00000000000000000000000000000000000000000000000000000000000000 with timeout".to_string())), Some(&1)); // 10
 +      }
 +}
index 4b8490c5b227b8c7b506ae2cc699135ffbe23f46,0000000000000000000000000000000000000000..c1fe6fbdd6d7c54002d31f277481a200f5cb5b7b
mode 100644,000000..100644
--- /dev/null
@@@ -1,1684 -1,0 +1,1684 @@@
-       claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_1);
 +//! Functional tests which test the correct handling of ChannelMonitorUpdateErr returns from
 +//! monitor updates.
 +//! There are a bunch of these, as their handling is relatively error-prone, so they are split out
 +//! here. See also the chanmon_fail_consistency fuzz test.
 +
 +use ln::channelmanager::{RAACommitmentOrder, PaymentPreimage, PaymentHash};
 +use ln::channelmonitor::ChannelMonitorUpdateErr;
 +use ln::msgs;
 +use ln::msgs::{ChannelMessageHandler, LocalFeatures, RoutingMessageHandler};
 +use util::events::{Event, EventsProvider, MessageSendEvent, MessageSendEventsProvider};
 +use util::errors::APIError;
 +
 +use bitcoin_hashes::sha256::Hash as Sha256;
 +use bitcoin_hashes::Hash;
 +
 +use ln::functional_test_utils::*;
 +
 +#[test]
 +fn test_simple_monitor_permanent_update_fail() {
 +      // Test that we handle a simple permanent monitor update failure
 +      let mut nodes = create_network(2, &[None, None]);
 +      create_announced_chan_between_nodes(&nodes, 0, 1, LocalFeatures::new(), LocalFeatures::new());
 +
 +      let route = nodes[0].router.get_route(&nodes[1].node.get_our_node_id(), None, &Vec::new(), 1000000, TEST_FINAL_CLTV).unwrap();
 +      let (_, payment_hash_1) = get_payment_preimage_hash!(nodes[0]);
 +
 +      *nodes[0].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::PermanentFailure);
 +      if let Err(APIError::ChannelUnavailable {..}) = nodes[0].node.send_payment(route, payment_hash_1) {} else { panic!(); }
 +      check_added_monitors!(nodes[0], 1);
 +
 +      let events_1 = nodes[0].node.get_and_clear_pending_msg_events();
 +      assert_eq!(events_1.len(), 2);
 +      match events_1[0] {
 +              MessageSendEvent::BroadcastChannelUpdate { .. } => {},
 +              _ => panic!("Unexpected event"),
 +      };
 +      match events_1[1] {
 +              MessageSendEvent::HandleError { node_id, .. } => assert_eq!(node_id, nodes[1].node.get_our_node_id()),
 +              _ => panic!("Unexpected event"),
 +      };
 +
 +      // TODO: Once we hit the chain with the failure transaction we should check that we get a
 +      // PaymentFailed event
 +
 +      assert_eq!(nodes[0].node.list_channels().len(), 0);
 +}
 +
 +fn do_test_simple_monitor_temporary_update_fail(disconnect: bool) {
 +      // Test that we can recover from a simple temporary monitor update failure, optionally with
 +      // a disconnect in between
 +      let mut nodes = create_network(2, &[None, None]);
 +      create_announced_chan_between_nodes(&nodes, 0, 1, LocalFeatures::new(), LocalFeatures::new());
 +
 +      let route = nodes[0].router.get_route(&nodes[1].node.get_our_node_id(), None, &Vec::new(), 1000000, TEST_FINAL_CLTV).unwrap();
 +      let (payment_preimage_1, payment_hash_1) = get_payment_preimage_hash!(nodes[0]);
 +
 +      *nodes[0].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
 +      if let Err(APIError::MonitorUpdateFailed) = nodes[0].node.send_payment(route.clone(), payment_hash_1) {} else { panic!(); }
 +      check_added_monitors!(nodes[0], 1);
 +
 +      assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
 +      assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
 +      assert_eq!(nodes[0].node.list_channels().len(), 1);
 +
 +      if disconnect {
 +              nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
 +              nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
 +              reconnect_nodes(&nodes[0], &nodes[1], (true, true), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
 +      }
 +
 +      *nodes[0].chan_monitor.update_ret.lock().unwrap() = Ok(());
 +      nodes[0].node.test_restore_channel_monitor();
 +      check_added_monitors!(nodes[0], 1);
 +
 +      let mut events_2 = nodes[0].node.get_and_clear_pending_msg_events();
 +      assert_eq!(events_2.len(), 1);
 +      let payment_event = SendEvent::from_event(events_2.pop().unwrap());
 +      assert_eq!(payment_event.node_id, nodes[1].node.get_our_node_id());
 +      nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]).unwrap();
 +      commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
 +
 +      expect_pending_htlcs_forwardable!(nodes[1]);
 +
 +      let events_3 = nodes[1].node.get_and_clear_pending_events();
 +      assert_eq!(events_3.len(), 1);
 +      match events_3[0] {
 +              Event::PaymentReceived { ref payment_hash, amt } => {
 +                      assert_eq!(payment_hash_1, *payment_hash);
 +                      assert_eq!(amt, 1000000);
 +              },
 +              _ => panic!("Unexpected event"),
 +      }
 +
-       assert!(nodes[1].node.claim_funds(payment_preimage_1));
++      claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_1, 1_000_000);
 +
 +      // Now set it to failed again...
 +      let (_, payment_hash_2) = get_payment_preimage_hash!(nodes[0]);
 +      *nodes[0].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
 +      if let Err(APIError::MonitorUpdateFailed) = nodes[0].node.send_payment(route, payment_hash_2) {} else { panic!(); }
 +      check_added_monitors!(nodes[0], 1);
 +
 +      assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
 +      assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
 +      assert_eq!(nodes[0].node.list_channels().len(), 1);
 +
 +      if disconnect {
 +              nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
 +              nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
 +              reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
 +      }
 +
 +      // ...and make sure we can force-close a TemporaryFailure channel with a PermanentFailure
 +      *nodes[0].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::PermanentFailure);
 +      nodes[0].node.test_restore_channel_monitor();
 +      check_added_monitors!(nodes[0], 1);
 +      check_closed_broadcast!(nodes[0]);
 +
 +      // TODO: Once we hit the chain with the failure transaction we should check that we get a
 +      // PaymentFailed event
 +
 +      assert_eq!(nodes[0].node.list_channels().len(), 0);
 +}
 +
 +#[test]
 +fn test_simple_monitor_temporary_update_fail() {
 +      do_test_simple_monitor_temporary_update_fail(false);
 +      do_test_simple_monitor_temporary_update_fail(true);
 +}
 +
 +fn do_test_monitor_temporary_update_fail(disconnect_count: usize) {
 +      let disconnect_flags = 8 | 16;
 +
 +      // Test that we can recover from a temporary monitor update failure while some in-flight
 +      // HTLCs are outstanding, potentially with some disconnections thrown in.
 +      // * First we route a payment, then get a temporary monitor update failure when trying to
 +      //   route a second payment. We then claim the first payment.
 +      // * If disconnect_count is set, we disconnect at this point (a plausible scenario, as
 +      //   TemporaryFailure often indicates a network disconnect which caused an update of the
 +      //   ChannelMonitor on a watchtower to fail).
 +      // * If !(disconnect_count & 16) we deliver an update_fulfill_htlc/CS for the first payment
 +      //   immediately, otherwise we wait for the disconnect and deliver them via the reconnect
 +      //   channel_reestablish processing (i.e. disconnect_count & 16 makes no sense if
 +      //   disconnect_count & !disconnect_flags is 0).
 +      // * We then update the channel monitor, reconnecting if disconnect_count is set, and walk
 +      //   through message sending, potentially disconnecting/reconnecting multiple times based on
 +      //   disconnect_count, to get the update_fulfill_htlc through.
 +      // * We then walk through more message exchanges to get the original update_add_htlc
 +      //   through, swapping message ordering based on disconnect_count & 8 and optionally
 +      //   disconnecting/reconnecting based on disconnect_count (see the bit summary below).
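 +      //
 +      // Editor's summary of the flag bits (derived from this test's code; hedged, not
 +      // authoritative):
 +      //   disconnect_count & !(8 | 16) -> how many of the optional disconnect/reconnect
 +      //                                   points below actually fire
 +      //   disconnect_count & 8         -> nodes[1] handles the initial RAA before nodes[0]
 +      //                                   handles nodes[1]'s RAA
 +      //   disconnect_count & 16        -> the update_fulfill_htlc/CS is delivered only via
 +      //                                   the channel_reestablish flow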
 +      let mut nodes = create_network(2, &[None, None]);
 +      create_announced_chan_between_nodes(&nodes, 0, 1, LocalFeatures::new(), LocalFeatures::new());
 +
 +      let (payment_preimage_1, _) = route_payment(&nodes[0], &[&nodes[1]], 1000000);
 +
 +      // Now try to send a second payment which will fail to send
 +      let route = nodes[0].router.get_route(&nodes[1].node.get_our_node_id(), None, &Vec::new(), 1000000, TEST_FINAL_CLTV).unwrap();
 +      let (payment_preimage_2, payment_hash_2) = get_payment_preimage_hash!(nodes[0]);
 +
 +      *nodes[0].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
 +      if let Err(APIError::MonitorUpdateFailed) = nodes[0].node.send_payment(route.clone(), payment_hash_2) {} else { panic!(); }
 +      check_added_monitors!(nodes[0], 1);
 +
 +      assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
 +      assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
 +      assert_eq!(nodes[0].node.list_channels().len(), 1);
 +
 +      // Claim the previous payment, which will result in an update_fulfill_htlc/CS from nodes[1],
 +      // but nodes[0] won't respond since it is frozen.
-       claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_2);
++      assert!(nodes[1].node.claim_funds(payment_preimage_1, 1_000_000));
 +      check_added_monitors!(nodes[1], 1);
 +      let events_2 = nodes[1].node.get_and_clear_pending_msg_events();
 +      assert_eq!(events_2.len(), 1);
 +      let (bs_initial_fulfill, bs_initial_commitment_signed) = match events_2[0] {
 +              MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, ref commitment_signed } } => {
 +                      assert_eq!(*node_id, nodes[0].node.get_our_node_id());
 +                      assert!(update_add_htlcs.is_empty());
 +                      assert_eq!(update_fulfill_htlcs.len(), 1);
 +                      assert!(update_fail_htlcs.is_empty());
 +                      assert!(update_fail_malformed_htlcs.is_empty());
 +                      assert!(update_fee.is_none());
 +
 +                      if (disconnect_count & 16) == 0 {
 +                              nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &update_fulfill_htlcs[0]).unwrap();
 +                              let events_3 = nodes[0].node.get_and_clear_pending_events();
 +                              assert_eq!(events_3.len(), 1);
 +                              match events_3[0] {
 +                                      Event::PaymentSent { ref payment_preimage } => {
 +                                              assert_eq!(*payment_preimage, payment_preimage_1);
 +                                      },
 +                                      _ => panic!("Unexpected event"),
 +                              }
 +
 +                              if let Err(msgs::LightningError{err, action: msgs::ErrorAction::IgnoreError }) = nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), commitment_signed) {
 +                                      assert_eq!(err, "Previous monitor update failure prevented generation of RAA");
 +                              } else { panic!(); }
 +                      }
 +
 +                      (update_fulfill_htlcs[0].clone(), commitment_signed.clone())
 +              },
 +              _ => panic!("Unexpected event"),
 +      };
 +
 +      if disconnect_count & !disconnect_flags > 0 {
 +              nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
 +              nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
 +      }
 +
 +      // Now fix monitor updating...
 +      *nodes[0].chan_monitor.update_ret.lock().unwrap() = Ok(());
 +      nodes[0].node.test_restore_channel_monitor();
 +      check_added_monitors!(nodes[0], 1);
 +
 +      macro_rules! disconnect_reconnect_peers { () => { {
 +              nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
 +              nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
 +
 +              nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id());
 +              let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]);
 +              assert_eq!(reestablish_1.len(), 1);
 +              nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id());
 +              let reestablish_2 = get_chan_reestablish_msgs!(nodes[1], nodes[0]);
 +              assert_eq!(reestablish_2.len(), 1);
 +
 +              nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &reestablish_2[0]).unwrap();
 +              let as_resp = handle_chan_reestablish_msgs!(nodes[0], nodes[1]);
 +              nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &reestablish_1[0]).unwrap();
 +              let bs_resp = handle_chan_reestablish_msgs!(nodes[1], nodes[0]);
 +
 +              assert!(as_resp.0.is_none());
 +              assert!(bs_resp.0.is_none());
 +
 +              (reestablish_1, reestablish_2, as_resp, bs_resp)
 +      } } }
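 +      // (Editor's note) The macro yields the raw channel_reestablish messages plus each
 +      // side's parsed responses; callers below use it to check that repeated reconnects
 +      // are idempotent, producing identical reestablish state each time.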
 +
 +      let (payment_event, initial_revoke_and_ack) = if disconnect_count & !disconnect_flags > 0 {
 +              assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
 +              assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
 +
 +              nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id());
 +              let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]);
 +              assert_eq!(reestablish_1.len(), 1);
 +              nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id());
 +              let reestablish_2 = get_chan_reestablish_msgs!(nodes[1], nodes[0]);
 +              assert_eq!(reestablish_2.len(), 1);
 +
 +              nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &reestablish_2[0]).unwrap();
 +              check_added_monitors!(nodes[0], 0);
 +              let mut as_resp = handle_chan_reestablish_msgs!(nodes[0], nodes[1]);
 +              nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &reestablish_1[0]).unwrap();
 +              check_added_monitors!(nodes[1], 0);
 +              let mut bs_resp = handle_chan_reestablish_msgs!(nodes[1], nodes[0]);
 +
 +              assert!(as_resp.0.is_none());
 +              assert!(bs_resp.0.is_none());
 +
 +              assert!(bs_resp.1.is_none());
 +              if (disconnect_count & 16) == 0 {
 +                      assert!(bs_resp.2.is_none());
 +
 +                      assert!(as_resp.1.is_some());
 +                      assert!(as_resp.2.is_some());
 +                      assert!(as_resp.3 == RAACommitmentOrder::CommitmentFirst);
 +              } else {
 +                      assert!(bs_resp.2.as_ref().unwrap().update_add_htlcs.is_empty());
 +                      assert!(bs_resp.2.as_ref().unwrap().update_fail_htlcs.is_empty());
 +                      assert!(bs_resp.2.as_ref().unwrap().update_fail_malformed_htlcs.is_empty());
 +                      assert!(bs_resp.2.as_ref().unwrap().update_fee.is_none());
 +                      assert!(bs_resp.2.as_ref().unwrap().update_fulfill_htlcs == vec![bs_initial_fulfill]);
 +                      assert!(bs_resp.2.as_ref().unwrap().commitment_signed == bs_initial_commitment_signed);
 +
 +                      assert!(as_resp.1.is_none());
 +
 +                      nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_resp.2.as_ref().unwrap().update_fulfill_htlcs[0]).unwrap();
 +                      let events_3 = nodes[0].node.get_and_clear_pending_events();
 +                      assert_eq!(events_3.len(), 1);
 +                      match events_3[0] {
 +                              Event::PaymentSent { ref payment_preimage } => {
 +                                      assert_eq!(*payment_preimage, payment_preimage_1);
 +                              },
 +                              _ => panic!("Unexpected event"),
 +                      }
 +
 +                      nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_resp.2.as_ref().unwrap().commitment_signed).unwrap();
 +                      let as_resp_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
 +                      // No commitment_signed so get_event_msg's assert(len == 1) passes
 +                      check_added_monitors!(nodes[0], 1);
 +
 +                      as_resp.1 = Some(as_resp_raa);
 +                      bs_resp.2 = None;
 +              }
 +
 +              if disconnect_count & !disconnect_flags > 1 {
 +                      let (second_reestablish_1, second_reestablish_2, second_as_resp, second_bs_resp) = disconnect_reconnect_peers!();
 +
 +                      if (disconnect_count & 16) == 0 {
 +                              assert!(reestablish_1 == second_reestablish_1);
 +                              assert!(reestablish_2 == second_reestablish_2);
 +                      }
 +                      assert!(as_resp == second_as_resp);
 +                      assert!(bs_resp == second_bs_resp);
 +              }
 +
 +              (SendEvent::from_commitment_update(nodes[1].node.get_our_node_id(), as_resp.2.unwrap()), as_resp.1.unwrap())
 +      } else {
 +              let mut events_4 = nodes[0].node.get_and_clear_pending_msg_events();
 +              assert_eq!(events_4.len(), 2);
 +              (SendEvent::from_event(events_4.remove(0)), match events_4[0] {
 +                      MessageSendEvent::SendRevokeAndACK { ref node_id, ref msg } => {
 +                              assert_eq!(*node_id, nodes[1].node.get_our_node_id());
 +                              msg.clone()
 +                      },
 +                      _ => panic!("Unexpected event"),
 +              })
 +      };
 +
 +      assert_eq!(payment_event.node_id, nodes[1].node.get_our_node_id());
 +
 +      nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]).unwrap();
 +      nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event.commitment_msg).unwrap();
 +      let bs_revoke_and_ack = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
 +      // nodes[1] is still awaiting an RAA from nodes[0], so get_event_msg's assert(len == 1) passes
 +      check_added_monitors!(nodes[1], 1);
 +
 +      if disconnect_count & !disconnect_flags > 2 {
 +              let (_, _, as_resp, bs_resp) = disconnect_reconnect_peers!();
 +
 +              assert!(as_resp.1.unwrap() == initial_revoke_and_ack);
 +              assert!(bs_resp.1.unwrap() == bs_revoke_and_ack);
 +
 +              assert!(as_resp.2.is_none());
 +              assert!(bs_resp.2.is_none());
 +      }
 +
 +      let as_commitment_update;
 +      let bs_second_commitment_update;
 +
 +      macro_rules! handle_bs_raa { () => {
 +              nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_revoke_and_ack).unwrap();
 +              as_commitment_update = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
 +              assert!(as_commitment_update.update_add_htlcs.is_empty());
 +              assert!(as_commitment_update.update_fulfill_htlcs.is_empty());
 +              assert!(as_commitment_update.update_fail_htlcs.is_empty());
 +              assert!(as_commitment_update.update_fail_malformed_htlcs.is_empty());
 +              assert!(as_commitment_update.update_fee.is_none());
 +              check_added_monitors!(nodes[0], 1);
 +      } }
 +
 +      macro_rules! handle_initial_raa { () => {
 +              nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &initial_revoke_and_ack).unwrap();
 +              bs_second_commitment_update = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
 +              assert!(bs_second_commitment_update.update_add_htlcs.is_empty());
 +              assert!(bs_second_commitment_update.update_fulfill_htlcs.is_empty());
 +              assert!(bs_second_commitment_update.update_fail_htlcs.is_empty());
 +              assert!(bs_second_commitment_update.update_fail_malformed_htlcs.is_empty());
 +              assert!(bs_second_commitment_update.update_fee.is_none());
 +              check_added_monitors!(nodes[1], 1);
 +      } }
 +
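 +      // (Editor's comment) Depending on bit 8 the two RAAs are handled in either order,
 +      // exercising both RAACommitmentOrder paths and re-checking the reestablish state
 +      // after each optional disconnect.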
 +      if (disconnect_count & 8) == 0 {
 +              handle_bs_raa!();
 +
 +              if disconnect_count & !disconnect_flags > 3 {
 +                      let (_, _, as_resp, bs_resp) = disconnect_reconnect_peers!();
 +
 +                      assert!(as_resp.1.unwrap() == initial_revoke_and_ack);
 +                      assert!(bs_resp.1.is_none());
 +
 +                      assert!(as_resp.2.unwrap() == as_commitment_update);
 +                      assert!(bs_resp.2.is_none());
 +
 +                      assert!(as_resp.3 == RAACommitmentOrder::RevokeAndACKFirst);
 +              }
 +
 +              handle_initial_raa!();
 +
 +              if disconnect_count & !disconnect_flags > 4 {
 +                      let (_, _, as_resp, bs_resp) = disconnect_reconnect_peers!();
 +
 +                      assert!(as_resp.1.is_none());
 +                      assert!(bs_resp.1.is_none());
 +
 +                      assert!(as_resp.2.unwrap() == as_commitment_update);
 +                      assert!(bs_resp.2.unwrap() == bs_second_commitment_update);
 +              }
 +      } else {
 +              handle_initial_raa!();
 +
 +              if disconnect_count & !disconnect_flags > 3 {
 +                      let (_, _, as_resp, bs_resp) = disconnect_reconnect_peers!();
 +
 +                      assert!(as_resp.1.is_none());
 +                      assert!(bs_resp.1.unwrap() == bs_revoke_and_ack);
 +
 +                      assert!(as_resp.2.is_none());
 +                      assert!(bs_resp.2.unwrap() == bs_second_commitment_update);
 +
 +                      assert!(bs_resp.3 == RAACommitmentOrder::RevokeAndACKFirst);
 +              }
 +
 +              handle_bs_raa!();
 +
 +              if disconnect_count & !disconnect_flags > 4 {
 +                      let (_, _, as_resp, bs_resp) = disconnect_reconnect_peers!();
 +
 +                      assert!(as_resp.1.is_none());
 +                      assert!(bs_resp.1.is_none());
 +
 +                      assert!(as_resp.2.unwrap() == as_commitment_update);
 +                      assert!(bs_resp.2.unwrap() == bs_second_commitment_update);
 +              }
 +      }
 +
 +      nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_second_commitment_update.commitment_signed).unwrap();
 +      let as_revoke_and_ack = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
 +      // No commitment_signed so get_event_msg's assert(len == 1) passes
 +      check_added_monitors!(nodes[0], 1);
 +
 +      nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_commitment_update.commitment_signed).unwrap();
 +      let bs_second_revoke_and_ack = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
 +      // No commitment_signed so get_event_msg's assert(len == 1) passes
 +      check_added_monitors!(nodes[1], 1);
 +
 +      nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_revoke_and_ack).unwrap();
 +      assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
 +      check_added_monitors!(nodes[1], 1);
 +
 +      nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_second_revoke_and_ack).unwrap();
 +      assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
 +      check_added_monitors!(nodes[0], 1);
 +
 +      expect_pending_htlcs_forwardable!(nodes[1]);
 +
 +      let events_5 = nodes[1].node.get_and_clear_pending_events();
 +      assert_eq!(events_5.len(), 1);
 +      match events_5[0] {
 +              Event::PaymentReceived { ref payment_hash, amt } => {
 +                      assert_eq!(payment_hash_2, *payment_hash);
 +                      assert_eq!(amt, 1000000);
 +              },
 +              _ => panic!("Unexpected event"),
 +      }
 +
-       claim_payment(&nodes[0], &[&nodes[1]], payment_preimage);
++      claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_2, 1_000_000);
 +}
 +
 +#[test]
 +fn test_monitor_temporary_update_fail_a() {
 +      do_test_monitor_temporary_update_fail(0);
 +      do_test_monitor_temporary_update_fail(1);
 +      do_test_monitor_temporary_update_fail(2);
 +      do_test_monitor_temporary_update_fail(3);
 +      do_test_monitor_temporary_update_fail(4);
 +      do_test_monitor_temporary_update_fail(5);
 +}
 +
 +#[test]
 +fn test_monitor_temporary_update_fail_b() {
 +      do_test_monitor_temporary_update_fail(2 | 8);
 +      do_test_monitor_temporary_update_fail(3 | 8);
 +      do_test_monitor_temporary_update_fail(4 | 8);
 +      do_test_monitor_temporary_update_fail(5 | 8);
 +}
 +
 +#[test]
 +fn test_monitor_temporary_update_fail_c() {
 +      do_test_monitor_temporary_update_fail(1 | 16);
 +      do_test_monitor_temporary_update_fail(2 | 16);
 +      do_test_monitor_temporary_update_fail(3 | 16);
 +      do_test_monitor_temporary_update_fail(2 | 8 | 16);
 +      do_test_monitor_temporary_update_fail(3 | 8 | 16);
 +}
 +
 +#[test]
 +fn test_monitor_update_fail_cs() {
 +      // Tests handling of a monitor update failure when processing an incoming commitment_signed
 +      let mut nodes = create_network(2, &[None, None]);
 +      create_announced_chan_between_nodes(&nodes, 0, 1, LocalFeatures::new(), LocalFeatures::new());
 +
 +      let route = nodes[0].router.get_route(&nodes[1].node.get_our_node_id(), None, &Vec::new(), 1000000, TEST_FINAL_CLTV).unwrap();
 +      let (payment_preimage, our_payment_hash) = get_payment_preimage_hash!(nodes[0]);
 +      nodes[0].node.send_payment(route, our_payment_hash).unwrap();
 +      check_added_monitors!(nodes[0], 1);
 +
 +      let send_event = SendEvent::from_event(nodes[0].node.get_and_clear_pending_msg_events().remove(0));
 +      nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &send_event.msgs[0]).unwrap();
 +
 +      *nodes[1].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
 +      if let msgs::LightningError { err, action: msgs::ErrorAction::IgnoreError } = nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &send_event.commitment_msg).unwrap_err() {
 +              assert_eq!(err, "Failed to update ChannelMonitor");
 +      } else { panic!(); }
 +      check_added_monitors!(nodes[1], 1);
 +      assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
 +
 +      *nodes[1].chan_monitor.update_ret.lock().unwrap() = Ok(());
 +      nodes[1].node.test_restore_channel_monitor();
 +      check_added_monitors!(nodes[1], 1);
 +      let responses = nodes[1].node.get_and_clear_pending_msg_events();
 +      assert_eq!(responses.len(), 2);
 +
 +      match responses[0] {
 +              MessageSendEvent::SendRevokeAndACK { ref msg, ref node_id } => {
 +                      assert_eq!(*node_id, nodes[0].node.get_our_node_id());
 +                      nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &msg).unwrap();
 +                      check_added_monitors!(nodes[0], 1);
 +              },
 +              _ => panic!("Unexpected event"),
 +      }
 +      match responses[1] {
 +              MessageSendEvent::UpdateHTLCs { ref updates, ref node_id } => {
 +                      assert!(updates.update_add_htlcs.is_empty());
 +                      assert!(updates.update_fulfill_htlcs.is_empty());
 +                      assert!(updates.update_fail_htlcs.is_empty());
 +                      assert!(updates.update_fail_malformed_htlcs.is_empty());
 +                      assert!(updates.update_fee.is_none());
 +                      assert_eq!(*node_id, nodes[0].node.get_our_node_id());
 +
 +                      *nodes[0].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
 +                      if let msgs::LightningError { err, action: msgs::ErrorAction::IgnoreError } = nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &updates.commitment_signed).unwrap_err() {
 +                              assert_eq!(err, "Failed to update ChannelMonitor");
 +                      } else { panic!(); }
 +                      check_added_monitors!(nodes[0], 1);
 +                      assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
 +              },
 +              _ => panic!("Unexpected event"),
 +      }
 +
 +      *nodes[0].chan_monitor.update_ret.lock().unwrap() = Ok(());
 +      nodes[0].node.test_restore_channel_monitor();
 +      check_added_monitors!(nodes[0], 1);
 +
 +      let final_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
 +      nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &final_raa).unwrap();
 +      check_added_monitors!(nodes[1], 1);
 +
 +      expect_pending_htlcs_forwardable!(nodes[1]);
 +
 +      let events = nodes[1].node.get_and_clear_pending_events();
 +      assert_eq!(events.len(), 1);
 +      match events[0] {
 +              Event::PaymentReceived { payment_hash, amt } => {
 +                      assert_eq!(payment_hash, our_payment_hash);
 +                      assert_eq!(amt, 1000000);
 +              },
 +              _ => panic!("Unexpected event"),
 +      };
 +
-       claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_1);
++      claim_payment(&nodes[0], &[&nodes[1]], payment_preimage, 1_000_000);
 +}
 +
 +#[test]
 +fn test_monitor_update_fail_no_rebroadcast() {
 +      // Tests handling of a monitor update failure when no message rebroadcasting on
 +      // test_restore_channel_monitor() is required. Backported from
 +      // chanmon_fail_consistency fuzz tests.
 +      let mut nodes = create_network(2, &[None, None]);
 +      create_announced_chan_between_nodes(&nodes, 0, 1, LocalFeatures::new(), LocalFeatures::new());
 +
 +      let route = nodes[0].router.get_route(&nodes[1].node.get_our_node_id(), None, &Vec::new(), 1000000, TEST_FINAL_CLTV).unwrap();
 +      let (payment_preimage_1, our_payment_hash) = get_payment_preimage_hash!(nodes[0]);
 +      nodes[0].node.send_payment(route, our_payment_hash).unwrap();
 +      check_added_monitors!(nodes[0], 1);
 +
 +      let send_event = SendEvent::from_event(nodes[0].node.get_and_clear_pending_msg_events().remove(0));
 +      nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &send_event.msgs[0]).unwrap();
 +      let bs_raa = commitment_signed_dance!(nodes[1], nodes[0], send_event.commitment_msg, false, true, false, true);
 +
 +      *nodes[1].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
 +      if let msgs::LightningError { err, action: msgs::ErrorAction::IgnoreError } = nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &bs_raa).unwrap_err() {
 +              assert_eq!(err, "Failed to update ChannelMonitor");
 +      } else { panic!(); }
 +      assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
 +      assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
 +      check_added_monitors!(nodes[1], 1);
 +
 +      *nodes[1].chan_monitor.update_ret.lock().unwrap() = Ok(());
 +      nodes[1].node.test_restore_channel_monitor();
 +      assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
 +      check_added_monitors!(nodes[1], 1);
 +      expect_pending_htlcs_forwardable!(nodes[1]);
 +
 +      let events = nodes[1].node.get_and_clear_pending_events();
 +      assert_eq!(events.len(), 1);
 +      match events[0] {
 +              Event::PaymentReceived { payment_hash, .. } => {
 +                      assert_eq!(payment_hash, our_payment_hash);
 +              },
 +              _ => panic!("Unexpected event"),
 +      }
 +
-       send_payment(&nodes[0], &[&nodes[1]], 5000000);
++      claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_1, 1_000_000);
 +}
 +
 +#[test]
 +fn test_monitor_update_raa_while_paused() {
 +      // Tests handling of an RAA while monitor updating has already been marked failed.
 +      // Backported from chanmon_fail_consistency fuzz tests as this used to be broken.
 +      let mut nodes = create_network(2, &[None, None]);
 +      create_announced_chan_between_nodes(&nodes, 0, 1, LocalFeatures::new(), LocalFeatures::new());
 +
-       claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_1);
-       claim_payment(&nodes[1], &[&nodes[0]], payment_preimage_2);
++      send_payment(&nodes[0], &[&nodes[1]], 5000000, 5_000_000);
 +
 +      let route = nodes[0].router.get_route(&nodes[1].node.get_our_node_id(), None, &Vec::new(), 1000000, TEST_FINAL_CLTV).unwrap();
 +      let (payment_preimage_1, our_payment_hash_1) = get_payment_preimage_hash!(nodes[0]);
 +      nodes[0].node.send_payment(route, our_payment_hash_1).unwrap();
 +      check_added_monitors!(nodes[0], 1);
 +      let send_event_1 = SendEvent::from_event(nodes[0].node.get_and_clear_pending_msg_events().remove(0));
 +
 +      let route = nodes[1].router.get_route(&nodes[0].node.get_our_node_id(), None, &Vec::new(), 1000000, TEST_FINAL_CLTV).unwrap();
 +      let (payment_preimage_2, our_payment_hash_2) = get_payment_preimage_hash!(nodes[0]);
 +      nodes[1].node.send_payment(route, our_payment_hash_2).unwrap();
 +      check_added_monitors!(nodes[1], 1);
 +      let send_event_2 = SendEvent::from_event(nodes[1].node.get_and_clear_pending_msg_events().remove(0));
 +
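 +      // Deliver nodes[0]'s add+CS to nodes[1] and collect nodes[1]'s RAA so we can deliver it
 +      // to nodes[0] while its monitor updating is failed.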
 +      nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &send_event_1.msgs[0]).unwrap();
 +      nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &send_event_1.commitment_msg).unwrap();
 +      check_added_monitors!(nodes[1], 1);
 +      let bs_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
 +
 +      *nodes[0].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
 +      nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &send_event_2.msgs[0]).unwrap();
 +      if let msgs::LightningError { err, action: msgs::ErrorAction::IgnoreError } = nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &send_event_2.commitment_msg).unwrap_err() {
 +              assert_eq!(err, "Failed to update ChannelMonitor");
 +      } else { panic!(); }
 +      check_added_monitors!(nodes[0], 1);
 +
 +      if let msgs::LightningError { err, action: msgs::ErrorAction::IgnoreError } = nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_raa).unwrap_err() {
 +              assert_eq!(err, "Previous monitor update failure prevented responses to RAA");
 +      } else { panic!(); }
 +      check_added_monitors!(nodes[0], 1);
 +
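 +      // Restore monitor updating on nodes[0]; it should now send its queued RAA and CS together.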
 +      *nodes[0].chan_monitor.update_ret.lock().unwrap() = Ok(());
 +      nodes[0].node.test_restore_channel_monitor();
 +      check_added_monitors!(nodes[0], 1);
 +
 +      let as_update_raa = get_revoke_commit_msgs!(nodes[0], nodes[1].node.get_our_node_id());
 +      nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_update_raa.0).unwrap();
 +      check_added_monitors!(nodes[1], 1);
 +      let bs_cs = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
 +
 +      nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_update_raa.1).unwrap();
 +      check_added_monitors!(nodes[1], 1);
 +      let bs_second_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
 +
 +      nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_cs.commitment_signed).unwrap();
 +      check_added_monitors!(nodes[0], 1);
 +      let as_second_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
 +
 +      nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_second_raa).unwrap();
 +      check_added_monitors!(nodes[0], 1);
 +      expect_pending_htlcs_forwardable!(nodes[0]);
 +      expect_payment_received!(nodes[0], our_payment_hash_2, 1000000);
 +
 +      nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_second_raa).unwrap();
 +      check_added_monitors!(nodes[1], 1);
 +      expect_pending_htlcs_forwardable!(nodes[1]);
 +      expect_payment_received!(nodes[1], our_payment_hash_1, 1000000);
 +
-       send_payment(&nodes[0], &[&nodes[1], &nodes[2]], 5000000);
++      claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_1, 1_000_000);
++      claim_payment(&nodes[1], &[&nodes[0]], payment_preimage_2, 1_000_000);
 +}
 +
 +fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) {
 +      // Tests handling of a monitor update failure when processing an incoming RAA
 +      let mut nodes = create_network(3, &[None, None, None]);
 +      create_announced_chan_between_nodes(&nodes, 0, 1, LocalFeatures::new(), LocalFeatures::new());
 +      let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2, LocalFeatures::new(), LocalFeatures::new());
 +
 +      // Rebalance a bit so that we can send backwards from nodes[2] to nodes[0].
-               claim_payment(&nodes[2], &[&nodes[1], &nodes[0]], payment_preimage_4.unwrap());
++      send_payment(&nodes[0], &[&nodes[1], &nodes[2]], 5000000, 5_000_000);
 +
 +      // Route a first payment that we'll fail backwards
 +      let (_, payment_hash_1) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1000000);
 +
 +      // Fail the payment backwards, failing the monitor update on nodes[1]'s receipt of the RAA
 +      assert!(nodes[2].node.fail_htlc_backwards(&payment_hash_1));
 +      expect_pending_htlcs_forwardable!(nodes[2]);
 +      check_added_monitors!(nodes[2], 1);
 +
 +      let updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
 +      assert!(updates.update_add_htlcs.is_empty());
 +      assert!(updates.update_fulfill_htlcs.is_empty());
 +      assert_eq!(updates.update_fail_htlcs.len(), 1);
 +      assert!(updates.update_fail_malformed_htlcs.is_empty());
 +      assert!(updates.update_fee.is_none());
 +      nodes[1].node.handle_update_fail_htlc(&nodes[2].node.get_our_node_id(), &updates.update_fail_htlcs[0]).unwrap();
 +
 +      let bs_revoke_and_ack = commitment_signed_dance!(nodes[1], nodes[2], updates.commitment_signed, false, true, false, true);
 +      check_added_monitors!(nodes[0], 0);
 +
 +      // While the second channel is AwaitingRAA, forward a second payment to get it into the
 +      // holding cell.
 +      let (payment_preimage_2, payment_hash_2) = get_payment_preimage_hash!(nodes[0]);
 +      let route = nodes[0].router.get_route(&nodes[2].node.get_our_node_id(), None, &Vec::new(), 1000000, TEST_FINAL_CLTV).unwrap();
 +      nodes[0].node.send_payment(route, payment_hash_2).unwrap();
 +      check_added_monitors!(nodes[0], 1);
 +
 +      let mut send_event = SendEvent::from_event(nodes[0].node.get_and_clear_pending_msg_events().remove(0));
 +      nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &send_event.msgs[0]).unwrap();
 +      commitment_signed_dance!(nodes[1], nodes[0], send_event.commitment_msg, false);
 +
 +      expect_pending_htlcs_forwardable!(nodes[1]);
 +      check_added_monitors!(nodes[1], 0);
 +      assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
 +
 +      // Now fail monitor updating.
 +      *nodes[1].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
 +      if let msgs::LightningError { err, action: msgs::ErrorAction::IgnoreError } = nodes[1].node.handle_revoke_and_ack(&nodes[2].node.get_our_node_id(), &bs_revoke_and_ack).unwrap_err() {
 +              assert_eq!(err, "Failed to update ChannelMonitor");
 +      } else { panic!(); }
 +      assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
 +      assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
 +      check_added_monitors!(nodes[1], 1);
 +
 +      // Attempt to forward a third payment but fail due to the second channel being unavailable
 +      // for forwarding.
 +
 +      let (_, payment_hash_3) = get_payment_preimage_hash!(nodes[0]);
 +      let route = nodes[0].router.get_route(&nodes[2].node.get_our_node_id(), None, &Vec::new(), 1000000, TEST_FINAL_CLTV).unwrap();
 +      nodes[0].node.send_payment(route, payment_hash_3).unwrap();
 +      check_added_monitors!(nodes[0], 1);
 +
 +      *nodes[1].chan_monitor.update_ret.lock().unwrap() = Ok(()); // We succeed in updating the monitor for the first channel
 +      send_event = SendEvent::from_event(nodes[0].node.get_and_clear_pending_msg_events().remove(0));
 +      nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &send_event.msgs[0]).unwrap();
 +      commitment_signed_dance!(nodes[1], nodes[0], send_event.commitment_msg, false, true);
 +      check_added_monitors!(nodes[1], 0);
 +
 +      let mut events_2 = nodes[1].node.get_and_clear_pending_msg_events();
 +      assert_eq!(events_2.len(), 1);
 +      match events_2.remove(0) {
 +              MessageSendEvent::UpdateHTLCs { node_id, updates } => {
 +                      assert_eq!(node_id, nodes[0].node.get_our_node_id());
 +                      assert!(updates.update_fulfill_htlcs.is_empty());
 +                      assert_eq!(updates.update_fail_htlcs.len(), 1);
 +                      assert!(updates.update_fail_malformed_htlcs.is_empty());
 +                      assert!(updates.update_add_htlcs.is_empty());
 +                      assert!(updates.update_fee.is_none());
 +
 +                      nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fail_htlcs[0]).unwrap();
 +                      commitment_signed_dance!(nodes[0], nodes[1], updates.commitment_signed, false, true);
 +
 +                      let msg_events = nodes[0].node.get_and_clear_pending_msg_events();
 +                      assert_eq!(msg_events.len(), 1);
 +                      match msg_events[0] {
 +                              MessageSendEvent::PaymentFailureNetworkUpdate { update: msgs::HTLCFailChannelUpdate::ChannelUpdateMessage { ref msg }} => {
 +                                      assert_eq!(msg.contents.short_channel_id, chan_2.0.contents.short_channel_id);
 +                                      assert_eq!(msg.contents.flags & 2, 2); // temp disabled
 +                              },
 +                              _ => panic!("Unexpected event"),
 +                      }
 +
 +                      let events = nodes[0].node.get_and_clear_pending_events();
 +                      assert_eq!(events.len(), 1);
 +                      if let Event::PaymentFailed { payment_hash, rejected_by_dest, .. } = events[0] {
 +                              assert_eq!(payment_hash, payment_hash_3);
 +                              assert!(!rejected_by_dest);
 +                      } else { panic!("Unexpected event!"); }
 +              },
 +              _ => panic!("Unexpected event type!"),
 +      };
 +
 +      let (payment_preimage_4, payment_hash_4) = if test_ignore_second_cs {
 +              // Try to route another payment backwards from nodes[2] to make sure nodes[1] holds off on responding
 +              let (payment_preimage_4, payment_hash_4) = get_payment_preimage_hash!(nodes[0]);
 +              let route = nodes[2].router.get_route(&nodes[0].node.get_our_node_id(), None, &Vec::new(), 1000000, TEST_FINAL_CLTV).unwrap();
 +              nodes[2].node.send_payment(route, payment_hash_4).unwrap();
 +              check_added_monitors!(nodes[2], 1);
 +
 +              send_event = SendEvent::from_event(nodes[2].node.get_and_clear_pending_msg_events().remove(0));
 +              nodes[1].node.handle_update_add_htlc(&nodes[2].node.get_our_node_id(), &send_event.msgs[0]).unwrap();
 +              if let Err(msgs::LightningError{err, action: msgs::ErrorAction::IgnoreError }) = nodes[1].node.handle_commitment_signed(&nodes[2].node.get_our_node_id(), &send_event.commitment_msg) {
 +                      assert_eq!(err, "Previous monitor update failure prevented generation of RAA");
 +              } else { panic!(); }
 +              assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
 +              assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
 +              (Some(payment_preimage_4), Some(payment_hash_4))
 +      } else { (None, None) };
 +
 +      // Restore monitor updating, ensuring we immediately get a fail-back update and an
 +      // update_add update.
 +      *nodes[1].chan_monitor.update_ret.lock().unwrap() = Ok(());
 +      nodes[1].node.test_restore_channel_monitor();
 +      check_added_monitors!(nodes[1], 1);
 +      expect_pending_htlcs_forwardable!(nodes[1]);
 +      check_added_monitors!(nodes[1], 1);
 +
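 +      // nodes[1] should now have queued a fail-back update for nodes[0] and an update_add (plus,
 +      // if we held back the second CS, an RAA) for nodes[2].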
 +      let mut events_3 = nodes[1].node.get_and_clear_pending_msg_events();
 +      if test_ignore_second_cs {
 +              assert_eq!(events_3.len(), 3);
 +      } else {
 +              assert_eq!(events_3.len(), 2);
 +      }
 +
 +      // Note that the ordering of the events for different nodes is non-prescriptive, though the
 +      // two events that both go to nodes[2] have to stay in the same relative order.
 +      let messages_a = match events_3.pop().unwrap() {
 +              MessageSendEvent::UpdateHTLCs { node_id, mut updates } => {
 +                      assert_eq!(node_id, nodes[0].node.get_our_node_id());
 +                      assert!(updates.update_fulfill_htlcs.is_empty());
 +                      assert_eq!(updates.update_fail_htlcs.len(), 1);
 +                      assert!(updates.update_fail_malformed_htlcs.is_empty());
 +                      assert!(updates.update_add_htlcs.is_empty());
 +                      assert!(updates.update_fee.is_none());
 +                      (updates.update_fail_htlcs.remove(0), updates.commitment_signed)
 +              },
 +              _ => panic!("Unexpected event type!"),
 +      };
 +      let raa = if test_ignore_second_cs {
 +              match events_3.remove(1) {
 +                      MessageSendEvent::SendRevokeAndACK { node_id, msg } => {
 +                              assert_eq!(node_id, nodes[2].node.get_our_node_id());
 +                              Some(msg.clone())
 +                      },
 +                      _ => panic!("Unexpected event"),
 +              }
 +      } else { None };
 +      let send_event_b = SendEvent::from_event(events_3.remove(0));
 +      assert_eq!(send_event_b.node_id, nodes[2].node.get_our_node_id());
 +
 +      // Now deliver the new messages...
 +
 +      nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &messages_a.0).unwrap();
 +      commitment_signed_dance!(nodes[0], nodes[1], messages_a.1, false);
 +      let events_4 = nodes[0].node.get_and_clear_pending_events();
 +      assert_eq!(events_4.len(), 1);
 +      if let Event::PaymentFailed { payment_hash, rejected_by_dest, .. } = events_4[0] {
 +              assert_eq!(payment_hash, payment_hash_1);
 +              assert!(rejected_by_dest);
 +      } else { panic!("Unexpected event!"); }
 +
 +      nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &send_event_b.msgs[0]).unwrap();
 +      if test_ignore_second_cs {
 +              nodes[2].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &send_event_b.commitment_msg).unwrap();
 +              check_added_monitors!(nodes[2], 1);
 +              let bs_revoke_and_ack = get_event_msg!(nodes[2], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
 +              nodes[2].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &raa.unwrap()).unwrap();
 +              check_added_monitors!(nodes[2], 1);
 +              let bs_cs = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
 +              assert!(bs_cs.update_add_htlcs.is_empty());
 +              assert!(bs_cs.update_fail_htlcs.is_empty());
 +              assert!(bs_cs.update_fail_malformed_htlcs.is_empty());
 +              assert!(bs_cs.update_fulfill_htlcs.is_empty());
 +              assert!(bs_cs.update_fee.is_none());
 +
 +              nodes[1].node.handle_revoke_and_ack(&nodes[2].node.get_our_node_id(), &bs_revoke_and_ack).unwrap();
 +              check_added_monitors!(nodes[1], 1);
 +              let as_cs = get_htlc_update_msgs!(nodes[1], nodes[2].node.get_our_node_id());
 +              assert!(as_cs.update_add_htlcs.is_empty());
 +              assert!(as_cs.update_fail_htlcs.is_empty());
 +              assert!(as_cs.update_fail_malformed_htlcs.is_empty());
 +              assert!(as_cs.update_fulfill_htlcs.is_empty());
 +              assert!(as_cs.update_fee.is_none());
 +
 +              nodes[1].node.handle_commitment_signed(&nodes[2].node.get_our_node_id(), &bs_cs.commitment_signed).unwrap();
 +              check_added_monitors!(nodes[1], 1);
 +              let as_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[2].node.get_our_node_id());
 +
 +              nodes[2].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &as_cs.commitment_signed).unwrap();
 +              check_added_monitors!(nodes[2], 1);
 +              let bs_second_raa = get_event_msg!(nodes[2], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
 +
 +              nodes[2].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &as_raa).unwrap();
 +              check_added_monitors!(nodes[2], 1);
 +              assert!(nodes[2].node.get_and_clear_pending_msg_events().is_empty());
 +
 +              nodes[1].node.handle_revoke_and_ack(&nodes[2].node.get_our_node_id(), &bs_second_raa).unwrap();
 +              check_added_monitors!(nodes[1], 1);
 +              assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
 +      } else {
 +              commitment_signed_dance!(nodes[2], nodes[1], send_event_b.commitment_msg, false);
 +      }
 +
 +      expect_pending_htlcs_forwardable!(nodes[2]);
 +
 +      let events_6 = nodes[2].node.get_and_clear_pending_events();
 +      assert_eq!(events_6.len(), 1);
 +      match events_6[0] {
 +              Event::PaymentReceived { payment_hash, .. } => { assert_eq!(payment_hash, payment_hash_2); },
 +              _ => panic!("Unexpected event"),
 +      };
 +
 +      if test_ignore_second_cs {
 +              expect_pending_htlcs_forwardable!(nodes[1]);
 +              check_added_monitors!(nodes[1], 1);
 +
 +              send_event = SendEvent::from_node(&nodes[1]);
 +              assert_eq!(send_event.node_id, nodes[0].node.get_our_node_id());
 +              assert_eq!(send_event.msgs.len(), 1);
 +              nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &send_event.msgs[0]).unwrap();
 +              commitment_signed_dance!(nodes[0], nodes[1], send_event.commitment_msg, false);
 +
 +              expect_pending_htlcs_forwardable!(nodes[0]);
 +
 +              let events_9 = nodes[0].node.get_and_clear_pending_events();
 +              assert_eq!(events_9.len(), 1);
 +              match events_9[0] {
 +                      Event::PaymentReceived { payment_hash, .. } => assert_eq!(payment_hash, payment_hash_4.unwrap()),
 +                      _ => panic!("Unexpected event"),
 +              };
-       claim_payment(&nodes[0], &[&nodes[1], &nodes[2]], payment_preimage_2);
++              claim_payment(&nodes[2], &[&nodes[1], &nodes[0]], payment_preimage_4.unwrap(), 1_000_000);
 +      }
 +
-       assert!(nodes[2].node.claim_funds(our_payment_preimage));
++      claim_payment(&nodes[0], &[&nodes[1], &nodes[2]], payment_preimage_2, 1_000_000);
 +}
 +
 +#[test]
 +fn test_monitor_update_fail_raa() {
 +      do_test_monitor_update_fail_raa(false);
 +      do_test_monitor_update_fail_raa(true);
 +}
 +
 +#[test]
 +fn test_monitor_update_fail_reestablish() {
 +      // Simple test for message retransmission after a monitor update failure on a
 +      // channel_reestablish which generates a monitor update (from freeing holding cell
 +      // HTLCs).
 +      let mut nodes = create_network(3, &[None, None, None]);
 +      create_announced_chan_between_nodes(&nodes, 0, 1, LocalFeatures::new(), LocalFeatures::new());
 +      create_announced_chan_between_nodes(&nodes, 1, 2, LocalFeatures::new(), LocalFeatures::new());
 +
 +      let (our_payment_preimage, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1000000);
 +
 +      nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
 +      nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
 +
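 +      // nodes[2] claims while nodes[0] and nodes[1] are disconnected. nodes[1] processes the
 +      // fulfill but, with its peer offline, queues its own fulfill for nodes[0] in the holding cell.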
-       claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_1);
-       claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_2);
-       claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_3);
++      assert!(nodes[2].node.claim_funds(our_payment_preimage, 1_000_000));
 +      check_added_monitors!(nodes[2], 1);
 +      let mut updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
 +      assert!(updates.update_add_htlcs.is_empty());
 +      assert!(updates.update_fail_htlcs.is_empty());
 +      assert!(updates.update_fail_malformed_htlcs.is_empty());
 +      assert!(updates.update_fee.is_none());
 +      assert_eq!(updates.update_fulfill_htlcs.len(), 1);
 +      nodes[1].node.handle_update_fulfill_htlc(&nodes[2].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]).unwrap();
 +      check_added_monitors!(nodes[1], 1);
 +      assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
 +      commitment_signed_dance!(nodes[1], nodes[2], updates.commitment_signed, false);
 +
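 +      // Reconnect with monitor updating set to fail: freeing the held fulfill during
 +      // channel_reestablish handling will hit the failure.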
 +      *nodes[1].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
 +      nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id());
 +      nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id());
 +
 +      let as_reestablish = get_event_msg!(nodes[0], MessageSendEvent::SendChannelReestablish, nodes[1].node.get_our_node_id());
 +      let bs_reestablish = get_event_msg!(nodes[1], MessageSendEvent::SendChannelReestablish, nodes[0].node.get_our_node_id());
 +
 +      nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &bs_reestablish).unwrap();
 +
 +      if let msgs::LightningError { err, action: msgs::ErrorAction::IgnoreError } = nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &as_reestablish).unwrap_err() {
 +              assert_eq!(err, "Failed to update ChannelMonitor");
 +      } else { panic!(); }
 +      check_added_monitors!(nodes[1], 1);
 +
 +      nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
 +      nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
 +
 +      nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id());
 +      nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id());
 +
 +      assert!(as_reestablish == get_event_msg!(nodes[0], MessageSendEvent::SendChannelReestablish, nodes[1].node.get_our_node_id()));
 +      assert!(bs_reestablish == get_event_msg!(nodes[1], MessageSendEvent::SendChannelReestablish, nodes[0].node.get_our_node_id()));
 +
 +      nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &bs_reestablish).unwrap();
 +
 +      nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &as_reestablish).unwrap();
 +      check_added_monitors!(nodes[1], 0);
 +      assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
 +
 +      *nodes[1].chan_monitor.update_ret.lock().unwrap() = Ok(());
 +      nodes[1].node.test_restore_channel_monitor();
 +      check_added_monitors!(nodes[1], 1);
 +
 +      updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
 +      assert!(updates.update_add_htlcs.is_empty());
 +      assert!(updates.update_fail_htlcs.is_empty());
 +      assert!(updates.update_fail_malformed_htlcs.is_empty());
 +      assert!(updates.update_fee.is_none());
 +      assert_eq!(updates.update_fulfill_htlcs.len(), 1);
 +      nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]).unwrap();
 +      commitment_signed_dance!(nodes[0], nodes[1], updates.commitment_signed, false);
 +
 +      let events = nodes[0].node.get_and_clear_pending_events();
 +      assert_eq!(events.len(), 1);
 +      match events[0] {
 +              Event::PaymentSent { payment_preimage, .. } => assert_eq!(payment_preimage, our_payment_preimage),
 +              _ => panic!("Unexpected event"),
 +      }
 +}
 +
 +#[test]
 +fn raa_no_response_awaiting_raa_state() {
 +      // This is a rather convoluted test which ensures that if handling of an RAA does not happen
 +      // due to a previous monitor update failure, we still set AwaitingRemoteRevoke on the channel
 +      // in question (assuming it intends to respond with a CS after monitor updating is restored).
 +      // Backported from chanmon_fail_consistency fuzz tests as this used to be broken.
 +      let mut nodes = create_network(2, &[None, None]);
 +      create_announced_chan_between_nodes(&nodes, 0, 1, LocalFeatures::new(), LocalFeatures::new());
 +
 +      let route = nodes[0].router.get_route(&nodes[1].node.get_our_node_id(), None, &Vec::new(), 1000000, TEST_FINAL_CLTV).unwrap();
 +      let (payment_preimage_1, payment_hash_1) = get_payment_preimage_hash!(nodes[0]);
 +      let (payment_preimage_2, payment_hash_2) = get_payment_preimage_hash!(nodes[0]);
 +      let (payment_preimage_3, payment_hash_3) = get_payment_preimage_hash!(nodes[0]);
 +
 +      // Queue up two payments - one will be delivered right away, one immediately goes into the
 +      // holding cell as nodes[0] is AwaitingRAA. Ultimately this allows us to deliver an RAA
 +      // immediately after a CS. By failing the monitor update triggered by the CS (which
 +      // requires only an RAA response due to AwaitingRAA) we can deliver the RAA and require CS
 +      // generation during RAA handling while in the monitor-update-failed state.
 +      nodes[0].node.send_payment(route.clone(), payment_hash_1).unwrap();
 +      check_added_monitors!(nodes[0], 1);
 +      nodes[0].node.send_payment(route.clone(), payment_hash_2).unwrap();
 +      check_added_monitors!(nodes[0], 0);
 +
 +      let mut events = nodes[0].node.get_and_clear_pending_msg_events();
 +      assert_eq!(events.len(), 1);
 +      let payment_event = SendEvent::from_event(events.pop().unwrap());
 +      nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]).unwrap();
 +      nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event.commitment_msg).unwrap();
 +      check_added_monitors!(nodes[1], 1);
 +
 +      let bs_responses = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
 +      nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_responses.0).unwrap();
 +      check_added_monitors!(nodes[0], 1);
 +      let mut events = nodes[0].node.get_and_clear_pending_msg_events();
 +      assert_eq!(events.len(), 1);
 +      let payment_event = SendEvent::from_event(events.pop().unwrap());
 +
 +      nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_responses.1).unwrap();
 +      check_added_monitors!(nodes[0], 1);
 +      let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
 +
 +      // Now we have a CS queued up which adds a new HTLC (which will need a RAA/CS response from
 +      // nodes[1]) followed by an RAA. Fail the monitor updating prior to the CS, deliver the RAA,
 +      // then restore channel monitor updates.
 +      *nodes[1].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
 +      nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]).unwrap();
 +      if let msgs::LightningError { err, action: msgs::ErrorAction::IgnoreError } = nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event.commitment_msg).unwrap_err() {
 +              assert_eq!(err, "Failed to update ChannelMonitor");
 +      } else { panic!(); }
 +      check_added_monitors!(nodes[1], 1);
 +
 +      if let msgs::LightningError { err, action: msgs::ErrorAction::IgnoreError } = nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa).unwrap_err() {
 +              assert_eq!(err, "Previous monitor update failure prevented responses to RAA");
 +      } else { panic!(); }
 +      check_added_monitors!(nodes[1], 1);
 +
 +      *nodes[1].chan_monitor.update_ret.lock().unwrap() = Ok(());
 +      nodes[1].node.test_restore_channel_monitor();
 +      // nodes[1] should be AwaitingRAA here!
 +      check_added_monitors!(nodes[1], 1);
 +      let bs_responses = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
 +      expect_pending_htlcs_forwardable!(nodes[1]);
 +      expect_payment_received!(nodes[1], payment_hash_1, 1000000);
 +
 +      // We send a third payment here, which is somewhat of a redundant test, but the
 +      // chanmon_fail_consistency test required it to actually find the bug (by seeing out-of-sync
 +      // commitment transaction states) whereas here we can explicitly check for it.
 +      nodes[0].node.send_payment(route.clone(), payment_hash_3).unwrap();
 +      check_added_monitors!(nodes[0], 0);
 +      assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
 +
 +      nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_responses.0).unwrap();
 +      check_added_monitors!(nodes[0], 1);
 +      let mut events = nodes[0].node.get_and_clear_pending_msg_events();
 +      assert_eq!(events.len(), 1);
 +      let payment_event = SendEvent::from_event(events.pop().unwrap());
 +
 +      nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_responses.1).unwrap();
 +      check_added_monitors!(nodes[0], 1);
 +      let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
 +
 +      nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]).unwrap();
 +      nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event.commitment_msg).unwrap();
 +      check_added_monitors!(nodes[1], 1);
 +      let bs_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
 +
 +      // Finally deliver the RAA to nodes[1] which results in a CS response to the last update
 +      nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa).unwrap();
 +      check_added_monitors!(nodes[1], 1);
 +      expect_pending_htlcs_forwardable!(nodes[1]);
 +      expect_payment_received!(nodes[1], payment_hash_2, 1000000);
 +      let bs_update = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
 +
 +      nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_raa).unwrap();
 +      check_added_monitors!(nodes[0], 1);
 +
 +      nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_update.commitment_signed).unwrap();
 +      check_added_monitors!(nodes[0], 1);
 +      let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
 +
 +      nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa).unwrap();
 +      check_added_monitors!(nodes[1], 1);
 +      expect_pending_htlcs_forwardable!(nodes[1]);
 +      expect_payment_received!(nodes[1], payment_hash_3, 1000000);
 +
-       assert!(nodes[1].node.claim_funds(payment_preimage_1));
++      claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_1, 1_000_000);
++      claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_2, 1_000_000);
++      claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_3, 1_000_000);
 +}
 +
 +#[test]
 +fn claim_while_disconnected_monitor_update_fail() {
 +      // Test for claiming a payment while disconnected and then having the resulting
 +      // channel-update-generated monitor update fail. This isn't a particularly contrived case
 +      // for nodes with network instability.
 +      // Backported from chanmon_fail_consistency fuzz tests as an unmerged version of the
 +      // handling code introduced a regression in this test (specifically, it caught a removal of
 +      // the channel_reestablish handling which ensured the ordering was sensible given the
 +      // messages used).
 +      let mut nodes = create_network(2, &[None, None]);
 +      create_announced_chan_between_nodes(&nodes, 0, 1, LocalFeatures::new(), LocalFeatures::new());
 +
 +      // Forward a payment for B to claim
 +      let (payment_preimage_1, _) = route_payment(&nodes[0], &[&nodes[1]], 1000000);
 +
 +      nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
 +      nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
 +
-       claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_2);
++      assert!(nodes[1].node.claim_funds(payment_preimage_1, 1_000_000));
 +      check_added_monitors!(nodes[1], 1);
 +
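 +      // The claim succeeds and persists a monitor update, but with the peer disconnected the
 +      // resulting update_fulfill sits in the holding cell.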
 +      nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id());
 +      nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id());
 +
 +      let as_reconnect = get_event_msg!(nodes[0], MessageSendEvent::SendChannelReestablish, nodes[1].node.get_our_node_id());
 +      let bs_reconnect = get_event_msg!(nodes[1], MessageSendEvent::SendChannelReestablish, nodes[0].node.get_our_node_id());
 +
 +      nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &bs_reconnect).unwrap();
 +      assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
 +
 +      // Now deliver nodes[0]'s reestablish, freeing the claim from the holding cell, but fail
 +      // the monitor update.
 +      *nodes[1].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
 +
 +      if let msgs::LightningError { err, action: msgs::ErrorAction::IgnoreError } = nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &as_reconnect).unwrap_err() {
 +              assert_eq!(err, "Failed to update ChannelMonitor");
 +      } else { panic!(); }
 +      check_added_monitors!(nodes[1], 1);
 +      assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
 +
 +      // Send a second payment from A to B, resulting in a commitment update that gets swallowed with
 +      // the monitor still failed
 +      let route = nodes[0].router.get_route(&nodes[1].node.get_our_node_id(), None, &Vec::new(), 1000000, TEST_FINAL_CLTV).unwrap();
 +      let (payment_preimage_2, payment_hash_2) = get_payment_preimage_hash!(nodes[0]);
 +      nodes[0].node.send_payment(route, payment_hash_2).unwrap();
 +      check_added_monitors!(nodes[0], 1);
 +
 +      let as_updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
 +      nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &as_updates.update_add_htlcs[0]).unwrap();
 +      if let msgs::LightningError { err, action: msgs::ErrorAction::IgnoreError } = nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_updates.commitment_signed).unwrap_err() {
 +              assert_eq!(err, "Previous monitor update failure prevented generation of RAA");
 +      } else { panic!(); }
 +      // Note that nodes[1] not updating the monitor here is OK - it won't take action on the new
 +      // HTLC until we've test_restore_channel_monitor'd and updated for the new commitment
 +      // transaction.
 +
 +      // Now un-fail the monitor, which will result in B sending its original commitment update,
 +      // receiving the commitment update from A, and the resulting commitment dances.
 +      *nodes[1].chan_monitor.update_ret.lock().unwrap() = Ok(());
 +      nodes[1].node.test_restore_channel_monitor();
 +      check_added_monitors!(nodes[1], 1);
 +
 +      let bs_msgs = nodes[1].node.get_and_clear_pending_msg_events();
 +      assert_eq!(bs_msgs.len(), 2);
 +
 +      match bs_msgs[0] {
 +              MessageSendEvent::UpdateHTLCs { ref node_id, ref updates } => {
 +                      assert_eq!(*node_id, nodes[0].node.get_our_node_id());
 +                      nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]).unwrap();
 +                      nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &updates.commitment_signed).unwrap();
 +                      check_added_monitors!(nodes[0], 1);
 +
 +                      let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
 +                      nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa).unwrap();
 +                      check_added_monitors!(nodes[1], 1);
 +              },
 +              _ => panic!("Unexpected event"),
 +      }
 +
 +      match bs_msgs[1] {
 +              MessageSendEvent::SendRevokeAndACK { ref node_id, ref msg } => {
 +                      assert_eq!(*node_id, nodes[0].node.get_our_node_id());
 +                      nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), msg).unwrap();
 +                      check_added_monitors!(nodes[0], 1);
 +              },
 +              _ => panic!("Unexpected event"),
 +      }
 +
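 +      // Both sides now owe a commitment_signed for their latest updates; run the remaining
 +      // CS/RAA exchanges by hand.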
 +      let as_commitment = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
 +
 +      let bs_commitment = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
 +      nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_commitment.commitment_signed).unwrap();
 +      check_added_monitors!(nodes[0], 1);
 +      let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
 +
 +      nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_commitment.commitment_signed).unwrap();
 +      check_added_monitors!(nodes[1], 1);
 +      let bs_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
 +      nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa).unwrap();
 +      check_added_monitors!(nodes[1], 1);
 +
 +      expect_pending_htlcs_forwardable!(nodes[1]);
 +      expect_payment_received!(nodes[1], payment_hash_2, 1000000);
 +
 +      nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_raa).unwrap();
 +      check_added_monitors!(nodes[0], 1);
 +
 +      let events = nodes[0].node.get_and_clear_pending_events();
 +      assert_eq!(events.len(), 1);
 +      match events[0] {
 +              Event::PaymentSent { ref payment_preimage } => {
 +                      assert_eq!(*payment_preimage, payment_preimage_1);
 +              },
 +              _ => panic!("Unexpected event"),
 +      }
 +
-       claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_1);
++      claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_2, 1_000_000);
 +}
 +
 +#[test]
 +fn monitor_failed_no_reestablish_response() {
 +      // Test for receiving a channel_reestablish after a monitor update failure resulted in no
 +      // response to a commitment_signed.
 +      // Backported from chanmon_fail_consistency fuzz tests as it caught a long-standing
 +      // debug_assert!() failure in channel_reestablish handling.
 +      let mut nodes = create_network(2, &[None, None]);
 +      create_announced_chan_between_nodes(&nodes, 0, 1, LocalFeatures::new(), LocalFeatures::new());
 +
 +      // Route the payment and deliver the initial commitment_signed (with a monitor update failure
 +      // on receipt).
 +      let route = nodes[0].router.get_route(&nodes[1].node.get_our_node_id(), None, &Vec::new(), 1000000, TEST_FINAL_CLTV).unwrap();
 +      let (payment_preimage_1, payment_hash_1) = get_payment_preimage_hash!(nodes[0]);
 +      nodes[0].node.send_payment(route, payment_hash_1).unwrap();
 +      check_added_monitors!(nodes[0], 1);
 +
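 +      // Fail the monitor update on nodes[1]'s receipt of the commitment_signed so that it
 +      // generates no RAA/CS response before the disconnect.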
 +      *nodes[1].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
 +      let mut events = nodes[0].node.get_and_clear_pending_msg_events();
 +      assert_eq!(events.len(), 1);
 +      let payment_event = SendEvent::from_event(events.pop().unwrap());
 +      nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]).unwrap();
 +      if let msgs::LightningError { err, action: msgs::ErrorAction::IgnoreError } = nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event.commitment_msg).unwrap_err() {
 +              assert_eq!(err, "Failed to update ChannelMonitor");
 +      } else { panic!(); }
 +      check_added_monitors!(nodes[1], 1);
 +
 +      // Now disconnect and immediately reconnect, delivering the channel_reestablish while nodes[1]
 +      // is still failing to update monitors.
 +      nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
 +      nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
 +
 +      nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id());
 +      nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id());
 +
 +      let as_reconnect = get_event_msg!(nodes[0], MessageSendEvent::SendChannelReestablish, nodes[1].node.get_our_node_id());
 +      let bs_reconnect = get_event_msg!(nodes[1], MessageSendEvent::SendChannelReestablish, nodes[0].node.get_our_node_id());
 +
 +      nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &as_reconnect).unwrap();
 +      nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &bs_reconnect).unwrap();
 +
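 +      // On restore, nodes[1] should send the RAA and CS it previously withheld, despite having
 +      // given no response to the channel_reestablish.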
 +      *nodes[1].chan_monitor.update_ret.lock().unwrap() = Ok(());
 +      nodes[1].node.test_restore_channel_monitor();
 +      check_added_monitors!(nodes[1], 1);
 +      let bs_responses = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
 +
 +      nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_responses.0).unwrap();
 +      check_added_monitors!(nodes[0], 1);
 +      nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_responses.1).unwrap();
 +      check_added_monitors!(nodes[0], 1);
 +
 +      let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
 +      nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa).unwrap();
 +      check_added_monitors!(nodes[1], 1);
 +
 +      expect_pending_htlcs_forwardable!(nodes[1]);
 +      expect_payment_received!(nodes[1], payment_hash_1, 1000000);
 +
-       claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_1);
-       claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_2);
++      claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_1, 1_000_000);
 +}
 +
 +#[test]
 +fn first_message_on_recv_ordering() {
 +      // Test that if the initial generator of a monitor-update-frozen state doesn't generate
 +      // messages, we're willing to flip the order of response messages if necessary in response
 +      // to a commitment_signed which needs to send an RAA first.
 +      // At a high level, our goal is to fail monitor updating in response to an RAA which needs no
 +      // response and then handle a CS while in the failed state, requiring an RAA followed by a CS
 +      // response. To do this, we start routing two payments, with the final RAA for the first being
 +      // delivered while B is in AwaitingRAA, hence when we deliver the CS for the second B will
 +      // have no pending response but will want to send an RAA/CS (with the updates for the second
 +      // payment applied).
 +      // Backported from chanmon_fail_consistency fuzz tests as it caught a bug here.
 +      let mut nodes = create_network(2, &[None, None]);
 +      create_announced_chan_between_nodes(&nodes, 0, 1, LocalFeatures::new(), LocalFeatures::new());
 +
 +      // Route the first payment outbound, holding the last RAA for B until we are set up so that we
 +      // can deliver it and fail the monitor update.
 +      let route = nodes[0].router.get_route(&nodes[1].node.get_our_node_id(), None, &Vec::new(), 1000000, TEST_FINAL_CLTV).unwrap();
 +      let (payment_preimage_1, payment_hash_1) = get_payment_preimage_hash!(nodes[0]);
 +      nodes[0].node.send_payment(route, payment_hash_1).unwrap();
 +      check_added_monitors!(nodes[0], 1);
 +
 +      let mut events = nodes[0].node.get_and_clear_pending_msg_events();
 +      assert_eq!(events.len(), 1);
 +      let payment_event = SendEvent::from_event(events.pop().unwrap());
 +      assert_eq!(payment_event.node_id, nodes[1].node.get_our_node_id());
 +      nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]).unwrap();
 +      nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event.commitment_msg).unwrap();
 +      check_added_monitors!(nodes[1], 1);
 +      let bs_responses = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
 +
 +      nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_responses.0).unwrap();
 +      check_added_monitors!(nodes[0], 1);
 +      nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_responses.1).unwrap();
 +      check_added_monitors!(nodes[0], 1);
 +
 +      let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
 +
 +      // Route the second payment, generating an update_add_htlc/commitment_signed
 +      let route = nodes[0].router.get_route(&nodes[1].node.get_our_node_id(), None, &Vec::new(), 1000000, TEST_FINAL_CLTV).unwrap();
 +      let (payment_preimage_2, payment_hash_2) = get_payment_preimage_hash!(nodes[0]);
 +      nodes[0].node.send_payment(route, payment_hash_2).unwrap();
 +      check_added_monitors!(nodes[0], 1);
 +      let mut events = nodes[0].node.get_and_clear_pending_msg_events();
 +      assert_eq!(events.len(), 1);
 +      let payment_event = SendEvent::from_event(events.pop().unwrap());
 +      assert_eq!(payment_event.node_id, nodes[1].node.get_our_node_id());
 +
 +      *nodes[1].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
 +
 +      // Deliver the final RAA for the first payment, which does not require a response. RAAs
 +      // generally require a commitment_signed in response, so the fact that we expect the opposite
 +      // kind of response to the next message also tests resetting the delivery order.
 +      if let msgs::LightningError { err, action: msgs::ErrorAction::IgnoreError } = nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa).unwrap_err() {
 +              assert_eq!(err, "Failed to update ChannelMonitor");
 +      } else { panic!(); }
 +      check_added_monitors!(nodes[1], 1);
 +
 +      // Now deliver the update_add_htlc/commitment_signed for the second payment, which does need an
 +      // RAA/CS response, which should be generated when we call test_restore_channel_monitor (with
 +      // the appropriate HTLC acceptance).
 +      nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]).unwrap();
 +      if let msgs::LightningError { err, action: msgs::ErrorAction::IgnoreError } = nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event.commitment_msg).unwrap_err() {
 +              assert_eq!(err, "Previous monitor update failure prevented generation of RAA");
 +      } else { panic!(); }
 +
 +      *nodes[1].chan_monitor.update_ret.lock().unwrap() = Ok(());
 +      nodes[1].node.test_restore_channel_monitor();
 +      check_added_monitors!(nodes[1], 1);
 +
 +      expect_pending_htlcs_forwardable!(nodes[1]);
 +      expect_payment_received!(nodes[1], payment_hash_1, 1000000);
 +
 +      let bs_responses = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
 +      nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_responses.0).unwrap();
 +      check_added_monitors!(nodes[0], 1);
 +      nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_responses.1).unwrap();
 +      check_added_monitors!(nodes[0], 1);
 +
 +      let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
 +      nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa).unwrap();
 +      check_added_monitors!(nodes[1], 1);
 +
 +      expect_pending_htlcs_forwardable!(nodes[1]);
 +      expect_payment_received!(nodes[1], payment_hash_2, 1000000);
 +
-       send_payment(&nodes[0], &[&nodes[1], &nodes[2]], 5000000);
++      claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_1, 1_000_000);
++      claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_2, 1_000_000);
 +}
 +
 +#[test]
 +fn test_monitor_update_fail_claim() {
 +      // Basic test for monitor update failures when processing claim_funds calls.
 +      // We set up a simple 3-node network, sending a payment from A to B and failing B's monitor
 +      // update to claim the payment. We then send a payment C->B->A, making the forward of this
 +      // payment from B to A fail due to the paused channel. Finally, we restore the channel monitor
 +      // updating and claim the payment on B.
 +      let mut nodes = create_network(3, &[None, None, None]);
 +      let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1, LocalFeatures::new(), LocalFeatures::new());
 +      create_announced_chan_between_nodes(&nodes, 1, 2, LocalFeatures::new(), LocalFeatures::new());
 +
 +      // Rebalance a bit so that we can send backwards from nodes[2] to nodes[0].
-       assert!(nodes[1].node.claim_funds(payment_preimage_1));
++      send_payment(&nodes[0], &[&nodes[1], &nodes[2]], 5000000, 5_000_000);
 +
 +      let (payment_preimage_1, _) = route_payment(&nodes[0], &[&nodes[1]], 1000000);
 +
 +      *nodes[1].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
-       send_payment(&nodes[0], &[&nodes[1], &nodes[2]], 5000000);
++      assert!(nodes[1].node.claim_funds(payment_preimage_1, 1_000_000));
 +      check_added_monitors!(nodes[1], 1);
 +
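 +      // While the 0<->1 channel is paused by the failed update, route a payment from nodes[2]
 +      // which nodes[1] will have to fail back.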
 +      let route = nodes[2].router.get_route(&nodes[0].node.get_our_node_id(), None, &Vec::new(), 1000000, TEST_FINAL_CLTV).unwrap();
 +      let (_, payment_hash_2) = get_payment_preimage_hash!(nodes[0]);
 +      nodes[2].node.send_payment(route, payment_hash_2).unwrap();
 +      check_added_monitors!(nodes[2], 1);
 +
 +      // Successfully update the monitor on the 1<->2 channel, but the 0<->1 channel should still
 +      // be paused, so the forward shouldn't succeed until we call test_restore_channel_monitor().
 +      *nodes[1].chan_monitor.update_ret.lock().unwrap() = Ok(());
 +
 +      let mut events = nodes[2].node.get_and_clear_pending_msg_events();
 +      assert_eq!(events.len(), 1);
 +      let payment_event = SendEvent::from_event(events.pop().unwrap());
 +      nodes[1].node.handle_update_add_htlc(&nodes[2].node.get_our_node_id(), &payment_event.msgs[0]).unwrap();
 +      commitment_signed_dance!(nodes[1], nodes[2], payment_event.commitment_msg, false, true);
 +
 +      let bs_fail_update = get_htlc_update_msgs!(nodes[1], nodes[2].node.get_our_node_id());
 +      nodes[2].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &bs_fail_update.update_fail_htlcs[0]).unwrap();
 +      commitment_signed_dance!(nodes[2], nodes[1], bs_fail_update.commitment_signed, false, true);
 +
 +      let msg_events = nodes[2].node.get_and_clear_pending_msg_events();
 +      assert_eq!(msg_events.len(), 1);
 +      match msg_events[0] {
 +              MessageSendEvent::PaymentFailureNetworkUpdate { update: msgs::HTLCFailChannelUpdate::ChannelUpdateMessage { ref msg }} => {
 +                      assert_eq!(msg.contents.short_channel_id, chan_1.0.contents.short_channel_id);
 +                      assert_eq!(msg.contents.flags & 2, 2); // temp disabled
 +              },
 +              _ => panic!("Unexpected event"),
 +      }
 +
 +      let events = nodes[2].node.get_and_clear_pending_events();
 +      assert_eq!(events.len(), 1);
 +      if let Event::PaymentFailed { payment_hash, rejected_by_dest, .. } = events[0] {
 +              assert_eq!(payment_hash, payment_hash_2);
 +              assert!(!rejected_by_dest);
 +      } else { panic!("Unexpected event!"); }
 +
 +      // Now restore monitor updating on the 0<->1 channel and claim the funds on B.
 +      nodes[1].node.test_restore_channel_monitor();
 +      check_added_monitors!(nodes[1], 1);
 +
 +      let bs_fulfill_update = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
 +      nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_fulfill_update.update_fulfill_htlcs[0]).unwrap();
 +      commitment_signed_dance!(nodes[0], nodes[1], bs_fulfill_update.commitment_signed, false);
 +
 +      let events = nodes[0].node.get_and_clear_pending_events();
 +      assert_eq!(events.len(), 1);
 +      if let Event::PaymentSent { payment_preimage, .. } = events[0] {
 +              assert_eq!(payment_preimage, payment_preimage_1);
 +      } else { panic!("Unexpected event!"); }
 +}
 +
 +#[test]
 +fn test_monitor_update_on_pending_forwards() {
 +      // Basic test for monitor update failures when processing pending HTLC fail/add forwards.
 +      // We do this with a simple 3-node network, sending a payment from A to C and one from C to A.
 +      // The payment from A to C will be failed by C and pending a back-fail to A, while the payment
 +      // from C to A will be pending a forward to A.
 +      let mut nodes = create_network(3, &[None, None, None]);
 +      create_announced_chan_between_nodes(&nodes, 0, 1, LocalFeatures::new(), LocalFeatures::new());
 +      create_announced_chan_between_nodes(&nodes, 1, 2, LocalFeatures::new(), LocalFeatures::new());
 +
 +      // Rebalance a bit so that we can send backwards from nodes[2] to nodes[0].
-       claim_payment(&nodes[2], &[&nodes[1], &nodes[0]], payment_preimage_2);
++      send_payment(&nodes[0], &[&nodes[1], &nodes[2]], 5000000, 5_000_000);
 +
 +      let (_, payment_hash_1) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1000000);
 +      assert!(nodes[2].node.fail_htlc_backwards(&payment_hash_1));
 +      expect_pending_htlcs_forwardable!(nodes[2]);
 +      check_added_monitors!(nodes[2], 1);
 +
 +      let cs_fail_update = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
 +      nodes[1].node.handle_update_fail_htlc(&nodes[2].node.get_our_node_id(), &cs_fail_update.update_fail_htlcs[0]).unwrap();
 +      commitment_signed_dance!(nodes[1], nodes[2], cs_fail_update.commitment_signed, true, true);
 +      assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
 +
 +      let route = nodes[2].router.get_route(&nodes[0].node.get_our_node_id(), None, &Vec::new(), 1000000, TEST_FINAL_CLTV).unwrap();
 +      let (payment_preimage_2, payment_hash_2) = get_payment_preimage_hash!(nodes[0]);
 +      nodes[2].node.send_payment(route, payment_hash_2).unwrap();
 +      check_added_monitors!(nodes[2], 1);
 +
 +      let mut events = nodes[2].node.get_and_clear_pending_msg_events();
 +      assert_eq!(events.len(), 1);
 +      let payment_event = SendEvent::from_event(events.pop().unwrap());
 +      nodes[1].node.handle_update_add_htlc(&nodes[2].node.get_our_node_id(), &payment_event.msgs[0]).unwrap();
 +      commitment_signed_dance!(nodes[1], nodes[2], payment_event.commitment_msg, false);
 +
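 +      // Fail monitor updating while nodes[1] processes its pending HTLCs - one fail back to
 +      // nodes[0] and one forward to nodes[0] - holding both until the monitor is restored.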
 +      *nodes[1].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
 +      expect_pending_htlcs_forwardable!(nodes[1]);
 +      check_added_monitors!(nodes[1], 1);
 +      assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
 +
 +      *nodes[1].chan_monitor.update_ret.lock().unwrap() = Ok(());
 +      nodes[1].node.test_restore_channel_monitor();
 +      check_added_monitors!(nodes[1], 1);
 +
 +      let bs_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
 +      nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &bs_updates.update_fail_htlcs[0]).unwrap();
 +      nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &bs_updates.update_add_htlcs[0]).unwrap();
 +      commitment_signed_dance!(nodes[0], nodes[1], bs_updates.commitment_signed, false, true);
 +
 +      let events = nodes[0].node.get_and_clear_pending_events();
 +      assert_eq!(events.len(), 2);
 +      if let Event::PaymentFailed { payment_hash, rejected_by_dest, .. } = events[0] {
 +              assert_eq!(payment_hash, payment_hash_1);
 +              assert!(rejected_by_dest);
 +      } else { panic!("Unexpected event!"); }
 +      match events[1] {
 +              Event::PendingHTLCsForwardable { .. } => { },
 +              _ => panic!("Unexpected event"),
 +      };
 +      nodes[0].node.process_pending_htlc_forwards();
 +      expect_payment_received!(nodes[0], payment_hash_2, 1000000);
 +
-       assert!(nodes[1].node.claim_funds(payment_preimage_1));
++      claim_payment(&nodes[2], &[&nodes[1], &nodes[0]], payment_preimage_2, 1_000_000);
 +}
 +
 +#[test]
 +fn monitor_update_claim_fail_no_response() {
 +      // Test for claim_funds resulting in both a monitor update failure and no message response (due
 +      // to the channel being AwaitingRAA).
 +      // Backported from chanmon_fail_consistency fuzz tests as an unmerged version of the handling
 +      // code was broken.
 +      let mut nodes = create_network(2, &[None, None]);
 +      create_announced_chan_between_nodes(&nodes, 0, 1, LocalFeatures::new(), LocalFeatures::new());
 +
 +      // Forward a payment for B to claim
 +      let (payment_preimage_1, _) = route_payment(&nodes[0], &[&nodes[1]], 1000000);
 +
 +      // Now start forwarding a second payment, skipping the last RAA so B is in AwaitingRAA
 +      let route = nodes[0].router.get_route(&nodes[1].node.get_our_node_id(), None, &Vec::new(), 1000000, TEST_FINAL_CLTV).unwrap();
 +      let (payment_preimage_2, payment_hash_2) = get_payment_preimage_hash!(nodes[0]);
 +      nodes[0].node.send_payment(route, payment_hash_2).unwrap();
 +      check_added_monitors!(nodes[0], 1);
 +
 +      let mut events = nodes[0].node.get_and_clear_pending_msg_events();
 +      assert_eq!(events.len(), 1);
 +      let payment_event = SendEvent::from_event(events.pop().unwrap());
 +      nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]).unwrap();
 +      let as_raa = commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false, true, false, true);
 +
 +      *nodes[1].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
-       claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_2);
++      assert!(nodes[1].node.claim_funds(payment_preimage_1, 1_000_000));
 +      check_added_monitors!(nodes[1], 1);
 +      assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
 +
 +      *nodes[1].chan_monitor.update_ret.lock().unwrap() = Ok(());
 +      nodes[1].node.test_restore_channel_monitor();
 +      check_added_monitors!(nodes[1], 1);
 +      assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
 +
 +      nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa).unwrap();
 +      check_added_monitors!(nodes[1], 1);
 +      expect_pending_htlcs_forwardable!(nodes[1]);
 +      expect_payment_received!(nodes[1], payment_hash_2, 1000000);
 +
 +      let bs_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
 +      nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_updates.update_fulfill_htlcs[0]).unwrap();
 +      commitment_signed_dance!(nodes[0], nodes[1], bs_updates.commitment_signed, false);
 +
 +      let events = nodes[0].node.get_and_clear_pending_events();
 +      assert_eq!(events.len(), 1);
 +      match events[0] {
 +              Event::PaymentSent { ref payment_preimage } => {
 +                      assert_eq!(*payment_preimage, payment_preimage_1);
 +              },
 +              _ => panic!("Unexpected event"),
 +      }
 +
-       send_payment(&nodes[0], &[&nodes[1]], 8000000);
++      claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_2, 1_000_000);
 +}
 +
 +// Note that restore_between_fails with !fail_on_generate is useless
 +// Also note that !fail_on_generate && !fail_on_signed is useless
 +// Finally, note that !fail_on_signed is not possible with fail_on_generate && !restore_between_fails
 +// confirm_a_first and restore_b_before_conf are wholly unrelated to earlier bools and
 +// restore_b_before_conf has no meaning if !confirm_a_first
 +fn do_during_funding_monitor_fail(fail_on_generate: bool, restore_between_fails: bool, fail_on_signed: bool, confirm_a_first: bool, restore_b_before_conf: bool) {
 +      // Test that if the monitor update generated by funding_transaction_generated fails we continue
 +      // the channel setup happily after the update is restored.
 +      let mut nodes = create_network(2, &[None, None]);
 +
 +      nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 43).unwrap();
 +      nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), LocalFeatures::new(), &get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id())).unwrap();
 +      nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), LocalFeatures::new(), &get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id())).unwrap();
 +
 +      let (temporary_channel_id, funding_tx, funding_output) = create_funding_transaction(&nodes[0], 100000, 43);
 +
 +      if fail_on_generate {
 +              *nodes[0].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
 +      }
 +      nodes[0].node.funding_transaction_generated(&temporary_channel_id, funding_output);
 +      check_added_monitors!(nodes[0], 1);
 +
 +      *nodes[1].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
 +      nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id())).unwrap();
 +      check_added_monitors!(nodes[1], 1);
 +
 +      if restore_between_fails {
 +              assert!(fail_on_generate);
 +              *nodes[0].chan_monitor.update_ret.lock().unwrap() = Ok(());
 +              nodes[0].node.test_restore_channel_monitor();
 +              check_added_monitors!(nodes[0], 1);
 +              assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
 +              assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
 +      }
 +
 +      if fail_on_signed {
 +              *nodes[0].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
 +      } else {
 +              assert!(restore_between_fails || !fail_on_generate); // We can't switch to good now (there's no monitor update)
 +              assert!(fail_on_generate); // Somebody has to fail
 +      }
 +      let funding_signed_res = nodes[0].node.handle_funding_signed(&nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id()));
 +      if fail_on_signed || !restore_between_fails {
 +              if let msgs::LightningError { err, action: msgs::ErrorAction::IgnoreError } = funding_signed_res.unwrap_err() {
 +                      if fail_on_generate && !restore_between_fails {
 +                              assert_eq!(err, "Previous monitor update failure prevented funding_signed from allowing funding broadcast");
 +                              check_added_monitors!(nodes[0], 0);
 +                      } else {
 +                              assert_eq!(err, "Failed to update ChannelMonitor");
 +                              check_added_monitors!(nodes[0], 1);
 +                      }
 +              } else { panic!(); }
 +
 +              assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
 +              *nodes[0].chan_monitor.update_ret.lock().unwrap() = Ok(());
 +              nodes[0].node.test_restore_channel_monitor();
 +      } else {
 +              funding_signed_res.unwrap();
 +      }
 +
 +      check_added_monitors!(nodes[0], 1);
 +
 +      let events = nodes[0].node.get_and_clear_pending_events();
 +      assert_eq!(events.len(), 1);
 +      match events[0] {
 +              Event::FundingBroadcastSafe { ref funding_txo, user_channel_id } => {
 +                      assert_eq!(user_channel_id, 43);
 +                      assert_eq!(*funding_txo, funding_output);
 +              },
 +              _ => panic!("Unexpected event"),
 +      };
 +
 +      if confirm_a_first {
 +              confirm_transaction(&nodes[0].chain_monitor, &funding_tx, funding_tx.version);
 +              nodes[1].node.handle_funding_locked(&nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendFundingLocked, nodes[1].node.get_our_node_id())).unwrap();
 +      } else {
 +              assert!(!restore_b_before_conf);
 +              confirm_transaction(&nodes[1].chain_monitor, &funding_tx, funding_tx.version);
 +              assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
 +      }
 +
 +      // Make sure nodes[1] isn't stupid enough to re-send the FundingLocked on reconnect
 +      nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
 +      nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
 +      reconnect_nodes(&nodes[0], &nodes[1], (false, confirm_a_first), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
 +      assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
 +      assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
 +
 +      if !restore_b_before_conf {
 +              confirm_transaction(&nodes[1].chain_monitor, &funding_tx, funding_tx.version);
 +              assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
 +              assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
 +      }
 +
 +      *nodes[1].chan_monitor.update_ret.lock().unwrap() = Ok(());
 +      nodes[1].node.test_restore_channel_monitor();
 +      check_added_monitors!(nodes[1], 1);
 +
 +      let (channel_id, (announcement, as_update, bs_update)) = if !confirm_a_first {
 +              nodes[0].node.handle_funding_locked(&nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendFundingLocked, nodes[0].node.get_our_node_id())).unwrap();
 +
 +              confirm_transaction(&nodes[0].chain_monitor, &funding_tx, funding_tx.version);
 +              let (funding_locked, channel_id) = create_chan_between_nodes_with_value_confirm_second(&nodes[1], &nodes[0]);
 +              (channel_id, create_chan_between_nodes_with_value_b(&nodes[0], &nodes[1], &funding_locked))
 +      } else {
 +              if restore_b_before_conf {
 +                      confirm_transaction(&nodes[1].chain_monitor, &funding_tx, funding_tx.version);
 +              }
 +              let (funding_locked, channel_id) = create_chan_between_nodes_with_value_confirm_second(&nodes[0], &nodes[1]);
 +              (channel_id, create_chan_between_nodes_with_value_b(&nodes[1], &nodes[0], &funding_locked))
 +      };
 +      for node in nodes.iter() {
 +              assert!(node.router.handle_channel_announcement(&announcement).unwrap());
 +              node.router.handle_channel_update(&as_update).unwrap();
 +              node.router.handle_channel_update(&bs_update).unwrap();
 +      }
 +
++      send_payment(&nodes[0], &[&nodes[1]], 8000000, 8_000_000);
 +      close_channel(&nodes[0], &nodes[1], &channel_id, funding_tx, true);
 +}
 +
 +#[test]
 +fn during_funding_monitor_fail() {
 +      do_during_funding_monitor_fail(false, false, true, true, true);
 +      do_during_funding_monitor_fail(true, false, true, false, false);
 +      do_during_funding_monitor_fail(true, true, true, true, false);
 +      do_during_funding_monitor_fail(true, true, false, false, false);
 +}
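
The four calls above are exactly the combinations the test's internal assertions permit. As a reader's aid, a minimal sketch of that constraint expressed as a standalone predicate (illustrative only, not code from this commit):

    fn combo_is_valid(fail_on_generate: bool, restore_between_fails: bool, fail_on_signed: bool) -> bool {
        // restore_between_fails is only exercised together with fail_on_generate
        // (asserted inside the test)...
        (!restore_between_fails || fail_on_generate)
            // ...and skipping the funding_signed failure requires a generate-time
            // failure that was restored in between (see the asserts before
            // handle_funding_signed).
            && (fail_on_signed || (fail_on_generate && restore_between_fails))
    }
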
index fad5b30b3a5648b680c441a22099515028db1398,0000000000000000000000000000000000000000..c863f46b613c77c27198d85e8f3121d9a2eb5c20
mode 100644,000000..100644
--- /dev/null
@@@ -1,3260 -1,0 +1,3273 @@@
-       pub fn claim_funds(&self, payment_preimage: PaymentPreimage) -> bool {
 +//! The top-level channel management and payment tracking stuff lives here.
 +//!
 +//! The ChannelManager is the main chunk of logic implementing the lightning protocol and is
 +//! responsible for tracking which channels are open, which HTLCs are in flight, and reestablishing
 +//! them upon reconnect to the relevant peer(s).
 +//!
 +//! It does not manage routing logic (see ln::router for that) nor does it manage constructing
 +//! on-chain transactions (it only monitors the chain to watch for any force-closes that might
 +//! imply it needs to fail HTLCs/payments/channels it manages).
 +
 +use bitcoin::blockdata::block::BlockHeader;
 +use bitcoin::blockdata::transaction::Transaction;
 +use bitcoin::blockdata::constants::genesis_block;
 +use bitcoin::network::constants::Network;
 +use bitcoin::util::hash::BitcoinHash;
 +
 +use bitcoin_hashes::{Hash, HashEngine};
 +use bitcoin_hashes::hmac::{Hmac, HmacEngine};
 +use bitcoin_hashes::sha256::Hash as Sha256;
 +use bitcoin_hashes::sha256d::Hash as Sha256dHash;
 +use bitcoin_hashes::cmp::fixed_time_eq;
 +
 +use secp256k1::key::{SecretKey,PublicKey};
 +use secp256k1::Secp256k1;
 +use secp256k1::ecdh::SharedSecret;
 +use secp256k1;
 +
 +use chain::chaininterface::{BroadcasterInterface,ChainListener,ChainWatchInterface,FeeEstimator};
 +use chain::transaction::OutPoint;
 +use ln::channel::{Channel, ChannelError};
 +use ln::channelmonitor::{ChannelMonitor, ChannelMonitorUpdateErr, ManyChannelMonitor, CLTV_CLAIM_BUFFER, LATENCY_GRACE_PERIOD_BLOCKS, ANTI_REORG_DELAY};
 +use ln::router::Route;
 +use ln::msgs;
 +use ln::msgs::LocalFeatures;
 +use ln::onion_utils;
 +use ln::msgs::{ChannelMessageHandler, DecodeError, LightningError};
 +use chain::keysinterface::KeysInterface;
 +use util::config::UserConfig;
 +use util::{byte_utils, events};
 +use util::ser::{Readable, ReadableArgs, Writeable, Writer};
 +use util::chacha20::ChaCha20;
 +use util::logger::Logger;
 +use util::errors::APIError;
 +
 +use std::{cmp, mem};
 +use std::collections::{HashMap, hash_map, HashSet};
 +use std::io::Cursor;
 +use std::sync::{Arc, Mutex, MutexGuard, RwLock};
 +use std::sync::atomic::{AtomicUsize, Ordering};
 +use std::time::Duration;
 +
 +// We hold various information about HTLC relay in the HTLC objects in Channel itself:
 +//
 +// Upon receipt of an HTLC from a peer, we'll give it a PendingHTLCStatus indicating if it should
 +// forward the HTLC with information it will give back to us when it does so, or if it should Fail
 +// the HTLC with the relevant message for the Channel to handle giving to the remote peer.
 +//
 +// When a Channel forwards an HTLC to its peer, it will give us back the PendingForwardHTLCInfo
 +// which we will use to construct an outbound HTLC, with a relevant HTLCSource::PreviousHopData
 +// filled in to indicate where it came from (which we can use to either fail-backwards or fulfill
 +// the HTLC backwards along the relevant path).
 +// Alternatively, we can fill an outbound HTLC with a HTLCSource::OutboundRoute indicating this is
 +// our payment, which we can use to decode errors or inform the user that the payment was sent.
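
As an orientation aid before the type definitions that follow, a minimal sketch of how the two HTLCSource variants are typically consumed when an HTLC resolves (the function name and body here are illustrative, not code from this commit):

    fn on_htlc_failed(source: HTLCSource) {
        match source {
            // We were a forwarding node: fail the HTLC backwards on the inbound
            // channel identified by (short_channel_id, htlc_id).
            HTLCSource::PreviousHopData(prev_hop) => {
                let _ = (prev_hop.short_channel_id, prev_hop.htlc_id);
                // ...look up the inbound channel and relay the failure...
            },
            // We originated the payment: decode the onion error and surface an
            // event to the user.
            HTLCSource::OutboundRoute { route, .. } => {
                let _ = route.hops.len();
            },
        }
    }
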
 +/// Stores the info we will need to send when we want to forward an HTLC onwards
 +#[derive(Clone)] // See Channel::revoke_and_ack for why, tl;dr: Rust bug
 +pub(super) struct PendingForwardHTLCInfo {
 +      onion_packet: Option<msgs::OnionPacket>,
 +      incoming_shared_secret: [u8; 32],
 +      payment_hash: PaymentHash,
 +      short_channel_id: u64,
 +      pub(super) amt_to_forward: u64,
 +      pub(super) outgoing_cltv_value: u32,
 +}
 +
 +#[derive(Clone)] // See Channel::revoke_and_ack for why, tl;dr: Rust bug
 +pub(super) enum HTLCFailureMsg {
 +      Relay(msgs::UpdateFailHTLC),
 +      Malformed(msgs::UpdateFailMalformedHTLC),
 +}
 +
 +/// Stores whether we should forward an HTLC (with its relevant forwarding info) or fail it
 +#[derive(Clone)] // See Channel::revoke_and_ack for why, tl;dr: Rust bug
 +pub(super) enum PendingHTLCStatus {
 +      Forward(PendingForwardHTLCInfo),
 +      Fail(HTLCFailureMsg),
 +}
 +
 +/// Tracks the inbound HTLC corresponding to an outbound HTLC we forwarded
 +#[derive(Clone, PartialEq)]
 +pub(super) struct HTLCPreviousHopData {
 +      short_channel_id: u64,
 +      htlc_id: u64,
 +      incoming_packet_shared_secret: [u8; 32],
 +}
 +
 +/// Tracks the origin of an outbound HTLC - either an inbound HTLC we are forwarding or a payment we initiated
 +#[derive(Clone, PartialEq)]
 +pub(super) enum HTLCSource {
 +      PreviousHopData(HTLCPreviousHopData),
 +      OutboundRoute {
 +              route: Route,
 +              session_priv: SecretKey,
 +              /// Technically we can recalculate this from the route, but we cache it here to avoid
 +              /// doing a double-pass on the route when we get a failure back
 +              first_hop_htlc_msat: u64,
 +      },
 +}
 +#[cfg(test)]
 +impl HTLCSource {
 +      pub fn dummy() -> Self {
 +              HTLCSource::OutboundRoute {
 +                      route: Route { hops: Vec::new() },
 +                      session_priv: SecretKey::from_slice(&[1; 32]).unwrap(),
 +                      first_hop_htlc_msat: 0,
 +              }
 +      }
 +}
 +
 +#[derive(Clone)] // See Channel::revoke_and_ack for why, tl;dr: Rust bug
 +pub(super) enum HTLCFailReason {
 +      LightningError {
 +              err: msgs::OnionErrorPacket,
 +      },
 +      Reason {
 +              failure_code: u16,
 +              data: Vec<u8>,
 +      }
 +}
 +
 +/// payment_hash type, used to cross-lock hops
 +#[derive(Hash, Copy, Clone, PartialEq, Eq, Debug)]
 +pub struct PaymentHash(pub [u8;32]);
 +/// payment_preimage type, used to route payments between hops
 +#[derive(Hash, Copy, Clone, PartialEq, Eq, Debug)]
 +pub struct PaymentPreimage(pub [u8;32]);
 +
 +type ShutdownResult = (Vec<Transaction>, Vec<(HTLCSource, PaymentHash)>);
 +
 +/// Error type returned across the channel_state mutex boundary. When an Err is generated for a
 +/// Channel, we generally end up with a ChannelError::Close for which we have to close the channel
 +/// immediately (ie with no further calls on it made). Thus, this step happens inside a
 +/// channel_state lock. We then return the set of things that need to be done outside the lock in
 +/// this struct and call handle_error!() on it.
 +
 +struct MsgHandleErrInternal {
 +      err: msgs::LightningError,
 +      shutdown_finish: Option<(ShutdownResult, Option<msgs::ChannelUpdate>)>,
 +}
 +impl MsgHandleErrInternal {
 +      #[inline]
 +      fn send_err_msg_no_close(err: &'static str, channel_id: [u8; 32]) -> Self {
 +              Self {
 +                      err: LightningError {
 +                              err,
 +                              action: msgs::ErrorAction::SendErrorMessage {
 +                                      msg: msgs::ErrorMessage {
 +                                              channel_id,
 +                                              data: err.to_string()
 +                                      },
 +                              },
 +                      },
 +                      shutdown_finish: None,
 +              }
 +      }
 +      #[inline]
 +      fn ignore_no_close(err: &'static str) -> Self {
 +              Self {
 +                      err: LightningError {
 +                              err,
 +                              action: msgs::ErrorAction::IgnoreError,
 +                      },
 +                      shutdown_finish: None,
 +              }
 +      }
 +      #[inline]
 +      fn from_no_close(err: msgs::LightningError) -> Self {
 +              Self { err, shutdown_finish: None }
 +      }
 +      #[inline]
 +      fn from_finish_shutdown(err: &'static str, channel_id: [u8; 32], shutdown_res: ShutdownResult, channel_update: Option<msgs::ChannelUpdate>) -> Self {
 +              Self {
 +                      err: LightningError {
 +                              err,
 +                              action: msgs::ErrorAction::SendErrorMessage {
 +                                      msg: msgs::ErrorMessage {
 +                                              channel_id,
 +                                              data: err.to_string()
 +                                      },
 +                              },
 +                      },
 +                      shutdown_finish: Some((shutdown_res, channel_update)),
 +              }
 +      }
 +      #[inline]
 +      fn from_chan_no_close(err: ChannelError, channel_id: [u8; 32]) -> Self {
 +              Self {
 +                      err: match err {
 +                              ChannelError::Ignore(msg) => LightningError {
 +                                      err: msg,
 +                                      action: msgs::ErrorAction::IgnoreError,
 +                              },
 +                              ChannelError::Close(msg) => LightningError {
 +                                      err: msg,
 +                                      action: msgs::ErrorAction::SendErrorMessage {
 +                                              msg: msgs::ErrorMessage {
 +                                                      channel_id,
 +                                                      data: msg.to_string()
 +                                              },
 +                                      },
 +                              },
 +                              ChannelError::CloseDelayBroadcast { msg, .. } => LightningError {
 +                                      err: msg,
 +                                      action: msgs::ErrorAction::SendErrorMessage {
 +                                              msg: msgs::ErrorMessage {
 +                                                      channel_id,
 +                                                      data: msg.to_string()
 +                                              },
 +                                      },
 +                              },
 +                      },
 +                      shutdown_finish: None,
 +              }
 +      }
 +}
 +
 +/// We hold back HTLCs we intend to relay for a random interval greater than this (see
 +/// Event::PendingHTLCsForwardable for the API guidelines indicating how long to wait).
 +/// This provides some limited amount of privacy. Ideally this would range from somewhere like one
 +/// second to 30 seconds, but people expect lightning to be, you know, kinda fast, sadly.
 +const MIN_HTLC_RELAY_HOLDING_CELL_MILLIS: u64 = 100;
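
For context, once this holding-cell delay is relevant a PendingHTLCsForwardable event fires carrying a time_forwardable duration; a sketch of a driver honoring it (the helper function and jitter are hypothetical, not this file's API):

    use std::time::Duration;

    // Hypothetical driver-side scheduling: wait at least `time_forwardable`
    // (taken from the event), plus our own random jitter, before running the
    // forwarding pass.
    fn forward_delay(time_forwardable: Duration, jitter_ms: u64) -> Duration {
        time_forwardable + Duration::from_millis(jitter_ms % 400)
    }
    // Later: std::thread::sleep(delay); manager.process_pending_htlc_forwards();
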
 +
 +pub(super) enum HTLCForwardInfo {
 +      AddHTLC {
 +              prev_short_channel_id: u64,
 +              prev_htlc_id: u64,
 +              forward_info: PendingForwardHTLCInfo,
 +      },
 +      FailHTLC {
 +              htlc_id: u64,
 +              err_packet: msgs::OnionErrorPacket,
 +      },
 +}
 +
 +/// For events which result in both a RevokeAndACK and a CommitmentUpdate, by default they should
 +/// be sent in the order they appear in the return value, however sometimes the order needs to be
 +/// variable at runtime (eg Channel::channel_reestablish needs to re-send messages in the order
 +/// they were originally sent). In those cases, this enum is also returned.
 +#[derive(Clone, PartialEq)]
 +pub(super) enum RAACommitmentOrder {
 +      /// Send the CommitmentUpdate messages first
 +      CommitmentFirst,
 +      /// Send the RevokeAndACK message first
 +      RevokeAndACKFirst,
 +}
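
A sketch of what honoring this ordering looks like on the sending side (the send_* helpers are illustrative, not this file's API):

    match order {
        RAACommitmentOrder::CommitmentFirst => {
            send_commitment_update(&commitment_update);
            send_revoke_and_ack(&raa);
        },
        RAACommitmentOrder::RevokeAndACKFirst => {
            send_revoke_and_ack(&raa);
            send_commitment_update(&commitment_update);
        },
    }
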
 +
 +// Note this is only exposed in cfg(test):
 +pub(super) struct ChannelHolder {
 +      pub(super) by_id: HashMap<[u8; 32], Channel>,
 +      pub(super) short_to_id: HashMap<u64, [u8; 32]>,
 +      /// short channel id -> forward infos. Key of 0 means payments received
 +      /// Note that while this is held in the same mutex as the channels themselves, no consistency
 +      /// guarantees are made about the existence of a channel with the short id here, nor the short
 +      /// ids in the PendingForwardHTLCInfo!
 +      pub(super) forward_htlcs: HashMap<u64, Vec<HTLCForwardInfo>>,
 +      /// payment_hash -> Vec<(amount_received, htlc_source)> for tracking HTLCs that were paid to us
 +      /// and can be failed/claimed by the user
 +      /// Note that while this is held in the same mutex as the channels themselves, no consistency
 +      /// guarantees are made about the channels given here actually existing anymore by the time you
 +      /// go to read them!
 +      pub(super) claimable_htlcs: HashMap<PaymentHash, Vec<(u64, HTLCPreviousHopData)>>,
 +      /// Messages to send to peers - pushed to in the same lock that they are generated in (except
 +      /// for broadcast messages, where ordering isn't as strict).
 +      pub(super) pending_msg_events: Vec<events::MessageSendEvent>,
 +}
 +pub(super) struct MutChannelHolder<'a> {
 +      pub(super) by_id: &'a mut HashMap<[u8; 32], Channel>,
 +      pub(super) short_to_id: &'a mut HashMap<u64, [u8; 32]>,
 +      pub(super) forward_htlcs: &'a mut HashMap<u64, Vec<HTLCForwardInfo>>,
 +      pub(super) claimable_htlcs: &'a mut HashMap<PaymentHash, Vec<(u64, HTLCPreviousHopData)>>,
 +      pub(super) pending_msg_events: &'a mut Vec<events::MessageSendEvent>,
 +}
 +impl ChannelHolder {
 +      pub(super) fn borrow_parts(&mut self) -> MutChannelHolder {
 +              MutChannelHolder {
 +                      by_id: &mut self.by_id,
 +                      short_to_id: &mut self.short_to_id,
 +                      forward_htlcs: &mut self.forward_htlcs,
 +                      claimable_htlcs: &mut self.claimable_htlcs,
 +                      pending_msg_events: &mut self.pending_msg_events,
 +              }
 +      }
 +}
 +
 +#[cfg(not(any(target_pointer_width = "32", target_pointer_width = "64")))]
 +const ERR: () = "You need at least 32 bit pointers (well, usize, but we'll assume they're the same) for ChannelManager::latest_block_height";
 +
 +/// Manager which keeps track of a number of channels and sends messages to the appropriate
 +/// channel, also tracking HTLC preimages and forwarding onion packets appropriately.
 +///
 +/// Implements ChannelMessageHandler, handling the multi-channel parts and passing things through
 +/// to individual Channels.
 +///
 +/// Implements Writeable to write out all channel state to disk. Implies peer_disconnected() for
 +/// all peers during write/read (though does not modify this instance, only the instance being
 +/// serialized). This will result in any channels which have not yet exchanged funding_created (ie
 +/// called funding_transaction_generated for outbound channels) being closed.
 +///
 +/// Note that you can be a bit lazier about writing out ChannelManager than you can be with
 +/// ChannelMonitors. With ChannelMonitors you MUST write each monitor update out to disk before
 +/// returning from ManyChannelMonitor::add_update_monitor, with ChannelManagers, writing updates
 +/// happens out-of-band (and will prevent any other ChannelManager operations from occurring during
 +/// the serialization process). If the deserialized version is out-of-date compared to the
 +/// ChannelMonitors passed by reference to read(), those channels will be force-closed based on the
 +/// ChannelMonitor state and no funds will be lost (mod on-chain transaction fees).
 +///
 +/// Note that the deserializer is only implemented for (Sha256dHash, ChannelManager), which
 +/// tells you the last block hash which was block_connect()ed. You MUST rescan any blocks along
 +/// the "reorg path" (ie call block_disconnected() until you get to a common block and then call
 +/// block_connected() to step towards your best block) upon deserialization before using the
 +/// object!
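
A minimal sketch of the rescan the paragraph above requires after deserialization, assuming hypothetical chain-source iterators blocks_to_disconnect/blocks_to_connect (the argument lists follow this tree's ChainListener, abbreviated):

    fn resync_after_read(manager: &ChannelManager, last_block_hash: Sha256dHash) {
        // Walk back from the serialized tip to a block still in the best chain...
        for (header, height) in blocks_to_disconnect(last_block_hash) {
            manager.block_disconnected(&header, height);
        }
        // ...then replay every block from the fork point to the current best tip.
        for (header, height, txn, idxs) in blocks_to_connect(last_block_hash) {
            manager.block_connected(&header, height, &txn, &idxs);
        }
    }
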
 +pub struct ChannelManager {
 +      default_configuration: UserConfig,
 +      genesis_hash: Sha256dHash,
 +      fee_estimator: Arc<FeeEstimator>,
 +      monitor: Arc<ManyChannelMonitor>,
 +      chain_monitor: Arc<ChainWatchInterface>,
 +      tx_broadcaster: Arc<BroadcasterInterface>,
 +
 +      #[cfg(test)]
 +      pub(super) latest_block_height: AtomicUsize,
 +      #[cfg(not(test))]
 +      latest_block_height: AtomicUsize,
 +      last_block_hash: Mutex<Sha256dHash>,
 +      secp_ctx: Secp256k1<secp256k1::All>,
 +
 +      #[cfg(test)]
 +      pub(super) channel_state: Mutex<ChannelHolder>,
 +      #[cfg(not(test))]
 +      channel_state: Mutex<ChannelHolder>,
 +      our_network_key: SecretKey,
 +
 +      pending_events: Mutex<Vec<events::Event>>,
 +      /// Used when we have to take a BIG lock to make sure everything is self-consistent.
 +      /// Essentially just when we're serializing ourselves out.
 +      /// Taken first everywhere where we are making changes before any other locks.
 +      total_consistency_lock: RwLock<()>,
 +
 +      keys_manager: Arc<KeysInterface>,
 +
 +      logger: Arc<Logger>,
 +}
 +
 +/// The amount of time we require our counterparty to wait before claiming their money (ie the
 +/// window during which we, or our watchtower, must check for them having broadcast a theft
 +/// transaction).
 +pub(crate) const BREAKDOWN_TIMEOUT: u16 = 6 * 24;
 +/// The amount of time we're willing to wait to claim money back to us
 +pub(crate) const MAX_LOCAL_BREAKDOWN_TIMEOUT: u16 = 6 * 24 * 7;
 +
 +/// The minimum number of blocks between an inbound HTLC's CLTV and the corresponding outbound
 +/// HTLC's CLTV. This should always be a few blocks greater than channelmonitor::CLTV_CLAIM_BUFFER,
 +/// ie the node we forwarded the payment on to should always have enough room to reliably time out
 +/// the HTLC via a full update_fail_htlc/commitment_signed dance before we hit the
 +/// CLTV_CLAIM_BUFFER point (we static assert that it's at least 3 blocks more).
 +const CLTV_EXPIRY_DELTA: u16 = 6 * 12; //TODO?
 +pub(super) const CLTV_FAR_FAR_AWAY: u32 = 6 * 24 * 7; //TODO?
 +
 +// Check that our CLTV_EXPIRY is at least CLTV_CLAIM_BUFFER + ANTI_REORG_DELAY + LATENCY_GRACE_PERIOD_BLOCKS,
 +// ie that if the next-hop peer fails the HTLC within
 +// LATENCY_GRACE_PERIOD_BLOCKS then we'll still have CLTV_CLAIM_BUFFER left to time it out onchain,
 +// then wait ANTI_REORG_DELAY to be reorg-safe on the outbound HTLC before
 +// failing the corresponding HTLC backward, with a final LATENCY_GRACE_PERIOD_BLOCKS of margin
 +// still left on the inbound HTLC.
 +#[deny(const_err)]
 +#[allow(dead_code)]
 +const CHECK_CLTV_EXPIRY_SANITY: u32 = CLTV_EXPIRY_DELTA as u32 - LATENCY_GRACE_PERIOD_BLOCKS - CLTV_CLAIM_BUFFER - ANTI_REORG_DELAY - LATENCY_GRACE_PERIOD_BLOCKS;
 +
 +// Check for ability of an attacker to make us fail on-chain by delaying inbound claim. See
 +// ChannelMonitor::would_broadcast_at_height for a description of why this is needed.
 +#[deny(const_err)]
 +#[allow(dead_code)]
 +const CHECK_CLTV_EXPIRY_SANITY_2: u32 = CLTV_EXPIRY_DELTA as u32 - LATENCY_GRACE_PERIOD_BLOCKS - 2*CLTV_CLAIM_BUFFER;
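
To make the mechanism concrete: both consts are evaluated at compile time on u32s, so an unsatisfiable bound underflows and rustc rejects the build under #[deny(const_err)]. A standalone demo with made-up numbers (not this file's real constants):

    #[deny(const_err)]
    #[allow(dead_code)]
    // 72 - 3 - 6 - 6 - 3 = 54: compiles fine. Shrink the 72 below 18 and the
    // u32 subtraction underflows, turning into a compile-time error.
    const DEMO_CLTV_SANITY: u32 = 72 - 3 - 6 - 6 - 3;
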
 +
 +macro_rules! secp_call {
 +      ( $res: expr, $err: expr ) => {
 +              match $res {
 +                      Ok(key) => key,
 +                      Err(_) => return Err($err),
 +              }
 +      };
 +}
 +
 +/// Details of a channel, as returned by ChannelManager::list_channels and ChannelManager::list_usable_channels
 +pub struct ChannelDetails {
 +      /// The channel's ID (prior to funding transaction generation, this is a random 32 bytes,
 +      /// thereafter this is the txid of the funding transaction xor the funding transaction output).
 +      /// Note that this means this value is *not* persistent - it can change once during the
 +      /// lifetime of the channel.
 +      pub channel_id: [u8; 32],
 +      /// The position of the funding transaction in the chain. None if the funding transaction has
 +      /// not yet been confirmed and the channel fully opened.
 +      pub short_channel_id: Option<u64>,
 +      /// The node_id of our counterparty
 +      pub remote_network_id: PublicKey,
 +      /// The value, in satoshis, of this channel as appears in the funding output
 +      pub channel_value_satoshis: u64,
 +      /// The user_id passed in to create_channel, or 0 if the channel was inbound.
 +      pub user_id: u64,
 +      /// The available outbound capacity for sending HTLCs to the remote peer. This does not include
 +      /// any pending HTLCs which are not yet fully resolved (and, thus, whose balance is not
 +      /// available for inclusion in new outbound HTLCs). This further does not include any pending
 +      /// outgoing HTLCs which are awaiting some other resolution to be sent.
 +      pub outbound_capacity_msat: u64,
 +      /// The available inbound capacity for the remote peer to send HTLCs to us. This does not
 +      /// include any pending HTLCs which are not yet fully resolved (and, thus, whose balance is not
 +      /// available for inclusion in new inbound HTLCs).
 +      /// Note that there are some corner cases not fully handled here, so the actual available
 +      /// inbound capacity may be slightly higher than this.
 +      pub inbound_capacity_msat: u64,
 +      /// True if the channel is (a) confirmed and funding_locked messages have been exchanged, (b)
 +      /// the peer is connected, and (c) no monitor update failure is pending resolution.
 +      pub is_live: bool,
 +}
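
For instance, a caller might total the capacity it can currently spend from these fields (a sketch; `manager` is assumed to be an initialized ChannelManager):

    let total_outbound_msat: u64 = manager.list_usable_channels()
        .iter()
        .map(|chan| chan.outbound_capacity_msat)
        .sum();
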
 +
 +macro_rules! handle_error {
 +      ($self: ident, $internal: expr) => {
 +              match $internal {
 +                      Ok(msg) => Ok(msg),
 +                      Err(MsgHandleErrInternal { err, shutdown_finish }) => {
 +                              if let Some((shutdown_res, update_option)) = shutdown_finish {
 +                                      $self.finish_force_close_channel(shutdown_res);
 +                                      if let Some(update) = update_option {
 +                                              let mut channel_state = $self.channel_state.lock().unwrap();
 +                                              channel_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
 +                                                      msg: update
 +                                              });
 +                                      }
 +                              }
 +                              Err(err)
 +                      },
 +              }
 +      }
 +}
 +
 +macro_rules! break_chan_entry {
 +      ($self: ident, $res: expr, $channel_state: expr, $entry: expr) => {
 +              match $res {
 +                      Ok(res) => res,
 +                      Err(ChannelError::Ignore(msg)) => {
 +                              break Err(MsgHandleErrInternal::from_chan_no_close(ChannelError::Ignore(msg), $entry.key().clone()))
 +                      },
 +                      Err(ChannelError::Close(msg)) => {
 +                              log_trace!($self, "Closing channel {} due to Close-required error: {}", log_bytes!($entry.key()[..]), msg);
 +                              let (channel_id, mut chan) = $entry.remove_entry();
 +                              if let Some(short_id) = chan.get_short_channel_id() {
 +                                      $channel_state.short_to_id.remove(&short_id);
 +                              }
 +                              break Err(MsgHandleErrInternal::from_finish_shutdown(msg, channel_id, chan.force_shutdown(), $self.get_channel_update(&chan).ok()))
 +                      },
 +                      Err(ChannelError::CloseDelayBroadcast { .. }) => { panic!("Wait is only generated on receipt of channel_reestablish, which is handled by try_chan_entry, we don't bother to support it here"); }
 +              }
 +      }
 +}
 +
 +macro_rules! try_chan_entry {
 +      ($self: ident, $res: expr, $channel_state: expr, $entry: expr) => {
 +              match $res {
 +                      Ok(res) => res,
 +                      Err(ChannelError::Ignore(msg)) => {
 +                              return Err(MsgHandleErrInternal::from_chan_no_close(ChannelError::Ignore(msg), $entry.key().clone()))
 +                      },
 +                      Err(ChannelError::Close(msg)) => {
 +                              log_trace!($self, "Closing channel {} due to Close-required error: {}", log_bytes!($entry.key()[..]), msg);
 +                              let (channel_id, mut chan) = $entry.remove_entry();
 +                              if let Some(short_id) = chan.get_short_channel_id() {
 +                                      $channel_state.short_to_id.remove(&short_id);
 +                              }
 +                              return Err(MsgHandleErrInternal::from_finish_shutdown(msg, channel_id, chan.force_shutdown(), $self.get_channel_update(&chan).ok()))
 +                      },
 +                      Err(ChannelError::CloseDelayBroadcast { msg, update }) => {
 +                              log_error!($self, "Channel {} needs to be shut down, but closing transactions were not broadcast due to {}", log_bytes!($entry.key()[..]), msg);
 +                              let (channel_id, mut chan) = $entry.remove_entry();
 +                              if let Some(short_id) = chan.get_short_channel_id() {
 +                                      $channel_state.short_to_id.remove(&short_id);
 +                              }
 +                              if let Some(update) = update {
 +                                      if let Err(e) = $self.monitor.add_update_monitor(update.get_funding_txo().unwrap(), update) {
 +                                              match e {
 +                                                      // Upstream channel is dead, but we want to at least fail backward HTLCs to save
 +                                                      // downstream channels. In case of PermanentFailure, we are not going to be able
 +                                                      // to claim back the to_remote output on the remote commitment transaction. That doesn't
 +                                                      // make a difference here, as we are concerned about the HTLC circuit, not onchain funds.
 +                                                      ChannelMonitorUpdateErr::PermanentFailure => {},
 +                                                      ChannelMonitorUpdateErr::TemporaryFailure => {},
 +                                              }
 +                                      }
 +                              }
 +                              let mut shutdown_res = chan.force_shutdown();
 +                              if shutdown_res.0.len() >= 1 {
 +                                      log_error!($self, "You have a toxic local commitment transaction {} available in channel monitor, read comment in ChannelMonitor::get_latest_local_commitment_txn to be informed of manual action to take", shutdown_res.0[0].txid());
 +                              }
 +                              shutdown_res.0.clear();
 +                              return Err(MsgHandleErrInternal::from_finish_shutdown(msg, channel_id, shutdown_res, $self.get_channel_update(&chan).ok()))
 +                      }
 +              }
 +      }
 +}
 +
 +macro_rules! handle_monitor_err {
 +      ($self: ident, $err: expr, $channel_state: expr, $entry: expr, $action_type: path, $resend_raa: expr, $resend_commitment: expr) => {
 +              handle_monitor_err!($self, $err, $channel_state, $entry, $action_type, $resend_raa, $resend_commitment, Vec::new(), Vec::new())
 +      };
 +      ($self: ident, $err: expr, $channel_state: expr, $entry: expr, $action_type: path, $resend_raa: expr, $resend_commitment: expr, $failed_forwards: expr, $failed_fails: expr) => {
 +              match $err {
 +                      ChannelMonitorUpdateErr::PermanentFailure => {
 +                              log_error!($self, "Closing channel {} due to monitor update PermanentFailure", log_bytes!($entry.key()[..]));
 +                              let (channel_id, mut chan) = $entry.remove_entry();
 +                              if let Some(short_id) = chan.get_short_channel_id() {
 +                                      $channel_state.short_to_id.remove(&short_id);
 +                              }
 +                              // TODO: $failed_fails is dropped here, which will cause other channels to hit the
 +                              // chain in a confused state! We need to move them into the ChannelMonitor which
 +                              // will be responsible for failing backwards once things confirm on-chain.
 +                              // It's ok that we drop $failed_forwards here - at this point we'd rather they
 +                              // broadcast HTLC-Timeout and pay the associated fees to get their funds back than
 +                              // have us bother trying to claim it just to forward it on to another peer. If we're
 +                              // splitting hairs we'd prefer to claim payments that were to us, but we haven't
 +                              // given up the preimage yet, so might as well just wait until the payment is
 +                              // retried, avoiding the on-chain fees.
 +                              let res: Result<(), _> = Err(MsgHandleErrInternal::from_finish_shutdown("ChannelMonitor storage failure", channel_id, chan.force_shutdown(), $self.get_channel_update(&chan).ok()));
 +                              res
 +                      },
 +                      ChannelMonitorUpdateErr::TemporaryFailure => {
 +                              log_info!($self, "Disabling channel {} due to monitor update TemporaryFailure. On restore will send {} and process {} forwards and {} fails",
 +                                              log_bytes!($entry.key()[..]),
 +                                              if $resend_commitment && $resend_raa {
 +                                                              match $action_type {
 +                                                                      RAACommitmentOrder::CommitmentFirst => { "commitment then RAA" },
 +                                                                      RAACommitmentOrder::RevokeAndACKFirst => { "RAA then commitment" },
 +                                                              }
 +                                                      } else if $resend_commitment { "commitment" }
 +                                                      else if $resend_raa { "RAA" }
 +                                                      else { "nothing" },
 +                                              (&$failed_forwards as &Vec<(PendingForwardHTLCInfo, u64)>).len(),
 +                                              (&$failed_fails as &Vec<(HTLCSource, PaymentHash, HTLCFailReason)>).len());
 +                              if !$resend_commitment {
 +                                      debug_assert!($action_type == RAACommitmentOrder::RevokeAndACKFirst || !$resend_raa);
 +                              }
 +                              if !$resend_raa {
 +                                      debug_assert!($action_type == RAACommitmentOrder::CommitmentFirst || !$resend_commitment);
 +                              }
 +                              $entry.get_mut().monitor_update_failed($resend_raa, $resend_commitment, $failed_forwards, $failed_fails);
 +                              Err(MsgHandleErrInternal::from_chan_no_close(ChannelError::Ignore("Failed to update ChannelMonitor"), *$entry.key()))
 +                      },
 +              }
 +      }
 +}
 +
 +macro_rules! return_monitor_err {
 +      ($self: ident, $err: expr, $channel_state: expr, $entry: expr, $action_type: path, $resend_raa: expr, $resend_commitment: expr) => {
 +              return handle_monitor_err!($self, $err, $channel_state, $entry, $action_type, $resend_raa, $resend_commitment);
 +      };
 +      ($self: ident, $err: expr, $channel_state: expr, $entry: expr, $action_type: path, $resend_raa: expr, $resend_commitment: expr, $failed_forwards: expr, $failed_fails: expr) => {
 +              return handle_monitor_err!($self, $err, $channel_state, $entry, $action_type, $resend_raa, $resend_commitment, $failed_forwards, $failed_fails);
 +      }
 +}
 +
 +// Does not break in case of TemporaryFailure!
 +macro_rules! maybe_break_monitor_err {
 +      ($self: ident, $err: expr, $channel_state: expr, $entry: expr, $action_type: path, $resend_raa: expr, $resend_commitment: expr) => {
 +              match (handle_monitor_err!($self, $err, $channel_state, $entry, $action_type, $resend_raa, $resend_commitment), $err) {
 +                      (e, ChannelMonitorUpdateErr::PermanentFailure) => {
 +                              break e;
 +                      },
 +                      (_, ChannelMonitorUpdateErr::TemporaryFailure) => { },
 +              }
 +      }
 +}
 +
 +impl ChannelManager {
 +      /// Constructs a new ChannelManager to hold several channels and route between them.
 +      ///
 +      /// This is the main "logic hub" for all channel-related actions, and implements
 +      /// ChannelMessageHandler.
 +      ///
 +      /// Non-proportional fees are fixed according to our risk using the provided fee estimator.
 +      ///
 +      /// Panics if channel_value_satoshis is >= `MAX_FUNDING_SATOSHIS`!
 +      ///
 +      /// User must provide the current blockchain height from which to track onchain channel
 +      /// funding outpoints and send payments with reliable timelocks.
 +      pub fn new(network: Network, feeest: Arc<FeeEstimator>, monitor: Arc<ManyChannelMonitor>, chain_monitor: Arc<ChainWatchInterface>, tx_broadcaster: Arc<BroadcasterInterface>, logger: Arc<Logger>,keys_manager: Arc<KeysInterface>, config: UserConfig, current_blockchain_height: usize) -> Result<Arc<ChannelManager>, secp256k1::Error> {
 +              let secp_ctx = Secp256k1::new();
 +
 +              let res = Arc::new(ChannelManager {
 +                      default_configuration: config.clone(),
 +                      genesis_hash: genesis_block(network).header.bitcoin_hash(),
 +                      fee_estimator: feeest.clone(),
 +                      monitor: monitor.clone(),
 +                      chain_monitor,
 +                      tx_broadcaster,
 +
 +                      latest_block_height: AtomicUsize::new(current_blockchain_height),
 +                      last_block_hash: Mutex::new(Default::default()),
 +                      secp_ctx,
 +
 +                      channel_state: Mutex::new(ChannelHolder{
 +                              by_id: HashMap::new(),
 +                              short_to_id: HashMap::new(),
 +                              forward_htlcs: HashMap::new(),
 +                              claimable_htlcs: HashMap::new(),
 +                              pending_msg_events: Vec::new(),
 +                      }),
 +                      our_network_key: keys_manager.get_node_secret(),
 +
 +                      pending_events: Mutex::new(Vec::new()),
 +                      total_consistency_lock: RwLock::new(()),
 +
 +                      keys_manager,
 +
 +                      logger,
 +              });
 +              let weak_res = Arc::downgrade(&res);
 +              res.chain_monitor.register_listener(weak_res);
 +              Ok(res)
 +      }
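
A minimal construction sketch, assuming Arc'd implementations of the various interfaces are already in hand (every variable name below is hypothetical):

    let manager = ChannelManager::new(
        Network::Testnet,
        fee_estimator,  // Arc<FeeEstimator>
        monitor,        // Arc<ManyChannelMonitor>
        chain_watcher,  // Arc<ChainWatchInterface>
        tx_broadcaster, // Arc<BroadcasterInterface>
        logger,         // Arc<Logger>
        keys_manager,   // Arc<KeysInterface>
        UserConfig::new(),
        current_block_height,
    )?;
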
 +
 +      /// Creates a new outbound channel to the given remote node and with the given value.
 +      ///
 +      /// user_id will be provided back as user_channel_id in FundingGenerationReady and
 +      /// FundingBroadcastSafe events to allow tracking of which events correspond with which
 +      /// create_channel call. Note that user_channel_id defaults to 0 for inbound channels, so you
 +      /// may wish to avoid using 0 for user_id here.
 +      ///
 +      /// If successful, will generate a SendOpenChannel message event, so you should probably poll
 +      /// PeerManager::process_events afterwards.
 +      ///
 +      /// Raises APIError::APIMisuseError when channel_value_satoshis > 2**24 or push_msat is
 +      /// greater than channel_value_satoshis * 1k or channel_value_satoshis is < 1000.
 +      pub fn create_channel(&self, their_network_key: PublicKey, channel_value_satoshis: u64, push_msat: u64, user_id: u64) -> Result<(), APIError> {
 +              if channel_value_satoshis < 1000 {
 +                      return Err(APIError::APIMisuseError { err: "channel_value must be at least 1000 satoshis" });
 +              }
 +
 +              let channel = Channel::new_outbound(&*self.fee_estimator, &self.keys_manager, their_network_key, channel_value_satoshis, push_msat, user_id, Arc::clone(&self.logger), &self.default_configuration)?;
 +              let res = channel.get_open_channel(self.genesis_hash.clone(), &*self.fee_estimator);
 +
 +              let _ = self.total_consistency_lock.read().unwrap();
 +              let mut channel_state = self.channel_state.lock().unwrap();
 +              match channel_state.by_id.entry(channel.channel_id()) {
 +                      hash_map::Entry::Occupied(_) => {
 +                              if cfg!(feature = "fuzztarget") {
 +                                      return Err(APIError::APIMisuseError { err: "Fuzzy bad RNG" });
 +                              } else {
 +                                      panic!("RNG is bad???");
 +                              }
 +                      },
 +                      hash_map::Entry::Vacant(entry) => { entry.insert(channel); }
 +              }
 +              channel_state.pending_msg_events.push(events::MessageSendEvent::SendOpenChannel {
 +                      node_id: their_network_key,
 +                      msg: res,
 +              });
 +              Ok(())
 +      }
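
Sketch of a typical call sequence, with `manager` and `peer_manager` as assumed handles:

    // Open a 100_000-sat channel, pushing 10_000 msat, tagged with user_id 42.
    manager.create_channel(their_node_id, 100_000, 10_000, 42)?;
    // create_channel queued a SendOpenChannel message event; flush it to the peer.
    peer_manager.process_events();
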
 +
 +      /// Gets the list of open channels, in random order. See ChannelDetails field documentation for
 +      /// more information.
 +      pub fn list_channels(&self) -> Vec<ChannelDetails> {
 +              let channel_state = self.channel_state.lock().unwrap();
 +              let mut res = Vec::with_capacity(channel_state.by_id.len());
 +              for (channel_id, channel) in channel_state.by_id.iter() {
 +                      let (inbound_capacity_msat, outbound_capacity_msat) = channel.get_inbound_outbound_available_balance_msat();
 +                      res.push(ChannelDetails {
 +                              channel_id: (*channel_id).clone(),
 +                              short_channel_id: channel.get_short_channel_id(),
 +                              remote_network_id: channel.get_their_node_id(),
 +                              channel_value_satoshis: channel.get_value_satoshis(),
 +                              inbound_capacity_msat,
 +                              outbound_capacity_msat,
 +                              user_id: channel.get_user_id(),
 +                              is_live: channel.is_live(),
 +                      });
 +              }
 +              res
 +      }
 +
 +      /// Gets the list of usable channels, in random order. Useful as an argument to
 +      /// Router::get_route to ensure non-announced channels are used.
 +      ///
 +      /// These are guaranteed to have their is_live value set to true, see the documentation for
 +      /// ChannelDetails::is_live for more info on exactly what the criteria are.
 +      pub fn list_usable_channels(&self) -> Vec<ChannelDetails> {
 +              let channel_state = self.channel_state.lock().unwrap();
 +              let mut res = Vec::with_capacity(channel_state.by_id.len());
 +              for (channel_id, channel) in channel_state.by_id.iter() {
 +                      // Note we use is_live here instead of usable which leads to somewhat confused
 +                      // internal/external nomenclature, but that's ok because that's probably what the user
 +                      // really wanted anyway.
 +                      if channel.is_live() {
 +                              let (inbound_capacity_msat, outbound_capacity_msat) = channel.get_inbound_outbound_available_balance_msat();
 +                              res.push(ChannelDetails {
 +                                      channel_id: (*channel_id).clone(),
 +                                      short_channel_id: channel.get_short_channel_id(),
 +                                      remote_network_id: channel.get_their_node_id(),
 +                                      channel_value_satoshis: channel.get_value_satoshis(),
 +                                      inbound_capacity_msat,
 +                                      outbound_capacity_msat,
 +                                      user_id: channel.get_user_id(),
 +                                      is_live: true,
 +                              });
 +                      }
 +              }
 +              res
 +      }
 +
 +      /// Begins the process of closing a channel. After this call (plus some timeout), no new HTLCs
 +      /// will be accepted on the given channel, and after additional timeout/the closing of all
 +      /// pending HTLCs, the channel will be closed on chain.
 +      ///
 +      /// May generate a SendShutdown message event on success, which should be relayed.
 +      pub fn close_channel(&self, channel_id: &[u8; 32]) -> Result<(), APIError> {
 +              let _ = self.total_consistency_lock.read().unwrap();
 +
 +              let (mut failed_htlcs, chan_option) = {
 +                      let mut channel_state_lock = self.channel_state.lock().unwrap();
 +                      let channel_state = channel_state_lock.borrow_parts();
 +                      match channel_state.by_id.entry(channel_id.clone()) {
 +                              hash_map::Entry::Occupied(mut chan_entry) => {
 +                                      let (shutdown_msg, failed_htlcs) = chan_entry.get_mut().get_shutdown()?;
 +                                      channel_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown {
 +                                              node_id: chan_entry.get().get_their_node_id(),
 +                                              msg: shutdown_msg
 +                                      });
 +                                      if chan_entry.get().is_shutdown() {
 +                                              if let Some(short_id) = chan_entry.get().get_short_channel_id() {
 +                                                      channel_state.short_to_id.remove(&short_id);
 +                                              }
 +                                              (failed_htlcs, Some(chan_entry.remove_entry().1))
 +                                      } else { (failed_htlcs, None) }
 +                              },
 +                              hash_map::Entry::Vacant(_) => return Err(APIError::ChannelUnavailable{err: "No such channel"})
 +                      }
 +              };
 +              for htlc_source in failed_htlcs.drain(..) {
 +                      self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_source.0, &htlc_source.1, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() });
 +              }
 +              let chan_update = if let Some(chan) = chan_option {
 +                      if let Ok(update) = self.get_channel_update(&chan) {
 +                              Some(update)
 +                      } else { None }
 +              } else { None };
 +
 +              if let Some(update) = chan_update {
 +                      let mut channel_state = self.channel_state.lock().unwrap();
 +                      channel_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
 +                              msg: update
 +                      });
 +              }
 +
 +              Ok(())
 +      }
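
And the cooperative-close counterpart under the same assumed handles:

    // Begin a cooperative close; on success a SendShutdown message event is queued.
    manager.close_channel(&channel_id)?;
    peer_manager.process_events();
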
 +
 +      #[inline]
 +      fn finish_force_close_channel(&self, shutdown_res: ShutdownResult) {
 +              let (local_txn, mut failed_htlcs) = shutdown_res;
 +              log_trace!(self, "Finishing force-closure of channel with {} transactions to broadcast and {} HTLCs to fail", local_txn.len(), failed_htlcs.len());
 +              for htlc_source in failed_htlcs.drain(..) {
 +                      self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_source.0, &htlc_source.1, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() });
 +              }
 +              for tx in local_txn {
 +                      self.tx_broadcaster.broadcast_transaction(&tx);
 +              }
 +      }
 +
 +      /// Force closes a channel, immediately broadcasting the latest local commitment transaction to
 +      /// the chain and rejecting new HTLCs on the given channel.
 +      pub fn force_close_channel(&self, channel_id: &[u8; 32]) {
 +              let _ = self.total_consistency_lock.read().unwrap();
 +
 +              let mut chan = {
 +                      let mut channel_state_lock = self.channel_state.lock().unwrap();
 +                      let channel_state = channel_state_lock.borrow_parts();
 +                      if let Some(chan) = channel_state.by_id.remove(channel_id) {
 +                              if let Some(short_id) = chan.get_short_channel_id() {
 +                                      channel_state.short_to_id.remove(&short_id);
 +                              }
 +                              chan
 +                      } else {
 +                              return;
 +                      }
 +              };
 +              log_trace!(self, "Force-closing channel {}", log_bytes!(channel_id[..]));
 +              self.finish_force_close_channel(chan.force_shutdown());
 +              if let Ok(update) = self.get_channel_update(&chan) {
 +                      let mut channel_state = self.channel_state.lock().unwrap();
 +                      channel_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
 +                              msg: update
 +                      });
 +              }
 +      }
 +
 +      /// Force close all channels, immediately broadcasting the latest local commitment transaction
 +      /// for each to the chain and rejecting new HTLCs on each.
 +      pub fn force_close_all_channels(&self) {
 +              for chan in self.list_channels() {
 +                      self.force_close_channel(&chan.channel_id);
 +              }
 +      }
 +
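 +      // A 65-byte frame of zeroes; run through the onion's ChaCha20 stream below to
 +      // produce the filler appended when hop_data is shifted one hop forward.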
 +      const ZERO: [u8; 65] = [0; 65];
 +      fn decode_update_add_htlc_onion(&self, msg: &msgs::UpdateAddHTLC) -> (PendingHTLCStatus, MutexGuard<ChannelHolder>) {
 +              macro_rules! return_malformed_err {
 +                      ($msg: expr, $err_code: expr) => {
 +                              {
 +                                      log_info!(self, "Failed to accept/forward incoming HTLC: {}", $msg);
 +                                      return (PendingHTLCStatus::Fail(HTLCFailureMsg::Malformed(msgs::UpdateFailMalformedHTLC {
 +                                              channel_id: msg.channel_id,
 +                                              htlc_id: msg.htlc_id,
 +                                              sha256_of_onion: Sha256::hash(&msg.onion_routing_packet.hop_data).into_inner(),
 +                                              failure_code: $err_code,
 +                                      })), self.channel_state.lock().unwrap());
 +                              }
 +                      }
 +              }
 +
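 +              // BOLT 4 failure codes are bit flags composed below: BADONION (0x8000),
 +              // PERM (0x4000), NODE (0x2000), and UPDATE (0x1000), plus a small reason number.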
 +              if let Err(_) = msg.onion_routing_packet.public_key {
 +                      return_malformed_err!("invalid ephemeral pubkey", 0x8000 | 0x4000 | 6);
 +              }
 +
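 +              // ECDH between the packet's ephemeral pubkey and our node secret key yields the
 +              // per-hop shared secret; the rho/mu stream and HMAC keys are derived from it below.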
 +              let shared_secret = {
 +                      let mut arr = [0; 32];
 +                      arr.copy_from_slice(&SharedSecret::new(&msg.onion_routing_packet.public_key.unwrap(), &self.our_network_key)[..]);
 +                      arr
 +              };
 +              let (rho, mu) = onion_utils::gen_rho_mu_from_shared_secret(&shared_secret);
 +
 +              if msg.onion_routing_packet.version != 0 {
 +                      //TODO: Spec doesn't indicate if we should only hash hop_data here (and in other
 +                      //sha256_of_onion error data packets), or the entire onion_routing_packet. Either way,
 +                      //the hash doesn't really serve any purpose - in the case of hashing all data, the
 +                      //receiving node would have to brute force to figure out which version was put in the
 +                      //packet by the node that sent us the message; in the case of hashing the hop_data, the
 +                      //node knows the HMAC matched, so they already know what is there...
 +                      return_malformed_err!("Unknown onion packet version", 0x8000 | 0x4000 | 4);
 +              }
 +
 +              let mut hmac = HmacEngine::<Sha256>::new(&mu);
 +              hmac.input(&msg.onion_routing_packet.hop_data);
 +              hmac.input(&msg.payment_hash.0[..]);
 +              if !fixed_time_eq(&Hmac::from_engine(hmac).into_inner(), &msg.onion_routing_packet.hmac) {
 +                      return_malformed_err!("HMAC Check failed", 0x8000 | 0x4000 | 5);
 +              }
 +
 +              let mut channel_state = None;
 +              macro_rules! return_err {
 +                      ($msg: expr, $err_code: expr, $data: expr) => {
 +                              {
 +                                      log_info!(self, "Failed to accept/forward incoming HTLC: {}", $msg);
 +                                      if channel_state.is_none() {
 +                                              channel_state = Some(self.channel_state.lock().unwrap());
 +                                      }
 +                                      return (PendingHTLCStatus::Fail(HTLCFailureMsg::Relay(msgs::UpdateFailHTLC {
 +                                              channel_id: msg.channel_id,
 +                                              htlc_id: msg.htlc_id,
 +                                              reason: onion_utils::build_first_hop_failure_packet(&shared_secret, $err_code, $data),
 +                                      })), channel_state.unwrap());
 +                              }
 +                      }
 +              }
 +
 +              let mut chacha = ChaCha20::new(&rho, &[0u8; 8]);
 +              let next_hop_data = {
 +                      let mut decoded = [0; 65];
 +                      chacha.process(&msg.onion_routing_packet.hop_data[0..65], &mut decoded);
 +                      match msgs::OnionHopData::read(&mut Cursor::new(&decoded[..])) {
 +                              Err(err) => {
 +                                      let error_code = match err {
 +                                              msgs::DecodeError::UnknownVersion => 0x4000 | 1, // unknown realm byte
 +                                              _ => 0x2000 | 2, // Should never happen
 +                                      };
 +                                      return_err!("Unable to decode our hop data", error_code, &[0;0]);
 +                              },
 +                              Ok(msg) => msg
 +                      }
 +              };
 +
 +              let pending_forward_info = if next_hop_data.hmac == [0; 32] {
 +                              // OUR PAYMENT!
 +                              // final_expiry_too_soon
 +                              if (msg.cltv_expiry as u64) < self.latest_block_height.load(Ordering::Acquire) as u64 + (CLTV_CLAIM_BUFFER + LATENCY_GRACE_PERIOD_BLOCKS) as u64 {
 +                                      return_err!("The final CLTV expiry is too soon to handle", 17, &[0;0]);
 +                              }
 +                              // final_incorrect_htlc_amount
 +                              if next_hop_data.data.amt_to_forward > msg.amount_msat {
 +                                      return_err!("Upstream node sent less than we were supposed to receive in payment", 19, &byte_utils::be64_to_array(msg.amount_msat));
 +                              }
 +                              // final_incorrect_cltv_expiry
 +                              if next_hop_data.data.outgoing_cltv_value != msg.cltv_expiry {
 +                                      return_err!("Upstream node set CLTV to the wrong value", 18, &byte_utils::be32_to_array(msg.cltv_expiry));
 +                              }
 +
 +                              // Note that we could obviously respond immediately with an update_fulfill_htlc
 +                              // message, however that would leak that we are the recipient of this payment, so
 +                              // instead we stay symmetric with the forwarding case, only responding (after a
 +                              // delay) once they've sent us a commitment_signed!
 +
 +                              PendingHTLCStatus::Forward(PendingForwardHTLCInfo {
 +                                      onion_packet: None,
 +                                      payment_hash: msg.payment_hash.clone(),
 +                                      short_channel_id: 0,
 +                                      incoming_shared_secret: shared_secret,
 +                                      amt_to_forward: next_hop_data.data.amt_to_forward,
 +                                      outgoing_cltv_value: next_hop_data.data.outgoing_cltv_value,
 +                              })
 +                      } else {
 +                              let mut new_packet_data = [0; 20*65];
 +                              chacha.process(&msg.onion_routing_packet.hop_data[65..], &mut new_packet_data[0..19*65]);
 +                              chacha.process(&ChannelManager::ZERO[..], &mut new_packet_data[19*65..]);
 +
 +                              let mut new_pubkey = msg.onion_routing_packet.public_key.unwrap();
 +
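 +                              // Per BOLT 4, the next hop's ephemeral pubkey is the current one
 +                              // multiplied by SHA256(ephemeral_pubkey || shared_secret).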
 +                              let blinding_factor = {
 +                                      let mut sha = Sha256::engine();
 +                                      sha.input(&new_pubkey.serialize()[..]);
 +                                      sha.input(&shared_secret);
 +                                      Sha256::from_engine(sha).into_inner()
 +                              };
 +
 +                              let public_key = if let Err(e) = new_pubkey.mul_assign(&self.secp_ctx, &blinding_factor[..]) {
 +                                      Err(e)
 +                              } else { Ok(new_pubkey) };
 +
 +                              let outgoing_packet = msgs::OnionPacket {
 +                                      version: 0,
 +                                      public_key,
 +                                      hop_data: new_packet_data,
 +                                      hmac: next_hop_data.hmac.clone(),
 +                              };
 +
 +                              PendingHTLCStatus::Forward(PendingForwardHTLCInfo {
 +                                      onion_packet: Some(outgoing_packet),
 +                                      payment_hash: msg.payment_hash.clone(),
 +                                      short_channel_id: next_hop_data.data.short_channel_id,
 +                                      incoming_shared_secret: shared_secret,
 +                                      amt_to_forward: next_hop_data.data.amt_to_forward,
 +                                      outgoing_cltv_value: next_hop_data.data.outgoing_cltv_value,
 +                              })
 +                      };
 +
 +              channel_state = Some(self.channel_state.lock().unwrap());
 +              if let &PendingHTLCStatus::Forward(PendingForwardHTLCInfo { ref onion_packet, ref short_channel_id, ref amt_to_forward, ref outgoing_cltv_value, .. }) = &pending_forward_info {
 +                      if onion_packet.is_some() { // If short_channel_id is 0 here, we'll reject the HTLC in the body below
 +                              let id_option = channel_state.as_ref().unwrap().short_to_id.get(&short_channel_id).cloned();
 +                              let forwarding_id = match id_option {
 +                                      None => { // unknown_next_peer
 +                                              return_err!("Don't have available channel for forwarding as requested.", 0x4000 | 10, &[0;0]);
 +                                      },
 +                                      Some(id) => id.clone(),
 +                              };
 +                              if let Some((err, code, chan_update)) = loop {
 +                                      let chan = channel_state.as_mut().unwrap().by_id.get_mut(&forwarding_id).unwrap();
 +
 +                                      // Note that we could technically not return an error yet here and just hope
 +                                      // that the connection is reestablished or monitor updated by the time we get
 +                                      // around to doing the actual forward, but better to fail early if we can and
 +                                      // hopefully an attacker trying to path-trace payments cannot make this occur
 +                                      // on a small/per-node/per-channel scale.
 +                                      if !chan.is_live() { // channel_disabled
 +                                              break Some(("Forwarding channel is not in a ready state.", 0x1000 | 20, Some(self.get_channel_update(chan).unwrap())));
 +                                      }
 +                                      if *amt_to_forward < chan.get_their_htlc_minimum_msat() { // amount_below_minimum
 +                                              break Some(("HTLC amount was below the htlc_minimum_msat", 0x1000 | 11, Some(self.get_channel_update(chan).unwrap())));
 +                                      }
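 +                                      // Expected forwarding fee: fee_base_msat + amt_to_forward *
 +                                      // fee_proportional_millionths / 1_000_000, using checked math so
 +                                      // absurd amounts fail cleanly instead of overflowing.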
 +                                      let fee = amt_to_forward.checked_mul(chan.get_fee_proportional_millionths() as u64).and_then(|prop_fee| { (prop_fee / 1000000).checked_add(chan.get_our_fee_base_msat(&*self.fee_estimator) as u64) });
 +                                      if fee.is_none() || msg.amount_msat < fee.unwrap() || (msg.amount_msat - fee.unwrap()) < *amt_to_forward { // fee_insufficient
 +                                              break Some(("Prior hop has deviated from specified fees parameters or origin node has obsolete ones", 0x1000 | 12, Some(self.get_channel_update(chan).unwrap())));
 +                                      }
 +                                      if (msg.cltv_expiry as u64) < (*outgoing_cltv_value) as u64 + CLTV_EXPIRY_DELTA as u64 { // incorrect_cltv_expiry
 +                                              break Some(("Forwarding node has tampered with the intended HTLC values or origin node has an obsolete cltv_expiry_delta", 0x1000 | 13, Some(self.get_channel_update(chan).unwrap())));
 +                                      }
 +                                      let cur_height = self.latest_block_height.load(Ordering::Acquire) as u32 + 1;
 +                                      // We want at least LATENCY_GRACE_PERIOD_BLOCKS to fail the HTLC backwards before we have to go on chain, which itself must happen CLTV_CLAIM_BUFFER blocks before expiry
 +                                      if msg.cltv_expiry <= cur_height + CLTV_CLAIM_BUFFER + LATENCY_GRACE_PERIOD_BLOCKS as u32 { // expiry_too_soon
 +                                              break Some(("CLTV expiry is too close", 0x1000 | 14, Some(self.get_channel_update(chan).unwrap())));
 +                                      }
 +                                      if msg.cltv_expiry > cur_height + CLTV_FAR_FAR_AWAY as u32 { // expiry_too_far
 +                                              break Some(("CLTV expiry is too far in the future", 21, None));
 +                                      }
 +                                      break None;
 +                              }
 +                              {
 +                                      let mut res = Vec::with_capacity(8 + 128);
 +                                      if let Some(chan_update) = chan_update {
 +                                              if code == 0x1000 | 11 || code == 0x1000 | 12 {
 +                                                      res.extend_from_slice(&byte_utils::be64_to_array(msg.amount_msat));
 +                                              }
 +                                              else if code == 0x1000 | 13 {
 +                                                      res.extend_from_slice(&byte_utils::be32_to_array(msg.cltv_expiry));
 +                                              }
 +                                              else if code == 0x1000 | 20 {
 +                                                      res.extend_from_slice(&byte_utils::be16_to_array(chan_update.contents.flags));
 +                                              }
 +                                              res.extend_from_slice(&chan_update.encode_with_len()[..]);
 +                                      }
 +                                      return_err!(err, code, &res[..]);
 +                              }
 +                      }
 +              }
 +
 +              (pending_forward_info, channel_state.unwrap())
 +      }
 +
 +      /// Only fails if the channel does not yet have an assigned short_id.
 +      /// May be called with channel_state already locked!
 +      fn get_channel_update(&self, chan: &Channel) -> Result<msgs::ChannelUpdate, LightningError> {
 +              let short_channel_id = match chan.get_short_channel_id() {
 +                      None => return Err(LightningError{err: "Channel not yet established", action: msgs::ErrorAction::IgnoreError}),
 +                      Some(id) => id,
 +              };
 +
 +              let were_node_one = PublicKey::from_secret_key(&self.secp_ctx, &self.our_network_key).serialize()[..] < chan.get_their_node_id().serialize()[..];
 +
 +              let unsigned = msgs::UnsignedChannelUpdate {
 +                      chain_hash: self.genesis_hash,
 +                      short_channel_id: short_channel_id,
 +                      timestamp: chan.get_channel_update_count(),
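 +                      // Bit 0 is the direction (set when we are not node_one); bit 1 is the
 +                      // disable flag (set when the channel is not currently live).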
 +                      flags: (!were_node_one) as u16 | ((!chan.is_live() as u16) << 1),
 +                      cltv_expiry_delta: CLTV_EXPIRY_DELTA,
 +                      htlc_minimum_msat: chan.get_our_htlc_minimum_msat(),
 +                      fee_base_msat: chan.get_our_fee_base_msat(&*self.fee_estimator),
 +                      fee_proportional_millionths: chan.get_fee_proportional_millionths(),
 +                      excess_data: Vec::new(),
 +              };
 +
 +              let msg_hash = Sha256dHash::hash(&unsigned.encode()[..]);
 +              let sig = self.secp_ctx.sign(&hash_to_message!(&msg_hash[..]), &self.our_network_key);
 +
 +              Ok(msgs::ChannelUpdate {
 +                      signature: sig,
 +                      contents: unsigned
 +              })
 +      }
 +
 +      /// Sends a payment along a given route.
 +      ///
 +      /// Value parameters are provided via the last hop in route, see documentation for RouteHop
 +      /// fields for more info.
 +      ///
 +      /// Note that if the payment_hash already exists elsewhere (eg you're sending a duplicative
 +      /// payment), we don't do anything to stop you! We always try to ensure that if the provided
 +      /// next hop knows the preimage to payment_hash they can claim an additional amount as
 +      /// specified in the last hop in the route! Thus, you should probably do your own
 +      /// payment_preimage tracking (which you should already be doing as they represent "proof of
 +      /// payment") and prevent double-sends yourself.
 +      ///
 +      /// May generate a SendHTLCs message event on success, which should be relayed.
 +      ///
 +      /// Raises APIError::RouteError when an invalid route or forwarding parameter
 +      /// (cltv_delta, fee, node public key) is specified.
 +      /// Raises APIError::ChannelUnavailable if the next-hop channel is not available for updates
 +      /// (including due to previous monitor update failure or new permanent monitor update failure).
 +      /// Raises APIError::MonitorUpdateFailed if a new monitor update failure prevented sending the
 +      /// relevant updates.
 +      ///
 +      /// In case of APIError::RouteError/APIError::ChannelUnavailable, the payment send has failed
 +      /// and you may wish to retry via a different route immediately.
 +      /// In case of APIError::MonitorUpdateFailed, the commitment update has been irrevocably
 +      /// committed on our end and we're just waiting for a monitor update to send it. Do NOT retry
 +      /// the payment via a different route unless you intend to pay twice!
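 +      ///
 +      /// A minimal usage sketch, assuming a `route` already computed for `payment_hash`'s
 +      /// recipient and an event loop handling PaymentSent/PaymentFailed elsewhere (names
 +      /// here are illustrative):
 +      /// ```ignore
 +      /// match channel_manager.send_payment(route, payment_hash) {
 +      ///     Ok(()) => {}, // HTLC in flight; watch for PaymentSent/PaymentFailed events.
 +      ///     Err(APIError::MonitorUpdateFailed) => {
 +      ///         // Irrevocably committed on our end; do NOT retry via another route.
 +      ///     },
 +      ///     Err(_) => {
 +      ///         // RouteError/ChannelUnavailable: safe to retry via a different route.
 +      ///     },
 +      /// }
 +      /// ```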
 +      pub fn send_payment(&self, route: Route, payment_hash: PaymentHash) -> Result<(), APIError> {
 +              if route.hops.len() < 1 || route.hops.len() > 20 {
 +                      return Err(APIError::RouteError{err: "Route didn't go anywhere/had bogus size"});
 +              }
 +              let our_node_id = self.get_our_node_id();
 +              for (idx, hop) in route.hops.iter().enumerate() {
 +                      if idx != route.hops.len() - 1 && hop.pubkey == our_node_id {
 +                              return Err(APIError::RouteError{err: "Route went through us but wasn't a simple rebalance loop to us"});
 +                      }
 +              }
 +
 +              let session_priv = self.keys_manager.get_session_key();
 +
 +              let cur_height = self.latest_block_height.load(Ordering::Acquire) as u32 + 1;
 +
 +              let onion_keys = secp_call!(onion_utils::construct_onion_keys(&self.secp_ctx, &route, &session_priv),
 +                              APIError::RouteError{err: "Pubkey along hop was maliciously selected"});
 +              let (onion_payloads, htlc_msat, htlc_cltv) = onion_utils::build_onion_payloads(&route, cur_height)?;
 +              let onion_packet = onion_utils::construct_onion_packet(onion_payloads, onion_keys, &payment_hash);
 +
 +              let _ = self.total_consistency_lock.read().unwrap();
 +
 +              let err: Result<(), _> = loop {
 +                      let mut channel_lock = self.channel_state.lock().unwrap();
 +
 +                      let id = match channel_lock.short_to_id.get(&route.hops.first().unwrap().short_channel_id) {
 +                              None => return Err(APIError::ChannelUnavailable{err: "No channel available with first hop!"}),
 +                              Some(id) => id.clone(),
 +                      };
 +
 +                      let channel_state = channel_lock.borrow_parts();
 +                      if let hash_map::Entry::Occupied(mut chan) = channel_state.by_id.entry(id) {
 +                              match {
 +                                      if chan.get().get_their_node_id() != route.hops.first().unwrap().pubkey {
 +                                              return Err(APIError::RouteError{err: "Node ID mismatch on first hop!"});
 +                                      }
 +                                      if !chan.get().is_live() {
 +                                              return Err(APIError::ChannelUnavailable{err: "Peer for first hop currently disconnected/pending monitor update!"});
 +                                      }
 +                                      break_chan_entry!(self, chan.get_mut().send_htlc_and_commit(htlc_msat, payment_hash.clone(), htlc_cltv, HTLCSource::OutboundRoute {
 +                                              route: route.clone(),
 +                                              session_priv: session_priv.clone(),
 +                                              first_hop_htlc_msat: htlc_msat,
 +                                      }, onion_packet), channel_state, chan)
 +                              } {
 +                                      Some((update_add, commitment_signed, chan_monitor)) => {
 +                                              if let Err(e) = self.monitor.add_update_monitor(chan_monitor.get_funding_txo().unwrap(), chan_monitor) {
 +                                                      maybe_break_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::CommitmentFirst, false, true);
 +                                                      // Note that MonitorUpdateFailed here indicates (per function docs)
 +                                                      // that we will resend the commitment update once monitor updating
 +                                                      // is restored, so we have to take special care that we don't return
 +                                                      // something else in case we will resend later!
 +                                                      return Err(APIError::MonitorUpdateFailed);
 +                                              }
 +
 +                                              channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
 +                                                      node_id: route.hops.first().unwrap().pubkey,
 +                                                      updates: msgs::CommitmentUpdate {
 +                                                              update_add_htlcs: vec![update_add],
 +                                                              update_fulfill_htlcs: Vec::new(),
 +                                                              update_fail_htlcs: Vec::new(),
 +                                                              update_fail_malformed_htlcs: Vec::new(),
 +                                                              update_fee: None,
 +                                                              commitment_signed,
 +                                                      },
 +                                              });
 +                                      },
 +                                      None => {},
 +                              }
 +                      } else { unreachable!(); }
 +                      return Ok(());
 +              };
 +
 +              match handle_error!(self, err) {
 +                      Ok(_) => unreachable!(),
 +                      Err(e) => {
 +                              if let msgs::ErrorAction::IgnoreError = e.action {
 +                              } else {
 +                                      log_error!(self, "Got bad keys: {}!", e.err);
 +                                      let mut channel_state = self.channel_state.lock().unwrap();
 +                                      channel_state.pending_msg_events.push(events::MessageSendEvent::HandleError {
 +                                              node_id: route.hops.first().unwrap().pubkey,
 +                                              action: e.action,
 +                                      });
 +                              }
 +                              Err(APIError::ChannelUnavailable { err: e.err })
 +                      },
 +              }
 +      }
 +
 +      /// Call this upon creation of a funding transaction for the given channel.
 +      ///
 +      /// Note that ALL inputs in the transaction pointed to by funding_txo MUST spend SegWit outputs
 +      /// or your counterparty can steal your funds!
 +      ///
 +      /// Panics if a funding transaction has already been provided for this channel.
 +      ///
 +      /// May panic if the funding_txo is duplicative with some other channel (note that this should
 +      /// be trivially prevented by using unique funding transaction keys per-channel).
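 +      ///
 +      /// A hedged sketch, assuming the funding transaction was built in response to an
 +      /// Event::FundingGenerationReady (the txid and output index below are placeholders):
 +      /// ```ignore
 +      /// // All inputs of the transaction with `funding_txid` MUST spend SegWit outputs.
 +      /// let funding_txo = OutPoint::new(funding_txid, 0);
 +      /// channel_manager.funding_transaction_generated(&temporary_channel_id, funding_txo);
 +      /// ```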
 +      pub fn funding_transaction_generated(&self, temporary_channel_id: &[u8; 32], funding_txo: OutPoint) {
 +              let _ = self.total_consistency_lock.read().unwrap();
 +
 +              let (mut chan, msg, chan_monitor) = {
 +                      let (res, chan) = {
 +                              let mut channel_state = self.channel_state.lock().unwrap();
 +                              match channel_state.by_id.remove(temporary_channel_id) {
 +                                      Some(mut chan) => {
 +                                              (chan.get_outbound_funding_created(funding_txo)
 +                                                      .map_err(|e| if let ChannelError::Close(msg) = e {
 +                                                              MsgHandleErrInternal::from_finish_shutdown(msg, chan.channel_id(), chan.force_shutdown(), None)
 +                                                      } else { unreachable!(); })
 +                                              , chan)
 +                                      },
 +                                      None => return
 +                              }
 +                      };
 +                      match handle_error!(self, res) {
 +                              Ok(funding_msg) => {
 +                                      (chan, funding_msg.0, funding_msg.1)
 +                              },
 +                              Err(e) => {
 +                                      log_error!(self, "Got bad signatures: {}!", e.err);
 +                                      let mut channel_state = self.channel_state.lock().unwrap();
 +                                      channel_state.pending_msg_events.push(events::MessageSendEvent::HandleError {
 +                                              node_id: chan.get_their_node_id(),
 +                                              action: e.action,
 +                                      });
 +                                      return;
 +                              },
 +                      }
 +              };
 +              // Because we have exclusive ownership of the channel here we can release the channel_state
 +              // lock before add_update_monitor
 +              if let Err(e) = self.monitor.add_update_monitor(chan_monitor.get_funding_txo().unwrap(), chan_monitor) {
 +                      match e {
 +                              ChannelMonitorUpdateErr::PermanentFailure => {
 +                                      match handle_error!(self, Err(MsgHandleErrInternal::from_finish_shutdown("ChannelMonitor storage failure", *temporary_channel_id, chan.force_shutdown(), None))) {
 +                                              Err(e) => {
 +                                                      log_error!(self, "Failed to store ChannelMonitor update for funding tx generation");
 +                                                      let mut channel_state = self.channel_state.lock().unwrap();
 +                                                      channel_state.pending_msg_events.push(events::MessageSendEvent::HandleError {
 +                                                              node_id: chan.get_their_node_id(),
 +                                                              action: e.action,
 +                                                      });
 +                                                      return;
 +                                              },
 +                                              Ok(()) => unreachable!(),
 +                                      }
 +                              },
 +                              ChannelMonitorUpdateErr::TemporaryFailure => {
 +                                      // It's completely fine to continue with a FundingCreated until the monitor
 +                                      // update is persisted, as long as we don't generate the FundingBroadcastSafe
 +                                      // until the monitor has been safely persisted (as funding broadcast is not,
 +                                      // in fact, safe).
 +                                      chan.monitor_update_failed(false, false, Vec::new(), Vec::new());
 +                              },
 +                      }
 +              }
 +
 +              let mut channel_state = self.channel_state.lock().unwrap();
 +              channel_state.pending_msg_events.push(events::MessageSendEvent::SendFundingCreated {
 +                      node_id: chan.get_their_node_id(),
 +                      msg: msg,
 +              });
 +              match channel_state.by_id.entry(chan.channel_id()) {
 +                      hash_map::Entry::Occupied(_) => {
 +                              panic!("Generated duplicate funding txid?");
 +                      },
 +                      hash_map::Entry::Vacant(e) => {
 +                              e.insert(chan);
 +                      }
 +              }
 +      }
 +
 +      fn get_announcement_sigs(&self, chan: &Channel) -> Option<msgs::AnnouncementSignatures> {
 +              if !chan.should_announce() { return None }
 +
 +              let (announcement, our_bitcoin_sig) = match chan.get_channel_announcement(self.get_our_node_id(), self.genesis_hash.clone()) {
 +                      Ok(res) => res,
 +                      Err(_) => return None, // Only in case of state precondition violations eg channel is closing
 +              };
 +              let msghash = hash_to_message!(&Sha256dHash::hash(&announcement.encode()[..])[..]);
 +              let our_node_sig = self.secp_ctx.sign(&msghash, &self.our_network_key);
 +
 +              Some(msgs::AnnouncementSignatures {
 +                      channel_id: chan.channel_id(),
 +                      short_channel_id: chan.get_short_channel_id().unwrap(),
 +                      node_signature: our_node_sig,
 +                      bitcoin_signature: our_bitcoin_sig,
 +              })
 +      }
 +
 +      /// Processes HTLCs which are pending waiting on random forward delay.
 +      ///
 +      /// Should only really ever be called in response to a PendingHTLCsForwardable event.
 +      /// Will likely generate further events.
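 +      ///
 +      /// A sketch of the expected driver loop, assuming event-loop and timer plumbing
 +      /// that is not shown here:
 +      /// ```ignore
 +      /// // On Event::PendingHTLCsForwardable { time_forwardable }, wait roughly that
 +      /// // long (the random delay obscures forwarding timing), then:
 +      /// channel_manager.process_pending_htlc_forwards();
 +      /// ```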
 +      pub fn process_pending_htlc_forwards(&self) {
 +              let _ = self.total_consistency_lock.read().unwrap();
 +
 +              let mut new_events = Vec::new();
 +              let mut failed_forwards = Vec::new();
 +              let mut handle_errors = Vec::new();
 +              {
 +                      let mut channel_state_lock = self.channel_state.lock().unwrap();
 +                      let channel_state = channel_state_lock.borrow_parts();
 +
 +                      for (short_chan_id, mut pending_forwards) in channel_state.forward_htlcs.drain() {
 +                              if short_chan_id != 0 {
 +                                      let forward_chan_id = match channel_state.short_to_id.get(&short_chan_id) {
 +                                              Some(chan_id) => chan_id.clone(),
 +                                              None => {
 +                                                      failed_forwards.reserve(pending_forwards.len());
 +                                                      for forward_info in pending_forwards.drain(..) {
 +                                                              match forward_info {
 +                                                                      HTLCForwardInfo::AddHTLC { prev_short_channel_id, prev_htlc_id, forward_info } => {
 +                                                                              let htlc_source = HTLCSource::PreviousHopData(HTLCPreviousHopData {
 +                                                                                      short_channel_id: prev_short_channel_id,
 +                                                                                      htlc_id: prev_htlc_id,
 +                                                                                      incoming_packet_shared_secret: forward_info.incoming_shared_secret,
 +                                                                              });
 +                                                                              failed_forwards.push((htlc_source, forward_info.payment_hash, 0x4000 | 10, None));
 +                                                                      },
 +                                                                      HTLCForwardInfo::FailHTLC { .. } => {
 +                                                                              // Channel went away before we could fail it. This implies
 +                                                                              // the channel is now on chain and our counterparty is
 +                                                                              // trying to broadcast the HTLC-Timeout, but that's their
 +                                                                              // problem, not ours.
 +                                                                      }
 +                                                              }
 +                                                      }
 +                                                      continue;
 +                                              }
 +                                      };
 +                                      if let hash_map::Entry::Occupied(mut chan) = channel_state.by_id.entry(forward_chan_id) {
 +                                              let mut add_htlc_msgs = Vec::new();
 +                                              let mut fail_htlc_msgs = Vec::new();
 +                                              for forward_info in pending_forwards.drain(..) {
 +                                                      match forward_info {
 +                                                              HTLCForwardInfo::AddHTLC { prev_short_channel_id, prev_htlc_id, forward_info } => {
 +                                                                      log_trace!(self, "Adding HTLC from short id {} with payment_hash {} to channel with short id {} after delay", prev_short_channel_id, log_bytes!(forward_info.payment_hash.0), short_chan_id);
 +                                                                      let htlc_source = HTLCSource::PreviousHopData(HTLCPreviousHopData {
 +                                                                              short_channel_id: prev_short_channel_id,
 +                                                                              htlc_id: prev_htlc_id,
 +                                                                              incoming_packet_shared_secret: forward_info.incoming_shared_secret,
 +                                                                      });
 +                                                                      match chan.get_mut().send_htlc(forward_info.amt_to_forward, forward_info.payment_hash, forward_info.outgoing_cltv_value, htlc_source.clone(), forward_info.onion_packet.unwrap()) {
 +                                                                              Err(e) => {
 +                                                                                      if let ChannelError::Ignore(msg) = e {
 +                                                                                              log_trace!(self, "Failed to forward HTLC with payment_hash {}: {}", log_bytes!(forward_info.payment_hash.0), msg);
 +                                                                                      } else {
 +                                                                                              panic!("Stated return value requirements in send_htlc() were not met");
 +                                                                                      }
 +                                                                                      let chan_update = self.get_channel_update(chan.get()).unwrap();
 +                                                                                      failed_forwards.push((htlc_source, forward_info.payment_hash, 0x1000 | 7, Some(chan_update)));
 +                                                                                      continue;
 +                                                                              },
 +                                                                              Ok(update_add) => {
 +                                                                                      match update_add {
 +                                                                                              Some(msg) => { add_htlc_msgs.push(msg); },
 +                                                                                              None => {
 +                                                                                                      // Nothing to do here...we're waiting on a remote
 +                                                                                                      // revoke_and_ack before we can add any more HTLCs. The Channel
 +                                                                                                      // will automatically handle building the update_add_htlc and
 +                                                                                                      // commitment_signed messages when we can.
 +                                                                                                      // TODO: Do some kind of timer to set the channel as !is_live()
 +                                                                                                      // as we don't really want others relying on us relaying through
 +                                                                                                      // this channel currently :/.
 +                                                                                              }
 +                                                                                      }
 +                                                                              }
 +                                                                      }
 +                                                              },
 +                                                              HTLCForwardInfo::FailHTLC { htlc_id, err_packet } => {
 +                                                                      log_trace!(self, "Failing HTLC back to channel with short id {} after delay", short_chan_id);
 +                                                                      match chan.get_mut().get_update_fail_htlc(htlc_id, err_packet) {
 +                                                                              Err(e) => {
 +                                                                                      if let ChannelError::Ignore(msg) = e {
 +                                                                                              log_trace!(self, "Failed to fail backwards to short_id {}: {}", short_chan_id, msg);
 +                                                                                      } else {
 +                                                                                              panic!("Stated return value requirements in get_update_fail_htlc() were not met");
 +                                                                                      }
 +                                                                                      // fail-backs are best-effort; we probably already have one
 +                                                                                      // pending, and if not that's OK. If we can't fail it back at
 +                                                                                      // all, the channel is on chain and sending the HTLC-Timeout
 +                                                                                      // is their problem, not ours.
 +                                                                                      continue;
 +                                                                              },
 +                                                                              Ok(Some(msg)) => { fail_htlc_msgs.push(msg); },
 +                                                                              Ok(None) => {
 +                                                                                      // Nothing to do here...we're waiting on a remote
 +                                                                                      // revoke_and_ack before we can update the commitment
 +                                                                                      // transaction. The Channel will automatically handle
 +                                                                                      // building the update_fail_htlc and commitment_signed
 +                                                                                      // messages when we can.
 +                                                                                      // We don't need any kind of timer here as they should fail
 +                                                                                      // the channel onto the chain if they can't get our
 +                                                                                      // update_fail_htlc in time; it's not our problem.
 +                                                                              }
 +                                                                      }
 +                                                              },
 +                                                      }
 +                                              }
 +
 +                                              if !add_htlc_msgs.is_empty() || !fail_htlc_msgs.is_empty() {
 +                                                      let (commitment_msg, monitor) = match chan.get_mut().send_commitment() {
 +                                                              Ok(res) => res,
 +                                                              Err(e) => {
 +                                                                      // send_commitment can only have failed due to bad keys. In that
 +                                                                      // case, close the channel and then send an error message to the
 +                                                                      // peer.
 +                                                                      let their_node_id = chan.get().get_their_node_id();
 +                                                                      let err: Result<(), _>  = match e {
 +                                                                              ChannelError::Ignore(_) => {
 +                                                                                      panic!("Stated return value requirements in send_commitment() were not met");
 +                                                                              },
 +                                                                              ChannelError::Close(msg) => {
 +                                                                                      log_trace!(self, "Closing channel {} due to Close-required error: {}", log_bytes!(chan.key()[..]), msg);
 +                                                                                      let (channel_id, mut channel) = chan.remove_entry();
 +                                                                                      if let Some(short_id) = channel.get_short_channel_id() {
 +                                                                                              channel_state.short_to_id.remove(&short_id);
 +                                                                                      }
 +                                                                                      Err(MsgHandleErrInternal::from_finish_shutdown(msg, channel_id, channel.force_shutdown(), self.get_channel_update(&channel).ok()))
 +                                                                              },
 +                                                                              ChannelError::CloseDelayBroadcast { .. } => { panic!("Wait is only generated on receipt of channel_reestablish, which is handled by try_chan_entry, we don't bother to support it here"); }
 +                                                                      };
 +                                                                      match handle_error!(self, err) {
 +                                                                              Ok(_) => unreachable!(),
 +                                                                              Err(e) => {
 +                                                                                      match e.action {
 +                                                                                              msgs::ErrorAction::IgnoreError => {},
 +                                                                                              _ => {
 +                                                                                                      log_error!(self, "Got bad keys: {}!", e.err);
 +                                                                                                      let mut channel_state = self.channel_state.lock().unwrap();
 +                                                                                                      channel_state.pending_msg_events.push(events::MessageSendEvent::HandleError {
 +                                                                                                              node_id: their_node_id,
 +                                                                                                              action: e.action,
 +                                                                                                      });
 +                                                                                              },
 +                                                                                      }
 +                                                                                      continue;
 +                                                                              },
 +                                                                      }
 +                                                              }
 +                                                      };
 +                                                      if let Err(e) = self.monitor.add_update_monitor(monitor.get_funding_txo().unwrap(), monitor) {
 +                                                              handle_errors.push((chan.get().get_their_node_id(), handle_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::CommitmentFirst, false, true)));
 +                                                              continue;
 +                                                      }
 +                                                      channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
 +                                                              node_id: chan.get().get_their_node_id(),
 +                                                              updates: msgs::CommitmentUpdate {
 +                                                                      update_add_htlcs: add_htlc_msgs,
 +                                                                      update_fulfill_htlcs: Vec::new(),
 +                                                                      update_fail_htlcs: fail_htlc_msgs,
 +                                                                      update_fail_malformed_htlcs: Vec::new(),
 +                                                                      update_fee: None,
 +                                                                      commitment_signed: commitment_msg,
 +                                                              },
 +                                                      });
 +                                              }
 +                                      } else {
 +                                              unreachable!();
 +                                      }
 +                              } else {
 +                                      for forward_info in pending_forwards.drain(..) {
 +                                              match forward_info {
 +                                                      HTLCForwardInfo::AddHTLC { prev_short_channel_id, prev_htlc_id, forward_info } => {
 +                                                              let prev_hop_data = HTLCPreviousHopData {
 +                                                                      short_channel_id: prev_short_channel_id,
 +                                                                      htlc_id: prev_htlc_id,
 +                                                                      incoming_packet_shared_secret: forward_info.incoming_shared_secret,
 +                                                              };
 +                                                              match channel_state.claimable_htlcs.entry(forward_info.payment_hash) {
 +                                                                      hash_map::Entry::Occupied(mut entry) => entry.get_mut().push((forward_info.amt_to_forward, prev_hop_data)),
 +                                                                      hash_map::Entry::Vacant(entry) => { entry.insert(vec![(forward_info.amt_to_forward, prev_hop_data)]); },
 +                                                              };
 +                                                              new_events.push(events::Event::PaymentReceived {
 +                                                                      payment_hash: forward_info.payment_hash,
 +                                                                      amt: forward_info.amt_to_forward,
 +                                                              });
 +                                                      },
 +                                                      HTLCForwardInfo::FailHTLC { .. } => {
 +                                                              panic!("Got pending fail of our own HTLC");
 +                                                      }
 +                                              }
 +                                      }
 +                              }
 +                      }
 +              }
 +
 +              for (htlc_source, payment_hash, failure_code, update) in failed_forwards.drain(..) {
 +                      match update {
 +                              None => self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_source, &payment_hash, HTLCFailReason::Reason { failure_code, data: Vec::new() }),
 +                              Some(chan_update) => self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_source, &payment_hash, HTLCFailReason::Reason { failure_code, data: chan_update.encode_with_len() }),
 +                      };
 +              }
 +
 +              for (their_node_id, err) in handle_errors.drain(..) {
 +                      match handle_error!(self, err) {
 +                              Ok(_) => {},
 +                              Err(e) => {
 +                                      if let msgs::ErrorAction::IgnoreError = e.action {
 +                                      } else {
 +                                              let mut channel_state = self.channel_state.lock().unwrap();
 +                                              channel_state.pending_msg_events.push(events::MessageSendEvent::HandleError {
 +                                                      node_id: their_node_id,
 +                                                      action: e.action,
 +                                              });
 +                                      }
 +                              },
 +                      }
 +              }
 +
 +              if new_events.is_empty() { return }
 +              let mut events = self.pending_events.lock().unwrap();
 +              events.append(&mut new_events);
 +      }
 +
 +      /// Indicates that the preimage for payment_hash is unknown or the received amount is incorrect
 +      /// after a PaymentReceived event, failing the HTLC back to its origin and freeing resources
 +      /// along the path (including in our own channel on which we received it).
 +      /// Returns false if no payment was found to fail backwards, true if the process of failing the
 +      /// HTLC backwards has been started.
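 +      ///
 +      /// A hedged sketch, assuming an application-side `expected_payments` map (purely
 +      /// illustrative) consulted on PaymentReceived:
 +      /// ```ignore
 +      /// // On Event::PaymentReceived { payment_hash, amt }:
 +      /// if expected_payments.get(&payment_hash).map_or(true, |&expected| amt < expected) {
 +      ///     // Unknown payment hash or underpayment: fail the HTLC back to its origin.
 +      ///     assert!(channel_manager.fail_htlc_backwards(&payment_hash));
 +      /// }
 +      /// ```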
 +      pub fn fail_htlc_backwards(&self, payment_hash: &PaymentHash) -> bool {
 +              let _ = self.total_consistency_lock.read().unwrap();
 +
 +              let mut channel_state = Some(self.channel_state.lock().unwrap());
 +              let removed_source = channel_state.as_mut().unwrap().claimable_htlcs.remove(payment_hash);
 +              if let Some(mut sources) = removed_source {
 +                      for (recvd_value, htlc_with_hash) in sources.drain(..) {
 +                              if channel_state.is_none() { channel_state = Some(self.channel_state.lock().unwrap()); }
 +                              self.fail_htlc_backwards_internal(channel_state.take().unwrap(),
 +                                              HTLCSource::PreviousHopData(htlc_with_hash), payment_hash,
 +                                              HTLCFailReason::Reason { failure_code: 0x4000 | 15, data: byte_utils::be64_to_array(recvd_value).to_vec() });
 +                      }
 +                      true
 +              } else { false }
 +      }
 +
 +      /// Fails an HTLC backwards to the node which sent it to us.
 +      /// Note that while we take a channel_state lock as input, we do *not* assume consistency here.
 +      /// There are several callsites that do stupid things like loop over a list of payment_hashes
 +      /// to fail and take the channel_state lock for each iteration (as we take ownership and may
 +      /// drop it). In other words, no assumptions are made that entries in claimable_htlcs point to
 +      /// still-available channels.
 +      fn fail_htlc_backwards_internal(&self, mut channel_state_lock: MutexGuard<ChannelHolder>, source: HTLCSource, payment_hash: &PaymentHash, onion_error: HTLCFailReason) {
 +              //TODO: There is a timing attack here where if a node fails an HTLC back to us they can
 +              //identify whether we sent it or not based on the (I presume) very different runtime
 +              //between the branches here. We should make this async and move it into the forward HTLCs
 +              //timer handling.
 +              match source {
 +                      HTLCSource::OutboundRoute { ref route, .. } => {
 +                              log_trace!(self, "Failing outbound payment HTLC with payment_hash {}", log_bytes!(payment_hash.0));
 +                              mem::drop(channel_state_lock);
 +                              match &onion_error {
 +                                      &HTLCFailReason::LightningError { ref err } => {
 +#[cfg(test)]
 +                                              let (channel_update, payment_retryable, onion_error_code) = onion_utils::process_onion_failure(&self.secp_ctx, &self.logger, &source, err.data.clone());
 +#[cfg(not(test))]
 +                                              let (channel_update, payment_retryable, _) = onion_utils::process_onion_failure(&self.secp_ctx, &self.logger, &source, err.data.clone());
 +                                              // TODO: If we decided to blame ourselves (or one of our channels) in
 +                                              // process_onion_failure we should close that channel as it implies our
 +                                              // next-hop is needlessly blaming us!
 +                                              if let Some(update) = channel_update {
 +                                                      self.channel_state.lock().unwrap().pending_msg_events.push(
 +                                                              events::MessageSendEvent::PaymentFailureNetworkUpdate {
 +                                                                      update,
 +                                                              }
 +                                                      );
 +                                              }
 +                                              self.pending_events.lock().unwrap().push(
 +                                                      events::Event::PaymentFailed {
 +                                                              payment_hash: payment_hash.clone(),
 +                                                              rejected_by_dest: !payment_retryable,
 +#[cfg(test)]
 +                                                              error_code: onion_error_code
 +                                                      }
 +                                              );
 +                                      },
 +                                      &HTLCFailReason::Reason {
 +#[cfg(test)]
 +                                                      ref failure_code,
 +                                                      .. } => {
 +                                              // This case is hit when we get a fail_malformed_htlc from the first hop.
 +                                              // TODO: We'd like to generate a PaymentFailureNetworkUpdate for temporary
 +                                              // failures here, but that would be insufficient as Router::get_route
 +                                              // generally ignores its view of our own channels as we provide them via
 +                                              // ChannelDetails.
 +                                              // TODO: For non-temporary failures, we really should be closing the
 +                                              // channel here as we apparently can't relay through them anyway.
 +                                              self.pending_events.lock().unwrap().push(
 +                                                      events::Event::PaymentFailed {
 +                                                              payment_hash: payment_hash.clone(),
 +                                                              rejected_by_dest: route.hops.len() == 1,
 +#[cfg(test)]
 +                                                              error_code: Some(*failure_code),
 +                                                      }
 +                                              );
 +                                      }
 +                              }
 +                      },
 +                      HTLCSource::PreviousHopData(HTLCPreviousHopData { short_channel_id, htlc_id, incoming_packet_shared_secret }) => {
 +                              let err_packet = match onion_error {
 +                                      HTLCFailReason::Reason { failure_code, data } => {
 +                                              log_trace!(self, "Failing HTLC with payment_hash {} backwards from us with code {}", log_bytes!(payment_hash.0), failure_code);
 +                                              let packet = onion_utils::build_failure_packet(&incoming_packet_shared_secret, failure_code, &data[..]).encode();
 +                                              onion_utils::encrypt_failure_packet(&incoming_packet_shared_secret, &packet)
 +                                      },
 +                                      HTLCFailReason::LightningError { err } => {
 +                                              log_trace!(self, "Failing HTLC with payment_hash {} backwards with pre-built LightningError", log_bytes!(payment_hash.0));
 +                                              onion_utils::encrypt_failure_packet(&incoming_packet_shared_secret, &err.data)
 +                                      }
 +                              };
 +
 +                              let mut forward_event = None;
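 +                              // If this is the first HTLC queued for forwarding, schedule a
 +                              // PendingHTLCsForwardable event so the user eventually calls
 +                              // process_pending_htlc_forwards after a short batching delay.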
 +                              if channel_state_lock.forward_htlcs.is_empty() {
 +                                      forward_event = Some(Duration::from_millis(MIN_HTLC_RELAY_HOLDING_CELL_MILLIS));
 +                              }
 +                              match channel_state_lock.forward_htlcs.entry(short_channel_id) {
 +                                      hash_map::Entry::Occupied(mut entry) => {
 +                                              entry.get_mut().push(HTLCForwardInfo::FailHTLC { htlc_id, err_packet });
 +                                      },
 +                                      hash_map::Entry::Vacant(entry) => {
 +                                              entry.insert(vec!(HTLCForwardInfo::FailHTLC { htlc_id, err_packet }));
 +                                      }
 +                              }
 +                              mem::drop(channel_state_lock);
 +                              if let Some(time) = forward_event {
 +                                      let mut pending_events = self.pending_events.lock().unwrap();
 +                                      pending_events.push(events::Event::PendingHTLCsForwardable {
 +                                              time_forwardable: time
 +                                      });
 +                              }
 +                      },
 +              }
 +      }
 +
 +      /// Provides a payment preimage in response to a PaymentReceived event, returning true and
 +      /// generating message events for the net layer to claim the payment, if possible. Thus, you
 +      /// should probably kick the net layer to go send messages if this returns true!
 +      ///
++      /// You must specify the expected amount for this HTLC, and we will only claim HTLCs
++      /// which pay at least that amount and no more than twice it. This is critical for two
++      /// reasons: (a) it avoids providing senders with `proof-of-payment` (in the form of the
++      /// payment_preimage) without them having provided the full value, and (b) it avoids certain
++      /// privacy-breaking recipient-probing attacks which may reveal payment activity to
++      /// motivated attackers.
++      ///
 +      /// May panic if called except in response to a PaymentReceived event.
-                       // TODO: We should require the user specify the expected amount so that we can claim
-                       // only payments for the correct amount, and reject payments for incorrect amounts
-                       // (which are probably middle nodes probing to break our privacy).
-                       for (_, htlc_with_hash) in sources.drain(..) {
++      pub fn claim_funds(&self, payment_preimage: PaymentPreimage, expected_amount: u64) -> bool {
 +              let payment_hash = PaymentHash(Sha256::hash(&payment_preimage.0).into_inner());
 +
 +              let _ = self.total_consistency_lock.read().unwrap();
 +
 +              let mut channel_state = Some(self.channel_state.lock().unwrap());
 +              let removed_source = channel_state.as_mut().unwrap().claimable_htlcs.remove(&payment_hash);
 +              if let Some(mut sources) = removed_source {
-                               self.claim_funds_internal(channel_state.take().unwrap(), HTLCSource::PreviousHopData(htlc_with_hash), payment_preimage);
++                      for (received_amount, htlc_with_hash) in sources.drain(..) {
 +                              if channel_state.is_none() { channel_state = Some(self.channel_state.lock().unwrap()); }
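++                              // Accept only values in [expected_amount, 2 * expected_amount];
++                              // anything outside that range is failed back with 0x4000|15,
++                              // attaching the received amount and the current height as data.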
++                              if received_amount < expected_amount || received_amount > expected_amount * 2 {
++                                      let mut htlc_msat_data = byte_utils::be64_to_array(received_amount).to_vec();
++                                      let mut height_data = byte_utils::be32_to_array(self.latest_block_height.load(Ordering::Acquire) as u32).to_vec();
++                                      htlc_msat_data.append(&mut height_data);
++                                      self.fail_htlc_backwards_internal(channel_state.take().unwrap(),
++                                                                       HTLCSource::PreviousHopData(htlc_with_hash), &payment_hash,
++                                                                       HTLCFailReason::Reason { failure_code: 0x4000|15, data: htlc_msat_data });
++                              } else {
++                                      self.claim_funds_internal(channel_state.take().unwrap(), HTLCSource::PreviousHopData(htlc_with_hash), payment_preimage);
++                              }
 +                      }
 +                      true
 +              } else { false }
 +      }
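 +      // Illustrative sketch (not part of this change): claiming a payment after a
 +      // PaymentReceived event, where `channel_manager` and `preimage` come from the
 +      // surrounding user code and `expected_amt` is the value the recipient asked for
 +      // (e.g. from their invoice):
 +      //
 +      //     if channel_manager.claim_funds(preimage, expected_amt) {
 +      //             // Message events were queued; drive the net layer to send them.
 +      //     }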
 +      fn claim_funds_internal(&self, mut channel_state_lock: MutexGuard<ChannelHolder>, source: HTLCSource, payment_preimage: PaymentPreimage) {
 +              let (their_node_id, err) = loop {
 +                      match source {
 +                              HTLCSource::OutboundRoute { .. } => {
 +                                      mem::drop(channel_state_lock);
 +                                      let mut pending_events = self.pending_events.lock().unwrap();
 +                                      pending_events.push(events::Event::PaymentSent {
 +                                              payment_preimage
 +                                      });
 +                              },
 +                              HTLCSource::PreviousHopData(HTLCPreviousHopData { short_channel_id, htlc_id, .. }) => {
 +                                      //TODO: Delay the claimed_funds relaying just like we do outbound relay!
 +                                      let channel_state = channel_state_lock.borrow_parts();
 +
 +                                      let chan_id = match channel_state.short_to_id.get(&short_channel_id) {
 +                                              Some(chan_id) => chan_id.clone(),
 +                                              None => {
 +                                                      // TODO: There is probably a channel monitor somewhere that needs to
 +                                                      // learn the preimage as the channel already hit the chain and that's
 +                                                      // why it's missing.
 +                                                      return
 +                                              }
 +                                      };
 +
 +                                      if let hash_map::Entry::Occupied(mut chan) = channel_state.by_id.entry(chan_id) {
 +                                              let was_frozen_for_monitor = chan.get().is_awaiting_monitor_update();
 +                                              match chan.get_mut().get_update_fulfill_htlc_and_commit(htlc_id, payment_preimage) {
 +                                                      Ok((msgs, monitor_option)) => {
 +                                                              if let Some(chan_monitor) = monitor_option {
 +                                                                      if let Err(e) = self.monitor.add_update_monitor(chan_monitor.get_funding_txo().unwrap(), chan_monitor) {
 +                                                                              if was_frozen_for_monitor {
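 +                                                                                      // The channel was already frozen for a previous monitor
 +                                                                                      // update, so the fulfill went into the holding cell and no
 +                                                                                      // new messages were generated; there is nothing to unwind.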
 +                                                                                      assert!(msgs.is_none());
 +                                                                              } else {
 +                                                                                      break (chan.get().get_their_node_id(), handle_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::CommitmentFirst, false, msgs.is_some()));
 +                                                                              }
 +                                                                      }
 +                                                              }
 +                                                              if let Some((msg, commitment_signed)) = msgs {
 +                                                                      channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
 +                                                                              node_id: chan.get().get_their_node_id(),
 +                                                                              updates: msgs::CommitmentUpdate {
 +                                                                                      update_add_htlcs: Vec::new(),
 +                                                                                      update_fulfill_htlcs: vec![msg],
 +                                                                                      update_fail_htlcs: Vec::new(),
 +                                                                                      update_fail_malformed_htlcs: Vec::new(),
 +                                                                                      update_fee: None,
 +                                                                                      commitment_signed,
 +                                                                              }
 +                                                                      });
 +                                                              }
 +                                                      },
 +                                                      Err(_e) => {
 +                                                              // TODO: There is probably a channel monitor somewhere that needs to
 +                                                              // learn the preimage as the channel may be about to hit the chain.
 +                                                              //TODO: Do something with e?
 +                                                              return
 +                                                      },
 +                                              }
 +                                      } else { unreachable!(); }
 +                              },
 +                      }
 +                      return;
 +              };
 +
 +              match handle_error!(self, err) {
 +                      Ok(_) => {},
 +                      Err(e) => {
 +                              if let msgs::ErrorAction::IgnoreError = e.action {
 +                              } else {
 +                                      let mut channel_state = self.channel_state.lock().unwrap();
 +                                      channel_state.pending_msg_events.push(events::MessageSendEvent::HandleError {
 +                                              node_id: their_node_id,
 +                                              action: e.action,
 +                                      });
 +                              }
 +                      },
 +              }
 +      }
 +
 +      /// Gets the node_id held by this ChannelManager
 +      pub fn get_our_node_id(&self) -> PublicKey {
 +              PublicKey::from_secret_key(&self.secp_ctx, &self.our_network_key)
 +      }
 +
 +      /// Used to restore channels to normal operation after a
 +      /// ChannelMonitorUpdateErr::TemporaryFailure was returned from a channel monitor update
 +      /// operation.
 +      pub fn test_restore_channel_monitor(&self) {
 +              let mut close_results = Vec::new();
 +              let mut htlc_forwards = Vec::new();
 +              let mut htlc_failures = Vec::new();
 +              let mut pending_events = Vec::new();
 +              let _ = self.total_consistency_lock.read().unwrap();
 +
 +              {
 +                      let mut channel_lock = self.channel_state.lock().unwrap();
 +                      let channel_state = channel_lock.borrow_parts();
 +                      let short_to_id = channel_state.short_to_id;
 +                      let pending_msg_events = channel_state.pending_msg_events;
 +                      channel_state.by_id.retain(|_, channel| {
 +                              if channel.is_awaiting_monitor_update() {
 +                                      let chan_monitor = channel.channel_monitor();
 +                                      if let Err(e) = self.monitor.add_update_monitor(chan_monitor.get_funding_txo().unwrap(), chan_monitor) {
 +                                              match e {
 +                                                      ChannelMonitorUpdateErr::PermanentFailure => {
 +                                                              // TODO: There may be some pending HTLCs that we intended to fail
 +                                                              // backwards when a monitor update failed. We should make sure
 +                                                              // knowledge of those gets moved into the appropriate in-memory
 +                                                              // ChannelMonitor and they get failed backwards once we get
 +                                                              // on-chain confirmations.
 +                                                              // Note I think #198 addresses this, so once it's merged a test
 +                                                              // should be written.
 +                                                              if let Some(short_id) = channel.get_short_channel_id() {
 +                                                                      short_to_id.remove(&short_id);
 +                                                              }
 +                                                              close_results.push(channel.force_shutdown());
 +                                                              if let Ok(update) = self.get_channel_update(&channel) {
 +                                                                      pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
 +                                                                              msg: update
 +                                                                      });
 +                                                              }
 +                                                              false
 +                                                      },
 +                                                      ChannelMonitorUpdateErr::TemporaryFailure => true,
 +                                              }
 +                                      } else {
 +                                              let (raa, commitment_update, order, pending_forwards, mut pending_failures, needs_broadcast_safe, funding_locked) = channel.monitor_updating_restored();
 +                                              if !pending_forwards.is_empty() {
 +                                                      htlc_forwards.push((channel.get_short_channel_id().expect("We can't have pending forwards before funding confirmation"), pending_forwards));
 +                                              }
 +                                              htlc_failures.append(&mut pending_failures);
 +
 +                                              macro_rules! handle_cs { () => {
 +                                                      if let Some(update) = commitment_update {
 +                                                              pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
 +                                                                      node_id: channel.get_their_node_id(),
 +                                                                      updates: update,
 +                                                              });
 +                                                      }
 +                                              } }
 +                                              macro_rules! handle_raa { () => {
 +                                                      if let Some(revoke_and_ack) = raa {
 +                                                              pending_msg_events.push(events::MessageSendEvent::SendRevokeAndACK {
 +                                                                      node_id: channel.get_their_node_id(),
 +                                                                      msg: revoke_and_ack,
 +                                                              });
 +                                                      }
 +                                              } }
 +                                              match order {
 +                                                      RAACommitmentOrder::CommitmentFirst => {
 +                                                              handle_cs!();
 +                                                              handle_raa!();
 +                                                      },
 +                                                      RAACommitmentOrder::RevokeAndACKFirst => {
 +                                                              handle_raa!();
 +                                                              handle_cs!();
 +                                                      },
 +                                              }
 +                                              if needs_broadcast_safe {
 +                                                      pending_events.push(events::Event::FundingBroadcastSafe {
 +                                                              funding_txo: channel.get_funding_txo().unwrap(),
 +                                                              user_channel_id: channel.get_user_id(),
 +                                                      });
 +                                              }
 +                                              if let Some(msg) = funding_locked {
 +                                                      pending_msg_events.push(events::MessageSendEvent::SendFundingLocked {
 +                                                              node_id: channel.get_their_node_id(),
 +                                                              msg,
 +                                                      });
 +                                                      if let Some(announcement_sigs) = self.get_announcement_sigs(channel) {
 +                                                              pending_msg_events.push(events::MessageSendEvent::SendAnnouncementSignatures {
 +                                                                      node_id: channel.get_their_node_id(),
 +                                                                      msg: announcement_sigs,
 +                                                              });
 +                                                      }
 +                                                      short_to_id.insert(channel.get_short_channel_id().unwrap(), channel.channel_id());
 +                                              }
 +                                              true
 +                                      }
 +                              } else { true }
 +                      });
 +              }
 +
 +              self.pending_events.lock().unwrap().append(&mut pending_events);
 +
 +              for failure in htlc_failures.drain(..) {
 +                      self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), failure.0, &failure.1, failure.2);
 +              }
 +              self.forward_htlcs(&mut htlc_forwards[..]);
 +
 +              for res in close_results.drain(..) {
 +                      self.finish_force_close_channel(res);
 +              }
 +      }
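 +
 +      // Illustrative sketch (not part of this change): in a test harness, once the
 +      // condition behind a ChannelMonitorUpdateErr::TemporaryFailure has been
 +      // resolved, one might call:
 +      //
 +      //     channel_manager.test_restore_channel_monitor();
 +      //
 +      // to replay the held-back revoke_and_ack/commitment_signed messages and resume
 +      // normal channel operation.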
 +
 +      fn internal_open_channel(&self, their_node_id: &PublicKey, their_local_features: LocalFeatures, msg: &msgs::OpenChannel) -> Result<(), MsgHandleErrInternal> {
 +              if msg.chain_hash != self.genesis_hash {
 +                      return Err(MsgHandleErrInternal::send_err_msg_no_close("Unknown genesis block hash", msg.temporary_channel_id.clone()));
 +              }
 +
 +              let channel = Channel::new_from_req(&*self.fee_estimator, &self.keys_manager, their_node_id.clone(), their_local_features, msg, 0, Arc::clone(&self.logger), &self.default_configuration)
 +                      .map_err(|e| MsgHandleErrInternal::from_chan_no_close(e, msg.temporary_channel_id))?;
 +              let mut channel_state_lock = self.channel_state.lock().unwrap();
 +              let channel_state = channel_state_lock.borrow_parts();
 +              match channel_state.by_id.entry(channel.channel_id()) {
 +                      hash_map::Entry::Occupied(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("temporary_channel_id collision!", msg.temporary_channel_id.clone())),
 +                      hash_map::Entry::Vacant(entry) => {
 +                              channel_state.pending_msg_events.push(events::MessageSendEvent::SendAcceptChannel {
 +                                      node_id: their_node_id.clone(),
 +                                      msg: channel.get_accept_channel(),
 +                              });
 +                              entry.insert(channel);
 +                      }
 +              }
 +              Ok(())
 +      }
 +
 +      fn internal_accept_channel(&self, their_node_id: &PublicKey, their_local_features: LocalFeatures, msg: &msgs::AcceptChannel) -> Result<(), MsgHandleErrInternal> {
 +              let (value, output_script, user_id) = {
 +                      let mut channel_lock = self.channel_state.lock().unwrap();
 +                      let channel_state = channel_lock.borrow_parts();
 +                      match channel_state.by_id.entry(msg.temporary_channel_id) {
 +                              hash_map::Entry::Occupied(mut chan) => {
 +                                      if chan.get().get_their_node_id() != *their_node_id {
 +                                              //TODO: see issue #153, we need consistent handling of obnoxious behavior from a random node
 +                                              return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.temporary_channel_id));
 +                                      }
 +                                      try_chan_entry!(self, chan.get_mut().accept_channel(&msg, &self.default_configuration, their_local_features), channel_state, chan);
 +                                      (chan.get().get_value_satoshis(), chan.get().get_funding_redeemscript().to_v0_p2wsh(), chan.get().get_user_id())
 +                              },
 +                              //TODO: same as above
 +                              hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.temporary_channel_id))
 +                      }
 +              };
 +              let mut pending_events = self.pending_events.lock().unwrap();
 +              pending_events.push(events::Event::FundingGenerationReady {
 +                      temporary_channel_id: msg.temporary_channel_id,
 +                      channel_value_satoshis: value,
 +                      output_script: output_script,
 +                      user_channel_id: user_id,
 +              });
 +              Ok(())
 +      }
 +
 +      fn internal_funding_created(&self, their_node_id: &PublicKey, msg: &msgs::FundingCreated) -> Result<(), MsgHandleErrInternal> {
 +              let ((funding_msg, monitor_update), mut chan) = {
 +                      let mut channel_lock = self.channel_state.lock().unwrap();
 +                      let channel_state = channel_lock.borrow_parts();
 +                      match channel_state.by_id.entry(msg.temporary_channel_id.clone()) {
 +                              hash_map::Entry::Occupied(mut chan) => {
 +                                      if chan.get().get_their_node_id() != *their_node_id {
 +                                              //TODO: here and below MsgHandleErrInternal, #153 case
 +                                              return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.temporary_channel_id));
 +                                      }
 +                                      (try_chan_entry!(self, chan.get_mut().funding_created(msg), channel_state, chan), chan.remove())
 +                              },
 +                              hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.temporary_channel_id))
 +                      }
 +              };
 +              // Because we have exclusive ownership of the channel here we can release the channel_state
 +              // lock before add_update_monitor
 +              if let Err(e) = self.monitor.add_update_monitor(monitor_update.get_funding_txo().unwrap(), monitor_update) {
 +                      match e {
 +                              ChannelMonitorUpdateErr::PermanentFailure => {
 +                                      // Note that we reply with the new channel_id in error messages if we gave up on the
 +                                      // channel, not the temporary_channel_id. This is compatible with ourselves, but the
 +                                      // spec is somewhat ambiguous here. Not a huge deal since we'll send error messages for
 +                                      // any messages referencing a previously-closed channel anyway.
 +                                      return Err(MsgHandleErrInternal::from_finish_shutdown("ChannelMonitor storage failure", funding_msg.channel_id, chan.force_shutdown(), None));
 +                              },
 +                              ChannelMonitorUpdateErr::TemporaryFailure => {
 +                                      // There's no problem signing a counterparty's funding transaction if our monitor
 +                                      // hasn't persisted to disk yet - we can't lose money on a transaction that we haven't
 +                                      // accepted payment from yet. We do, however, need to wait to send our funding_locked
 +                                      // until we have persisted our monitor.
 +                                      chan.monitor_update_failed(false, false, Vec::new(), Vec::new());
 +                              },
 +                      }
 +              }
 +              let mut channel_state_lock = self.channel_state.lock().unwrap();
 +              let channel_state = channel_state_lock.borrow_parts();
 +              match channel_state.by_id.entry(funding_msg.channel_id) {
 +                      hash_map::Entry::Occupied(_) => {
 +                              return Err(MsgHandleErrInternal::send_err_msg_no_close("Already had channel with the new channel_id", funding_msg.channel_id))
 +                      },
 +                      hash_map::Entry::Vacant(e) => {
 +                              channel_state.pending_msg_events.push(events::MessageSendEvent::SendFundingSigned {
 +                                      node_id: their_node_id.clone(),
 +                                      msg: funding_msg,
 +                              });
 +                              e.insert(chan);
 +                      }
 +              }
 +              Ok(())
 +      }
 +
 +      fn internal_funding_signed(&self, their_node_id: &PublicKey, msg: &msgs::FundingSigned) -> Result<(), MsgHandleErrInternal> {
 +              let (funding_txo, user_id) = {
 +                      let mut channel_lock = self.channel_state.lock().unwrap();
 +                      let channel_state = channel_lock.borrow_parts();
 +                      match channel_state.by_id.entry(msg.channel_id) {
 +                              hash_map::Entry::Occupied(mut chan) => {
 +                                      if chan.get().get_their_node_id() != *their_node_id {
 +                                              //TODO: here and below MsgHandleErrInternal, #153 case
 +                                              return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id));
 +                                      }
 +                                      let chan_monitor = try_chan_entry!(self, chan.get_mut().funding_signed(&msg), channel_state, chan);
 +                                      if let Err(e) = self.monitor.add_update_monitor(chan_monitor.get_funding_txo().unwrap(), chan_monitor) {
 +                                              return_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::RevokeAndACKFirst, false, false);
 +                                      }
 +                                      (chan.get().get_funding_txo().unwrap(), chan.get().get_user_id())
 +                              },
 +                              hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id))
 +                      }
 +              };
 +              let mut pending_events = self.pending_events.lock().unwrap();
 +              pending_events.push(events::Event::FundingBroadcastSafe {
 +                      funding_txo: funding_txo,
 +                      user_channel_id: user_id,
 +              });
 +              Ok(())
 +      }
 +
 +      fn internal_funding_locked(&self, their_node_id: &PublicKey, msg: &msgs::FundingLocked) -> Result<(), MsgHandleErrInternal> {
 +              let mut channel_state_lock = self.channel_state.lock().unwrap();
 +              let channel_state = channel_state_lock.borrow_parts();
 +              match channel_state.by_id.entry(msg.channel_id) {
 +                      hash_map::Entry::Occupied(mut chan) => {
 +                              if chan.get().get_their_node_id() != *their_node_id {
 +                                      //TODO: here and below MsgHandleErrInternal, #153 case
 +                                      return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id));
 +                              }
 +                              try_chan_entry!(self, chan.get_mut().funding_locked(&msg), channel_state, chan);
 +                              if let Some(announcement_sigs) = self.get_announcement_sigs(chan.get()) {
 +                                      channel_state.pending_msg_events.push(events::MessageSendEvent::SendAnnouncementSignatures {
 +                                              node_id: their_node_id.clone(),
 +                                              msg: announcement_sigs,
 +                                      });
 +                              }
 +                              Ok(())
 +                      },
 +                      hash_map::Entry::Vacant(_) => Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id))
 +              }
 +      }
 +
 +      fn internal_shutdown(&self, their_node_id: &PublicKey, msg: &msgs::Shutdown) -> Result<(), MsgHandleErrInternal> {
 +              let (mut dropped_htlcs, chan_option) = {
 +                      let mut channel_state_lock = self.channel_state.lock().unwrap();
 +                      let channel_state = channel_state_lock.borrow_parts();
 +
 +                      match channel_state.by_id.entry(msg.channel_id.clone()) {
 +                              hash_map::Entry::Occupied(mut chan_entry) => {
 +                                      if chan_entry.get().get_their_node_id() != *their_node_id {
 +                                              //TODO: here and below MsgHandleErrInternal, #153 case
 +                                              return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id));
 +                                      }
 +                                      let (shutdown, closing_signed, dropped_htlcs) = try_chan_entry!(self, chan_entry.get_mut().shutdown(&*self.fee_estimator, &msg), channel_state, chan_entry);
 +                                      if let Some(msg) = shutdown {
 +                                              channel_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown {
 +                                                      node_id: their_node_id.clone(),
 +                                                      msg,
 +                                              });
 +                                      }
 +                                      if let Some(msg) = closing_signed {
 +                                              channel_state.pending_msg_events.push(events::MessageSendEvent::SendClosingSigned {
 +                                                      node_id: their_node_id.clone(),
 +                                                      msg,
 +                                              });
 +                                      }
 +                                      if chan_entry.get().is_shutdown() {
 +                                              if let Some(short_id) = chan_entry.get().get_short_channel_id() {
 +                                                      channel_state.short_to_id.remove(&short_id);
 +                                              }
 +                                              (dropped_htlcs, Some(chan_entry.remove_entry().1))
 +                                      } else { (dropped_htlcs, None) }
 +                              },
 +                              hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id))
 +                      }
 +              };
 +              for htlc_source in dropped_htlcs.drain(..) {
 +                      self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_source.0, &htlc_source.1, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() });
 +              }
 +              if let Some(chan) = chan_option {
 +                      if let Ok(update) = self.get_channel_update(&chan) {
 +                              let mut channel_state = self.channel_state.lock().unwrap();
 +                              channel_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
 +                                      msg: update
 +                              });
 +                      }
 +              }
 +              Ok(())
 +      }
 +
 +      fn internal_closing_signed(&self, their_node_id: &PublicKey, msg: &msgs::ClosingSigned) -> Result<(), MsgHandleErrInternal> {
 +              let (tx, chan_option) = {
 +                      let mut channel_state_lock = self.channel_state.lock().unwrap();
 +                      let channel_state = channel_state_lock.borrow_parts();
 +                      match channel_state.by_id.entry(msg.channel_id.clone()) {
 +                              hash_map::Entry::Occupied(mut chan_entry) => {
 +                                      if chan_entry.get().get_their_node_id() != *their_node_id {
 +                                              //TODO: here and below MsgHandleErrInternal, #153 case
 +                                              return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id));
 +                                      }
 +                                      let (closing_signed, tx) = try_chan_entry!(self, chan_entry.get_mut().closing_signed(&*self.fee_estimator, &msg), channel_state, chan_entry);
 +                                      if let Some(msg) = closing_signed {
 +                                              channel_state.pending_msg_events.push(events::MessageSendEvent::SendClosingSigned {
 +                                                      node_id: their_node_id.clone(),
 +                                                      msg,
 +                                              });
 +                                      }
 +                                      if tx.is_some() {
 +                                              // We're done with this channel, we've got a signed closing transaction and
 +                                              // will send the closing_signed back to the remote peer upon return. This
 +                                              // also implies there are no pending HTLCs left on the channel, so we can
 +                                              // fully delete it from tracking (the channel monitor is still around to
 +                                              // watch for old state broadcasts)!
 +                                              if let Some(short_id) = chan_entry.get().get_short_channel_id() {
 +                                                      channel_state.short_to_id.remove(&short_id);
 +                                              }
 +                                              (tx, Some(chan_entry.remove_entry().1))
 +                                      } else { (tx, None) }
 +                              },
 +                              hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id))
 +                      }
 +              };
 +              if let Some(broadcast_tx) = tx {
 +                      self.tx_broadcaster.broadcast_transaction(&broadcast_tx);
 +              }
 +              if let Some(chan) = chan_option {
 +                      if let Ok(update) = self.get_channel_update(&chan) {
 +                              let mut channel_state = self.channel_state.lock().unwrap();
 +                              channel_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
 +                                      msg: update
 +                              });
 +                      }
 +              }
 +              Ok(())
 +      }
 +
 +      fn internal_update_add_htlc(&self, their_node_id: &PublicKey, msg: &msgs::UpdateAddHTLC) -> Result<(), MsgHandleErrInternal> {
 +              //TODO: BOLT 4 points out a specific attack where a peer may re-send an onion packet and
 +              //determine the state of the payment based on our response/if we forward anything/the time
 +              //we take to respond. We should take care to avoid allowing such an attack.
 +              //
 +              //TODO: There exists a further attack where a node may garble the onion data, forward it to
 +              //us repeatedly garbled in different ways, and compare our error messages, which are
 +              //encrypted with the same key. It's not immediately obvious how to usefully exploit that,
 +              //but we should prevent it anyway.
 +
 +              let (mut pending_forward_info, mut channel_state_lock) = self.decode_update_add_htlc_onion(msg);
 +              let channel_state = channel_state_lock.borrow_parts();
 +
 +              match channel_state.by_id.entry(msg.channel_id) {
 +                      hash_map::Entry::Occupied(mut chan) => {
 +                              if chan.get().get_their_node_id() != *their_node_id {
 +                                      //TODO: here MsgHandleErrInternal, #153 case
 +                                      return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id));
 +                              }
 +                              if !chan.get().is_usable() {
 +                                      // If the update_add is completely bogus, the call will Err and we will close,
 +                                      // but if we've sent a shutdown and they haven't acknowledged it yet, we just
 +                                      // want to reject the new HTLC and fail it backwards instead of forwarding.
 +                                      if let PendingHTLCStatus::Forward(PendingForwardHTLCInfo { incoming_shared_secret, .. }) = pending_forward_info {
 +                                              let chan_update = self.get_channel_update(chan.get());
 +                                              pending_forward_info = PendingHTLCStatus::Fail(HTLCFailureMsg::Relay(msgs::UpdateFailHTLC {
 +                                                      channel_id: msg.channel_id,
 +                                                      htlc_id: msg.htlc_id,
 +                                                      reason: if let Ok(update) = chan_update {
 +                                                              // TODO: Note that |20 is defined as "channel FROM the processing
 +                                                              // node has been disabled" (emphasis mine), which seems to imply
 +                                                              // that we can't return |20 for an inbound channel being disabled.
 +                                                              // This probably needs a spec update but should definitely be
 +                                                              // allowed.
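 +                                                              // Per BOLT 4, channel_disabled (0x1000|20) carries a u16 flags
 +                                                              // field followed by the length-prefixed channel_update.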
 +                                                              onion_utils::build_first_hop_failure_packet(&incoming_shared_secret, 0x1000|20, &{
 +                                                                      let mut res = Vec::with_capacity(8 + 128);
 +                                                                      res.extend_from_slice(&byte_utils::be16_to_array(update.contents.flags));
 +                                                                      res.extend_from_slice(&update.encode_with_len()[..]);
 +                                                                      res
 +                                                              }[..])
 +                                                      } else {
 +                                                              // This can only happen if the channel isn't in the fully-funded
 +                                                              // state yet, implying our counterparty is trying to route payments
 +                                                              // over the channel back to themselves (because no one else should
 +                                                              // know the short_id is a lightning channel yet). We should have no
 +                                                              // problem just calling this unknown_next_peer
 +                                                              onion_utils::build_first_hop_failure_packet(&incoming_shared_secret, 0x4000|10, &[])
 +                                                      },
 +                                              }));
 +                                      }
 +                              }
 +                              try_chan_entry!(self, chan.get_mut().update_add_htlc(&msg, pending_forward_info), channel_state, chan);
 +                      },
 +                      hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id))
 +              }
 +              Ok(())
 +      }
 +
 +      fn internal_update_fulfill_htlc(&self, their_node_id: &PublicKey, msg: &msgs::UpdateFulfillHTLC) -> Result<(), MsgHandleErrInternal> {
 +              let mut channel_lock = self.channel_state.lock().unwrap();
 +              let htlc_source = {
 +                      let channel_state = channel_lock.borrow_parts();
 +                      match channel_state.by_id.entry(msg.channel_id) {
 +                              hash_map::Entry::Occupied(mut chan) => {
 +                                      if chan.get().get_their_node_id() != *their_node_id {
 +                                              //TODO: here and below MsgHandleErrInternal, #153 case
 +                                              return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id));
 +                                      }
 +                                      try_chan_entry!(self, chan.get_mut().update_fulfill_htlc(&msg), channel_state, chan)
 +                              },
 +                              hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id))
 +                      }
 +              };
 +              self.claim_funds_internal(channel_lock, htlc_source, msg.payment_preimage.clone());
 +              Ok(())
 +      }
 +
 +      fn internal_update_fail_htlc(&self, their_node_id: &PublicKey, msg: &msgs::UpdateFailHTLC) -> Result<(), MsgHandleErrInternal> {
 +              let mut channel_lock = self.channel_state.lock().unwrap();
 +              let channel_state = channel_lock.borrow_parts();
 +              match channel_state.by_id.entry(msg.channel_id) {
 +                      hash_map::Entry::Occupied(mut chan) => {
 +                              if chan.get().get_their_node_id() != *their_node_id {
 +                                      //TODO: here and below MsgHandleErrInternal, #153 case
 +                                      return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id));
 +                              }
 +                              try_chan_entry!(self, chan.get_mut().update_fail_htlc(&msg, HTLCFailReason::LightningError { err: msg.reason.clone() }), channel_state, chan);
 +                      },
 +                      hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id))
 +              }
 +              Ok(())
 +      }
 +
 +      fn internal_update_fail_malformed_htlc(&self, their_node_id: &PublicKey, msg: &msgs::UpdateFailMalformedHTLC) -> Result<(), MsgHandleErrInternal> {
 +              let mut channel_lock = self.channel_state.lock().unwrap();
 +              let channel_state = channel_lock.borrow_parts();
 +              match channel_state.by_id.entry(msg.channel_id) {
 +                      hash_map::Entry::Occupied(mut chan) => {
 +                              if chan.get().get_their_node_id() != *their_node_id {
 +                                      //TODO: here and below MsgHandleErrInternal, #153 case
 +                                      return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id));
 +                              }
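 +                              // BOLT 2 requires the BADONION bit (0x8000) to be set in
 +                              // update_fail_malformed_htlc's failure_code; treat its absence as
 +                              // a protocol violation and close the channel.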
 +                              if (msg.failure_code & 0x8000) == 0 {
 +                                      try_chan_entry!(self, Err(ChannelError::Close("Got update_fail_malformed_htlc with BADONION not set")), channel_state, chan);
 +                              }
 +                              try_chan_entry!(self, chan.get_mut().update_fail_malformed_htlc(&msg, HTLCFailReason::Reason { failure_code: msg.failure_code, data: Vec::new() }), channel_state, chan);
 +                              Ok(())
 +                      },
 +                      hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id))
 +              }
 +      }
 +
 +      fn internal_commitment_signed(&self, their_node_id: &PublicKey, msg: &msgs::CommitmentSigned) -> Result<(), MsgHandleErrInternal> {
 +              let mut channel_state_lock = self.channel_state.lock().unwrap();
 +              let channel_state = channel_state_lock.borrow_parts();
 +              match channel_state.by_id.entry(msg.channel_id) {
 +                      hash_map::Entry::Occupied(mut chan) => {
 +                              if chan.get().get_their_node_id() != *their_node_id {
 +                                      //TODO: here and below MsgHandleErrInternal, #153 case
 +                                      return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id));
 +                              }
 +                              let (revoke_and_ack, commitment_signed, closing_signed, chan_monitor) =
 +                                      try_chan_entry!(self, chan.get_mut().commitment_signed(&msg, &*self.fee_estimator), channel_state, chan);
 +                              if let Err(e) = self.monitor.add_update_monitor(chan_monitor.get_funding_txo().unwrap(), chan_monitor) {
 +                                      return_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::RevokeAndACKFirst, true, commitment_signed.is_some());
 +                                      //TODO: Rebroadcast closing_signed if present on monitor update restoration
 +                              }
 +                              channel_state.pending_msg_events.push(events::MessageSendEvent::SendRevokeAndACK {
 +                                      node_id: their_node_id.clone(),
 +                                      msg: revoke_and_ack,
 +                              });
 +                              if let Some(msg) = commitment_signed {
 +                                      channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
 +                                              node_id: their_node_id.clone(),
 +                                              updates: msgs::CommitmentUpdate {
 +                                                      update_add_htlcs: Vec::new(),
 +                                                      update_fulfill_htlcs: Vec::new(),
 +                                                      update_fail_htlcs: Vec::new(),
 +                                                      update_fail_malformed_htlcs: Vec::new(),
 +                                                      update_fee: None,
 +                                                      commitment_signed: msg,
 +                                              },
 +                                      });
 +                              }
 +                              if let Some(msg) = closing_signed {
 +                                      channel_state.pending_msg_events.push(events::MessageSendEvent::SendClosingSigned {
 +                                              node_id: their_node_id.clone(),
 +                                              msg,
 +                                      });
 +                              }
 +                              Ok(())
 +                      },
 +                      hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id))
 +              }
 +      }
 +
 +      #[inline]
 +      fn forward_htlcs(&self, per_source_pending_forwards: &mut [(u64, Vec<(PendingForwardHTLCInfo, u64)>)]) {
 +              for &mut (prev_short_channel_id, ref mut pending_forwards) in per_source_pending_forwards {
 +                      let mut forward_event = None;
 +                      if !pending_forwards.is_empty() {
 +                              let mut channel_state = self.channel_state.lock().unwrap();
 +                              if channel_state.forward_htlcs.is_empty() {
 +                                      forward_event = Some(Duration::from_millis(MIN_HTLC_RELAY_HOLDING_CELL_MILLIS))
 +                              }
 +                              for (forward_info, prev_htlc_id) in pending_forwards.drain(..) {
 +                                      match channel_state.forward_htlcs.entry(forward_info.short_channel_id) {
 +                                              hash_map::Entry::Occupied(mut entry) => {
 +                                                      entry.get_mut().push(HTLCForwardInfo::AddHTLC { prev_short_channel_id, prev_htlc_id, forward_info });
 +                                              },
 +                                              hash_map::Entry::Vacant(entry) => {
 +                                                      entry.insert(vec!(HTLCForwardInfo::AddHTLC { prev_short_channel_id, prev_htlc_id, forward_info }));
 +                                              }
 +                                      }
 +                              }
 +                      }
 +                      match forward_event {
 +                              Some(time) => {
 +                                      let mut pending_events = self.pending_events.lock().unwrap();
 +                                      pending_events.push(events::Event::PendingHTLCsForwardable {
 +                                              time_forwardable: time
 +                                      });
 +                              }
 +                              None => {},
 +                      }
 +              }
 +      }
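 +
 +      // Downstream note: consumers react to the PendingHTLCsForwardable event pushed above
 +      // by waiting roughly time_forwardable and then draining the batched forwards. A sketch
 +      // of that handling (the event loop, timer, and channel_manager handle are assumptions,
 +      // not part of this change):
 +      //
 +      //     if let events::Event::PendingHTLCsForwardable { time_forwardable } = event {
 +      //         timer.sleep(time_forwardable); // batching obscures per-HTLC forward timing
 +      //         channel_manager.process_pending_htlc_forwards();
 +      //     }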
 +
 +      fn internal_revoke_and_ack(&self, their_node_id: &PublicKey, msg: &msgs::RevokeAndACK) -> Result<(), MsgHandleErrInternal> {
 +              let (pending_forwards, mut pending_failures, short_channel_id) = {
 +                      let mut channel_state_lock = self.channel_state.lock().unwrap();
 +                      let channel_state = channel_state_lock.borrow_parts();
 +                      match channel_state.by_id.entry(msg.channel_id) {
 +                              hash_map::Entry::Occupied(mut chan) => {
 +                                      if chan.get().get_their_node_id() != *their_node_id {
 +                                              //TODO: here and below MsgHandleErrInternal, #153 case
 +                                              return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id));
 +                                      }
 +                                      let was_frozen_for_monitor = chan.get().is_awaiting_monitor_update();
 +                                      let (commitment_update, pending_forwards, pending_failures, closing_signed, chan_monitor) =
 +                                              try_chan_entry!(self, chan.get_mut().revoke_and_ack(&msg, &*self.fee_estimator), channel_state, chan);
 +                                      if let Err(e) = self.monitor.add_update_monitor(chan_monitor.get_funding_txo().unwrap(), chan_monitor) {
 +                                              if was_frozen_for_monitor {
 +                                                      assert!(commitment_update.is_none() && closing_signed.is_none() && pending_forwards.is_empty() && pending_failures.is_empty());
 +                                                      return Err(MsgHandleErrInternal::ignore_no_close("Previous monitor update failure prevented responses to RAA"));
 +                                              } else {
 +                                                      return_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::CommitmentFirst, false, commitment_update.is_some(), pending_forwards, pending_failures);
 +                                              }
 +                                      }
 +                                      if let Some(updates) = commitment_update {
 +                                              channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
 +                                                      node_id: their_node_id.clone(),
 +                                                      updates,
 +                                              });
 +                                      }
 +                                      if let Some(msg) = closing_signed {
 +                                              channel_state.pending_msg_events.push(events::MessageSendEvent::SendClosingSigned {
 +                                                      node_id: their_node_id.clone(),
 +                                                      msg,
 +                                              });
 +                                      }
 +                                      (pending_forwards, pending_failures, chan.get().get_short_channel_id().expect("RAA should only work on a short-id-available channel"))
 +                              },
 +                              hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id))
 +                      }
 +              };
 +              for failure in pending_failures.drain(..) {
 +                      self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), failure.0, &failure.1, failure.2);
 +              }
 +              self.forward_htlcs(&mut [(short_channel_id, pending_forwards)]);
 +
 +              Ok(())
 +      }
 +
 +      fn internal_update_fee(&self, their_node_id: &PublicKey, msg: &msgs::UpdateFee) -> Result<(), MsgHandleErrInternal> {
 +              let mut channel_lock = self.channel_state.lock().unwrap();
 +              let channel_state = channel_lock.borrow_parts();
 +              match channel_state.by_id.entry(msg.channel_id) {
 +                      hash_map::Entry::Occupied(mut chan) => {
 +                              if chan.get().get_their_node_id() != *their_node_id {
 +                                      //TODO: here and below MsgHandleErrInternal, #153 case
 +                                      return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id));
 +                              }
 +                              try_chan_entry!(self, chan.get_mut().update_fee(&*self.fee_estimator, &msg), channel_state, chan);
 +                      },
 +                      hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id))
 +              }
 +              Ok(())
 +      }
 +
 +      fn internal_announcement_signatures(&self, their_node_id: &PublicKey, msg: &msgs::AnnouncementSignatures) -> Result<(), MsgHandleErrInternal> {
 +              let mut channel_state_lock = self.channel_state.lock().unwrap();
 +              let channel_state = channel_state_lock.borrow_parts();
 +
 +              match channel_state.by_id.entry(msg.channel_id) {
 +                      hash_map::Entry::Occupied(mut chan) => {
 +                              if chan.get().get_their_node_id() != *their_node_id {
 +                                      return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id));
 +                              }
 +                              if !chan.get().is_usable() {
 +                                      return Err(MsgHandleErrInternal::from_no_close(LightningError{err: "Got an announcement_signatures before we were ready for it", action: msgs::ErrorAction::IgnoreError}));
 +                              }
 +
 +                              let our_node_id = self.get_our_node_id();
 +                              let (announcement, our_bitcoin_sig) =
 +                                      try_chan_entry!(self, chan.get_mut().get_channel_announcement(our_node_id.clone(), self.genesis_hash.clone()), channel_state, chan);
 +
 +                              let were_node_one = announcement.node_id_1 == our_node_id;
 +                              let msghash = hash_to_message!(&Sha256dHash::hash(&announcement.encode()[..])[..]);
 +                              if self.secp_ctx.verify(&msghash, &msg.node_signature, if were_node_one { &announcement.node_id_2 } else { &announcement.node_id_1 }).is_err() ||
 +                                              self.secp_ctx.verify(&msghash, &msg.bitcoin_signature, if were_node_one { &announcement.bitcoin_key_2 } else { &announcement.bitcoin_key_1 }).is_err() {
 +                                      try_chan_entry!(self, Err(ChannelError::Close("Bad announcement_signatures node_signature")), channel_state, chan);
 +                              }
 +
 +                              let our_node_sig = self.secp_ctx.sign(&msghash, &self.our_network_key);
 +
 +                              channel_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelAnnouncement {
 +                                      msg: msgs::ChannelAnnouncement {
 +                                              node_signature_1: if were_node_one { our_node_sig } else { msg.node_signature },
 +                                              node_signature_2: if were_node_one { msg.node_signature } else { our_node_sig },
 +                                              bitcoin_signature_1: if were_node_one { our_bitcoin_sig } else { msg.bitcoin_signature },
 +                                              bitcoin_signature_2: if were_node_one { msg.bitcoin_signature } else { our_bitcoin_sig },
 +                                              contents: announcement,
 +                                      },
 +                                      update_msg: self.get_channel_update(chan.get()).unwrap(), // can only fail if we're not in a ready state
 +                              });
 +                      },
 +                      hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id))
 +              }
 +              Ok(())
 +      }
 +
 +      fn internal_channel_reestablish(&self, their_node_id: &PublicKey, msg: &msgs::ChannelReestablish) -> Result<(), MsgHandleErrInternal> {
 +              let mut channel_state_lock = self.channel_state.lock().unwrap();
 +              let channel_state = channel_state_lock.borrow_parts();
 +
 +              match channel_state.by_id.entry(msg.channel_id) {
 +                      hash_map::Entry::Occupied(mut chan) => {
 +                              if chan.get().get_their_node_id() != *their_node_id {
 +                                      return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id));
 +                              }
 +                              let (funding_locked, revoke_and_ack, commitment_update, channel_monitor, mut order, shutdown) =
 +                                      try_chan_entry!(self, chan.get_mut().channel_reestablish(msg), channel_state, chan);
 +                              if let Some(monitor) = channel_monitor {
 +                                      if let Err(e) = self.monitor.add_update_monitor(monitor.get_funding_txo().unwrap(), monitor) {
 +                                              // channel_reestablish doesn't guarantee that the order of the messages it
 +                                              // returns is sensible, but since we set which messages to re-transmit on
 +                                              // monitor update success here, we need to make sure that order is sane.
 +                                              if revoke_and_ack.is_none() {
 +                                                      order = RAACommitmentOrder::CommitmentFirst;
 +                                              }
 +                                              if commitment_update.is_none() {
 +                                                      order = RAACommitmentOrder::RevokeAndACKFirst;
 +                                              }
 +                                              return_monitor_err!(self, e, channel_state, chan, order, revoke_and_ack.is_some(), commitment_update.is_some());
 +                                              //TODO: Resend the funding_locked if needed once we get the monitor running again
 +                                      }
 +                              }
 +                              if let Some(msg) = funding_locked {
 +                                      channel_state.pending_msg_events.push(events::MessageSendEvent::SendFundingLocked {
 +                                              node_id: their_node_id.clone(),
 +                                              msg
 +                                      });
 +                              }
 +                              macro_rules! send_raa { () => {
 +                                      if let Some(msg) = revoke_and_ack {
 +                                              channel_state.pending_msg_events.push(events::MessageSendEvent::SendRevokeAndACK {
 +                                                      node_id: their_node_id.clone(),
 +                                                      msg
 +                                              });
 +                                      }
 +                              } }
 +                              macro_rules! send_cu { () => {
 +                                      if let Some(updates) = commitment_update {
 +                                              channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
 +                                                      node_id: their_node_id.clone(),
 +                                                      updates
 +                                              });
 +                                      }
 +                              } }
 +                              match order {
 +                                      RAACommitmentOrder::RevokeAndACKFirst => {
 +                                              send_raa!();
 +                                              send_cu!();
 +                                      },
 +                                      RAACommitmentOrder::CommitmentFirst => {
 +                                              send_cu!();
 +                                              send_raa!();
 +                                      },
 +                              }
 +                              if let Some(msg) = shutdown {
 +                                      channel_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown {
 +                                              node_id: their_node_id.clone(),
 +                                              msg,
 +                                      });
 +                              }
 +                              Ok(())
 +                      },
 +                      hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id))
 +              }
 +      }
 +
 +      /// Begins the update-fee process. Allowed only on an outbound channel.
 +      /// If successful, will generate an UpdateHTLCs event, so you should probably poll
 +      /// PeerManager::process_events afterwards.
 +      /// Note: This API is likely to change!
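 +      ///
 +      /// A minimal usage sketch (assuming the caller holds `channel_manager` and
 +      /// `peer_manager` handles; they are not defined in this change):
 +      ///
 +      /// ```ignore
 +      /// // Ask for a new feerate on an outbound channel, then deliver the resulting
 +      /// // update_fee/commitment_signed messages to the peer.
 +      /// channel_manager.update_fee(channel_id, feerate_per_kw)?;
 +      /// peer_manager.process_events();
 +      /// ```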
 +      #[doc(hidden)]
 +      pub fn update_fee(&self, channel_id: [u8;32], feerate_per_kw: u64) -> Result<(), APIError> {
 +              let _ = self.total_consistency_lock.read().unwrap();
 +              let their_node_id;
 +              let err: Result<(), _> = loop {
 +                      let mut channel_state_lock = self.channel_state.lock().unwrap();
 +                      let channel_state = channel_state_lock.borrow_parts();
 +
 +                      match channel_state.by_id.entry(channel_id) {
 +                              hash_map::Entry::Vacant(_) => return Err(APIError::APIMisuseError{err: "Failed to find corresponding channel"}),
 +                              hash_map::Entry::Occupied(mut chan) => {
 +                                      if !chan.get().is_outbound() {
 +                                              return Err(APIError::APIMisuseError{err: "update_fee cannot be sent for an inbound channel"});
 +                                      }
 +                                      if chan.get().is_awaiting_monitor_update() {
 +                                              return Err(APIError::MonitorUpdateFailed);
 +                                      }
 +                                      if !chan.get().is_live() {
 +                                              return Err(APIError::ChannelUnavailable{err: "Channel is either not yet fully established or peer is currently disconnected"});
 +                                      }
 +                                      their_node_id = chan.get().get_their_node_id();
 +                                      if let Some((update_fee, commitment_signed, chan_monitor)) =
 +                                                      break_chan_entry!(self, chan.get_mut().send_update_fee_and_commit(feerate_per_kw), channel_state, chan)
 +                                      {
 +                                              if let Err(_e) = self.monitor.add_update_monitor(chan_monitor.get_funding_txo().unwrap(), chan_monitor) {
 +                                                      unimplemented!();
 +                                              }
 +                                              channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
 +                                                      node_id: chan.get().get_their_node_id(),
 +                                                      updates: msgs::CommitmentUpdate {
 +                                                              update_add_htlcs: Vec::new(),
 +                                                              update_fulfill_htlcs: Vec::new(),
 +                                                              update_fail_htlcs: Vec::new(),
 +                                                              update_fail_malformed_htlcs: Vec::new(),
 +                                                              update_fee: Some(update_fee),
 +                                                              commitment_signed,
 +                                                      },
 +                                              });
 +                                      }
 +                              },
 +                      }
 +                      return Ok(())
 +              };
 +
 +              match handle_error!(self, err) {
 +                      Ok(_) => unreachable!(),
 +                      Err(e) => {
 +                              if let msgs::ErrorAction::IgnoreError = e.action {
 +                              } else {
 +                                      log_error!(self, "Got bad keys: {}!", e.err);
 +                                      let mut channel_state = self.channel_state.lock().unwrap();
 +                                      channel_state.pending_msg_events.push(events::MessageSendEvent::HandleError {
 +                                              node_id: their_node_id,
 +                                              action: e.action,
 +                                      });
 +                              }
 +                              Err(APIError::APIMisuseError { err: e.err })
 +                      },
 +              }
 +      }
 +}
 +
 +impl events::MessageSendEventsProvider for ChannelManager {
 +      fn get_and_clear_pending_msg_events(&self) -> Vec<events::MessageSendEvent> {
 +              // TODO: Event release to users and serialization is currently racy: it's very easy for a
 +              // user to serialize a ChannelManager with pending events in it and lose those events on
 +              // restart. This is doubly true for the fail/fulfill-backs from monitor events!
 +              {
 +                      //TODO: This behavior should be documented.
 +                      for htlc_update in self.monitor.fetch_pending_htlc_updated() {
 +                              if let Some(preimage) = htlc_update.payment_preimage {
 +                                      log_trace!(self, "Claiming HTLC with preimage {} from our monitor", log_bytes!(preimage.0));
 +                                      self.claim_funds_internal(self.channel_state.lock().unwrap(), htlc_update.source, preimage);
 +                              } else {
 +                                      log_trace!(self, "Failing HTLC with hash {} from our monitor", log_bytes!(htlc_update.payment_hash.0));
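 +                                      // failure_code 0x4000 | 8 is PERM|permanent_channel_failure per BOLT 4.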
 +                                      self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_update.source, &htlc_update.payment_hash, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() });
 +                              }
 +                      }
 +              }
 +
 +              let mut ret = Vec::new();
 +              let mut channel_state = self.channel_state.lock().unwrap();
 +              mem::swap(&mut ret, &mut channel_state.pending_msg_events);
 +              ret
 +      }
 +}
 +
 +impl events::EventsProvider for ChannelManager {
 +      fn get_and_clear_pending_events(&self) -> Vec<events::Event> {
 +              // TODO: Event release to users and serialization is currently racy: it's very easy for a
 +              // user to serialize a ChannelManager with pending events in it and lose those events on
 +              // restart. This is doubly true for the fail/fulfill-backs from monitor events!
 +              {
 +                      //TODO: This behavior should be documented.
 +                      for htlc_update in self.monitor.fetch_pending_htlc_updated() {
 +                              if let Some(preimage) = htlc_update.payment_preimage {
 +                                      log_trace!(self, "Claiming HTLC with preimage {} from our monitor", log_bytes!(preimage.0));
 +                                      self.claim_funds_internal(self.channel_state.lock().unwrap(), htlc_update.source, preimage);
 +                              } else {
 +                                      log_trace!(self, "Failing HTLC with hash {} from our monitor", log_bytes!(htlc_update.payment_hash.0));
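 +                                      // failure_code 0x4000 | 8 is PERM|permanent_channel_failure per BOLT 4.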
 +                                      self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_update.source, &htlc_update.payment_hash, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() });
 +                              }
 +                      }
 +              }
 +
 +              let mut ret = Vec::new();
 +              let mut pending_events = self.pending_events.lock().unwrap();
 +              mem::swap(&mut ret, &mut *pending_events);
 +              ret
 +      }
 +}
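 +
 +// A consumer-side polling sketch for the two providers above (the event loop and the
 +// channel_manager/peer_manager handles are assumptions, not part of this change):
 +//
 +//     for event in channel_manager.get_and_clear_pending_events() {
 +//         handle_event(event); // payments received/sent, forwardable HTLCs, etc.
 +//     }
 +//     peer_manager.process_events(); // drains get_and_clear_pending_msg_events internally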
 +
 +impl ChainListener for ChannelManager {
 +      fn block_connected(&self, header: &BlockHeader, height: u32, txn_matched: &[&Transaction], indexes_of_txn_matched: &[u32]) {
 +              let header_hash = header.bitcoin_hash();
 +              log_trace!(self, "Block {} at height {} connected with {} txn matched", header_hash, height, txn_matched.len());
 +              let _ = self.total_consistency_lock.read().unwrap();
 +              let mut failed_channels = Vec::new();
 +              {
 +                      let mut channel_lock = self.channel_state.lock().unwrap();
 +                      let channel_state = channel_lock.borrow_parts();
 +                      let short_to_id = channel_state.short_to_id;
 +                      let pending_msg_events = channel_state.pending_msg_events;
 +                      channel_state.by_id.retain(|_, channel| {
 +                              let chan_res = channel.block_connected(header, height, txn_matched, indexes_of_txn_matched);
 +                              if let Ok(Some(funding_locked)) = chan_res {
 +                                      pending_msg_events.push(events::MessageSendEvent::SendFundingLocked {
 +                                              node_id: channel.get_their_node_id(),
 +                                              msg: funding_locked,
 +                                      });
 +                                      if let Some(announcement_sigs) = self.get_announcement_sigs(channel) {
 +                                              pending_msg_events.push(events::MessageSendEvent::SendAnnouncementSignatures {
 +                                                      node_id: channel.get_their_node_id(),
 +                                                      msg: announcement_sigs,
 +                                              });
 +                                      }
 +                                      short_to_id.insert(channel.get_short_channel_id().unwrap(), channel.channel_id());
 +                              } else if let Err(e) = chan_res {
 +                                      pending_msg_events.push(events::MessageSendEvent::HandleError {
 +                                              node_id: channel.get_their_node_id(),
 +                                              action: msgs::ErrorAction::SendErrorMessage { msg: e },
 +                                      });
 +                                      return false;
 +                              }
 +                              if let Some(funding_txo) = channel.get_funding_txo() {
 +                                      for tx in txn_matched {
 +                                              for inp in tx.input.iter() {
 +                                                      if inp.previous_output == funding_txo.into_bitcoin_outpoint() {
 +                                                              log_trace!(self, "Detected channel-closing tx {} spending {}:{}, closing channel {}", tx.txid(), inp.previous_output.txid, inp.previous_output.vout, log_bytes!(channel.channel_id()));
 +                                                              if let Some(short_id) = channel.get_short_channel_id() {
 +                                                                      short_to_id.remove(&short_id);
 +                                                              }
 +                                                              // It looks like our counterparty went on-chain. We go ahead and
 +                                                              // broadcast our latest local state as well here, just in case it's
 +                                                              // some kind of SPV attack, though we expect these broadcasts to be dropped.
 +                                                              failed_channels.push(channel.force_shutdown());
 +                                                              if let Ok(update) = self.get_channel_update(&channel) {
 +                                                                      pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
 +                                                                              msg: update
 +                                                                      });
 +                                                              }
 +                                                              return false;
 +                                                      }
 +                                              }
 +                                      }
 +                              }
 +                              if channel.is_funding_initiated() && channel.channel_monitor().would_broadcast_at_height(height) {
 +                                      if let Some(short_id) = channel.get_short_channel_id() {
 +                                              short_to_id.remove(&short_id);
 +                                      }
 +                                      failed_channels.push(channel.force_shutdown());
 +                                      // If would_broadcast_at_height() is true, the channel_monitor will broadcast
 +                                      // the latest local tx for us, so we should skip that here (it doesn't really
 +                                      // hurt anything, but does make tests a bit simpler).
 +                                      failed_channels.last_mut().unwrap().0 = Vec::new();
 +                                      if let Ok(update) = self.get_channel_update(&channel) {
 +                                              pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
 +                                                      msg: update
 +                                              });
 +                                      }
 +                                      return false;
 +                              }
 +                              true
 +                      });
 +              }
 +              for failure in failed_channels.drain(..) {
 +                      self.finish_force_close_channel(failure);
 +              }
 +              self.latest_block_height.store(height as usize, Ordering::Release);
 +              *self.last_block_hash.try_lock().expect("block_(dis)connected must not be called in parallel") = header_hash;
 +      }
 +
 +      /// If a channel's funding transaction was reorganized out, we force-close the channel
 +      /// without letting our counterparty participate in the shutdown
 +      fn block_disconnected(&self, header: &BlockHeader, _: u32) {
 +              let _ = self.total_consistency_lock.read().unwrap();
 +              let mut failed_channels = Vec::new();
 +              {
 +                      let mut channel_lock = self.channel_state.lock().unwrap();
 +                      let channel_state = channel_lock.borrow_parts();
 +                      let short_to_id = channel_state.short_to_id;
 +                      let pending_msg_events = channel_state.pending_msg_events;
 +              channel_state.by_id.retain(|_, v| {
 +                              if v.block_disconnected(header) {
 +                                      if let Some(short_id) = v.get_short_channel_id() {
 +                                              short_to_id.remove(&short_id);
 +                                      }
 +                                      failed_channels.push(v.force_shutdown());
 +                                      if let Ok(update) = self.get_channel_update(&v) {
 +                                              pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
 +                                                      msg: update
 +                                              });
 +                                      }
 +                                      false
 +                              } else {
 +                                      true
 +                              }
 +                      });
 +              }
 +              for failure in failed_channels.drain(..) {
 +                      self.finish_force_close_channel(failure);
 +              }
 +              self.latest_block_height.fetch_sub(1, Ordering::AcqRel);
 +              *self.last_block_hash.try_lock().expect("block_(dis)connected must not be called in parallel") = header.bitcoin_hash();
 +      }
 +}
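 +
 +// Chain-driving sketch: whatever watches the chain calls the two hooks above directly.
 +// The block source below is an assumption (not part of this change); note that
 +// indexes_of_txn_matched must parallel txn_matched:
 +//
 +//     channel_manager.block_connected(&block.header, height, &matched_txn, &matched_indexes);
 +//     // ...and on reorg:
 +//     channel_manager.block_disconnected(&stale_block.header, stale_height);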
 +
 +impl ChannelMessageHandler for ChannelManager {
 +      //TODO: Handle errors and close channel (or so)
 +      fn handle_open_channel(&self, their_node_id: &PublicKey, their_local_features: LocalFeatures, msg: &msgs::OpenChannel) -> Result<(), LightningError> {
 +              let _ = self.total_consistency_lock.read().unwrap();
 +              handle_error!(self, self.internal_open_channel(their_node_id, their_local_features, msg))
 +      }
 +
 +      fn handle_accept_channel(&self, their_node_id: &PublicKey, their_local_features: LocalFeatures, msg: &msgs::AcceptChannel) -> Result<(), LightningError> {
 +              let _ = self.total_consistency_lock.read().unwrap();
 +              handle_error!(self, self.internal_accept_channel(their_node_id, their_local_features, msg))
 +      }
 +
 +      fn handle_funding_created(&self, their_node_id: &PublicKey, msg: &msgs::FundingCreated) -> Result<(), LightningError> {
 +              let _ = self.total_consistency_lock.read().unwrap();
 +              handle_error!(self, self.internal_funding_created(their_node_id, msg))
 +      }
 +
 +      fn handle_funding_signed(&self, their_node_id: &PublicKey, msg: &msgs::FundingSigned) -> Result<(), LightningError> {
 +              let _ = self.total_consistency_lock.read().unwrap();
 +              handle_error!(self, self.internal_funding_signed(their_node_id, msg))
 +      }
 +
 +      fn handle_funding_locked(&self, their_node_id: &PublicKey, msg: &msgs::FundingLocked) -> Result<(), LightningError> {
 +              let _ = self.total_consistency_lock.read().unwrap();
 +              handle_error!(self, self.internal_funding_locked(their_node_id, msg))
 +      }
 +
 +      fn handle_shutdown(&self, their_node_id: &PublicKey, msg: &msgs::Shutdown) -> Result<(), LightningError> {
 +              let _ = self.total_consistency_lock.read().unwrap();
 +              handle_error!(self, self.internal_shutdown(their_node_id, msg))
 +      }
 +
 +      fn handle_closing_signed(&self, their_node_id: &PublicKey, msg: &msgs::ClosingSigned) -> Result<(), LightningError> {
 +              let _ = self.total_consistency_lock.read().unwrap();
 +              handle_error!(self, self.internal_closing_signed(their_node_id, msg))
 +      }
 +
 +      fn handle_update_add_htlc(&self, their_node_id: &PublicKey, msg: &msgs::UpdateAddHTLC) -> Result<(), LightningError> {
 +              let _ = self.total_consistency_lock.read().unwrap();
 +              handle_error!(self, self.internal_update_add_htlc(their_node_id, msg))
 +      }
 +
 +      fn handle_update_fulfill_htlc(&self, their_node_id: &PublicKey, msg: &msgs::UpdateFulfillHTLC) -> Result<(), LightningError> {
 +              let _ = self.total_consistency_lock.read().unwrap();
 +              handle_error!(self, self.internal_update_fulfill_htlc(their_node_id, msg))
 +      }
 +
 +      fn handle_update_fail_htlc(&self, their_node_id: &PublicKey, msg: &msgs::UpdateFailHTLC) -> Result<(), LightningError> {
 +              let _ = self.total_consistency_lock.read().unwrap();
 +              handle_error!(self, self.internal_update_fail_htlc(their_node_id, msg))
 +      }
 +
 +      fn handle_update_fail_malformed_htlc(&self, their_node_id: &PublicKey, msg: &msgs::UpdateFailMalformedHTLC) -> Result<(), LightningError> {
 +              let _ = self.total_consistency_lock.read().unwrap();
 +              handle_error!(self, self.internal_update_fail_malformed_htlc(their_node_id, msg))
 +      }
 +
 +      fn handle_commitment_signed(&self, their_node_id: &PublicKey, msg: &msgs::CommitmentSigned) -> Result<(), LightningError> {
 +              let _ = self.total_consistency_lock.read().unwrap();
 +              handle_error!(self, self.internal_commitment_signed(their_node_id, msg))
 +      }
 +
 +      fn handle_revoke_and_ack(&self, their_node_id: &PublicKey, msg: &msgs::RevokeAndACK) -> Result<(), LightningError> {
 +              let _ = self.total_consistency_lock.read().unwrap();
 +              handle_error!(self, self.internal_revoke_and_ack(their_node_id, msg))
 +      }
 +
 +      fn handle_update_fee(&self, their_node_id: &PublicKey, msg: &msgs::UpdateFee) -> Result<(), LightningError> {
 +              let _ = self.total_consistency_lock.read().unwrap();
 +              handle_error!(self, self.internal_update_fee(their_node_id, msg))
 +      }
 +
 +      fn handle_announcement_signatures(&self, their_node_id: &PublicKey, msg: &msgs::AnnouncementSignatures) -> Result<(), LightningError> {
 +              let _ = self.total_consistency_lock.read().unwrap();
 +              handle_error!(self, self.internal_announcement_signatures(their_node_id, msg))
 +      }
 +
 +      fn handle_channel_reestablish(&self, their_node_id: &PublicKey, msg: &msgs::ChannelReestablish) -> Result<(), LightningError> {
 +              let _ = self.total_consistency_lock.read().unwrap();
 +              handle_error!(self, self.internal_channel_reestablish(their_node_id, msg))
 +      }
 +
 +      fn peer_disconnected(&self, their_node_id: &PublicKey, no_connection_possible: bool) {
 +              let _ = self.total_consistency_lock.read().unwrap();
 +              let mut failed_channels = Vec::new();
 +              let mut failed_payments = Vec::new();
 +              {
 +                      let mut channel_state_lock = self.channel_state.lock().unwrap();
 +                      let channel_state = channel_state_lock.borrow_parts();
 +                      let short_to_id = channel_state.short_to_id;
 +                      let pending_msg_events = channel_state.pending_msg_events;
 +                      if no_connection_possible {
 +                              log_debug!(self, "Failing all channels with {} due to no_connection_possible", log_pubkey!(their_node_id));
 +                              channel_state.by_id.retain(|_, chan| {
 +                                      if chan.get_their_node_id() == *their_node_id {
 +                                              if let Some(short_id) = chan.get_short_channel_id() {
 +                                                      short_to_id.remove(&short_id);
 +                                              }
 +                                              failed_channels.push(chan.force_shutdown());
 +                                              if let Ok(update) = self.get_channel_update(&chan) {
 +                                                      pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
 +                                                              msg: update
 +                                                      });
 +                                              }
 +                                              false
 +                                      } else {
 +                                              true
 +                                      }
 +                              });
 +                      } else {
 +                              log_debug!(self, "Marking channels with {} disconnected and generating channel_updates", log_pubkey!(their_node_id));
 +                              channel_state.by_id.retain(|_, chan| {
 +                                      if chan.get_their_node_id() == *their_node_id {
 +                                              //TODO: mark channel disabled (and maybe announce such after a timeout).
 +                                              let failed_adds = chan.remove_uncommitted_htlcs_and_mark_paused();
 +                                              if !failed_adds.is_empty() {
 +                                                      let chan_update = self.get_channel_update(&chan).map(|u| u.encode_with_len()).unwrap(); // Cannot add/recv HTLCs before we have a short_id so unwrap is safe
 +                                                      failed_payments.push((chan_update, failed_adds));
 +                                              }
 +                                              if chan.is_shutdown() {
 +                                                      if let Some(short_id) = chan.get_short_channel_id() {
 +                                                              short_to_id.remove(&short_id);
 +                                                      }
 +                                                      return false;
 +                                              }
 +                                      }
 +                                      true
 +                              })
 +                      }
 +                      pending_msg_events.retain(|msg| {
 +                              match msg {
 +                                      &events::MessageSendEvent::SendAcceptChannel { ref node_id, .. } => node_id != their_node_id,
 +                                      &events::MessageSendEvent::SendOpenChannel { ref node_id, .. } => node_id != their_node_id,
 +                                      &events::MessageSendEvent::SendFundingCreated { ref node_id, .. } => node_id != their_node_id,
 +                                      &events::MessageSendEvent::SendFundingSigned { ref node_id, .. } => node_id != their_node_id,
 +                                      &events::MessageSendEvent::SendFundingLocked { ref node_id, .. } => node_id != their_node_id,
 +                                      &events::MessageSendEvent::SendAnnouncementSignatures { ref node_id, .. } => node_id != their_node_id,
 +                                      &events::MessageSendEvent::UpdateHTLCs { ref node_id, .. } => node_id != their_node_id,
 +                                      &events::MessageSendEvent::SendRevokeAndACK { ref node_id, .. } => node_id != their_node_id,
 +                                      &events::MessageSendEvent::SendClosingSigned { ref node_id, .. } => node_id != their_node_id,
 +                                      &events::MessageSendEvent::SendShutdown { ref node_id, .. } => node_id != their_node_id,
 +                                      &events::MessageSendEvent::SendChannelReestablish { ref node_id, .. } => node_id != their_node_id,
 +                                      &events::MessageSendEvent::BroadcastChannelAnnouncement { .. } => true,
 +                                      &events::MessageSendEvent::BroadcastChannelUpdate { .. } => true,
 +                                      &events::MessageSendEvent::HandleError { ref node_id, .. } => node_id != their_node_id,
 +                                      &events::MessageSendEvent::PaymentFailureNetworkUpdate { .. } => true,
 +                              }
 +                      });
 +              }
 +              for failure in failed_channels.drain(..) {
 +                      self.finish_force_close_channel(failure);
 +              }
 +              for (chan_update, mut htlc_sources) in failed_payments {
 +                      for (htlc_source, payment_hash) in htlc_sources.drain(..) {
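 +                              // failure_code 0x1000 | 7 is UPDATE|temporary_channel_failure per BOLT 4; the
 +                              // attached channel_update lets the sender refresh its routing data.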
 +                              self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_source, &payment_hash, HTLCFailReason::Reason { failure_code: 0x1000 | 7, data: chan_update.clone() });
 +                      }
 +              }
 +      }
 +
 +      fn peer_connected(&self, their_node_id: &PublicKey) {
 +              log_debug!(self, "Generating channel_reestablish events for {}", log_pubkey!(their_node_id));
 +
 +              let _ = self.total_consistency_lock.read().unwrap();
 +              let mut channel_state_lock = self.channel_state.lock().unwrap();
 +              let channel_state = channel_state_lock.borrow_parts();
 +              let pending_msg_events = channel_state.pending_msg_events;
 +              channel_state.by_id.retain(|_, chan| {
 +                      if chan.get_their_node_id() == *their_node_id {
 +                              if !chan.have_received_message() {
 +                                      // If we created this (outbound) channel while we were disconnected from the
 +                                      // peer we probably failed to send the open_channel message, which is now
 +                                      // lost. We can't have had anything pending related to this channel, so we just
 +                                      // drop it.
 +                                      false
 +                              } else {
 +                                      pending_msg_events.push(events::MessageSendEvent::SendChannelReestablish {
 +                                              node_id: chan.get_their_node_id(),
 +                                              msg: chan.get_channel_reestablish(),
 +                                      });
 +                                      true
 +                              }
 +                      } else { true }
 +              });
 +              //TODO: Also re-broadcast announcement_signatures
 +      }
 +
 +      fn handle_error(&self, their_node_id: &PublicKey, msg: &msgs::ErrorMessage) {
 +              let _ = self.total_consistency_lock.read().unwrap();
 +
 +              if msg.channel_id == [0; 32] {
 +                      for chan in self.list_channels() {
 +                              if chan.remote_network_id == *their_node_id {
 +                                      self.force_close_channel(&chan.channel_id);
 +                              }
 +                      }
 +              } else {
 +                      self.force_close_channel(&msg.channel_id);
 +              }
 +      }
 +}
 +
 +const SERIALIZATION_VERSION: u8 = 1;
 +const MIN_SERIALIZATION_VERSION: u8 = 1;
 +
 +impl Writeable for PendingForwardHTLCInfo {
 +      fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ::std::io::Error> {
 +              self.onion_packet.write(writer)?;
 +              self.incoming_shared_secret.write(writer)?;
 +              self.payment_hash.write(writer)?;
 +              self.short_channel_id.write(writer)?;
 +              self.amt_to_forward.write(writer)?;
 +              self.outgoing_cltv_value.write(writer)?;
 +              Ok(())
 +      }
 +}
 +
 +impl<R: ::std::io::Read> Readable<R> for PendingForwardHTLCInfo {
 +      fn read(reader: &mut R) -> Result<PendingForwardHTLCInfo, DecodeError> {
 +              Ok(PendingForwardHTLCInfo {
 +                      onion_packet: Readable::read(reader)?,
 +                      incoming_shared_secret: Readable::read(reader)?,
 +                      payment_hash: Readable::read(reader)?,
 +                      short_channel_id: Readable::read(reader)?,
 +                      amt_to_forward: Readable::read(reader)?,
 +                      outgoing_cltv_value: Readable::read(reader)?,
 +              })
 +      }
 +}
 +
 +impl Writeable for HTLCFailureMsg {
 +      fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ::std::io::Error> {
 +              match self {
 +                      &HTLCFailureMsg::Relay(ref fail_msg) => {
 +                              0u8.write(writer)?;
 +                              fail_msg.write(writer)?;
 +                      },
 +                      &HTLCFailureMsg::Malformed(ref fail_msg) => {
 +                              1u8.write(writer)?;
 +                              fail_msg.write(writer)?;
 +                      }
 +              }
 +              Ok(())
 +      }
 +}
 +
 +impl<R: ::std::io::Read> Readable<R> for HTLCFailureMsg {
 +      fn read(reader: &mut R) -> Result<HTLCFailureMsg, DecodeError> {
 +              match <u8 as Readable<R>>::read(reader)? {
 +                      0 => Ok(HTLCFailureMsg::Relay(Readable::read(reader)?)),
 +                      1 => Ok(HTLCFailureMsg::Malformed(Readable::read(reader)?)),
 +                      _ => Err(DecodeError::InvalidValue),
 +              }
 +      }
 +}
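 +
 +// The enum (de)serialization impls in this section share one convention: a leading
 +// discriminant byte (0u8, 1u8, ...) tags the variant, followed by that variant's fields,
 +// and an unknown tag decodes to DecodeError::InvalidValue. The same shape for a
 +// hypothetical two-variant enum, as a sketch:
 +//
 +//     match <u8 as Readable<R>>::read(reader)? {
 +//         0 => Ok(Example::Unit),
 +//         1 => Ok(Example::Wrapped(Readable::read(reader)?)),
 +//         _ => Err(DecodeError::InvalidValue),
 +//     }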
 +
 +impl Writeable for PendingHTLCStatus {
 +      fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ::std::io::Error> {
 +              match self {
 +                      &PendingHTLCStatus::Forward(ref forward_info) => {
 +                              0u8.write(writer)?;
 +                              forward_info.write(writer)?;
 +                      },
 +                      &PendingHTLCStatus::Fail(ref fail_msg) => {
 +                              1u8.write(writer)?;
 +                              fail_msg.write(writer)?;
 +                      }
 +              }
 +              Ok(())
 +      }
 +}
 +
 +impl<R: ::std::io::Read> Readable<R> for PendingHTLCStatus {
 +      fn read(reader: &mut R) -> Result<PendingHTLCStatus, DecodeError> {
 +              match <u8 as Readable<R>>::read(reader)? {
 +                      0 => Ok(PendingHTLCStatus::Forward(Readable::read(reader)?)),
 +                      1 => Ok(PendingHTLCStatus::Fail(Readable::read(reader)?)),
 +                      _ => Err(DecodeError::InvalidValue),
 +              }
 +      }
 +}
 +
 +impl_writeable!(HTLCPreviousHopData, 0, {
 +      short_channel_id,
 +      htlc_id,
 +      incoming_packet_shared_secret
 +});
 +
 +impl Writeable for HTLCSource {
 +      fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ::std::io::Error> {
 +              match self {
 +                      &HTLCSource::PreviousHopData(ref hop_data) => {
 +                              0u8.write(writer)?;
 +                              hop_data.write(writer)?;
 +                      },
 +                      &HTLCSource::OutboundRoute { ref route, ref session_priv, ref first_hop_htlc_msat } => {
 +                              1u8.write(writer)?;
 +                              route.write(writer)?;
 +                              session_priv.write(writer)?;
 +                              first_hop_htlc_msat.write(writer)?;
 +                      }
 +              }
 +              Ok(())
 +      }
 +}
 +
 +impl<R: ::std::io::Read> Readable<R> for HTLCSource {
 +      fn read(reader: &mut R) -> Result<HTLCSource, DecodeError> {
 +              match <u8 as Readable<R>>::read(reader)? {
 +                      0 => Ok(HTLCSource::PreviousHopData(Readable::read(reader)?)),
 +                      1 => Ok(HTLCSource::OutboundRoute {
 +                              route: Readable::read(reader)?,
 +                              session_priv: Readable::read(reader)?,
 +                              first_hop_htlc_msat: Readable::read(reader)?,
 +                      }),
 +                      _ => Err(DecodeError::InvalidValue),
 +              }
 +      }
 +}
 +
 +impl Writeable for HTLCFailReason {
 +      fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ::std::io::Error> {
 +              match self {
 +                      &HTLCFailReason::LightningError { ref err } => {
 +                              0u8.write(writer)?;
 +                              err.write(writer)?;
 +                      },
 +                      &HTLCFailReason::Reason { ref failure_code, ref data } => {
 +                              1u8.write(writer)?;
 +                              failure_code.write(writer)?;
 +                              data.write(writer)?;
 +                      }
 +              }
 +              Ok(())
 +      }
 +}
 +
 +impl<R: ::std::io::Read> Readable<R> for HTLCFailReason {
 +      fn read(reader: &mut R) -> Result<HTLCFailReason, DecodeError> {
 +              match <u8 as Readable<R>>::read(reader)? {
 +                      0 => Ok(HTLCFailReason::LightningError { err: Readable::read(reader)? }),
 +                      1 => Ok(HTLCFailReason::Reason {
 +                              failure_code: Readable::read(reader)?,
 +                              data: Readable::read(reader)?,
 +                      }),
 +                      _ => Err(DecodeError::InvalidValue),
 +              }
 +      }
 +}
 +
 +impl Writeable for HTLCForwardInfo {
 +      fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ::std::io::Error> {
 +              match self {
 +                      &HTLCForwardInfo::AddHTLC { ref prev_short_channel_id, ref prev_htlc_id, ref forward_info } => {
 +                              0u8.write(writer)?;
 +                              prev_short_channel_id.write(writer)?;
 +                              prev_htlc_id.write(writer)?;
 +                              forward_info.write(writer)?;
 +                      },
 +                      &HTLCForwardInfo::FailHTLC { ref htlc_id, ref err_packet } => {
 +                              1u8.write(writer)?;
 +                              htlc_id.write(writer)?;
 +                              err_packet.write(writer)?;
 +                      },
 +              }
 +              Ok(())
 +      }
 +}
 +
 +impl<R: ::std::io::Read> Readable<R> for HTLCForwardInfo {
 +      fn read(reader: &mut R) -> Result<HTLCForwardInfo, DecodeError> {
 +              match <u8 as Readable<R>>::read(reader)? {
 +                      0 => Ok(HTLCForwardInfo::AddHTLC {
 +                              prev_short_channel_id: Readable::read(reader)?,
 +                              prev_htlc_id: Readable::read(reader)?,
 +                              forward_info: Readable::read(reader)?,
 +                      }),
 +                      1 => Ok(HTLCForwardInfo::FailHTLC {
 +                              htlc_id: Readable::read(reader)?,
 +                              err_packet: Readable::read(reader)?,
 +                      }),
 +                      _ => Err(DecodeError::InvalidValue),
 +              }
 +      }
 +}
 +
 +impl Writeable for ChannelManager {
 +      fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ::std::io::Error> {
 +              let _ = self.total_consistency_lock.write().unwrap();
 +
 +              writer.write_all(&[SERIALIZATION_VERSION; 1])?;
 +              writer.write_all(&[MIN_SERIALIZATION_VERSION; 1])?;
 +
 +              self.genesis_hash.write(writer)?;
 +              (self.latest_block_height.load(Ordering::Acquire) as u32).write(writer)?;
 +              self.last_block_hash.lock().unwrap().write(writer)?;
 +
 +              let channel_state = self.channel_state.lock().unwrap();
 +              let mut unfunded_channels = 0;
 +              for (_, channel) in channel_state.by_id.iter() {
 +                      if !channel.is_funding_initiated() {
 +                              unfunded_channels += 1;
 +                      }
 +              }
 +              ((channel_state.by_id.len() - unfunded_channels) as u64).write(writer)?;
 +              for (_, channel) in channel_state.by_id.iter() {
 +                      if channel.is_funding_initiated() {
 +                              channel.write(writer)?;
 +                      }
 +              }
 +
 +              (channel_state.forward_htlcs.len() as u64).write(writer)?;
 +              for (short_channel_id, pending_forwards) in channel_state.forward_htlcs.iter() {
 +                      short_channel_id.write(writer)?;
 +                      (pending_forwards.len() as u64).write(writer)?;
 +                      for forward in pending_forwards {
 +                              forward.write(writer)?;
 +                      }
 +              }
 +
 +              (channel_state.claimable_htlcs.len() as u64).write(writer)?;
 +              for (payment_hash, previous_hops) in channel_state.claimable_htlcs.iter() {
 +                      payment_hash.write(writer)?;
 +                      (previous_hops.len() as u64).write(writer)?;
 +                      for &(recvd_amt, ref previous_hop) in previous_hops.iter() {
 +                              recvd_amt.write(writer)?;
 +                              previous_hop.write(writer)?;
 +                      }
 +              }
 +
 +              Ok(())
 +      }
 +}
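 +
 +// Serialization sketch: any util::ser::Writer sink works here. Assuming a Vec-backed
 +// writer such as the VecWriter helper the fuzz targets define (not part of this file):
 +//
 +//     let mut w = VecWriter(Vec::new());
 +//     channel_manager.write(&mut w).unwrap();
 +//     // w.0 now holds: version bytes, genesis hash, block height and hash, the funded
 +//     // channels, forward_htlcs, and claimable_htlcs, in that order.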
 +
 +/// Arguments for the creation of a ChannelManager that are not deserialized.
 +///
 +/// At a high-level, the process for deserializing a ChannelManager and resuming normal operation
 +/// is:
 +/// 1) Deserialize all stored ChannelMonitors.
 +/// 2) Deserialize the ChannelManager by filling in this struct and calling <(Sha256dHash,
 +///    ChannelManager)>::read(reader, args).
 +///    To ensure no loss of funds, this may close some Channels if the ChannelMonitor is newer
 +///    than the stored ChannelManager state. Thus, transactions may be broadcast.
 +/// 3) Register all relevant ChannelMonitor outpoints with your chain watch mechanism using
 +///    ChannelMonitor::get_monitored_outpoints and ChannelMonitor::get_funding_txo().
 +/// 4) Reconnect blocks on your ChannelMonitors.
 +/// 5) Move the ChannelMonitors into your local ManyChannelMonitor.
 +/// 6) Disconnect/connect blocks on the ChannelManager.
 +/// 7) Register the new ChannelManager with your ChainWatchInterface (this does not happen
 +///    automatically as it does in ChannelManager::new()).
 +///
 +/// A rough usage sketch of steps 1-2 follows the struct definition below.
 +pub struct ChannelManagerReadArgs<'a> {
 +      /// The keys provider which will give us relevant keys. Some keys will be loaded during
 +      /// deserialization.
 +      pub keys_manager: Arc<KeysInterface>,
 +
 +      /// The fee_estimator for use in the ChannelManager in the future.
 +      ///
 +      /// No calls to the FeeEstimator will be made during deserialization.
 +      pub fee_estimator: Arc<FeeEstimator>,
 +      /// The ManyChannelMonitor for use in the ChannelManager in the future.
 +      ///
 +      /// No calls to the ManyChannelMonitor will be made during deserialization. It is assumed that
 +      /// you have deserialized ChannelMonitors separately and will add them to your
 +      /// ManyChannelMonitor after deserializing this ChannelManager.
 +      pub monitor: Arc<ManyChannelMonitor>,
 +      /// The ChainWatchInterface for use in the ChannelManager in the future.
 +      ///
 +      /// No calls to the ChainWatchInterface will be made during deserialization.
 +      pub chain_monitor: Arc<ChainWatchInterface>,
 +      /// The BroadcasterInterface which will be used in the ChannelManager in the future and may be
 +      /// used to broadcast the latest local commitment transactions of channels which must be
 +      /// force-closed during deserialization.
 +      pub tx_broadcaster: Arc<BroadcasterInterface>,
 +      /// The Logger for use in the ChannelManager and which may be used to log information during
 +      /// deserialization.
 +      pub logger: Arc<Logger>,
 +      /// Default settings used for new channels. Any existing channels will continue to use the
 +      /// runtime settings which were stored when the ChannelManager was serialized.
 +      pub default_config: UserConfig,
 +
 +      /// A map from channel funding outpoints to ChannelMonitors for those channels (i.e.
 +      /// value.get_funding_txo() should be the key).
 +      ///
 +      /// If a monitor is inconsistent with the channel state during deserialization, the channel
 +      /// will be force-closed using the data in the ChannelMonitor and the channel will be dropped.
 +      /// The same applies to monitors whose channel is missing entirely. If, conversely, a monitor
 +      /// is missing for a channel for which we find data, Err(DecodeError::InvalidValue) will be
 +      /// returned.
 +      ///
 +      /// In such cases the latest local transactions will be sent to the tx_broadcaster included in
 +      /// this struct.
 +      pub channel_monitors: &'a HashMap<OutPoint, &'a ChannelMonitor>,
 +}
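 +
 +// An illustrative sketch of steps 1-2 above. This is not part of the library; `reader`,
 +// `persisted_monitors` and the various Arcs are hypothetical names assumed to exist in the
 +// caller's environment:
 +//
 +//     let mut monitors_by_outpoint: HashMap<OutPoint, &ChannelMonitor> = HashMap::new();
 +//     for monitor in persisted_monitors.iter() {
 +//             // channel_monitors must be keyed by each monitor's funding outpoint.
 +//             monitors_by_outpoint.insert(monitor.get_funding_txo().unwrap(), monitor);
 +//     }
 +//     let read_args = ChannelManagerReadArgs {
 +//             keys_manager, fee_estimator, monitor, chain_monitor, tx_broadcaster, logger,
 +//             default_config: UserConfig::new(),
 +//             channel_monitors: &monitors_by_outpoint,
 +//     };
 +//     // May broadcast commitment transactions for channels it must force-close:
 +//     let (last_block_hash, manager): (Sha256dHash, ChannelManager) =
 +//             ReadableArgs::read(&mut reader, read_args)?;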
 +
 +impl<'a, R : ::std::io::Read> ReadableArgs<R, ChannelManagerReadArgs<'a>> for (Sha256dHash, ChannelManager) {
 +      fn read(reader: &mut R, args: ChannelManagerReadArgs<'a>) -> Result<Self, DecodeError> {
 +              let _ver: u8 = Readable::read(reader)?;
 +              let min_ver: u8 = Readable::read(reader)?;
 +              if min_ver > SERIALIZATION_VERSION {
 +                      return Err(DecodeError::UnknownVersion);
 +              }
 +
 +              let genesis_hash: Sha256dHash = Readable::read(reader)?;
 +              let latest_block_height: u32 = Readable::read(reader)?;
 +              let last_block_hash: Sha256dHash = Readable::read(reader)?;
 +
 +              let mut closed_channels = Vec::new();
 +
 +              let channel_count: u64 = Readable::read(reader)?;
 +              let mut funding_txo_set = HashSet::with_capacity(cmp::min(channel_count as usize, 128));
 +              let mut by_id = HashMap::with_capacity(cmp::min(channel_count as usize, 128));
 +              let mut short_to_id = HashMap::with_capacity(cmp::min(channel_count as usize, 128));
 +              for _ in 0..channel_count {
 +                      let mut channel: Channel = ReadableArgs::read(reader, args.logger.clone())?;
 +                      if channel.last_block_connected != last_block_hash {
 +                              return Err(DecodeError::InvalidValue);
 +                      }
 +
 +                      let funding_txo = channel.channel_monitor().get_funding_txo().ok_or(DecodeError::InvalidValue)?;
 +                      funding_txo_set.insert(funding_txo.clone());
 +                      if let Some(monitor) = args.channel_monitors.get(&funding_txo) {
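 +                              // If the channel and its monitor disagree on any commitment
 +                              // transaction number, the monitor may have seen newer state, so we
 +                              // force-close and broadcast its latest local commitment transaction
 +                              // rather than resume the channel with stale state.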
 +                              if channel.get_cur_local_commitment_transaction_number() != monitor.get_cur_local_commitment_number() ||
 +                                              channel.get_revoked_remote_commitment_transaction_number() != monitor.get_min_seen_secret() ||
 +                                              channel.get_cur_remote_commitment_transaction_number() != monitor.get_cur_remote_commitment_number() {
 +                                      let mut force_close_res = channel.force_shutdown();
 +                                      force_close_res.0 = monitor.get_latest_local_commitment_txn();
 +                                      closed_channels.push(force_close_res);
 +                              } else {
 +                                      if let Some(short_channel_id) = channel.get_short_channel_id() {
 +                                              short_to_id.insert(short_channel_id, channel.channel_id());
 +                                      }
 +                                      by_id.insert(channel.channel_id(), channel);
 +                              }
 +                      } else {
 +                              return Err(DecodeError::InvalidValue);
 +                      }
 +              }
 +
 +              for (ref funding_txo, ref monitor) in args.channel_monitors.iter() {
 +                      if !funding_txo_set.contains(funding_txo) {
 +                              closed_channels.push((monitor.get_latest_local_commitment_txn(), Vec::new()));
 +                      }
 +              }
 +
 +              let forward_htlcs_count: u64 = Readable::read(reader)?;
 +              let mut forward_htlcs = HashMap::with_capacity(cmp::min(forward_htlcs_count as usize, 128));
 +              for _ in 0..forward_htlcs_count {
 +                      let short_channel_id = Readable::read(reader)?;
 +                      let pending_forwards_count: u64 = Readable::read(reader)?;
 +                      let mut pending_forwards = Vec::with_capacity(cmp::min(pending_forwards_count as usize, 128));
 +                      for _ in 0..pending_forwards_count {
 +                              pending_forwards.push(Readable::read(reader)?);
 +                      }
 +                      forward_htlcs.insert(short_channel_id, pending_forwards);
 +              }
 +
 +              let claimable_htlcs_count: u64 = Readable::read(reader)?;
 +              let mut claimable_htlcs = HashMap::with_capacity(cmp::min(claimable_htlcs_count as usize, 128));
 +              for _ in 0..claimable_htlcs_count {
 +                      let payment_hash = Readable::read(reader)?;
 +                      let previous_hops_len: u64 = Readable::read(reader)?;
 +                      let mut previous_hops = Vec::with_capacity(cmp::min(previous_hops_len as usize, 2));
 +                      for _ in 0..previous_hops_len {
 +                              previous_hops.push((Readable::read(reader)?, Readable::read(reader)?));
 +                      }
 +                      claimable_htlcs.insert(payment_hash, previous_hops);
 +              }
 +
 +              let channel_manager = ChannelManager {
 +                      genesis_hash,
 +                      fee_estimator: args.fee_estimator,
 +                      monitor: args.monitor,
 +                      chain_monitor: args.chain_monitor,
 +                      tx_broadcaster: args.tx_broadcaster,
 +
 +                      latest_block_height: AtomicUsize::new(latest_block_height as usize),
 +                      last_block_hash: Mutex::new(last_block_hash),
 +                      secp_ctx: Secp256k1::new(),
 +
 +                      channel_state: Mutex::new(ChannelHolder {
 +                              by_id,
 +                              short_to_id,
 +                              forward_htlcs,
 +                              claimable_htlcs,
 +                              pending_msg_events: Vec::new(),
 +                      }),
 +                      our_network_key: args.keys_manager.get_node_secret(),
 +
 +                      pending_events: Mutex::new(Vec::new()),
 +                      total_consistency_lock: RwLock::new(()),
 +                      keys_manager: args.keys_manager,
 +                      logger: args.logger,
 +                      default_configuration: args.default_config,
 +              };
 +
 +              for close_res in closed_channels.drain(..) {
 +                      channel_manager.finish_force_close_channel(close_res);
 +                      //TODO: Broadcast channel update for closed channels, but only after we've made a
 +                      //connection or two.
 +              }
 +
 +              Ok((last_block_hash.clone(), channel_manager))
 +      }
 +}
index ba30330719e1fd0bd7a2ad66f943b7b7d44f3b00,0000000000000000000000000000000000000000..7e776227aeef3871294d3726b8606bf6feacc0f7
mode 100644,000000..100644
--- /dev/null
@@@ -1,1227 -1,0 +1,1227 @@@
- pub fn claim_payment_along_route(origin_node: &Node, expected_route: &[&Node], skip_last: bool, our_payment_preimage: PaymentPreimage) {
-       assert!(expected_route.last().unwrap().node.claim_funds(our_payment_preimage));
 +//! A bunch of useful utilities for building networks of nodes and exchanging messages between
 +//! nodes for functional tests.
 +
 +use chain::chaininterface;
 +use chain::transaction::OutPoint;
 +use chain::keysinterface::KeysInterface;
 +use ln::channelmanager::{ChannelManager,RAACommitmentOrder, PaymentPreimage, PaymentHash};
 +use ln::router::{Route, Router};
 +use ln::msgs;
 +use ln::msgs::{ChannelMessageHandler,RoutingMessageHandler, LocalFeatures};
 +use util::test_utils;
 +use util::events::{Event, EventsProvider, MessageSendEvent, MessageSendEventsProvider};
 +use util::errors::APIError;
 +use util::logger::Logger;
 +use util::config::UserConfig;
 +
 +use bitcoin::util::hash::BitcoinHash;
 +use bitcoin::blockdata::block::BlockHeader;
 +use bitcoin::blockdata::transaction::{Transaction, TxOut};
 +use bitcoin::network::constants::Network;
 +
 +use bitcoin_hashes::sha256::Hash as Sha256;
 +use bitcoin_hashes::sha256d::Hash as Sha256d;
 +use bitcoin_hashes::Hash;
 +
 +use secp256k1::Secp256k1;
 +use secp256k1::key::PublicKey;
 +
 +use rand::{thread_rng,Rng};
 +
 +use std::cell::RefCell;
 +use std::rc::Rc;
 +use std::sync::{Arc, Mutex};
 +use std::mem;
 +
 +pub const CHAN_CONFIRM_DEPTH: u32 = 100;
 +pub fn confirm_transaction(chain: &chaininterface::ChainWatchInterfaceUtil, tx: &Transaction, chan_id: u32) {
 +      assert!(chain.does_match_tx(tx));
 +      let mut header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
 +      chain.block_connected_checked(&header, 1, &[tx; 1], &[chan_id; 1]);
 +      for i in 2..CHAN_CONFIRM_DEPTH {
 +              header = BlockHeader { version: 0x20000000, prev_blockhash: header.bitcoin_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
 +              chain.block_connected_checked(&header, i, &[tx; 0], &[0; 0]);
 +      }
 +}
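 +// In these tests the funding transaction's `version` doubles as the per-channel index, so
 +// funding is typically confirmed as `confirm_transaction(&node.chain_monitor, &tx, tx.version)`
 +// (see create_chan_between_nodes_with_value_confirm_first below).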
 +
 +pub fn connect_blocks(chain: &chaininterface::ChainWatchInterfaceUtil, depth: u32, height: u32, parent: bool, prev_blockhash: Sha256d) -> Sha256d {
 +      let mut header = BlockHeader { version: 0x20000000, prev_blockhash: if parent { prev_blockhash } else { Default::default() }, merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
 +      chain.block_connected_checked(&header, height + 1, &Vec::new(), &Vec::new());
 +      for i in 2..depth + 1 {
 +              header = BlockHeader { version: 0x20000000, prev_blockhash: header.bitcoin_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
 +              chain.block_connected_checked(&header, height + i, &Vec::new(), &Vec::new());
 +      }
 +      header.bitcoin_hash()
 +}
 +
 +pub struct Node {
 +      pub chain_monitor: Arc<chaininterface::ChainWatchInterfaceUtil>,
 +      pub tx_broadcaster: Arc<test_utils::TestBroadcaster>,
 +      pub chan_monitor: Arc<test_utils::TestChannelMonitor>,
 +      pub keys_manager: Arc<test_utils::TestKeysInterface>,
 +      pub node: Arc<ChannelManager>,
 +      pub router: Router,
 +      pub node_seed: [u8; 32],
 +      pub network_payment_count: Rc<RefCell<u8>>,
 +      pub network_chan_count: Rc<RefCell<u32>>,
 +}
 +impl Drop for Node {
 +      fn drop(&mut self) {
 +              if !::std::thread::panicking() {
 +                      // Check that we processed all pending events
 +                      assert!(self.node.get_and_clear_pending_msg_events().is_empty());
 +                      assert!(self.node.get_and_clear_pending_events().is_empty());
 +                      assert!(self.chan_monitor.added_monitors.lock().unwrap().is_empty());
 +              }
 +      }
 +}
 +
 +pub fn create_chan_between_nodes(node_a: &Node, node_b: &Node, a_flags: LocalFeatures, b_flags: LocalFeatures) -> (msgs::ChannelAnnouncement, msgs::ChannelUpdate, msgs::ChannelUpdate, [u8; 32], Transaction) {
 +      create_chan_between_nodes_with_value(node_a, node_b, 100000, 10001, a_flags, b_flags)
 +}
 +
 +pub fn create_chan_between_nodes_with_value(node_a: &Node, node_b: &Node, channel_value: u64, push_msat: u64, a_flags: LocalFeatures, b_flags: LocalFeatures) -> (msgs::ChannelAnnouncement, msgs::ChannelUpdate, msgs::ChannelUpdate, [u8; 32], Transaction) {
 +      let (funding_locked, channel_id, tx) = create_chan_between_nodes_with_value_a(node_a, node_b, channel_value, push_msat, a_flags, b_flags);
 +      let (announcement, as_update, bs_update) = create_chan_between_nodes_with_value_b(node_a, node_b, &funding_locked);
 +      (announcement, as_update, bs_update, channel_id, tx)
 +}
 +
 +macro_rules! get_revoke_commit_msgs {
 +      ($node: expr, $node_id: expr) => {
 +              {
 +                      let events = $node.node.get_and_clear_pending_msg_events();
 +                      assert_eq!(events.len(), 2);
 +                      (match events[0] {
 +                              MessageSendEvent::SendRevokeAndACK { ref node_id, ref msg } => {
 +                                      assert_eq!(*node_id, $node_id);
 +                                      (*msg).clone()
 +                              },
 +                              _ => panic!("Unexpected event"),
 +                      }, match events[1] {
 +                              MessageSendEvent::UpdateHTLCs { ref node_id, ref updates } => {
 +                                      assert_eq!(*node_id, $node_id);
 +                                      assert!(updates.update_add_htlcs.is_empty());
 +                                      assert!(updates.update_fulfill_htlcs.is_empty());
 +                                      assert!(updates.update_fail_htlcs.is_empty());
 +                                      assert!(updates.update_fail_malformed_htlcs.is_empty());
 +                                      assert!(updates.update_fee.is_none());
 +                                      updates.commitment_signed.clone()
 +                              },
 +                              _ => panic!("Unexpected event"),
 +                      })
 +              }
 +      }
 +}
 +
 +macro_rules! get_event_msg {
 +      ($node: expr, $event_type: path, $node_id: expr) => {
 +              {
 +                      let events = $node.node.get_and_clear_pending_msg_events();
 +                      assert_eq!(events.len(), 1);
 +                      match events[0] {
 +                              $event_type { ref node_id, ref msg } => {
 +                                      assert_eq!(*node_id, $node_id);
 +                                      (*msg).clone()
 +                              },
 +                              _ => panic!("Unexpected event"),
 +                      }
 +              }
 +      }
 +}
 +
 +macro_rules! get_htlc_update_msgs {
 +      ($node: expr, $node_id: expr) => {
 +              {
 +                      let events = $node.node.get_and_clear_pending_msg_events();
 +                      assert_eq!(events.len(), 1);
 +                      match events[0] {
 +                              MessageSendEvent::UpdateHTLCs { ref node_id, ref updates } => {
 +                                      assert_eq!(*node_id, $node_id);
 +                                      (*updates).clone()
 +                              },
 +                              _ => panic!("Unexpected event"),
 +                      }
 +              }
 +      }
 +}
 +
 +macro_rules! get_feerate {
 +      ($node: expr, $channel_id: expr) => {
 +              {
 +                      let chan_lock = $node.node.channel_state.lock().unwrap();
 +                      let chan = chan_lock.by_id.get(&$channel_id).unwrap();
 +                      chan.get_feerate()
 +              }
 +      }
 +}
 +
 +pub fn create_funding_transaction(node: &Node, expected_chan_value: u64, expected_user_chan_id: u64) -> ([u8; 32], Transaction, OutPoint) {
 +      let chan_id = *node.network_chan_count.borrow();
 +
 +      let events = node.node.get_and_clear_pending_events();
 +      assert_eq!(events.len(), 1);
 +      match events[0] {
 +              Event::FundingGenerationReady { ref temporary_channel_id, ref channel_value_satoshis, ref output_script, user_channel_id } => {
 +                      assert_eq!(*channel_value_satoshis, expected_chan_value);
 +                      assert_eq!(user_channel_id, expected_user_chan_id);
 +
 +                      let tx = Transaction { version: chan_id as u32, lock_time: 0, input: Vec::new(), output: vec![TxOut {
 +                              value: *channel_value_satoshis, script_pubkey: output_script.clone(),
 +                      }]};
 +                      let funding_outpoint = OutPoint::new(tx.txid(), 0);
 +                      (*temporary_channel_id, tx, funding_outpoint)
 +              },
 +              _ => panic!("Unexpected event"),
 +      }
 +}
 +
 +pub fn create_chan_between_nodes_with_value_init(node_a: &Node, node_b: &Node, channel_value: u64, push_msat: u64, a_flags: LocalFeatures, b_flags: LocalFeatures) -> Transaction {
 +      node_a.node.create_channel(node_b.node.get_our_node_id(), channel_value, push_msat, 42).unwrap();
 +      node_b.node.handle_open_channel(&node_a.node.get_our_node_id(), a_flags, &get_event_msg!(node_a, MessageSendEvent::SendOpenChannel, node_b.node.get_our_node_id())).unwrap();
 +      node_a.node.handle_accept_channel(&node_b.node.get_our_node_id(), b_flags, &get_event_msg!(node_b, MessageSendEvent::SendAcceptChannel, node_a.node.get_our_node_id())).unwrap();
 +
 +      let (temporary_channel_id, tx, funding_output) = create_funding_transaction(node_a, channel_value, 42);
 +
 +      {
 +              node_a.node.funding_transaction_generated(&temporary_channel_id, funding_output);
 +              let mut added_monitors = node_a.chan_monitor.added_monitors.lock().unwrap();
 +              assert_eq!(added_monitors.len(), 1);
 +              assert_eq!(added_monitors[0].0, funding_output);
 +              added_monitors.clear();
 +      }
 +
 +      node_b.node.handle_funding_created(&node_a.node.get_our_node_id(), &get_event_msg!(node_a, MessageSendEvent::SendFundingCreated, node_b.node.get_our_node_id())).unwrap();
 +      {
 +              let mut added_monitors = node_b.chan_monitor.added_monitors.lock().unwrap();
 +              assert_eq!(added_monitors.len(), 1);
 +              assert_eq!(added_monitors[0].0, funding_output);
 +              added_monitors.clear();
 +      }
 +
 +      node_a.node.handle_funding_signed(&node_b.node.get_our_node_id(), &get_event_msg!(node_b, MessageSendEvent::SendFundingSigned, node_a.node.get_our_node_id())).unwrap();
 +      {
 +              let mut added_monitors = node_a.chan_monitor.added_monitors.lock().unwrap();
 +              assert_eq!(added_monitors.len(), 1);
 +              assert_eq!(added_monitors[0].0, funding_output);
 +              added_monitors.clear();
 +      }
 +
 +      let events_4 = node_a.node.get_and_clear_pending_events();
 +      assert_eq!(events_4.len(), 1);
 +      match events_4[0] {
 +              Event::FundingBroadcastSafe { ref funding_txo, user_channel_id } => {
 +                      assert_eq!(user_channel_id, 42);
 +                      assert_eq!(*funding_txo, funding_output);
 +              },
 +              _ => panic!("Unexpected event"),
 +      };
 +
 +      tx
 +}
 +
 +pub fn create_chan_between_nodes_with_value_confirm_first(node_recv: &Node, node_conf: &Node, tx: &Transaction) {
 +      confirm_transaction(&node_conf.chain_monitor, &tx, tx.version);
 +      node_recv.node.handle_funding_locked(&node_conf.node.get_our_node_id(), &get_event_msg!(node_conf, MessageSendEvent::SendFundingLocked, node_recv.node.get_our_node_id())).unwrap();
 +}
 +
 +pub fn create_chan_between_nodes_with_value_confirm_second(node_recv: &Node, node_conf: &Node) -> ((msgs::FundingLocked, msgs::AnnouncementSignatures), [u8; 32]) {
 +      let channel_id;
 +      let events_6 = node_conf.node.get_and_clear_pending_msg_events();
 +      assert_eq!(events_6.len(), 2);
 +      ((match events_6[0] {
 +              MessageSendEvent::SendFundingLocked { ref node_id, ref msg } => {
 +                      channel_id = msg.channel_id.clone();
 +                      assert_eq!(*node_id, node_recv.node.get_our_node_id());
 +                      msg.clone()
 +              },
 +              _ => panic!("Unexpected event"),
 +      }, match events_6[1] {
 +              MessageSendEvent::SendAnnouncementSignatures { ref node_id, ref msg } => {
 +                      assert_eq!(*node_id, node_recv.node.get_our_node_id());
 +                      msg.clone()
 +              },
 +              _ => panic!("Unexpected event"),
 +      }), channel_id)
 +}
 +
 +pub fn create_chan_between_nodes_with_value_confirm(node_a: &Node, node_b: &Node, tx: &Transaction) -> ((msgs::FundingLocked, msgs::AnnouncementSignatures), [u8; 32]) {
 +      create_chan_between_nodes_with_value_confirm_first(node_a, node_b, tx);
 +      confirm_transaction(&node_a.chain_monitor, &tx, tx.version);
 +      create_chan_between_nodes_with_value_confirm_second(node_b, node_a)
 +}
 +
 +pub fn create_chan_between_nodes_with_value_a(node_a: &Node, node_b: &Node, channel_value: u64, push_msat: u64, a_flags: LocalFeatures, b_flags: LocalFeatures) -> ((msgs::FundingLocked, msgs::AnnouncementSignatures), [u8; 32], Transaction) {
 +      let tx = create_chan_between_nodes_with_value_init(node_a, node_b, channel_value, push_msat, a_flags, b_flags);
 +      let (msgs, chan_id) = create_chan_between_nodes_with_value_confirm(node_a, node_b, &tx);
 +      (msgs, chan_id, tx)
 +}
 +
 +pub fn create_chan_between_nodes_with_value_b(node_a: &Node, node_b: &Node, as_funding_msgs: &(msgs::FundingLocked, msgs::AnnouncementSignatures)) -> (msgs::ChannelAnnouncement, msgs::ChannelUpdate, msgs::ChannelUpdate) {
 +      node_b.node.handle_funding_locked(&node_a.node.get_our_node_id(), &as_funding_msgs.0).unwrap();
 +      let bs_announcement_sigs = get_event_msg!(node_b, MessageSendEvent::SendAnnouncementSignatures, node_a.node.get_our_node_id());
 +      node_b.node.handle_announcement_signatures(&node_a.node.get_our_node_id(), &as_funding_msgs.1).unwrap();
 +
 +      let events_7 = node_b.node.get_and_clear_pending_msg_events();
 +      assert_eq!(events_7.len(), 1);
 +      let (announcement, bs_update) = match events_7[0] {
 +              MessageSendEvent::BroadcastChannelAnnouncement { ref msg, ref update_msg } => {
 +                      (msg, update_msg)
 +              },
 +              _ => panic!("Unexpected event"),
 +      };
 +
 +      node_a.node.handle_announcement_signatures(&node_b.node.get_our_node_id(), &bs_announcement_sigs).unwrap();
 +      let events_8 = node_a.node.get_and_clear_pending_msg_events();
 +      assert_eq!(events_8.len(), 1);
 +      let as_update = match events_8[0] {
 +              MessageSendEvent::BroadcastChannelAnnouncement { ref msg, ref update_msg } => {
 +                      assert!(*announcement == *msg);
 +                      assert_eq!(update_msg.contents.short_channel_id, announcement.contents.short_channel_id);
 +                      assert_eq!(update_msg.contents.short_channel_id, bs_update.contents.short_channel_id);
 +                      update_msg
 +              },
 +              _ => panic!("Unexpected event"),
 +      };
 +
 +      *node_a.network_chan_count.borrow_mut() += 1;
 +
 +      ((*announcement).clone(), (*as_update).clone(), (*bs_update).clone())
 +}
 +
 +pub fn create_announced_chan_between_nodes(nodes: &Vec<Node>, a: usize, b: usize, a_flags: LocalFeatures, b_flags: LocalFeatures) -> (msgs::ChannelUpdate, msgs::ChannelUpdate, [u8; 32], Transaction) {
 +      create_announced_chan_between_nodes_with_value(nodes, a, b, 100000, 10001, a_flags, b_flags)
 +}
 +
 +pub fn create_announced_chan_between_nodes_with_value(nodes: &Vec<Node>, a: usize, b: usize, channel_value: u64, push_msat: u64, a_flags: LocalFeatures, b_flags: LocalFeatures) -> (msgs::ChannelUpdate, msgs::ChannelUpdate, [u8; 32], Transaction) {
 +      let chan_announcement = create_chan_between_nodes_with_value(&nodes[a], &nodes[b], channel_value, push_msat, a_flags, b_flags);
 +      for node in nodes {
 +              assert!(node.router.handle_channel_announcement(&chan_announcement.0).unwrap());
 +              node.router.handle_channel_update(&chan_announcement.1).unwrap();
 +              node.router.handle_channel_update(&chan_announcement.2).unwrap();
 +      }
 +      (chan_announcement.1, chan_announcement.2, chan_announcement.3, chan_announcement.4)
 +}
 +
 +macro_rules! check_spends {
 +      ($tx: expr, $spends_tx: expr) => {
 +              {
 +                      $tx.verify(|out_point| {
 +                              if out_point.txid == $spends_tx.txid() {
 +                                      $spends_tx.output.get(out_point.vout as usize).cloned()
 +                              } else {
 +                                      None
 +                              }
 +                      }).unwrap();
 +              }
 +      }
 +}
 +
 +macro_rules! get_closing_signed_broadcast {
 +      ($node: expr, $dest_pubkey: expr) => {
 +              {
 +                      let events = $node.get_and_clear_pending_msg_events();
 +                      assert!(events.len() == 1 || events.len() == 2);
 +                      (match events[events.len() - 1] {
 +                              MessageSendEvent::BroadcastChannelUpdate { ref msg } => {
 +                                      assert_eq!(msg.contents.flags & 2, 2);
 +                                      msg.clone()
 +                              },
 +                              _ => panic!("Unexpected event"),
 +                      }, if events.len() == 2 {
 +                              match events[0] {
 +                                      MessageSendEvent::SendClosingSigned { ref node_id, ref msg } => {
 +                                              assert_eq!(*node_id, $dest_pubkey);
 +                                              Some(msg.clone())
 +                                      },
 +                                      _ => panic!("Unexpected event"),
 +                              }
 +                      } else { None })
 +              }
 +      }
 +}
 +
 +macro_rules! check_closed_broadcast {
 +      ($node: expr) => {{
 +              let events = $node.node.get_and_clear_pending_msg_events();
 +              assert_eq!(events.len(), 1);
 +              match events[0] {
 +                      MessageSendEvent::BroadcastChannelUpdate { ref msg } => {
 +                              assert_eq!(msg.contents.flags & 2, 2);
 +                      },
 +                      _ => panic!("Unexpected event"),
 +              }
 +      }}
 +}
 +
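 +// Cooperative close, as driven below: one side sends shutdown, the peer replies with its
 +// own shutdown (and possibly an immediate closing_signed), then closing_signed messages
 +// are exchanged until both sides broadcast identical closing transactions, which
 +// close_channel asserts via `assert_eq!(tx_a, tx_b)`.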
 +pub fn close_channel(outbound_node: &Node, inbound_node: &Node, channel_id: &[u8; 32], funding_tx: Transaction, close_inbound_first: bool) -> (msgs::ChannelUpdate, msgs::ChannelUpdate, Transaction) {
 +      let (node_a, broadcaster_a, struct_a) = if close_inbound_first { (&inbound_node.node, &inbound_node.tx_broadcaster, inbound_node) } else { (&outbound_node.node, &outbound_node.tx_broadcaster, outbound_node) };
 +      let (node_b, broadcaster_b) = if close_inbound_first { (&outbound_node.node, &outbound_node.tx_broadcaster) } else { (&inbound_node.node, &inbound_node.tx_broadcaster) };
 +      let (tx_a, tx_b);
 +
 +      node_a.close_channel(channel_id).unwrap();
 +      node_b.handle_shutdown(&node_a.get_our_node_id(), &get_event_msg!(struct_a, MessageSendEvent::SendShutdown, node_b.get_our_node_id())).unwrap();
 +
 +      let events_1 = node_b.get_and_clear_pending_msg_events();
 +      assert!(events_1.len() >= 1);
 +      let shutdown_b = match events_1[0] {
 +              MessageSendEvent::SendShutdown { ref node_id, ref msg } => {
 +                      assert_eq!(node_id, &node_a.get_our_node_id());
 +                      msg.clone()
 +              },
 +              _ => panic!("Unexpected event"),
 +      };
 +
 +      let closing_signed_b = if !close_inbound_first {
 +              assert_eq!(events_1.len(), 1);
 +              None
 +      } else {
 +              Some(match events_1[1] {
 +                      MessageSendEvent::SendClosingSigned { ref node_id, ref msg } => {
 +                              assert_eq!(node_id, &node_a.get_our_node_id());
 +                              msg.clone()
 +                      },
 +                      _ => panic!("Unexpected event"),
 +              })
 +      };
 +
 +      node_a.handle_shutdown(&node_b.get_our_node_id(), &shutdown_b).unwrap();
 +      let (as_update, bs_update) = if close_inbound_first {
 +              assert!(node_a.get_and_clear_pending_msg_events().is_empty());
 +              node_a.handle_closing_signed(&node_b.get_our_node_id(), &closing_signed_b.unwrap()).unwrap();
 +              assert_eq!(broadcaster_a.txn_broadcasted.lock().unwrap().len(), 1);
 +              tx_a = broadcaster_a.txn_broadcasted.lock().unwrap().remove(0);
 +              let (as_update, closing_signed_a) = get_closing_signed_broadcast!(node_a, node_b.get_our_node_id());
 +
 +              node_b.handle_closing_signed(&node_a.get_our_node_id(), &closing_signed_a.unwrap()).unwrap();
 +              let (bs_update, none_b) = get_closing_signed_broadcast!(node_b, node_a.get_our_node_id());
 +              assert!(none_b.is_none());
 +              assert_eq!(broadcaster_b.txn_broadcasted.lock().unwrap().len(), 1);
 +              tx_b = broadcaster_b.txn_broadcasted.lock().unwrap().remove(0);
 +              (as_update, bs_update)
 +      } else {
 +              let closing_signed_a = get_event_msg!(struct_a, MessageSendEvent::SendClosingSigned, node_b.get_our_node_id());
 +
 +              node_b.handle_closing_signed(&node_a.get_our_node_id(), &closing_signed_a).unwrap();
 +              assert_eq!(broadcaster_b.txn_broadcasted.lock().unwrap().len(), 1);
 +              tx_b = broadcaster_b.txn_broadcasted.lock().unwrap().remove(0);
 +              let (bs_update, closing_signed_b) = get_closing_signed_broadcast!(node_b, node_a.get_our_node_id());
 +
 +              node_a.handle_closing_signed(&node_b.get_our_node_id(), &closing_signed_b.unwrap()).unwrap();
 +              let (as_update, none_a) = get_closing_signed_broadcast!(node_a, node_b.get_our_node_id());
 +              assert!(none_a.is_none());
 +              assert_eq!(broadcaster_a.txn_broadcasted.lock().unwrap().len(), 1);
 +              tx_a = broadcaster_a.txn_broadcasted.lock().unwrap().remove(0);
 +              (as_update, bs_update)
 +      };
 +      assert_eq!(tx_a, tx_b);
 +      check_spends!(tx_a, funding_tx);
 +
 +      (as_update, bs_update, tx_a)
 +}
 +
 +pub struct SendEvent {
 +      pub node_id: PublicKey,
 +      pub msgs: Vec<msgs::UpdateAddHTLC>,
 +      pub commitment_msg: msgs::CommitmentSigned,
 +}
 +impl SendEvent {
 +      pub fn from_commitment_update(node_id: PublicKey, updates: msgs::CommitmentUpdate) -> SendEvent {
 +              assert!(updates.update_fulfill_htlcs.is_empty());
 +              assert!(updates.update_fail_htlcs.is_empty());
 +              assert!(updates.update_fail_malformed_htlcs.is_empty());
 +              assert!(updates.update_fee.is_none());
 +              SendEvent { node_id: node_id, msgs: updates.update_add_htlcs, commitment_msg: updates.commitment_signed }
 +      }
 +
 +      pub fn from_event(event: MessageSendEvent) -> SendEvent {
 +              match event {
 +                      MessageSendEvent::UpdateHTLCs { node_id, updates } => SendEvent::from_commitment_update(node_id, updates),
 +                      _ => panic!("Unexpected event type!"),
 +              }
 +      }
 +
 +      pub fn from_node(node: &Node) -> SendEvent {
 +              let mut events = node.node.get_and_clear_pending_msg_events();
 +              assert_eq!(events.len(), 1);
 +              SendEvent::from_event(events.pop().unwrap())
 +      }
 +}
 +
 +macro_rules! check_added_monitors {
 +      ($node: expr, $count: expr) => {
 +              {
 +                      let mut added_monitors = $node.chan_monitor.added_monitors.lock().unwrap();
 +                      assert_eq!(added_monitors.len(), $count);
 +                      added_monitors.clear();
 +              }
 +      }
 +}
 +
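 +// The "commitment signed dance" replays the message flow that follows a commitment_signed:
 +// the receiver ($node_a) responds with revoke_and_ack plus its own commitment_signed, the
 +// original sender ($node_b) revokes in turn, and the final revoke_and_ack is delivered back
 +// unless a "skip last step" variant is selected. The literal booleans in each arm's pattern
 +// (annotated inline) select which intermediate messages are returned to the caller.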
 +macro_rules! commitment_signed_dance {
 +      ($node_a: expr, $node_b: expr, $commitment_signed: expr, $fail_backwards: expr, true /* skip last step */) => {
 +              {
 +                      check_added_monitors!($node_a, 0);
 +                      assert!($node_a.node.get_and_clear_pending_msg_events().is_empty());
 +                      $node_a.node.handle_commitment_signed(&$node_b.node.get_our_node_id(), &$commitment_signed).unwrap();
 +                      check_added_monitors!($node_a, 1);
 +                      commitment_signed_dance!($node_a, $node_b, (), $fail_backwards, true, false);
 +              }
 +      };
 +      ($node_a: expr, $node_b: expr, (), $fail_backwards: expr, true /* skip last step */, true /* return extra message */, true /* return last RAA */) => {
 +              {
 +                      let (as_revoke_and_ack, as_commitment_signed) = get_revoke_commit_msgs!($node_a, $node_b.node.get_our_node_id());
 +                      check_added_monitors!($node_b, 0);
 +                      assert!($node_b.node.get_and_clear_pending_msg_events().is_empty());
 +                      $node_b.node.handle_revoke_and_ack(&$node_a.node.get_our_node_id(), &as_revoke_and_ack).unwrap();
 +                      assert!($node_b.node.get_and_clear_pending_msg_events().is_empty());
 +                      check_added_monitors!($node_b, 1);
 +                      $node_b.node.handle_commitment_signed(&$node_a.node.get_our_node_id(), &as_commitment_signed).unwrap();
 +                      let (bs_revoke_and_ack, extra_msg_option) = {
 +                              let events = $node_b.node.get_and_clear_pending_msg_events();
 +                              assert!(events.len() <= 2);
 +                              (match events[0] {
 +                                      MessageSendEvent::SendRevokeAndACK { ref node_id, ref msg } => {
 +                                              assert_eq!(*node_id, $node_a.node.get_our_node_id());
 +                                              (*msg).clone()
 +                                      },
 +                                      _ => panic!("Unexpected event"),
 +                              }, events.get(1).map(|e| e.clone()))
 +                      };
 +                      check_added_monitors!($node_b, 1);
 +                      if $fail_backwards {
 +                              assert!($node_a.node.get_and_clear_pending_events().is_empty());
 +                              assert!($node_a.node.get_and_clear_pending_msg_events().is_empty());
 +                      }
 +                      (extra_msg_option, bs_revoke_and_ack)
 +              }
 +      };
 +      ($node_a: expr, $node_b: expr, $commitment_signed: expr, $fail_backwards: expr, true /* skip last step */, false /* return extra message */, true /* return last RAA */) => {
 +              {
 +                      check_added_monitors!($node_a, 0);
 +                      assert!($node_a.node.get_and_clear_pending_msg_events().is_empty());
 +                      $node_a.node.handle_commitment_signed(&$node_b.node.get_our_node_id(), &$commitment_signed).unwrap();
 +                      check_added_monitors!($node_a, 1);
 +                      let (extra_msg_option, bs_revoke_and_ack) = commitment_signed_dance!($node_a, $node_b, (), $fail_backwards, true, true, true);
 +                      assert!(extra_msg_option.is_none());
 +                      bs_revoke_and_ack
 +              }
 +      };
 +      ($node_a: expr, $node_b: expr, (), $fail_backwards: expr, true /* skip last step */, true /* return extra message */) => {
 +              {
 +                      let (extra_msg_option, bs_revoke_and_ack) = commitment_signed_dance!($node_a, $node_b, (), $fail_backwards, true, true, true);
 +                      $node_a.node.handle_revoke_and_ack(&$node_b.node.get_our_node_id(), &bs_revoke_and_ack).unwrap();
 +                      check_added_monitors!($node_a, 1);
 +                      extra_msg_option
 +              }
 +      };
 +      ($node_a: expr, $node_b: expr, (), $fail_backwards: expr, true /* skip last step */, false /* no extra message */) => {
 +              {
 +                      assert!(commitment_signed_dance!($node_a, $node_b, (), $fail_backwards, true, true).is_none());
 +              }
 +      };
 +      ($node_a: expr, $node_b: expr, $commitment_signed: expr, $fail_backwards: expr) => {
 +              {
 +                      commitment_signed_dance!($node_a, $node_b, $commitment_signed, $fail_backwards, true);
 +                      if $fail_backwards {
 +                              expect_pending_htlcs_forwardable!($node_a);
 +                              check_added_monitors!($node_a, 1);
 +
 +                              let channel_state = $node_a.node.channel_state.lock().unwrap();
 +                              assert_eq!(channel_state.pending_msg_events.len(), 1);
 +                              if let MessageSendEvent::UpdateHTLCs { ref node_id, .. } = channel_state.pending_msg_events[0] {
 +                                      assert_ne!(*node_id, $node_b.node.get_our_node_id());
 +                              } else { panic!("Unexpected event"); }
 +                      } else {
 +                              assert!($node_a.node.get_and_clear_pending_msg_events().is_empty());
 +                      }
 +              }
 +      }
 +}
 +
 +macro_rules! get_payment_preimage_hash {
 +      ($node: expr) => {
 +              {
 +                      let payment_preimage = PaymentPreimage([*$node.network_payment_count.borrow(); 32]);
 +                      *$node.network_payment_count.borrow_mut() += 1;
 +                      let payment_hash = PaymentHash(Sha256::hash(&payment_preimage.0[..]).into_inner());
 +                      (payment_preimage, payment_hash)
 +              }
 +      }
 +}
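 +// Payment preimages are deterministic across the whole test network: the n-th payment uses
 +// preimage [n; 32], and the payment hash is always the SHA256 of those 32 bytes, e.g.:
 +//
 +//     let preimage = PaymentPreimage([0; 32]);
 +//     let hash = PaymentHash(Sha256::hash(&preimage.0[..]).into_inner());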
 +
 +macro_rules! expect_pending_htlcs_forwardable {
 +      ($node: expr) => {{
 +              let events = $node.node.get_and_clear_pending_events();
 +              assert_eq!(events.len(), 1);
 +              match events[0] {
 +                      Event::PendingHTLCsForwardable { .. } => { },
 +                      _ => panic!("Unexpected event"),
 +              };
 +              $node.node.process_pending_htlc_forwards();
 +      }}
 +}
 +
 +macro_rules! expect_payment_received {
 +      ($node: expr, $expected_payment_hash: expr, $expected_recv_value: expr) => {
 +              let events = $node.node.get_and_clear_pending_events();
 +              assert_eq!(events.len(), 1);
 +              match events[0] {
 +                      Event::PaymentReceived { ref payment_hash, amt } => {
 +                              assert_eq!($expected_payment_hash, *payment_hash);
 +                              assert_eq!($expected_recv_value, amt);
 +                      },
 +                      _ => panic!("Unexpected event"),
 +              }
 +      }
 +}
 +
 +macro_rules! expect_payment_sent {
 +      ($node: expr, $expected_payment_preimage: expr) => {
 +              let events = $node.node.get_and_clear_pending_events();
 +              assert_eq!(events.len(), 1);
 +              match events[0] {
 +                      Event::PaymentSent { ref payment_preimage } => {
 +                              assert_eq!($expected_payment_preimage, *payment_preimage);
 +                      },
 +                      _ => panic!("Unexpected event"),
 +              }
 +      }
 +}
 +
 +pub fn send_along_route_with_hash(origin_node: &Node, route: Route, expected_route: &[&Node], recv_value: u64, our_payment_hash: PaymentHash) {
 +      let mut payment_event = {
 +              origin_node.node.send_payment(route, our_payment_hash).unwrap();
 +              check_added_monitors!(origin_node, 1);
 +
 +              let mut events = origin_node.node.get_and_clear_pending_msg_events();
 +              assert_eq!(events.len(), 1);
 +              SendEvent::from_event(events.remove(0))
 +      };
 +      let mut prev_node = origin_node;
 +
 +      for (idx, &node) in expected_route.iter().enumerate() {
 +              assert_eq!(node.node.get_our_node_id(), payment_event.node_id);
 +
 +              node.node.handle_update_add_htlc(&prev_node.node.get_our_node_id(), &payment_event.msgs[0]).unwrap();
 +              check_added_monitors!(node, 0);
 +              commitment_signed_dance!(node, prev_node, payment_event.commitment_msg, false);
 +
 +              expect_pending_htlcs_forwardable!(node);
 +
 +              if idx == expected_route.len() - 1 {
 +                      let events_2 = node.node.get_and_clear_pending_events();
 +                      assert_eq!(events_2.len(), 1);
 +                      match events_2[0] {
 +                              Event::PaymentReceived { ref payment_hash, amt } => {
 +                                      assert_eq!(our_payment_hash, *payment_hash);
 +                                      assert_eq!(amt, recv_value);
 +                              },
 +                              _ => panic!("Unexpected event"),
 +                      }
 +              } else {
 +                      let mut events_2 = node.node.get_and_clear_pending_msg_events();
 +                      assert_eq!(events_2.len(), 1);
 +                      check_added_monitors!(node, 1);
 +                      payment_event = SendEvent::from_event(events_2.remove(0));
 +                      assert_eq!(payment_event.msgs.len(), 1);
 +              }
 +
 +              prev_node = node;
 +      }
 +}
 +
 +pub fn send_along_route(origin_node: &Node, route: Route, expected_route: &[&Node], recv_value: u64) -> (PaymentPreimage, PaymentHash) {
 +      let (our_payment_preimage, our_payment_hash) = get_payment_preimage_hash!(origin_node);
 +      send_along_route_with_hash(origin_node, route, expected_route, recv_value, our_payment_hash);
 +      (our_payment_preimage, our_payment_hash)
 +}
 +
- pub fn claim_payment(origin_node: &Node, expected_route: &[&Node], our_payment_preimage: PaymentPreimage) {
-       claim_payment_along_route(origin_node, expected_route, false, our_payment_preimage);
++pub fn claim_payment_along_route(origin_node: &Node, expected_route: &[&Node], skip_last: bool, our_payment_preimage: PaymentPreimage, expected_amount: u64) {
++      assert!(expected_route.last().unwrap().node.claim_funds(our_payment_preimage, expected_amount));
 +      check_added_monitors!(expected_route.last().unwrap(), 1);
 +
 +      let mut next_msgs: Option<(msgs::UpdateFulfillHTLC, msgs::CommitmentSigned)> = None;
 +      let mut expected_next_node = expected_route.last().unwrap().node.get_our_node_id();
 +      macro_rules! get_next_msgs {
 +              ($node: expr) => {
 +                      {
 +                              let events = $node.node.get_and_clear_pending_msg_events();
 +                              assert_eq!(events.len(), 1);
 +                              match events[0] {
 +                                      MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, ref commitment_signed } } => {
 +                                              assert!(update_add_htlcs.is_empty());
 +                                              assert_eq!(update_fulfill_htlcs.len(), 1);
 +                                              assert!(update_fail_htlcs.is_empty());
 +                                              assert!(update_fail_malformed_htlcs.is_empty());
 +                                              assert!(update_fee.is_none());
 +                                              expected_next_node = node_id.clone();
 +                                              Some((update_fulfill_htlcs[0].clone(), commitment_signed.clone()))
 +                                      },
 +                                      _ => panic!("Unexpected event"),
 +                              }
 +                      }
 +              }
 +      }
 +
 +      macro_rules! last_update_fulfill_dance {
 +              ($node: expr, $prev_node: expr) => {
 +                      {
 +                              $node.node.handle_update_fulfill_htlc(&$prev_node.node.get_our_node_id(), &next_msgs.as_ref().unwrap().0).unwrap();
 +                              check_added_monitors!($node, 0);
 +                              assert!($node.node.get_and_clear_pending_msg_events().is_empty());
 +                              commitment_signed_dance!($node, $prev_node, next_msgs.as_ref().unwrap().1, false);
 +                      }
 +              }
 +      }
 +      macro_rules! mid_update_fulfill_dance {
 +              ($node: expr, $prev_node: expr, $new_msgs: expr) => {
 +                      {
 +                              $node.node.handle_update_fulfill_htlc(&$prev_node.node.get_our_node_id(), &next_msgs.as_ref().unwrap().0).unwrap();
 +                              check_added_monitors!($node, 1);
 +                              let new_next_msgs = if $new_msgs {
 +                                      get_next_msgs!($node)
 +                              } else {
 +                                      assert!($node.node.get_and_clear_pending_msg_events().is_empty());
 +                                      None
 +                              };
 +                              commitment_signed_dance!($node, $prev_node, next_msgs.as_ref().unwrap().1, false);
 +                              next_msgs = new_next_msgs;
 +                      }
 +              }
 +      }
 +
 +      let mut prev_node = expected_route.last().unwrap();
 +      for (idx, node) in expected_route.iter().rev().enumerate() {
 +              assert_eq!(expected_next_node, node.node.get_our_node_id());
 +              let update_next_msgs = !skip_last || idx != expected_route.len() - 1;
 +              if next_msgs.is_some() {
 +                      mid_update_fulfill_dance!(node, prev_node, update_next_msgs);
 +              } else if update_next_msgs {
 +                      next_msgs = get_next_msgs!(node);
 +              } else {
 +                      assert!(node.node.get_and_clear_pending_msg_events().is_empty());
 +              }
 +              if !skip_last && idx == expected_route.len() - 1 {
 +                      assert_eq!(expected_next_node, origin_node.node.get_our_node_id());
 +              }
 +
 +              prev_node = node;
 +      }
 +
 +      if !skip_last {
 +              last_update_fulfill_dance!(origin_node, expected_route.first().unwrap());
 +              expect_payment_sent!(origin_node, our_payment_preimage);
 +      }
 +}
 +
- pub fn send_payment(origin: &Node, expected_route: &[&Node], recv_value: u64) {
++pub fn claim_payment(origin_node: &Node, expected_route: &[&Node], our_payment_preimage: PaymentPreimage, expected_amount: u64) {
++      claim_payment_along_route(origin_node, expected_route, false, our_payment_preimage, expected_amount);
 +}
 +
 +pub const TEST_FINAL_CLTV: u32 = 32;
 +
 +pub fn route_payment(origin_node: &Node, expected_route: &[&Node], recv_value: u64) -> (PaymentPreimage, PaymentHash) {
 +      let route = origin_node.router.get_route(&expected_route.last().unwrap().node.get_our_node_id(), None, &Vec::new(), recv_value, TEST_FINAL_CLTV).unwrap();
 +      assert_eq!(route.hops.len(), expected_route.len());
 +      for (node, hop) in expected_route.iter().zip(route.hops.iter()) {
 +              assert_eq!(hop.pubkey, node.node.get_our_node_id());
 +      }
 +
 +      send_along_route(origin_node, route, expected_route, recv_value)
 +}
 +
 +pub fn route_over_limit(origin_node: &Node, expected_route: &[&Node], recv_value: u64) {
 +      let route = origin_node.router.get_route(&expected_route.last().unwrap().node.get_our_node_id(), None, &Vec::new(), recv_value, TEST_FINAL_CLTV).unwrap();
 +      assert_eq!(route.hops.len(), expected_route.len());
 +      for (node, hop) in expected_route.iter().zip(route.hops.iter()) {
 +              assert_eq!(hop.pubkey, node.node.get_our_node_id());
 +      }
 +
 +      let (_, our_payment_hash) = get_payment_preimage_hash!(origin_node);
 +
 +      let err = origin_node.node.send_payment(route, our_payment_hash).err().unwrap();
 +      match err {
 +              APIError::ChannelUnavailable{err} => assert_eq!(err, "Cannot send value that would put us over the max HTLC value in flight our peer will accept"),
 +              _ => panic!("Unknown error variants"),
 +      };
 +}
 +
-       claim_payment(&origin, expected_route, our_payment_preimage);
++pub fn send_payment(origin: &Node, expected_route: &[&Node], recv_value: u64, expected_value: u64) {
 +      let our_payment_preimage = route_payment(&origin, expected_route, recv_value).0;
++      claim_payment(&origin, expected_route, our_payment_preimage, expected_value);
 +}
 +
 +pub fn fail_payment_along_route(origin_node: &Node, expected_route: &[&Node], skip_last: bool, our_payment_hash: PaymentHash) {
 +      assert!(expected_route.last().unwrap().node.fail_htlc_backwards(&our_payment_hash));
 +      expect_pending_htlcs_forwardable!(expected_route.last().unwrap());
 +      check_added_monitors!(expected_route.last().unwrap(), 1);
 +
 +      let mut next_msgs: Option<(msgs::UpdateFailHTLC, msgs::CommitmentSigned)> = None;
 +      macro_rules! update_fail_dance {
 +              ($node: expr, $prev_node: expr, $last_node: expr) => {
 +                      {
 +                              $node.node.handle_update_fail_htlc(&$prev_node.node.get_our_node_id(), &next_msgs.as_ref().unwrap().0).unwrap();
 +                              commitment_signed_dance!($node, $prev_node, next_msgs.as_ref().unwrap().1, !$last_node);
 +                              if skip_last && $last_node {
 +                                      expect_pending_htlcs_forwardable!($node);
 +                              }
 +                      }
 +              }
 +      }
 +
 +      let mut expected_next_node = expected_route.last().unwrap().node.get_our_node_id();
 +      let mut prev_node = expected_route.last().unwrap();
 +      for (idx, node) in expected_route.iter().rev().enumerate() {
 +              assert_eq!(expected_next_node, node.node.get_our_node_id());
 +              if next_msgs.is_some() {
 +                      // We may be the "last node" for the purpose of the commitment dance if we're
 +                      // skipping the last node (implying it is disconnected) and we're the
 +                      // second-to-last node!
 +                      update_fail_dance!(node, prev_node, skip_last && idx == expected_route.len() - 1);
 +              }
 +
 +              let events = node.node.get_and_clear_pending_msg_events();
 +              if !skip_last || idx != expected_route.len() - 1 {
 +                      assert_eq!(events.len(), 1);
 +                      match events[0] {
 +                              MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, ref commitment_signed } } => {
 +                                      assert!(update_add_htlcs.is_empty());
 +                                      assert!(update_fulfill_htlcs.is_empty());
 +                                      assert_eq!(update_fail_htlcs.len(), 1);
 +                                      assert!(update_fail_malformed_htlcs.is_empty());
 +                                      assert!(update_fee.is_none());
 +                                      expected_next_node = node_id.clone();
 +                                      next_msgs = Some((update_fail_htlcs[0].clone(), commitment_signed.clone()));
 +                              },
 +                              _ => panic!("Unexpected event"),
 +                      }
 +              } else {
 +                      assert!(events.is_empty());
 +              }
 +              if !skip_last && idx == expected_route.len() - 1 {
 +                      assert_eq!(expected_next_node, origin_node.node.get_our_node_id());
 +              }
 +
 +              prev_node = node;
 +      }
 +
 +      if !skip_last {
 +              update_fail_dance!(origin_node, expected_route.first().unwrap(), true);
 +
 +              let events = origin_node.node.get_and_clear_pending_events();
 +              assert_eq!(events.len(), 1);
 +              match events[0] {
 +                      Event::PaymentFailed { payment_hash, rejected_by_dest, .. } => {
 +                              assert_eq!(payment_hash, our_payment_hash);
 +                              assert!(rejected_by_dest);
 +                      },
 +                      _ => panic!("Unexpected event"),
 +              }
 +      }
 +}
 +
 +pub fn fail_payment(origin_node: &Node, expected_route: &[&Node], our_payment_hash: PaymentHash) {
 +      fail_payment_along_route(origin_node, expected_route, false, our_payment_hash);
 +}
 +
 +pub fn create_network(node_count: usize, node_config: &[Option<UserConfig>]) -> Vec<Node> {
 +      let mut nodes = Vec::new();
 +      let mut rng = thread_rng();
 +      let secp_ctx = Secp256k1::new();
 +
 +      let chan_count = Rc::new(RefCell::new(0));
 +      let payment_count = Rc::new(RefCell::new(0));
 +
 +      for i in 0..node_count {
 +              let logger: Arc<Logger> = Arc::new(test_utils::TestLogger::with_id(format!("node {}", i)));
 +              let feeest = Arc::new(test_utils::TestFeeEstimator { sat_per_kw: 253 });
 +              let chain_monitor = Arc::new(chaininterface::ChainWatchInterfaceUtil::new(Network::Testnet, Arc::clone(&logger)));
 +              let tx_broadcaster = Arc::new(test_utils::TestBroadcaster{txn_broadcasted: Mutex::new(Vec::new())});
 +              let mut seed = [0; 32];
 +              rng.fill_bytes(&mut seed);
 +              let keys_manager = Arc::new(test_utils::TestKeysInterface::new(&seed, Network::Testnet, Arc::clone(&logger)));
 +              let chan_monitor = Arc::new(test_utils::TestChannelMonitor::new(chain_monitor.clone(), tx_broadcaster.clone(), logger.clone(), feeest.clone()));
 +              let mut default_config = UserConfig::new();
 +              default_config.channel_options.announced_channel = true;
 +              default_config.peer_channel_config_limits.force_announced_channel_preference = false;
 +              let node = ChannelManager::new(Network::Testnet, feeest.clone(), chan_monitor.clone(), chain_monitor.clone(), tx_broadcaster.clone(), Arc::clone(&logger), keys_manager.clone(), if node_config[i].is_some() { node_config[i].clone().unwrap() } else { default_config }, 0).unwrap();
 +              let router = Router::new(PublicKey::from_secret_key(&secp_ctx, &keys_manager.get_node_secret()), chain_monitor.clone(), Arc::clone(&logger));
 +              nodes.push(Node { chain_monitor, tx_broadcaster, chan_monitor, node, router, keys_manager, node_seed: seed,
 +                      network_payment_count: payment_count.clone(),
 +                      network_chan_count: chan_count.clone(),
 +              });
 +      }
 +
 +      nodes
 +}
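 +// A typical invocation, mirroring the tests below: a two-node network with
 +// default configs.
 +//
 +//     let nodes = create_network(2, &[None, None]);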
 +
 +#[derive(PartialEq)]
 +pub enum HTLCType { NONE, TIMEOUT, SUCCESS }
 +/// Tests that the given node has broadcast transactions for the given Channel
 +///
 +/// First checks that the latest local commitment tx has been broadcast, unless an explicit
 +/// commitment_tx is provided, which may be used to test that a remote commitment tx was
 +/// broadcast and the revoked outputs were claimed.
 +///
 +/// Next tests that there is (or is not) a transaction that spends the commitment transaction
 +/// that appears to be the type of HTLC transaction specified in has_htlc_tx.
 +///
 +/// All broadcast transactions must be accounted for in one of the above three types or we'll
 +/// fail.
 +pub fn test_txn_broadcast(node: &Node, chan: &(msgs::ChannelUpdate, msgs::ChannelUpdate, [u8; 32], Transaction), commitment_tx: Option<Transaction>, has_htlc_tx: HTLCType) -> Vec<Transaction> {
 +      let mut node_txn = node.tx_broadcaster.txn_broadcasted.lock().unwrap();
 +      assert!(node_txn.len() >= if commitment_tx.is_some() { 0 } else { 1 } + if has_htlc_tx == HTLCType::NONE { 0 } else { 1 });
 +
 +      let mut res = Vec::with_capacity(2);
 +      node_txn.retain(|tx| {
 +              if tx.input.len() == 1 && tx.input[0].previous_output.txid == chan.3.txid() {
 +                      check_spends!(tx, chan.3.clone());
 +                      if commitment_tx.is_none() {
 +                              res.push(tx.clone());
 +                      }
 +                      false
 +              } else { true }
 +      });
 +      if let Some(explicit_tx) = commitment_tx {
 +              res.push(explicit_tx.clone());
 +      }
 +
 +      assert_eq!(res.len(), 1);
 +
 +      if has_htlc_tx != HTLCType::NONE {
 +              node_txn.retain(|tx| {
 +                      if tx.input.len() == 1 && tx.input[0].previous_output.txid == res[0].txid() {
 +                              check_spends!(tx, res[0].clone());
 +                              if has_htlc_tx == HTLCType::TIMEOUT {
 +                                      assert!(tx.lock_time != 0);
 +                              } else {
 +                                      assert!(tx.lock_time == 0);
 +                              }
 +                              res.push(tx.clone());
 +                              false
 +                      } else { true }
 +              });
 +              assert!(res.len() == 2 || res.len() == 3);
 +              if res.len() == 3 {
 +                      assert_eq!(res[1], res[2]);
 +              }
 +      }
 +
 +      assert!(node_txn.is_empty());
 +      res
 +}
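 +// A sketch of a typical call (names are illustrative; `nodes`/`chan` as built by
 +// the helpers above, after a force-close with one HTLC pending):
 +//
 +//     let node_txn = test_txn_broadcast(&nodes[1], &chan, None, HTLCType::TIMEOUT);
 +//     // node_txn[0] is the commitment tx; node_txn[1] (and possibly a duplicate
 +//     // node_txn[2]) is the HTLC-timeout claim spending it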
 +
 +/// Tests that the given node has broadcast a claim transaction against the provided revoked
 +/// HTLC transaction.
 +pub fn test_revoked_htlc_claim_txn_broadcast(node: &Node, revoked_tx: Transaction) {
 +      let mut node_txn = node.tx_broadcaster.txn_broadcasted.lock().unwrap();
 +      assert_eq!(node_txn.len(), 1);
 +      node_txn.retain(|tx| {
 +              if tx.input.len() == 1 && tx.input[0].previous_output.txid == revoked_tx.txid() {
 +                      check_spends!(tx, revoked_tx.clone());
 +                      false
 +              } else { true }
 +      });
 +      assert!(node_txn.is_empty());
 +}
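 +// Hypothetical usage, assuming `revoked_htlc_tx` is a revoked-counterparty HTLC
 +// transaction the node has already seen on-chain:
 +//
 +//     test_revoked_htlc_claim_txn_broadcast(&nodes[0], revoked_htlc_tx.clone());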
 +
 +pub fn check_preimage_claim(node: &Node, prev_txn: &Vec<Transaction>) -> Vec<Transaction> {
 +      let mut node_txn = node.tx_broadcaster.txn_broadcasted.lock().unwrap();
 +
 +      assert!(node_txn.len() >= 1);
 +      assert_eq!(node_txn[0].input.len(), 1);
 +      let mut found_prev = false;
 +
 +      for tx in prev_txn {
 +              if node_txn[0].input[0].previous_output.txid == tx.txid() {
 +                      check_spends!(node_txn[0], tx.clone());
 +                      assert!(node_txn[0].input[0].witness[2].len() > 106); // must spend an htlc output
 +                      assert_eq!(tx.input.len(), 1); // must spend a commitment tx
 +
 +                      found_prev = true;
 +                      break;
 +              }
 +      }
 +      assert!(found_prev);
 +
 +      let mut res = Vec::new();
 +      mem::swap(&mut *node_txn, &mut res);
 +      res
 +}
 +
 +pub fn get_announce_close_broadcast_events(nodes: &Vec<Node>, a: usize, b: usize) {
 +      let events_1 = nodes[a].node.get_and_clear_pending_msg_events();
 +      assert_eq!(events_1.len(), 1);
 +      let as_update = match events_1[0] {
 +              MessageSendEvent::BroadcastChannelUpdate { ref msg } => {
 +                      msg.clone()
 +              },
 +              _ => panic!("Unexpected event"),
 +      };
 +
 +      let events_2 = nodes[b].node.get_and_clear_pending_msg_events();
 +      assert_eq!(events_2.len(), 1);
 +      let bs_update = match events_2[0] {
 +              MessageSendEvent::BroadcastChannelUpdate { ref msg } => {
 +                      msg.clone()
 +              },
 +              _ => panic!("Unexpected event"),
 +      };
 +
 +      for node in nodes {
 +              node.router.handle_channel_update(&as_update).unwrap();
 +              node.router.handle_channel_update(&bs_update).unwrap();
 +      }
 +}
 +
 +macro_rules! get_channel_value_stat {
 +      ($node: expr, $channel_id: expr) => {{
 +              let chan_lock = $node.node.channel_state.lock().unwrap();
 +              let chan = chan_lock.by_id.get(&$channel_id).unwrap();
 +              chan.get_value_stat()
 +      }}
 +}
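 +// e.g., to inspect a channel's balance/reserve figures (a sketch; `channel_id`
 +// is the chan.2 value returned by the channel-creation helpers):
 +//
 +//     let stat = get_channel_value_stat!(nodes[0], channel_id);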
 +
 +macro_rules! get_chan_reestablish_msgs {
 +      ($src_node: expr, $dst_node: expr) => {
 +              {
 +                      let mut res = Vec::with_capacity(1);
 +                      for msg in $src_node.node.get_and_clear_pending_msg_events() {
 +                              if let MessageSendEvent::SendChannelReestablish { ref node_id, ref msg } = msg {
 +                                      assert_eq!(*node_id, $dst_node.node.get_our_node_id());
 +                                      res.push(msg.clone());
 +                              } else {
 +                                      panic!("Unexpected event")
 +                              }
 +                      }
 +                      res
 +              }
 +      }
 +}
 +
 +macro_rules! handle_chan_reestablish_msgs {
 +      ($src_node: expr, $dst_node: expr) => {
 +              {
 +                      let msg_events = $src_node.node.get_and_clear_pending_msg_events();
 +                      let mut idx = 0;
 +                      let funding_locked = if let Some(&MessageSendEvent::SendFundingLocked { ref node_id, ref msg }) = msg_events.get(0) {
 +                              idx += 1;
 +                              assert_eq!(*node_id, $dst_node.node.get_our_node_id());
 +                              Some(msg.clone())
 +                      } else {
 +                              None
 +                      };
 +
 +                      let mut revoke_and_ack = None;
 +                      let mut commitment_update = None;
 +                      let order = if let Some(ev) = msg_events.get(idx) {
 +                              idx += 1;
 +                              match ev {
 +                                      &MessageSendEvent::SendRevokeAndACK { ref node_id, ref msg } => {
 +                                              assert_eq!(*node_id, $dst_node.node.get_our_node_id());
 +                                              revoke_and_ack = Some(msg.clone());
 +                                              RAACommitmentOrder::RevokeAndACKFirst
 +                                      },
 +                                      &MessageSendEvent::UpdateHTLCs { ref node_id, ref updates } => {
 +                                              assert_eq!(*node_id, $dst_node.node.get_our_node_id());
 +                                              commitment_update = Some(updates.clone());
 +                                              RAACommitmentOrder::CommitmentFirst
 +                                      },
 +                                      _ => panic!("Unexpected event"),
 +                              }
 +                      } else {
 +                              RAACommitmentOrder::CommitmentFirst
 +                      };
 +
 +                      if let Some(ev) = msg_events.get(idx) {
 +                              match ev {
 +                                      &MessageSendEvent::SendRevokeAndACK { ref node_id, ref msg } => {
 +                                              assert_eq!(*node_id, $dst_node.node.get_our_node_id());
 +                                              assert!(revoke_and_ack.is_none());
 +                                              revoke_and_ack = Some(msg.clone());
 +                                      },
 +                                      &MessageSendEvent::UpdateHTLCs { ref node_id, ref updates } => {
 +                                              assert_eq!(*node_id, $dst_node.node.get_our_node_id());
 +                                              assert!(commitment_update.is_none());
 +                                              commitment_update = Some(updates.clone());
 +                                      },
 +                                      _ => panic!("Unexpected event"),
 +                              }
 +                      }
 +
 +                      (funding_locked, revoke_and_ack, commitment_update, order)
 +              }
 +      }
 +}
 +
 +/// pending_htlc_adds includes both the holding cell and in-flight update_add_htlcs, whereas
 +/// for claims/fails they are separated out.
 +pub fn reconnect_nodes(node_a: &Node, node_b: &Node, send_funding_locked: (bool, bool), pending_htlc_adds: (i64, i64), pending_htlc_claims: (usize, usize), pending_cell_htlc_claims: (usize, usize), pending_cell_htlc_fails: (usize, usize), pending_raa: (bool, bool)) {
 +      node_a.node.peer_connected(&node_b.node.get_our_node_id());
 +      let reestablish_1 = get_chan_reestablish_msgs!(node_a, node_b);
 +      node_b.node.peer_connected(&node_a.node.get_our_node_id());
 +      let reestablish_2 = get_chan_reestablish_msgs!(node_b, node_a);
 +
 +      if send_funding_locked.0 {
 +              // If node_a expects a funding_locked, it better not think it has received a
 +              // revoke_and_ack from node_b
 +              for reestablish in reestablish_1.iter() {
 +                      assert_eq!(reestablish.next_remote_commitment_number, 0);
 +              }
 +      }
 +      if send_funding_locked.1 {
 +              // If node_b expects a funding_locked, it better not think it has received a
 +              // revoke_and_ack from node_a
 +              for reestablish in reestablish_2.iter() {
 +                      assert_eq!(reestablish.next_remote_commitment_number, 0);
 +              }
 +      }
 +      if send_funding_locked.0 || send_funding_locked.1 {
 +              // If we expect any funding_locked's, both sides better have set
 +              // next_local_commitment_number to 1
 +              for reestablish in reestablish_1.iter() {
 +                      assert_eq!(reestablish.next_local_commitment_number, 1);
 +              }
 +              for reestablish in reestablish_2.iter() {
 +                      assert_eq!(reestablish.next_local_commitment_number, 1);
 +              }
 +      }
 +
 +      let mut resp_1 = Vec::new();
 +      for msg in reestablish_1 {
 +              node_b.node.handle_channel_reestablish(&node_a.node.get_our_node_id(), &msg).unwrap();
 +              resp_1.push(handle_chan_reestablish_msgs!(node_b, node_a));
 +      }
 +      if pending_cell_htlc_claims.0 != 0 || pending_cell_htlc_fails.0 != 0 {
 +              check_added_monitors!(node_b, 1);
 +      } else {
 +              check_added_monitors!(node_b, 0);
 +      }
 +
 +      let mut resp_2 = Vec::new();
 +      for msg in reestablish_2 {
 +              node_a.node.handle_channel_reestablish(&node_b.node.get_our_node_id(), &msg).unwrap();
 +              resp_2.push(handle_chan_reestablish_msgs!(node_a, node_b));
 +      }
 +      if pending_cell_htlc_claims.1 != 0 || pending_cell_htlc_fails.1 != 0 {
 +              check_added_monitors!(node_a, 1);
 +      } else {
 +              check_added_monitors!(node_a, 0);
 +      }
 +
 +      // We don't yet support both needing updates, as that would require a different commitment dance:
 +      assert!((pending_htlc_adds.0 == 0 && pending_htlc_claims.0 == 0 && pending_cell_htlc_claims.0 == 0 && pending_cell_htlc_fails.0 == 0) ||
 +                      (pending_htlc_adds.1 == 0 && pending_htlc_claims.1 == 0 && pending_cell_htlc_claims.1 == 0 && pending_cell_htlc_fails.1 == 0));
 +
 +      for chan_msgs in resp_1.drain(..) {
 +              if send_funding_locked.0 {
 +                      node_a.node.handle_funding_locked(&node_b.node.get_our_node_id(), &chan_msgs.0.unwrap()).unwrap();
 +                      let announcement_event = node_a.node.get_and_clear_pending_msg_events();
 +                      if !announcement_event.is_empty() {
 +                              assert_eq!(announcement_event.len(), 1);
 +                              if let MessageSendEvent::SendAnnouncementSignatures { .. } = announcement_event[0] {
 +                                      //TODO: Test announcement_sigs re-sending
 +                              } else { panic!("Unexpected event!"); }
 +                      }
 +              } else {
 +                      assert!(chan_msgs.0.is_none());
 +              }
 +              if pending_raa.0 {
 +                      assert!(chan_msgs.3 == RAACommitmentOrder::RevokeAndACKFirst);
 +                      node_a.node.handle_revoke_and_ack(&node_b.node.get_our_node_id(), &chan_msgs.1.unwrap()).unwrap();
 +                      assert!(node_a.node.get_and_clear_pending_msg_events().is_empty());
 +                      check_added_monitors!(node_a, 1);
 +              } else {
 +                      assert!(chan_msgs.1.is_none());
 +              }
 +              if pending_htlc_adds.0 != 0 || pending_htlc_claims.0 != 0 || pending_cell_htlc_claims.0 != 0 || pending_cell_htlc_fails.0 != 0 {
 +                      let commitment_update = chan_msgs.2.unwrap();
 +                      if pending_htlc_adds.0 != -1 { // We use -1 to denote a response commitment_signed
 +                              assert_eq!(commitment_update.update_add_htlcs.len(), pending_htlc_adds.0 as usize);
 +                      } else {
 +                              assert!(commitment_update.update_add_htlcs.is_empty());
 +                      }
 +                      assert_eq!(commitment_update.update_fulfill_htlcs.len(), pending_htlc_claims.0 + pending_cell_htlc_claims.0);
 +                      assert_eq!(commitment_update.update_fail_htlcs.len(), pending_cell_htlc_fails.0);
 +                      assert!(commitment_update.update_fail_malformed_htlcs.is_empty());
 +                      for update_add in commitment_update.update_add_htlcs {
 +                              node_a.node.handle_update_add_htlc(&node_b.node.get_our_node_id(), &update_add).unwrap();
 +                      }
 +                      for update_fulfill in commitment_update.update_fulfill_htlcs {
 +                              node_a.node.handle_update_fulfill_htlc(&node_b.node.get_our_node_id(), &update_fulfill).unwrap();
 +                      }
 +                      for update_fail in commitment_update.update_fail_htlcs {
 +                              node_a.node.handle_update_fail_htlc(&node_b.node.get_our_node_id(), &update_fail).unwrap();
 +                      }
 +
 +                      if pending_htlc_adds.0 != -1 { // We use -1 to denote a response commitment_signed
 +                              commitment_signed_dance!(node_a, node_b, commitment_update.commitment_signed, false);
 +                      } else {
 +                              node_a.node.handle_commitment_signed(&node_b.node.get_our_node_id(), &commitment_update.commitment_signed).unwrap();
 +                              check_added_monitors!(node_a, 1);
 +                              let as_revoke_and_ack = get_event_msg!(node_a, MessageSendEvent::SendRevokeAndACK, node_b.node.get_our_node_id());
 +                              // No commitment_signed so get_event_msg's assert(len == 1) passes
 +                              node_b.node.handle_revoke_and_ack(&node_a.node.get_our_node_id(), &as_revoke_and_ack).unwrap();
 +                              assert!(node_b.node.get_and_clear_pending_msg_events().is_empty());
 +                              check_added_monitors!(node_b, 1);
 +                      }
 +              } else {
 +                      assert!(chan_msgs.2.is_none());
 +              }
 +      }
 +
 +      for chan_msgs in resp_2.drain(..) {
 +              if send_funding_locked.1 {
 +                      node_b.node.handle_funding_locked(&node_a.node.get_our_node_id(), &chan_msgs.0.unwrap()).unwrap();
 +                      let announcement_event = node_b.node.get_and_clear_pending_msg_events();
 +                      if !announcement_event.is_empty() {
 +                              assert_eq!(announcement_event.len(), 1);
 +                              if let MessageSendEvent::SendAnnouncementSignatures { .. } = announcement_event[0] {
 +                                      //TODO: Test announcement_sigs re-sending
 +                              } else { panic!("Unexpected event!"); }
 +                      }
 +              } else {
 +                      assert!(chan_msgs.0.is_none());
 +              }
 +              if pending_raa.1 {
 +                      assert!(chan_msgs.3 == RAACommitmentOrder::RevokeAndACKFirst);
 +                      node_b.node.handle_revoke_and_ack(&node_a.node.get_our_node_id(), &chan_msgs.1.unwrap()).unwrap();
 +                      assert!(node_b.node.get_and_clear_pending_msg_events().is_empty());
 +                      check_added_monitors!(node_b, 1);
 +              } else {
 +                      assert!(chan_msgs.1.is_none());
 +              }
 +              if pending_htlc_adds.1 != 0 || pending_htlc_claims.1 != 0 || pending_cell_htlc_claims.1 != 0 || pending_cell_htlc_fails.1 != 0 {
 +                      let commitment_update = chan_msgs.2.unwrap();
 +                      if pending_htlc_adds.1 != -1 { // We use -1 to denote a response commitment_signed
 +                              assert_eq!(commitment_update.update_add_htlcs.len(), pending_htlc_adds.1 as usize);
 +                      } else {
 +                              assert!(commitment_update.update_add_htlcs.is_empty());
 +                      }
 +                      assert_eq!(commitment_update.update_fulfill_htlcs.len(), pending_htlc_claims.1 + pending_cell_htlc_claims.1);
 +                      assert_eq!(commitment_update.update_fail_htlcs.len(), pending_cell_htlc_fails.1);
 +                      assert!(commitment_update.update_fail_malformed_htlcs.is_empty());
 +                      for update_add in commitment_update.update_add_htlcs {
 +                              node_b.node.handle_update_add_htlc(&node_a.node.get_our_node_id(), &update_add).unwrap();
 +                      }
 +                      for update_fulfill in commitment_update.update_fulfill_htlcs {
 +                              node_b.node.handle_update_fulfill_htlc(&node_a.node.get_our_node_id(), &update_fulfill).unwrap();
 +                      }
 +                      for update_fail in commitment_update.update_fail_htlcs {
 +                              node_b.node.handle_update_fail_htlc(&node_a.node.get_our_node_id(), &update_fail).unwrap();
 +                      }
 +
 +                      if pending_htlc_adds.1 != -1 { // We use -1 to denote a response commitment_signed
 +                              commitment_signed_dance!(node_b, node_a, commitment_update.commitment_signed, false);
 +                      } else {
 +                              node_b.node.handle_commitment_signed(&node_a.node.get_our_node_id(), &commitment_update.commitment_signed).unwrap();
 +                              check_added_monitors!(node_b, 1);
 +                              let bs_revoke_and_ack = get_event_msg!(node_b, MessageSendEvent::SendRevokeAndACK, node_a.node.get_our_node_id());
 +                              // No commitment_signed so get_event_msg's assert(len == 1) passes
 +                              node_a.node.handle_revoke_and_ack(&node_b.node.get_our_node_id(), &bs_revoke_and_ack).unwrap();
 +                              assert!(node_a.node.get_and_clear_pending_msg_events().is_empty());
 +                              check_added_monitors!(node_a, 1);
 +                      }
 +              } else {
 +                      assert!(chan_msgs.2.is_none());
 +              }
 +      }
 +}
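 +// A reconnect with nothing in flight on either side is sketched as:
 +//
 +//     reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0),
 +//                     (0, 0), (0, 0), (false, false));
 +//
 +// Non-zero tuple entries assert that the corresponding updates are replayed for
 +// that side; -1 in pending_htlc_adds selects the bare response
 +// commitment_signed path noted in the code above.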
index 5bb98bb613f6dd67fbc271146780b99037378dd3,0000000000000000000000000000000000000000..d44b75cc981a64997e4a060d6df6087e1b7bfa84
mode 100644,000000..100644
--- /dev/null
@@@ -1,6173 -1,0 +1,6220 @@@
-       send_payment(&nodes[0], &vec!(&nodes[1])[..], 8000000);
 +//! Tests standing up a network of ChannelManagers, creating channels, sending
 +//! payments/messages between them, and often checking that the resulting ChannelMonitors are
 +//! able to claim outputs on-chain.
 +
 +use chain::transaction::OutPoint;
 +use chain::chaininterface::{ChainListener, ChainWatchInterface, ChainWatchInterfaceUtil};
 +use chain::keysinterface::{KeysInterface, SpendableOutputDescriptor, KeysManager};
 +use chain::keysinterface;
 +use ln::channel::{COMMITMENT_TX_BASE_WEIGHT, COMMITMENT_TX_WEIGHT_PER_HTLC};
 +use ln::channelmanager::{ChannelManager,ChannelManagerReadArgs,HTLCForwardInfo,RAACommitmentOrder, PaymentPreimage, PaymentHash, BREAKDOWN_TIMEOUT};
 +use ln::channelmonitor::{ChannelMonitor, CLTV_CLAIM_BUFFER, LATENCY_GRACE_PERIOD_BLOCKS, ManyChannelMonitor, ANTI_REORG_DELAY};
 +use ln::channel::{ACCEPTED_HTLC_SCRIPT_WEIGHT, OFFERED_HTLC_SCRIPT_WEIGHT, Channel, ChannelError};
 +use ln::onion_utils;
 +use ln::router::{Route, RouteHop};
 +use ln::msgs;
 +use ln::msgs::{ChannelMessageHandler,RoutingMessageHandler,HTLCFailChannelUpdate, LocalFeatures, ErrorAction};
 +use util::test_utils;
 +use util::events::{Event, EventsProvider, MessageSendEvent, MessageSendEventsProvider};
 +use util::errors::APIError;
 +use util::ser::{Writeable, ReadableArgs};
 +use util::config::UserConfig;
 +use util::logger::Logger;
 +
 +use bitcoin::util::hash::BitcoinHash;
 +use bitcoin_hashes::sha256d::Hash as Sha256dHash;
 +use bitcoin::util::bip143;
 +use bitcoin::util::address::Address;
 +use bitcoin::util::bip32::{ChildNumber, ExtendedPubKey, ExtendedPrivKey};
 +use bitcoin::blockdata::block::{Block, BlockHeader};
 +use bitcoin::blockdata::transaction::{Transaction, TxOut, TxIn, SigHashType, OutPoint as BitcoinOutPoint};
 +use bitcoin::blockdata::script::{Builder, Script};
 +use bitcoin::blockdata::opcodes;
 +use bitcoin::blockdata::constants::genesis_block;
 +use bitcoin::network::constants::Network;
 +
 +use bitcoin_hashes::sha256::Hash as Sha256;
 +use bitcoin_hashes::Hash;
 +
 +use secp256k1::{Secp256k1, Message};
 +use secp256k1::key::{PublicKey,SecretKey};
 +
 +use std::collections::{BTreeSet, HashMap, HashSet};
 +use std::default::Default;
 +use std::sync::{Arc, Mutex};
 +use std::sync::atomic::Ordering;
 +use std::mem;
 +
 +use rand::{thread_rng, Rng};
 +
 +use ln::functional_test_utils::*;
 +
 +#[test]
 +fn test_insane_channel_opens() {
 +      // Stand up a network of 2 nodes
 +      let nodes = create_network(2, &[None, None]);
 +
 +      // Instantiate channel parameters where we push the maximum msats given our
 +      // funding satoshis
 +      let channel_value_sat = 31337; // same as funding satoshis
 +      let channel_reserve_satoshis = Channel::get_our_channel_reserve_satoshis(channel_value_sat);
 +      let push_msat = (channel_value_sat - channel_reserve_satoshis) * 1000;
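 +      // For a concrete sense of the numbers: assuming the helper enforces the
 +      // 1000 sat minimum reserve at this channel size (an assumption about
 +      // get_our_channel_reserve_satoshis, not asserted here), push_msat is
 +      // (31337 - 1000) * 1000 = 30_337_000 msat.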
 +
 +      // Have node0 initiate a channel to node1 with aforementioned parameters
 +      nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), channel_value_sat, push_msat, 42).unwrap();
 +
 +      // Extract the channel open message from node0 to node1
 +      let open_channel_message = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
 +
 +      // Test helper that asserts we get the correct error string given a mutator
 +      // that supposedly makes the channel open message insane
 +      let insane_open_helper = |expected_error_str, message_mutator: fn(msgs::OpenChannel) -> msgs::OpenChannel| {
 +              match nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), LocalFeatures::new(), &message_mutator(open_channel_message.clone())) {
 +                      Err(msgs::LightningError{ err: error_str, action: msgs::ErrorAction::SendErrorMessage {..}}) => {
 +                              assert_eq!(error_str, expected_error_str, "unexpected LightningError string (expected `{}`, actual `{}`)", expected_error_str, error_str)
 +                      },
 +                      Err(msgs::LightningError{..}) => {panic!("unexpected LightningError action")},
 +                      _ => panic!("insane OpenChannel message was somehow Ok"),
 +              }
 +      };
 +
 +      use ln::channel::MAX_FUNDING_SATOSHIS;
 +      use ln::channelmanager::MAX_LOCAL_BREAKDOWN_TIMEOUT;
 +
 +      // Test all mutations that would make the channel open message insane
 +      insane_open_helper("funding value > 2^24", |mut msg| { msg.funding_satoshis = MAX_FUNDING_SATOSHIS; msg });
 +
 +      insane_open_helper("Bogus channel_reserve_satoshis", |mut msg| { msg.channel_reserve_satoshis = msg.funding_satoshis + 1; msg });
 +
 +      insane_open_helper("push_msat larger than funding value", |mut msg| { msg.push_msat = (msg.funding_satoshis - msg.channel_reserve_satoshis) * 1000 + 1; msg });
 +
 +      insane_open_helper("Peer never wants payout outputs?", |mut msg| { msg.dust_limit_satoshis = msg.funding_satoshis + 1 ; msg });
 +
 +      insane_open_helper("Bogus; channel reserve is less than dust limit", |mut msg| { msg.dust_limit_satoshis = msg.channel_reserve_satoshis + 1; msg });
 +
 +      insane_open_helper("Minimum htlc value is full channel value", |mut msg| { msg.htlc_minimum_msat = (msg.funding_satoshis - msg.channel_reserve_satoshis) * 1000; msg });
 +
 +      insane_open_helper("They wanted our payments to be delayed by a needlessly long period", |mut msg| { msg.to_self_delay = MAX_LOCAL_BREAKDOWN_TIMEOUT + 1; msg });
 +
 +      insane_open_helper("0 max_accpted_htlcs makes for a useless channel", |mut msg| { msg.max_accepted_htlcs = 0; msg });
 +
 +      insane_open_helper("max_accpted_htlcs > 483", |mut msg| { msg.max_accepted_htlcs = 484; msg });
 +}
 +
 +#[test]
 +fn test_async_inbound_update_fee() {
 +      let mut nodes = create_network(2, &[None, None]);
 +      let chan = create_announced_chan_between_nodes(&nodes, 0, 1, LocalFeatures::new(), LocalFeatures::new());
 +      let channel_id = chan.2;
 +
 +      // balancing
-       send_payment(&nodes[0], &vec!(&nodes[1])[..], 8000000);
++      send_payment(&nodes[0], &vec!(&nodes[1])[..], 8000000, 8_000_000);
 +
 +      // A                                        B
 +      // update_fee                            ->
 +      // send (1) commitment_signed            -.
 +      //                                       <- update_add_htlc/commitment_signed
 +      // send (2) RAA (awaiting remote revoke) -.
 +      // (1) commitment_signed is delivered    ->
 +      //                                       .- send (3) RAA (awaiting remote revoke)
 +      // (2) RAA is delivered                  ->
 +      //                                       .- send (4) commitment_signed
 +      //                                       <- (3) RAA is delivered
 +      // send (5) commitment_signed            -.
 +      //                                       <- (4) commitment_signed is delivered
 +      // send (6) RAA                          -.
 +      // (5) commitment_signed is delivered    ->
 +      //                                       <- RAA
 +      // (6) RAA is delivered                  ->
 +
 +      // First nodes[0] generates an update_fee
 +      nodes[0].node.update_fee(channel_id, get_feerate!(nodes[0], channel_id) + 20).unwrap();
 +      check_added_monitors!(nodes[0], 1);
 +
 +      let events_0 = nodes[0].node.get_and_clear_pending_msg_events();
 +      assert_eq!(events_0.len(), 1);
 +      let (update_msg, commitment_signed) = match events_0[0] { // (1)
 +              MessageSendEvent::UpdateHTLCs { updates: msgs::CommitmentUpdate { ref update_fee, ref commitment_signed, .. }, .. } => {
 +                      (update_fee.as_ref(), commitment_signed)
 +              },
 +              _ => panic!("Unexpected event"),
 +      };
 +
 +      nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_msg.unwrap()).unwrap();
 +
 +      // ...but before it's delivered, nodes[1] starts to send a payment back to nodes[0]...
 +      let (_, our_payment_hash) = get_payment_preimage_hash!(nodes[0]);
 +      nodes[1].node.send_payment(nodes[1].router.get_route(&nodes[0].node.get_our_node_id(), None, &Vec::new(), 40000, TEST_FINAL_CLTV).unwrap(), our_payment_hash).unwrap();
 +      check_added_monitors!(nodes[1], 1);
 +
 +      let payment_event = {
 +              let mut events_1 = nodes[1].node.get_and_clear_pending_msg_events();
 +              assert_eq!(events_1.len(), 1);
 +              SendEvent::from_event(events_1.remove(0))
 +      };
 +      assert_eq!(payment_event.node_id, nodes[0].node.get_our_node_id());
 +      assert_eq!(payment_event.msgs.len(), 1);
 +
 +      // ...now when the messages get delivered everyone should be happy
 +      nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &payment_event.msgs[0]).unwrap();
 +      nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &payment_event.commitment_msg).unwrap(); // (2)
 +      let as_revoke_and_ack = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
 +      // nodes[0] is awaiting nodes[1] revoke_and_ack so get_event_msg's assert(len == 1) passes
 +      check_added_monitors!(nodes[0], 1);
 +
 +      // deliver(1), generate (3):
 +      nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), commitment_signed).unwrap();
 +      let bs_revoke_and_ack = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
 +      // nodes[1] is awaiting nodes[0] revoke_and_ack so get_event_msg's assert(len == 1) passes
 +      check_added_monitors!(nodes[1], 1);
 +
 +      nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_revoke_and_ack).unwrap(); // deliver (2)
 +      let bs_update = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
 +      assert!(bs_update.update_add_htlcs.is_empty()); // (4)
 +      assert!(bs_update.update_fulfill_htlcs.is_empty()); // (4)
 +      assert!(bs_update.update_fail_htlcs.is_empty()); // (4)
 +      assert!(bs_update.update_fail_malformed_htlcs.is_empty()); // (4)
 +      assert!(bs_update.update_fee.is_none()); // (4)
 +      check_added_monitors!(nodes[1], 1);
 +
 +      nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_revoke_and_ack).unwrap(); // deliver (3)
 +      let as_update = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
 +      assert!(as_update.update_add_htlcs.is_empty()); // (5)
 +      assert!(as_update.update_fulfill_htlcs.is_empty()); // (5)
 +      assert!(as_update.update_fail_htlcs.is_empty()); // (5)
 +      assert!(as_update.update_fail_malformed_htlcs.is_empty()); // (5)
 +      assert!(as_update.update_fee.is_none()); // (5)
 +      check_added_monitors!(nodes[0], 1);
 +
 +      nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_update.commitment_signed).unwrap(); // deliver (4)
 +      let as_second_revoke = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
 +      // only (6) so get_event_msg's assert(len == 1) passes
 +      check_added_monitors!(nodes[0], 1);
 +
 +      nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_update.commitment_signed).unwrap(); // deliver (5)
 +      let bs_second_revoke = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
 +      check_added_monitors!(nodes[1], 1);
 +
 +      nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_second_revoke).unwrap();
 +      check_added_monitors!(nodes[0], 1);
 +
 +      let events_2 = nodes[0].node.get_and_clear_pending_events();
 +      assert_eq!(events_2.len(), 1);
 +      match events_2[0] {
 +              Event::PendingHTLCsForwardable {..} => {}, // If we actually processed we'd receive the payment
 +              _ => panic!("Unexpected event"),
 +      }
 +
 +      nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_second_revoke).unwrap(); // deliver (6)
 +      check_added_monitors!(nodes[1], 1);
 +}
 +
 +#[test]
 +fn test_update_fee_unordered_raa() {
 +      // Just the intro to the previous test followed by an out-of-order RAA (which caused a
 +      // crash in an earlier version of the update_fee patch)
 +      let mut nodes = create_network(2, &[None, None]);
 +      let chan = create_announced_chan_between_nodes(&nodes, 0, 1, LocalFeatures::new(), LocalFeatures::new());
 +      let channel_id = chan.2;
 +
 +      // balancing
-       send_payment(&nodes[0], &vec!(&nodes[1])[..], 8000000);
++      send_payment(&nodes[0], &vec!(&nodes[1])[..], 8000000, 8_000_000);
 +
 +      // First nodes[0] generates an update_fee
 +      nodes[0].node.update_fee(channel_id, get_feerate!(nodes[0], channel_id) + 20).unwrap();
 +      check_added_monitors!(nodes[0], 1);
 +
 +      let events_0 = nodes[0].node.get_and_clear_pending_msg_events();
 +      assert_eq!(events_0.len(), 1);
 +      let update_msg = match events_0[0] { // (1)
 +              MessageSendEvent::UpdateHTLCs { updates: msgs::CommitmentUpdate { ref update_fee, .. }, .. } => {
 +                      update_fee.as_ref()
 +              },
 +              _ => panic!("Unexpected event"),
 +      };
 +
 +      nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_msg.unwrap()).unwrap();
 +
 +      // ...but before it's delivered, nodes[1] starts to send a payment back to nodes[0]...
 +      let (_, our_payment_hash) = get_payment_preimage_hash!(nodes[0]);
 +      nodes[1].node.send_payment(nodes[1].router.get_route(&nodes[0].node.get_our_node_id(), None, &Vec::new(), 40000, TEST_FINAL_CLTV).unwrap(), our_payment_hash).unwrap();
 +      check_added_monitors!(nodes[1], 1);
 +
 +      let payment_event = {
 +              let mut events_1 = nodes[1].node.get_and_clear_pending_msg_events();
 +              assert_eq!(events_1.len(), 1);
 +              SendEvent::from_event(events_1.remove(0))
 +      };
 +      assert_eq!(payment_event.node_id, nodes[0].node.get_our_node_id());
 +      assert_eq!(payment_event.msgs.len(), 1);
 +
 +      // ...now when the messages get delivered everyone should be happy
 +      nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &payment_event.msgs[0]).unwrap();
 +      nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &payment_event.commitment_msg).unwrap(); // (2)
 +      let as_revoke_msg = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
 +      // nodes[0] is awaiting nodes[1] revoke_and_ack so get_event_msg's assert(len == 1) passes
 +      check_added_monitors!(nodes[0], 1);
 +
 +      nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_revoke_msg).unwrap(); // deliver (2)
 +      check_added_monitors!(nodes[1], 1);
 +
 +      // We can't continue, sadly, because our (1) now has a bogus signature
 +}
 +
 +#[test]
 +fn test_multi_flight_update_fee() {
 +      let nodes = create_network(2, &[None, None]);
 +      let chan = create_announced_chan_between_nodes(&nodes, 0, 1, LocalFeatures::new(), LocalFeatures::new());
 +      let channel_id = chan.2;
 +
 +      // A                                        B
 +      // update_fee/commitment_signed          ->
 +      //                                       .- send (1) RAA and (2) commitment_signed
 +      // update_fee (never committed)          ->
 +      // (3) update_fee                        ->
 +      // We have to manually generate the above update_fee; it is allowed by the protocol, but we
 +      // don't track which updates correspond to which revoke_and_ack responses, so we're in
 +      // AwaitingRAA mode and will not generate the update_fee yet.
 +      //                                       <- (1) RAA delivered
 +      // (3) is generated and send (4) CS      -.
 +      // Note that A cannot generate (4) prior to (1) being delivered as it otherwise doesn't
 +      // know the per_commitment_point to use for it.
 +      //                                       <- (2) commitment_signed delivered
 +      // revoke_and_ack                        ->
 +      //                                          B should send no response here
 +      // (4) commitment_signed delivered       ->
 +      //                                       <- RAA/commitment_signed delivered
 +      // revoke_and_ack                        ->
 +
 +      // First nodes[0] generates an update_fee
 +      let initial_feerate = get_feerate!(nodes[0], channel_id);
 +      nodes[0].node.update_fee(channel_id, initial_feerate + 20).unwrap();
 +      check_added_monitors!(nodes[0], 1);
 +
 +      let events_0 = nodes[0].node.get_and_clear_pending_msg_events();
 +      assert_eq!(events_0.len(), 1);
 +      let (update_msg_1, commitment_signed_1) = match events_0[0] { // (1)
 +              MessageSendEvent::UpdateHTLCs { updates: msgs::CommitmentUpdate { ref update_fee, ref commitment_signed, .. }, .. } => {
 +                      (update_fee.as_ref().unwrap(), commitment_signed)
 +              },
 +              _ => panic!("Unexpected event"),
 +      };
 +
 +      // Deliver first update_fee/commitment_signed pair, generating (1) and (2):
 +      nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_msg_1).unwrap();
 +      nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), commitment_signed_1).unwrap();
 +      let (bs_revoke_msg, bs_commitment_signed) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
 +      check_added_monitors!(nodes[1], 1);
 +
 +      // nodes[0] is awaiting a revoke from nodes[1] before it will create a new commitment
 +      // transaction:
 +      nodes[0].node.update_fee(channel_id, initial_feerate + 40).unwrap();
 +      assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
 +      assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
 +
 +      // Create the (3) update_fee message that nodes[0] will generate before it does...
 +      let mut update_msg_2 = msgs::UpdateFee {
 +              channel_id: update_msg_1.channel_id.clone(),
 +              feerate_per_kw: (initial_feerate + 30) as u32,
 +      };
 +
 +      nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), &update_msg_2).unwrap();
 +
 +      update_msg_2.feerate_per_kw = (initial_feerate + 40) as u32;
 +      // Deliver (3)
 +      nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), &update_msg_2).unwrap();
 +
 +      // Deliver (1), generating (3) and (4)
 +      nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_revoke_msg).unwrap();
 +      let as_second_update = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
 +      check_added_monitors!(nodes[0], 1);
 +      assert!(as_second_update.update_add_htlcs.is_empty());
 +      assert!(as_second_update.update_fulfill_htlcs.is_empty());
 +      assert!(as_second_update.update_fail_htlcs.is_empty());
 +      assert!(as_second_update.update_fail_malformed_htlcs.is_empty());
 +      // Check that the update_fee newly generated matches what we delivered:
 +      assert_eq!(as_second_update.update_fee.as_ref().unwrap().channel_id, update_msg_2.channel_id);
 +      assert_eq!(as_second_update.update_fee.as_ref().unwrap().feerate_per_kw, update_msg_2.feerate_per_kw);
 +
 +      // Deliver (2) commitment_signed
 +      nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_commitment_signed).unwrap();
 +      let as_revoke_msg = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
 +      check_added_monitors!(nodes[0], 1);
 +      // No commitment_signed so get_event_msg's assert(len == 1) passes
 +
 +      nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_revoke_msg).unwrap();
 +      assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
 +      check_added_monitors!(nodes[1], 1);
 +
 +      // Deliver (4)
 +      nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_second_update.commitment_signed).unwrap();
 +      let (bs_second_revoke, bs_second_commitment) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
 +      check_added_monitors!(nodes[1], 1);
 +
 +      nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_second_revoke).unwrap();
 +      assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
 +      check_added_monitors!(nodes[0], 1);
 +
 +      nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_second_commitment).unwrap();
 +      let as_second_revoke = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
 +      // No commitment_signed so get_event_msg's assert(len == 1) passes
 +      check_added_monitors!(nodes[0], 1);
 +
 +      nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_second_revoke).unwrap();
 +      assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
 +      check_added_monitors!(nodes[1], 1);
 +}
 +
 +#[test]
 +fn test_update_fee_vanilla() {
 +      let nodes = create_network(2, &[None, None]);
 +      let chan = create_announced_chan_between_nodes(&nodes, 0, 1, LocalFeatures::new(), LocalFeatures::new());
 +      let channel_id = chan.2;
 +
 +      let feerate = get_feerate!(nodes[0], channel_id);
 +      nodes[0].node.update_fee(channel_id, feerate+25).unwrap();
 +      check_added_monitors!(nodes[0], 1);
 +
 +      let events_0 = nodes[0].node.get_and_clear_pending_msg_events();
 +      assert_eq!(events_0.len(), 1);
 +      let (update_msg, commitment_signed) = match events_0[0] {
 +              MessageSendEvent::UpdateHTLCs { node_id:_, updates: msgs::CommitmentUpdate { update_add_htlcs:_, update_fulfill_htlcs:_, update_fail_htlcs:_, update_fail_malformed_htlcs:_, ref update_fee, ref commitment_signed } } => {
 +                      (update_fee.as_ref(), commitment_signed)
 +              },
 +              _ => panic!("Unexpected event"),
 +      };
 +      nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_msg.unwrap()).unwrap();
 +
 +      nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), commitment_signed).unwrap();
 +      let (revoke_msg, commitment_signed) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
 +      check_added_monitors!(nodes[1], 1);
 +
 +      nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &revoke_msg).unwrap();
 +      assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
 +      check_added_monitors!(nodes[0], 1);
 +
 +      nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &commitment_signed).unwrap();
 +      let revoke_msg = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
 +      // No commitment_signed so get_event_msg's assert(len == 1) passes
 +      check_added_monitors!(nodes[0], 1);
 +
 +      nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &revoke_msg).unwrap();
 +      assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
 +      check_added_monitors!(nodes[1], 1);
 +}
 +
 +#[test]
 +fn test_update_fee_that_funder_cannot_afford() {
 +      let nodes = create_network(2, &[None, None]);
 +      let channel_value = 1888;
 +      let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, channel_value, 700000, LocalFeatures::new(), LocalFeatures::new());
 +      let channel_id = chan.2;
 +
 +      let feerate = 260;
 +      nodes[0].node.update_fee(channel_id, feerate).unwrap();
 +      check_added_monitors!(nodes[0], 1);
 +      let update_msg = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
 +
 +      nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), &update_msg.update_fee.unwrap()).unwrap();
 +
 +      commitment_signed_dance!(nodes[1], nodes[0], update_msg.commitment_signed, false);
 +
 +      //Confirm that the fee in the last local commitment txn matches what we expect from the feerate of 260 set above.
 +      //This value results in a fee that is exactly what the funder can afford (277 sat + 1000 sat channel reserve).
 +      {
 +              let chan_lock = nodes[1].node.channel_state.lock().unwrap();
 +              let chan = chan_lock.by_id.get(&channel_id).unwrap();
 +
 +              //We made sure neither party's funds are below the dust limit, so both non-HTLC outputs are
 +              //present; subtract those 2 from the output count to get the HTLC count
 +              let num_htlcs = chan.last_local_commitment_txn[0].output.len() - 2;
 +              let total_fee: u64 = feerate * (COMMITMENT_TX_BASE_WEIGHT + (num_htlcs as u64) * COMMITMENT_TX_WEIGHT_PER_HTLC) / 1000;
 +              let mut actual_fee = chan.last_local_commitment_txn[0].output.iter().fold(0, |acc, output| acc + output.value);
 +              actual_fee = channel_value - actual_fee;
 +              assert_eq!(total_fee, actual_fee);
 +      } //drop the mutex
 +
 +      //Add 2 to the previous fee rate so the final fee increases by 1 (with no HTLCs the fee is essentially
 +      //fee_rate*(724/1000), so an increment of 1 would add only 0.724 sat and be rounded back down)
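 +      //(concretely, taking the 724-weight figure above: floor(262 * 724 / 1000)
 +      // - floor(260 * 724 / 1000) = 189 - 188 = 1)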
 +      nodes[0].node.update_fee(channel_id, feerate+2).unwrap();
 +      check_added_monitors!(nodes[0], 1);
 +
 +      let update2_msg = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
 +
 +      nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), &update2_msg.update_fee.unwrap()).unwrap();
 +
 +      //While producing the commitment_signed response after handling a received update_fee request, the
 +      //check that the funder (who sent the update_fee request) can afford the new fee (funder_balance >= fee + channel_reserve)
 +      //should produce an error.
 +      let err = nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &update2_msg.commitment_signed).unwrap_err();
 +
 +      assert!(match err.err {
 +              "Funding remote cannot afford proposed new fee" => true,
 +              _ => false,
 +      });
 +
 +      //clear the message we could not handle
 +      nodes[1].node.get_and_clear_pending_msg_events();
 +}
 +
 +#[test]
 +fn test_update_fee_with_fundee_update_add_htlc() {
 +      let mut nodes = create_network(2, &[None, None]);
 +      let chan = create_announced_chan_between_nodes(&nodes, 0, 1, LocalFeatures::new(), LocalFeatures::new());
 +      let channel_id = chan.2;
 +
 +      // balancing
-       claim_payment(&nodes[1], &vec!(&nodes[0])[..], our_payment_preimage);
++      send_payment(&nodes[0], &vec!(&nodes[1])[..], 8000000, 8_000_000);
 +
 +      let feerate = get_feerate!(nodes[0], channel_id);
 +      nodes[0].node.update_fee(channel_id, feerate+20).unwrap();
 +      check_added_monitors!(nodes[0], 1);
 +
 +      let events_0 = nodes[0].node.get_and_clear_pending_msg_events();
 +      assert_eq!(events_0.len(), 1);
 +      let (update_msg, commitment_signed) = match events_0[0] {
 +              MessageSendEvent::UpdateHTLCs { node_id:_, updates: msgs::CommitmentUpdate { update_add_htlcs:_, update_fulfill_htlcs:_, update_fail_htlcs:_, update_fail_malformed_htlcs:_, ref update_fee, ref commitment_signed } } => {
 +                      (update_fee.as_ref(), commitment_signed)
 +              },
 +              _ => panic!("Unexpected event"),
 +      };
 +      nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_msg.unwrap()).unwrap();
 +      nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), commitment_signed).unwrap();
 +      let (revoke_msg, commitment_signed) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
 +      check_added_monitors!(nodes[1], 1);
 +
 +      let route = nodes[1].router.get_route(&nodes[0].node.get_our_node_id(), None, &Vec::new(), 800000, TEST_FINAL_CLTV).unwrap();
 +
 +      let (our_payment_preimage, our_payment_hash) = get_payment_preimage_hash!(nodes[1]);
 +
 +      // nothing happens since nodes[1] is in AwaitingRemoteRevoke
 +      nodes[1].node.send_payment(route, our_payment_hash).unwrap();
 +      {
 +              let mut added_monitors = nodes[0].chan_monitor.added_monitors.lock().unwrap();
 +              assert_eq!(added_monitors.len(), 0);
 +              added_monitors.clear();
 +      }
 +      assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
 +      assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
 +      // nodes[1] has nothing to do
 +
 +      nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &revoke_msg).unwrap();
 +      assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
 +      check_added_monitors!(nodes[0], 1);
 +
 +      nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &commitment_signed).unwrap();
 +      let revoke_msg = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
 +      // No commitment_signed so get_event_msg's assert(len == 1) passes
 +      check_added_monitors!(nodes[0], 1);
 +      nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &revoke_msg).unwrap();
 +      check_added_monitors!(nodes[1], 1);
 +      // AwaitingRemoteRevoke ends here
 +
 +      let commitment_update = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
 +      assert_eq!(commitment_update.update_add_htlcs.len(), 1);
 +      assert_eq!(commitment_update.update_fulfill_htlcs.len(), 0);
 +      assert_eq!(commitment_update.update_fail_htlcs.len(), 0);
 +      assert_eq!(commitment_update.update_fail_malformed_htlcs.len(), 0);
 +      assert_eq!(commitment_update.update_fee.is_none(), true);
 +
 +      nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &commitment_update.update_add_htlcs[0]).unwrap();
 +      nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &commitment_update.commitment_signed).unwrap();
 +      check_added_monitors!(nodes[0], 1);
 +      let (revoke, commitment_signed) = get_revoke_commit_msgs!(nodes[0], nodes[1].node.get_our_node_id());
 +
 +      nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &revoke).unwrap();
 +      check_added_monitors!(nodes[1], 1);
 +      assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
 +
 +      nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &commitment_signed).unwrap();
 +      check_added_monitors!(nodes[1], 1);
 +      let revoke = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
 +      // No commitment_signed so get_event_msg's assert(len == 1) passes
 +
 +      nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &revoke).unwrap();
 +      check_added_monitors!(nodes[0], 1);
 +      assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
 +
 +      expect_pending_htlcs_forwardable!(nodes[0]);
 +
 +      let events = nodes[0].node.get_and_clear_pending_events();
 +      assert_eq!(events.len(), 1);
 +      match events[0] {
 +              Event::PaymentReceived { .. } => { },
 +              _ => panic!("Unexpected event"),
 +      };
 +
-       send_payment(&nodes[1], &vec!(&nodes[0])[..], 800000);
-       send_payment(&nodes[0], &vec!(&nodes[1])[..], 800000);
++      claim_payment(&nodes[1], &vec!(&nodes[0])[..], our_payment_preimage, 800_000);
 +
-       assert!(nodes[2].node.claim_funds(our_payment_preimage));
++      send_payment(&nodes[1], &vec!(&nodes[0])[..], 800000, 800_000);
++      send_payment(&nodes[0], &vec!(&nodes[1])[..], 800000, 800_000);
 +      close_channel(&nodes[0], &nodes[1], &chan.2, chan.3, true);
 +}
 +
 +#[test]
 +fn test_update_fee() {
 +      let nodes = create_network(2, &[None, None]);
 +      let chan = create_announced_chan_between_nodes(&nodes, 0, 1, LocalFeatures::new(), LocalFeatures::new());
 +      let channel_id = chan.2;
 +
 +      // A                                        B
 +      // (1) update_fee/commitment_signed      ->
 +      //                                       <- (2) revoke_and_ack
 +      //                                       .- send (3) commitment_signed
 +      // (4) update_fee/commitment_signed      ->
 +      //                                       .- send (5) revoke_and_ack (no CS as we're awaiting a revoke)
 +      //                                       <- (3) commitment_signed delivered
 +      // send (6) revoke_and_ack               -.
 +      //                                       <- (5) deliver revoke_and_ack
 +      // (6) deliver revoke_and_ack            ->
 +      //                                       .- send (7) commitment_signed in response to (4)
 +      //                                       <- (7) deliver commitment_signed
 +      // revoke_and_ack                        ->
 +
 +      // Create and deliver (1)...
 +      let feerate = get_feerate!(nodes[0], channel_id);
 +      nodes[0].node.update_fee(channel_id, feerate+20).unwrap();
 +      check_added_monitors!(nodes[0], 1);
 +
 +      let events_0 = nodes[0].node.get_and_clear_pending_msg_events();
 +      assert_eq!(events_0.len(), 1);
 +      let (update_msg, commitment_signed) = match events_0[0] {
 +              MessageSendEvent::UpdateHTLCs { node_id:_, updates: msgs::CommitmentUpdate { update_add_htlcs:_, update_fulfill_htlcs:_, update_fail_htlcs:_, update_fail_malformed_htlcs:_, ref update_fee, ref commitment_signed } } => {
 +                      (update_fee.as_ref(), commitment_signed)
 +              },
 +              _ => panic!("Unexpected event"),
 +      };
 +      nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_msg.unwrap()).unwrap();
 +
 +      // Generate (2) and (3):
 +      nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), commitment_signed).unwrap();
 +      let (revoke_msg, commitment_signed_0) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
 +      check_added_monitors!(nodes[1], 1);
 +
 +      // Deliver (2):
 +      nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &revoke_msg).unwrap();
 +      assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
 +      check_added_monitors!(nodes[0], 1);
 +
 +      // Create and deliver (4)...
 +      nodes[0].node.update_fee(channel_id, feerate+30).unwrap();
 +      check_added_monitors!(nodes[0], 1);
 +      let events_0 = nodes[0].node.get_and_clear_pending_msg_events();
 +      assert_eq!(events_0.len(), 1);
 +      let (update_msg, commitment_signed) = match events_0[0] {
 +              MessageSendEvent::UpdateHTLCs { node_id:_, updates: msgs::CommitmentUpdate { update_add_htlcs:_, update_fulfill_htlcs:_, update_fail_htlcs:_, update_fail_malformed_htlcs:_, ref update_fee, ref commitment_signed } } => {
 +                      (update_fee.as_ref(), commitment_signed)
 +              },
 +              _ => panic!("Unexpected event"),
 +      };
 +
 +      nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_msg.unwrap()).unwrap();
 +      nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), commitment_signed).unwrap();
 +      check_added_monitors!(nodes[1], 1);
 +      // ... creating (5)
 +      let revoke_msg = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
 +      // No commitment_signed so get_event_msg's assert(len == 1) passes
 +
 +      // Handle (3), creating (6):
 +      nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &commitment_signed_0).unwrap();
 +      check_added_monitors!(nodes[0], 1);
 +      let revoke_msg_0 = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
 +      // No commitment_signed so get_event_msg's assert(len == 1) passes
 +
 +      // Deliver (5):
 +      nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &revoke_msg).unwrap();
 +      assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
 +      check_added_monitors!(nodes[0], 1);
 +
 +      // Deliver (6), creating (7):
 +      nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &revoke_msg_0).unwrap();
 +      let commitment_update = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
 +      assert!(commitment_update.update_add_htlcs.is_empty());
 +      assert!(commitment_update.update_fulfill_htlcs.is_empty());
 +      assert!(commitment_update.update_fail_htlcs.is_empty());
 +      assert!(commitment_update.update_fail_malformed_htlcs.is_empty());
 +      assert!(commitment_update.update_fee.is_none());
 +      check_added_monitors!(nodes[1], 1);
 +
 +      // Deliver (7)
 +      nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &commitment_update.commitment_signed).unwrap();
 +      check_added_monitors!(nodes[0], 1);
 +      let revoke_msg = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
 +      // No commitment_signed so get_event_msg's assert(len == 1) passes
 +
 +      nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &revoke_msg).unwrap();
 +      check_added_monitors!(nodes[1], 1);
 +      assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
 +
 +      assert_eq!(get_feerate!(nodes[0], channel_id), feerate + 30);
 +      assert_eq!(get_feerate!(nodes[1], channel_id), feerate + 30);
 +      close_channel(&nodes[0], &nodes[1], &chan.2, chan.3, true);
 +}
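 +
 +// Editor's sketch (not part of this change) of the single commitment-dance leg
 +// that test_update_fee drives by hand above, using the same test-utils macros;
 +// `one_leg` is a hypothetical helper name:
 +//
 +//      fn one_leg(sender: &Node, receiver: &Node, cs: &msgs::CommitmentSigned) {
 +//              // Receiver signs for the new state and revokes the old one...
 +//              receiver.node.handle_commitment_signed(&sender.node.get_our_node_id(), cs).unwrap();
 +//              check_added_monitors!(receiver, 1);
 +//              let (raa, cs_resp) = get_revoke_commit_msgs!(receiver, sender.node.get_our_node_id());
 +//              // ...the sender applies the revocation, then the counter-signature...
 +//              sender.node.handle_revoke_and_ack(&receiver.node.get_our_node_id(), &raa).unwrap();
 +//              sender.node.handle_commitment_signed(&receiver.node.get_our_node_id(), &cs_resp).unwrap();
 +//              check_added_monitors!(sender, 2);
 +//              // ...and revokes its own prior state to finish the leg.
 +//              let raa_resp = get_event_msg!(sender, MessageSendEvent::SendRevokeAndACK, receiver.node.get_our_node_id());
 +//              receiver.node.handle_revoke_and_ack(&sender.node.get_our_node_id(), &raa_resp).unwrap();
 +//              check_added_monitors!(receiver, 1);
 +//      }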
 +
 +#[test]
 +fn pre_funding_lock_shutdown_test() {
 +      // Test sending a shutdown prior to funding_locked after funding generation
 +      let nodes = create_network(2, &[None, None]);
 +      let tx = create_chan_between_nodes_with_value_init(&nodes[0], &nodes[1], 8000000, 0, LocalFeatures::new(), LocalFeatures::new());
 +      let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
 +      nodes[0].chain_monitor.block_connected_checked(&header, 1, &[&tx; 1], &[1; 1]);
 +      nodes[1].chain_monitor.block_connected_checked(&header, 1, &[&tx; 1], &[1; 1]);
 +
 +      nodes[0].node.close_channel(&OutPoint::new(tx.txid(), 0).to_channel_id()).unwrap();
 +      let node_0_shutdown = get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id());
 +      nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &node_0_shutdown).unwrap();
 +      let node_1_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id());
 +      nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &node_1_shutdown).unwrap();
 +
 +      let node_0_closing_signed = get_event_msg!(nodes[0], MessageSendEvent::SendClosingSigned, nodes[1].node.get_our_node_id());
 +      nodes[1].node.handle_closing_signed(&nodes[0].node.get_our_node_id(), &node_0_closing_signed).unwrap();
 +      let (_, node_1_closing_signed) = get_closing_signed_broadcast!(nodes[1].node, nodes[0].node.get_our_node_id());
 +      nodes[0].node.handle_closing_signed(&nodes[1].node.get_our_node_id(), &node_1_closing_signed.unwrap()).unwrap();
 +      let (_, node_0_none) = get_closing_signed_broadcast!(nodes[0].node, nodes[1].node.get_our_node_id());
 +      assert!(node_0_none.is_none());
 +
 +      assert!(nodes[0].node.list_channels().is_empty());
 +      assert!(nodes[1].node.list_channels().is_empty());
 +}
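 +
 +// Editor's sketch (not part of this change) of the cooperative-close handshake
 +// the test above performs by hand; `coop_close` is a hypothetical helper built
 +// from the same calls and macros:
 +//
 +//      fn coop_close(a: &Node, b: &Node, channel_id: &[u8; 32]) {
 +//              a.node.close_channel(channel_id).unwrap();
 +//              let a_shutdown = get_event_msg!(a, MessageSendEvent::SendShutdown, b.node.get_our_node_id());
 +//              b.node.handle_shutdown(&a.node.get_our_node_id(), &a_shutdown).unwrap();
 +//              let b_shutdown = get_event_msg!(b, MessageSendEvent::SendShutdown, a.node.get_our_node_id());
 +//              a.node.handle_shutdown(&b.node.get_our_node_id(), &b_shutdown).unwrap();
 +//              // closing_signed ping-pong until one side has nothing further to send:
 +//              let a_cs = get_event_msg!(a, MessageSendEvent::SendClosingSigned, b.node.get_our_node_id());
 +//              b.node.handle_closing_signed(&a.node.get_our_node_id(), &a_cs).unwrap();
 +//              let (_, b_cs) = get_closing_signed_broadcast!(b.node, a.node.get_our_node_id());
 +//              a.node.handle_closing_signed(&b.node.get_our_node_id(), &b_cs.unwrap()).unwrap();
 +//              let (_, a_none) = get_closing_signed_broadcast!(a.node, b.node.get_our_node_id());
 +//              assert!(a_none.is_none());
 +//      }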
 +
 +#[test]
 +fn updates_shutdown_wait() {
 +      // Test sending a shutdown with outstanding updates pending
 +      let mut nodes = create_network(3, &[None, None, None]);
 +      let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1, LocalFeatures::new(), LocalFeatures::new());
 +      let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2, LocalFeatures::new(), LocalFeatures::new());
 +      let route_1 = nodes[0].router.get_route(&nodes[1].node.get_our_node_id(), None, &[], 100000, TEST_FINAL_CLTV).unwrap();
 +      let route_2 = nodes[1].router.get_route(&nodes[0].node.get_our_node_id(), None, &[], 100000, TEST_FINAL_CLTV).unwrap();
 +
 +      let (our_payment_preimage, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 100000);
 +
 +      nodes[0].node.close_channel(&chan_1.2).unwrap();
 +      let node_0_shutdown = get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id());
 +      nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &node_0_shutdown).unwrap();
 +      let node_1_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id());
 +      nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &node_1_shutdown).unwrap();
 +
 +      assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
 +      assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
 +
 +      let (_, payment_hash) = get_payment_preimage_hash!(nodes[0]);
 +      if let Err(APIError::ChannelUnavailable {..}) = nodes[0].node.send_payment(route_1, payment_hash) {}
 +      else { panic!("New sends should fail!") };
 +      if let Err(APIError::ChannelUnavailable {..}) = nodes[1].node.send_payment(route_2, payment_hash) {}
 +      else { panic!("New sends should fail!") };
 +
-       assert!(nodes[2].node.claim_funds(our_payment_preimage));
++      assert!(nodes[2].node.claim_funds(our_payment_preimage, 100_000));
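 +      // (claim_funds, as changed in this diff, takes the expected payment amount in
 +      // msat as its second argument; 100_000 matches the 100000-msat payment routed
 +      // above.)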
 +      check_added_monitors!(nodes[2], 1);
 +      let updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
 +      assert!(updates.update_add_htlcs.is_empty());
 +      assert!(updates.update_fail_htlcs.is_empty());
 +      assert!(updates.update_fail_malformed_htlcs.is_empty());
 +      assert!(updates.update_fee.is_none());
 +      assert_eq!(updates.update_fulfill_htlcs.len(), 1);
 +      nodes[1].node.handle_update_fulfill_htlc(&nodes[2].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]).unwrap();
 +      check_added_monitors!(nodes[1], 1);
 +      let updates_2 = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
 +      commitment_signed_dance!(nodes[1], nodes[2], updates.commitment_signed, false);
 +
 +      assert!(updates_2.update_add_htlcs.is_empty());
 +      assert!(updates_2.update_fail_htlcs.is_empty());
 +      assert!(updates_2.update_fail_malformed_htlcs.is_empty());
 +      assert!(updates_2.update_fee.is_none());
 +      assert_eq!(updates_2.update_fulfill_htlcs.len(), 1);
 +      nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &updates_2.update_fulfill_htlcs[0]).unwrap();
 +      commitment_signed_dance!(nodes[0], nodes[1], updates_2.commitment_signed, false, true);
 +
 +      let events = nodes[0].node.get_and_clear_pending_events();
 +      assert_eq!(events.len(), 1);
 +      match events[0] {
 +              Event::PaymentSent { ref payment_preimage } => {
 +                      assert_eq!(our_payment_preimage, *payment_preimage);
 +              },
 +              _ => panic!("Unexpected event"),
 +      }
 +
 +      let node_0_closing_signed = get_event_msg!(nodes[0], MessageSendEvent::SendClosingSigned, nodes[1].node.get_our_node_id());
 +      nodes[1].node.handle_closing_signed(&nodes[0].node.get_our_node_id(), &node_0_closing_signed).unwrap();
 +      let (_, node_1_closing_signed) = get_closing_signed_broadcast!(nodes[1].node, nodes[0].node.get_our_node_id());
 +      nodes[0].node.handle_closing_signed(&nodes[1].node.get_our_node_id(), &node_1_closing_signed.unwrap()).unwrap();
 +      let (_, node_0_none) = get_closing_signed_broadcast!(nodes[0].node, nodes[1].node.get_our_node_id());
 +      assert!(node_0_none.is_none());
 +
 +      assert!(nodes[0].node.list_channels().is_empty());
 +
 +      assert_eq!(nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().len(), 1);
 +      nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clear();
 +      close_channel(&nodes[1], &nodes[2], &chan_2.2, chan_2.3, true);
 +      assert!(nodes[1].node.list_channels().is_empty());
 +      assert!(nodes[2].node.list_channels().is_empty());
 +}
 +
 +#[test]
 +fn htlc_fail_async_shutdown() {
 +      // Test that HTLCs are failed if a shutdown starts, even when messages are delivered out-of-order
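 +      // Here nodes[0]'s update_add_htlc/commitment_signed cross nodes[1]'s shutdown
 +      // on the wire; once shutdown has begun, nodes[1] must fail the HTLC back to
 +      // nodes[0] rather than forwarding it on to nodes[2].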
 +      let mut nodes = create_network(3, &[None, None, None]);
 +      let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1, LocalFeatures::new(), LocalFeatures::new());
 +      let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2, LocalFeatures::new(), LocalFeatures::new());
 +
 +      let route = nodes[0].router.get_route(&nodes[2].node.get_our_node_id(), None, &[], 100000, TEST_FINAL_CLTV).unwrap();
 +      let (_, our_payment_hash) = get_payment_preimage_hash!(nodes[0]);
 +      nodes[0].node.send_payment(route, our_payment_hash).unwrap();
 +      check_added_monitors!(nodes[0], 1);
 +      let updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
 +      assert_eq!(updates.update_add_htlcs.len(), 1);
 +      assert!(updates.update_fulfill_htlcs.is_empty());
 +      assert!(updates.update_fail_htlcs.is_empty());
 +      assert!(updates.update_fail_malformed_htlcs.is_empty());
 +      assert!(updates.update_fee.is_none());
 +
 +      nodes[1].node.close_channel(&chan_1.2).unwrap();
 +      let node_1_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id());
 +      nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &node_1_shutdown).unwrap();
 +      let node_0_shutdown = get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id());
 +
 +      nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]).unwrap();
 +      nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &updates.commitment_signed).unwrap();
 +      check_added_monitors!(nodes[1], 1);
 +      nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &node_0_shutdown).unwrap();
 +      commitment_signed_dance!(nodes[1], nodes[0], (), false, true, false);
 +
 +      let updates_2 = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
 +      assert!(updates_2.update_add_htlcs.is_empty());
 +      assert!(updates_2.update_fulfill_htlcs.is_empty());
 +      assert_eq!(updates_2.update_fail_htlcs.len(), 1);
 +      assert!(updates_2.update_fail_malformed_htlcs.is_empty());
 +      assert!(updates_2.update_fee.is_none());
 +
 +      nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &updates_2.update_fail_htlcs[0]).unwrap();
 +      commitment_signed_dance!(nodes[0], nodes[1], updates_2.commitment_signed, false, true);
 +
 +      let events = nodes[0].node.get_and_clear_pending_events();
 +      assert_eq!(events.len(), 1);
 +      match events[0] {
 +              Event::PaymentFailed { ref payment_hash, ref rejected_by_dest, .. } => {
 +                      assert_eq!(our_payment_hash, *payment_hash);
 +                      assert!(!rejected_by_dest);
 +              },
 +              _ => panic!("Unexpected event"),
 +      }
 +
 +      let msg_events = nodes[0].node.get_and_clear_pending_msg_events();
 +      assert_eq!(msg_events.len(), 2);
 +      let node_0_closing_signed = match msg_events[0] {
 +              MessageSendEvent::SendClosingSigned { ref node_id, ref msg } => {
 +                      assert_eq!(*node_id, nodes[1].node.get_our_node_id());
 +                      (*msg).clone()
 +              },
 +              _ => panic!("Unexpected event"),
 +      };
 +      match msg_events[1] {
 +              MessageSendEvent::PaymentFailureNetworkUpdate { update: msgs::HTLCFailChannelUpdate::ChannelUpdateMessage { ref msg }} => {
 +                      assert_eq!(msg.contents.short_channel_id, chan_1.0.contents.short_channel_id);
 +              },
 +              _ => panic!("Unexpected event"),
 +      }
 +
 +      assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
 +      nodes[1].node.handle_closing_signed(&nodes[0].node.get_our_node_id(), &node_0_closing_signed).unwrap();
 +      let (_, node_1_closing_signed) = get_closing_signed_broadcast!(nodes[1].node, nodes[0].node.get_our_node_id());
 +      nodes[0].node.handle_closing_signed(&nodes[1].node.get_our_node_id(), &node_1_closing_signed.unwrap()).unwrap();
 +      let (_, node_0_none) = get_closing_signed_broadcast!(nodes[0].node, nodes[1].node.get_our_node_id());
 +      assert!(node_0_none.is_none());
 +
 +      assert!(nodes[0].node.list_channels().is_empty());
 +
 +      assert_eq!(nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().len(), 1);
 +      nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clear();
 +      close_channel(&nodes[1], &nodes[2], &chan_2.2, chan_2.3, true);
 +      assert!(nodes[1].node.list_channels().is_empty());
 +      assert!(nodes[2].node.list_channels().is_empty());
 +}
 +
 +fn do_test_shutdown_rebroadcast(recv_count: u8) {
 +      // Test that shutdown/closing_signed is re-sent on reconnect with a variable number of
 +      // messages delivered prior to disconnect
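 +      // recv_count encodes how many shutdown messages were delivered before the
 +      // disconnect: 0 = neither, 1 = only nodes[0] received nodes[1]'s shutdown,
 +      // 2 = both sides received the other's shutdown.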
 +      let nodes = create_network(3, &[None, None, None]);
 +      let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1, LocalFeatures::new(), LocalFeatures::new());
 +      let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2, LocalFeatures::new(), LocalFeatures::new());
 +
 +      let (our_payment_preimage, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 100000);
 +
 +      nodes[1].node.close_channel(&chan_1.2).unwrap();
 +      let node_1_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id());
 +      if recv_count > 0 {
 +              nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &node_1_shutdown).unwrap();
 +              let node_0_shutdown = get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id());
 +              if recv_count > 1 {
 +                      nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &node_0_shutdown).unwrap();
 +              }
 +      }
 +
 +      nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
 +      nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
 +
 +      nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id());
 +      let node_0_reestablish = get_event_msg!(nodes[0], MessageSendEvent::SendChannelReestablish, nodes[1].node.get_our_node_id());
 +      nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id());
 +      let node_1_reestablish = get_event_msg!(nodes[1], MessageSendEvent::SendChannelReestablish, nodes[0].node.get_our_node_id());
 +
 +      nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &node_0_reestablish).unwrap();
 +      let node_1_2nd_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id());
 +      assert!(node_1_shutdown == node_1_2nd_shutdown);
 +
 +      nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &node_1_reestablish).unwrap();
 +      let node_0_2nd_shutdown = if recv_count > 0 {
 +              let node_0_2nd_shutdown = get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id());
 +              nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &node_1_2nd_shutdown).unwrap();
 +              node_0_2nd_shutdown
 +      } else {
 +              assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
 +              nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &node_1_2nd_shutdown).unwrap();
 +              get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id())
 +      };
 +      nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &node_0_2nd_shutdown).unwrap();
 +
 +      assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
 +      assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
 +
-       send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2], &nodes[3])[..], 8000000);
-       send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2], &nodes[3])[..], 8000000);
-       send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2], &nodes[3])[..], 8000000);
-       send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2], &nodes[3])[..], 8000000);
++      assert!(nodes[2].node.claim_funds(our_payment_preimage, 100_000));
 +      check_added_monitors!(nodes[2], 1);
 +      let updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
 +      assert!(updates.update_add_htlcs.is_empty());
 +      assert!(updates.update_fail_htlcs.is_empty());
 +      assert!(updates.update_fail_malformed_htlcs.is_empty());
 +      assert!(updates.update_fee.is_none());
 +      assert_eq!(updates.update_fulfill_htlcs.len(), 1);
 +      nodes[1].node.handle_update_fulfill_htlc(&nodes[2].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]).unwrap();
 +      check_added_monitors!(nodes[1], 1);
 +      let updates_2 = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
 +      commitment_signed_dance!(nodes[1], nodes[2], updates.commitment_signed, false);
 +
 +      assert!(updates_2.update_add_htlcs.is_empty());
 +      assert!(updates_2.update_fail_htlcs.is_empty());
 +      assert!(updates_2.update_fail_malformed_htlcs.is_empty());
 +      assert!(updates_2.update_fee.is_none());
 +      assert_eq!(updates_2.update_fulfill_htlcs.len(), 1);
 +      nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &updates_2.update_fulfill_htlcs[0]).unwrap();
 +      commitment_signed_dance!(nodes[0], nodes[1], updates_2.commitment_signed, false, true);
 +
 +      let events = nodes[0].node.get_and_clear_pending_events();
 +      assert_eq!(events.len(), 1);
 +      match events[0] {
 +              Event::PaymentSent { ref payment_preimage } => {
 +                      assert_eq!(our_payment_preimage, *payment_preimage);
 +              },
 +              _ => panic!("Unexpected event"),
 +      }
 +
 +      let node_0_closing_signed = get_event_msg!(nodes[0], MessageSendEvent::SendClosingSigned, nodes[1].node.get_our_node_id());
 +      if recv_count > 0 {
 +              nodes[1].node.handle_closing_signed(&nodes[0].node.get_our_node_id(), &node_0_closing_signed).unwrap();
 +              let (_, node_1_closing_signed) = get_closing_signed_broadcast!(nodes[1].node, nodes[0].node.get_our_node_id());
 +              assert!(node_1_closing_signed.is_some());
 +      }
 +
 +      nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
 +      nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
 +
 +      nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id());
 +      let node_0_2nd_reestablish = get_event_msg!(nodes[0], MessageSendEvent::SendChannelReestablish, nodes[1].node.get_our_node_id());
 +      nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id());
 +      if recv_count == 0 {
 +              // If all closing_signeds weren't delivered we can just resume where we left off...
 +              let node_1_2nd_reestablish = get_event_msg!(nodes[1], MessageSendEvent::SendChannelReestablish, nodes[0].node.get_our_node_id());
 +
 +              nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &node_1_2nd_reestablish).unwrap();
 +              let node_0_3rd_shutdown = get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id());
 +              assert!(node_0_2nd_shutdown == node_0_3rd_shutdown);
 +
 +              nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &node_0_2nd_reestablish).unwrap();
 +              let node_1_3rd_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id());
 +              assert!(node_1_3rd_shutdown == node_1_2nd_shutdown);
 +
 +              nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &node_0_3rd_shutdown).unwrap();
 +              assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
 +
 +              nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &node_1_3rd_shutdown).unwrap();
 +              let node_0_2nd_closing_signed = get_event_msg!(nodes[0], MessageSendEvent::SendClosingSigned, nodes[1].node.get_our_node_id());
 +              assert!(node_0_closing_signed == node_0_2nd_closing_signed);
 +
 +              nodes[1].node.handle_closing_signed(&nodes[0].node.get_our_node_id(), &node_0_2nd_closing_signed).unwrap();
 +              let (_, node_1_closing_signed) = get_closing_signed_broadcast!(nodes[1].node, nodes[0].node.get_our_node_id());
 +              nodes[0].node.handle_closing_signed(&nodes[1].node.get_our_node_id(), &node_1_closing_signed.unwrap()).unwrap();
 +              let (_, node_0_none) = get_closing_signed_broadcast!(nodes[0].node, nodes[1].node.get_our_node_id());
 +              assert!(node_0_none.is_none());
 +      } else {
 +              // If one node, however, received and responded with an identical closing_signed, we
 +              // end up erroring and nodes[0] will try to broadcast its own latest commitment
 +              // transaction. There isn't really anything better we can easily do; in the future
 +              // we might explore storing a set of recently-closed channels that got disconnected
 +              // during closing_signed and avoiding broadcasting local commitment txn for some
 +              // timeout to give our counterparty enough time to (potentially) broadcast a
 +              // cooperative closing transaction.
 +              assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
 +
 +              if let Err(msgs::LightningError{action: msgs::ErrorAction::SendErrorMessage{msg}, ..}) =
 +                              nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &node_0_2nd_reestablish) {
 +                      nodes[0].node.handle_error(&nodes[1].node.get_our_node_id(), &msg);
 +                      let msgs::ErrorMessage {ref channel_id, ..} = msg;
 +                      assert_eq!(*channel_id, chan_1.2);
 +              } else { panic!("Needed SendErrorMessage close"); }
 +
 +              // get_closing_signed_broadcast usually eats the BroadcastChannelUpdate for us and
 +              // checks it, but in this case nodes[0] didn't ever get a chance to receive a
 +              // closing_signed so we do it ourselves
 +              check_closed_broadcast!(nodes[0]);
 +      }
 +
 +      assert!(nodes[0].node.list_channels().is_empty());
 +
 +      assert_eq!(nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().len(), 1);
 +      nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clear();
 +      close_channel(&nodes[1], &nodes[2], &chan_2.2, chan_2.3, true);
 +      assert!(nodes[1].node.list_channels().is_empty());
 +      assert!(nodes[2].node.list_channels().is_empty());
 +}
 +
 +#[test]
 +fn test_shutdown_rebroadcast() {
 +      do_test_shutdown_rebroadcast(0);
 +      do_test_shutdown_rebroadcast(1);
 +      do_test_shutdown_rebroadcast(2);
 +}
 +
 +#[test]
 +fn fake_network_test() {
 +      // Simple test which builds a network of ChannelManagers, connects them to each other, and
 +      // tests that payments get routed and transactions broadcast in semi-reasonable ways.
 +      let nodes = create_network(4, &[None, None, None, None]);
 +
 +      // Create some initial channels
 +      let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1, LocalFeatures::new(), LocalFeatures::new());
 +      let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2, LocalFeatures::new(), LocalFeatures::new());
 +      let chan_3 = create_announced_chan_between_nodes(&nodes, 2, 3, LocalFeatures::new(), LocalFeatures::new());
 +
 +      // Rebalance the network a bit by relaying one payment through all the channels...
-       send_payment(&nodes[1], &vec!(&nodes[2], &nodes[3])[..], 1000000);
-       send_payment(&nodes[3], &vec!(&nodes[2], &nodes[1], &nodes[0])[..], 1000000);
-       send_payment(&nodes[3], &vec!(&nodes[2], &nodes[1])[..], 1000000);
++      send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2], &nodes[3])[..], 8000000, 8_000_000);
++      send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2], &nodes[3])[..], 8000000, 8_000_000);
++      send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2], &nodes[3])[..], 8000000, 8_000_000);
++      send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2], &nodes[3])[..], 8000000, 8_000_000);
 +
 +      // Send some more payments
-       send_payment(&nodes[0], &vec!(&nodes[1], &nodes[3])[..], 1000000);
-       send_payment(&nodes[2], &vec!(&nodes[3])[..], 1000000);
-       send_payment(&nodes[1], &vec!(&nodes[3])[..], 8000000);
-       send_payment(&nodes[1], &vec!(&nodes[3])[..], 8000000);
-       send_payment(&nodes[1], &vec!(&nodes[3])[..], 8000000);
-       send_payment(&nodes[1], &vec!(&nodes[3])[..], 8000000);
-       send_payment(&nodes[1], &vec!(&nodes[3])[..], 8000000);
++      send_payment(&nodes[1], &vec!(&nodes[2], &nodes[3])[..], 1000000, 1_000_000);
++      send_payment(&nodes[3], &vec!(&nodes[2], &nodes[1], &nodes[0])[..], 1000000, 1_000_000);
++      send_payment(&nodes[3], &vec!(&nodes[2], &nodes[1])[..], 1000000, 1_000_000);
 +
 +      // Test failure packets
 +      let payment_hash_1 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2], &nodes[3])[..], 1000000).1;
 +      fail_payment(&nodes[0], &vec!(&nodes[1], &nodes[2], &nodes[3])[..], payment_hash_1);
 +
 +      // Add a new channel between nodes[1] and nodes[3] that skips nodes[2]
 +      let chan_4 = create_announced_chan_between_nodes(&nodes, 1, 3, LocalFeatures::new(), LocalFeatures::new());
 +
-       claim_payment(&nodes[1], &vec!(&nodes[2], &nodes[3], &nodes[1])[..], payment_preimage_1);
++      send_payment(&nodes[0], &vec!(&nodes[1], &nodes[3])[..], 1000000, 1_000_000);
++      send_payment(&nodes[2], &vec!(&nodes[3])[..], 1000000, 1_000_000);
++      send_payment(&nodes[1], &vec!(&nodes[3])[..], 8000000, 8_000_000);
++      send_payment(&nodes[1], &vec!(&nodes[3])[..], 8000000, 8_000_000);
++      send_payment(&nodes[1], &vec!(&nodes[3])[..], 8000000, 8_000_000);
++      send_payment(&nodes[1], &vec!(&nodes[3])[..], 8000000, 8_000_000);
++      send_payment(&nodes[1], &vec!(&nodes[3])[..], 8000000, 8_000_000);
 +
 +      // Do some rebalance loop payments, simultaneously
 +      let mut hops = Vec::with_capacity(3);
 +      hops.push(RouteHop {
 +              pubkey: nodes[2].node.get_our_node_id(),
 +              short_channel_id: chan_2.0.contents.short_channel_id,
 +              fee_msat: 0,
 +              cltv_expiry_delta: chan_3.0.contents.cltv_expiry_delta as u32
 +      });
 +      hops.push(RouteHop {
 +              pubkey: nodes[3].node.get_our_node_id(),
 +              short_channel_id: chan_3.0.contents.short_channel_id,
 +              fee_msat: 0,
 +              cltv_expiry_delta: chan_4.1.contents.cltv_expiry_delta as u32
 +      });
 +      hops.push(RouteHop {
 +              pubkey: nodes[1].node.get_our_node_id(),
 +              short_channel_id: chan_4.0.contents.short_channel_id,
 +              fee_msat: 1000000,
 +              cltv_expiry_delta: TEST_FINAL_CLTV,
 +      });
 +      hops[1].fee_msat = chan_4.1.contents.fee_base_msat as u64 + chan_4.1.contents.fee_proportional_millionths as u64 * hops[2].fee_msat as u64 / 1000000;
 +      hops[0].fee_msat = chan_3.0.contents.fee_base_msat as u64 + chan_3.0.contents.fee_proportional_millionths as u64 * hops[1].fee_msat as u64 / 1000000;
 +      let payment_preimage_1 = send_along_route(&nodes[1], Route { hops }, &vec!(&nodes[2], &nodes[3], &nodes[1])[..], 1000000).0;
 +
 +      let mut hops = Vec::with_capacity(3);
 +      hops.push(RouteHop {
 +              pubkey: nodes[3].node.get_our_node_id(),
 +              short_channel_id: chan_4.0.contents.short_channel_id,
 +              fee_msat: 0,
 +              cltv_expiry_delta: chan_3.1.contents.cltv_expiry_delta as u32
 +      });
 +      hops.push(RouteHop {
 +              pubkey: nodes[2].node.get_our_node_id(),
 +              short_channel_id: chan_3.0.contents.short_channel_id,
 +              fee_msat: 0,
 +              cltv_expiry_delta: chan_2.1.contents.cltv_expiry_delta as u32
 +      });
 +      hops.push(RouteHop {
 +              pubkey: nodes[1].node.get_our_node_id(),
 +              short_channel_id: chan_2.0.contents.short_channel_id,
 +              fee_msat: 1000000,
 +              cltv_expiry_delta: TEST_FINAL_CLTV,
 +      });
 +      hops[1].fee_msat = chan_2.1.contents.fee_base_msat as u64 + chan_2.1.contents.fee_proportional_millionths as u64 * hops[2].fee_msat as u64 / 1000000;
 +      hops[0].fee_msat = chan_3.1.contents.fee_base_msat as u64 + chan_3.1.contents.fee_proportional_millionths as u64 * hops[1].fee_msat as u64 / 1000000;
 +      let payment_hash_2 = send_along_route(&nodes[1], Route { hops }, &vec!(&nodes[3], &nodes[2], &nodes[1])[..], 1000000).1;
 +
 +      // Claim the rebalances...
 +      fail_payment(&nodes[1], &vec!(&nodes[3], &nodes[2], &nodes[1])[..], payment_hash_2);
-       claim_payment(&nodes[0], &vec!(&nodes[1], &nodes[3])[..], payment_preimage_3);
-       claim_payment(&nodes[0], &vec!(&nodes[1], &nodes[3])[..], payment_preimage_4);
-       claim_payment(&nodes[0], &vec!(&nodes[1], &nodes[3])[..], payment_preimage_5);
++      claim_payment(&nodes[1], &vec!(&nodes[2], &nodes[3], &nodes[1])[..], payment_preimage_1, 1_000_000);
 +
 +      // Add a duplicate channel between nodes[1] and nodes[3]
 +      let chan_5 = create_announced_chan_between_nodes(&nodes, 1, 3, LocalFeatures::new(), LocalFeatures::new());
 +
 +      // Send some payments across both channels
 +      let payment_preimage_3 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[3])[..], 3000000).0;
 +      let payment_preimage_4 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[3])[..], 3000000).0;
 +      let payment_preimage_5 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[3])[..], 3000000).0;
 +
 +      route_over_limit(&nodes[0], &vec!(&nodes[1], &nodes[3])[..], 3000000);
 +
 +      //TODO: Test that routes work again here as we've been notified that the channel is full
 +
-               claim_payment(&nodes[1], &[&nodes[2]], preimage);
++      claim_payment(&nodes[0], &vec!(&nodes[1], &nodes[3])[..], payment_preimage_3, 3_000_000);
++      claim_payment(&nodes[0], &vec!(&nodes[1], &nodes[3])[..], payment_preimage_4, 3_000_000);
++      claim_payment(&nodes[0], &vec!(&nodes[1], &nodes[3])[..], payment_preimage_5, 3_000_000);
 +
 +      // Close down the channels...
 +      close_channel(&nodes[0], &nodes[1], &chan_1.2, chan_1.3, true);
 +      close_channel(&nodes[1], &nodes[2], &chan_2.2, chan_2.3, false);
 +      close_channel(&nodes[2], &nodes[3], &chan_3.2, chan_3.3, true);
 +      close_channel(&nodes[1], &nodes[3], &chan_4.2, chan_4.3, false);
 +      close_channel(&nodes[1], &nodes[3], &chan_5.2, chan_5.3, false);
 +}
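 +
 +// The manual hop-fee arithmetic above follows the standard forwarding-fee
 +// formula. As a worked example (editor's illustration, numbers assumed): with
 +// fee_base_msat = 1000, fee_proportional_millionths = 10 and 1_000_000 msat due
 +// at the next hop, the fee is 1000 + 10 * 1_000_000 / 1_000_000 = 1010 msat:
 +//
 +//      fn hop_fee_msat(base_msat: u32, prop_millionths: u32, downstream_amt_msat: u64) -> u64 {
 +//              base_msat as u64 + prop_millionths as u64 * downstream_amt_msat / 1_000_000
 +//      }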
 +
 +#[test]
 +fn holding_cell_htlc_counting() {
 +      // Tests that HTLCs in the holding cell count towards the pending HTLC limits on outbound HTLCs
 +      // to ensure we don't end up with HTLCs sitting around in our holding cell for several
 +      // commitment dance rounds.
 +      let mut nodes = create_network(3, &[None, None, None]);
 +      create_announced_chan_between_nodes(&nodes, 0, 1, LocalFeatures::new(), LocalFeatures::new());
 +      let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2, LocalFeatures::new(), LocalFeatures::new());
 +
 +      let mut payments = Vec::new();
 +      for _ in 0..::ln::channel::OUR_MAX_HTLCS {
 +              let route = nodes[1].router.get_route(&nodes[2].node.get_our_node_id(), None, &Vec::new(), 100000, TEST_FINAL_CLTV).unwrap();
 +              let (payment_preimage, payment_hash) = get_payment_preimage_hash!(nodes[0]);
 +              nodes[1].node.send_payment(route, payment_hash).unwrap();
 +              payments.push((payment_preimage, payment_hash));
 +      }
 +      check_added_monitors!(nodes[1], 1);
 +
 +      let mut events = nodes[1].node.get_and_clear_pending_msg_events();
 +      assert_eq!(events.len(), 1);
 +      let initial_payment_event = SendEvent::from_event(events.pop().unwrap());
 +      assert_eq!(initial_payment_event.node_id, nodes[2].node.get_our_node_id());
 +
 +      // There is now one HTLC in an outbound commitment transaction and (OUR_MAX_HTLCS - 1) HTLCs
 +      // in the holding cell, waiting on B's RAA before they can be sent. At this point we should
 +      // not be able to add another HTLC.
 +      let route = nodes[1].router.get_route(&nodes[2].node.get_our_node_id(), None, &Vec::new(), 100000, TEST_FINAL_CLTV).unwrap();
 +      let (_, payment_hash_1) = get_payment_preimage_hash!(nodes[0]);
 +      if let APIError::ChannelUnavailable { err } = nodes[1].node.send_payment(route, payment_hash_1).unwrap_err() {
 +              assert_eq!(err, "Cannot push more than their max accepted HTLCs");
 +      } else { panic!("Unexpected event"); }
 +
 +      // This should also be true if we try to forward a payment.
 +      let route = nodes[0].router.get_route(&nodes[2].node.get_our_node_id(), None, &Vec::new(), 100000, TEST_FINAL_CLTV).unwrap();
 +      let (_, payment_hash_2) = get_payment_preimage_hash!(nodes[0]);
 +      nodes[0].node.send_payment(route, payment_hash_2).unwrap();
 +      check_added_monitors!(nodes[0], 1);
 +
 +      let mut events = nodes[0].node.get_and_clear_pending_msg_events();
 +      assert_eq!(events.len(), 1);
 +      let payment_event = SendEvent::from_event(events.pop().unwrap());
 +      assert_eq!(payment_event.node_id, nodes[1].node.get_our_node_id());
 +
 +      nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]).unwrap();
 +      commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
 +      // We have to process the pending HTLC forwards twice - the first pass tries to forward the
 +      // payment (and fails), the second processes the resulting failure and fails the HTLC
 +      // backwards.
 +      expect_pending_htlcs_forwardable!(nodes[1]);
 +      expect_pending_htlcs_forwardable!(nodes[1]);
 +      check_added_monitors!(nodes[1], 1);
 +
 +      let bs_fail_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
 +      nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &bs_fail_updates.update_fail_htlcs[0]).unwrap();
 +      commitment_signed_dance!(nodes[0], nodes[1], bs_fail_updates.commitment_signed, false, true);
 +
 +      let events = nodes[0].node.get_and_clear_pending_msg_events();
 +      assert_eq!(events.len(), 1);
 +      match events[0] {
 +              MessageSendEvent::PaymentFailureNetworkUpdate { update: msgs::HTLCFailChannelUpdate::ChannelUpdateMessage { ref msg }} => {
 +                      assert_eq!(msg.contents.short_channel_id, chan_2.0.contents.short_channel_id);
 +              },
 +              _ => panic!("Unexpected event"),
 +      }
 +
 +      let events = nodes[0].node.get_and_clear_pending_events();
 +      assert_eq!(events.len(), 1);
 +      match events[0] {
 +              Event::PaymentFailed { payment_hash, rejected_by_dest, .. } => {
 +                      assert_eq!(payment_hash, payment_hash_2);
 +                      assert!(!rejected_by_dest);
 +              },
 +              _ => panic!("Unexpected event"),
 +      }
 +
 +      // Now forward all the pending HTLCs and claim them back
 +      nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &initial_payment_event.msgs[0]).unwrap();
 +      nodes[2].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &initial_payment_event.commitment_msg).unwrap();
 +      check_added_monitors!(nodes[2], 1);
 +
 +      let (bs_revoke_and_ack, bs_commitment_signed) = get_revoke_commit_msgs!(nodes[2], nodes[1].node.get_our_node_id());
 +      nodes[1].node.handle_revoke_and_ack(&nodes[2].node.get_our_node_id(), &bs_revoke_and_ack).unwrap();
 +      check_added_monitors!(nodes[1], 1);
 +      let as_updates = get_htlc_update_msgs!(nodes[1], nodes[2].node.get_our_node_id());
 +
 +      nodes[1].node.handle_commitment_signed(&nodes[2].node.get_our_node_id(), &bs_commitment_signed).unwrap();
 +      check_added_monitors!(nodes[1], 1);
 +      let as_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[2].node.get_our_node_id());
 +
 +      for ref update in as_updates.update_add_htlcs.iter() {
 +              nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), update).unwrap();
 +      }
 +      nodes[2].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &as_updates.commitment_signed).unwrap();
 +      check_added_monitors!(nodes[2], 1);
 +      nodes[2].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &as_raa).unwrap();
 +      check_added_monitors!(nodes[2], 1);
 +      let (bs_revoke_and_ack, bs_commitment_signed) = get_revoke_commit_msgs!(nodes[2], nodes[1].node.get_our_node_id());
 +
 +      nodes[1].node.handle_revoke_and_ack(&nodes[2].node.get_our_node_id(), &bs_revoke_and_ack).unwrap();
 +      check_added_monitors!(nodes[1], 1);
 +      nodes[1].node.handle_commitment_signed(&nodes[2].node.get_our_node_id(), &bs_commitment_signed).unwrap();
 +      check_added_monitors!(nodes[1], 1);
 +      let as_final_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[2].node.get_our_node_id());
 +
 +      nodes[2].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &as_final_raa).unwrap();
 +      check_added_monitors!(nodes[2], 1);
 +
 +      expect_pending_htlcs_forwardable!(nodes[2]);
 +
 +      let events = nodes[2].node.get_and_clear_pending_events();
 +      assert_eq!(events.len(), payments.len());
 +      for (event, &(_, ref hash)) in events.iter().zip(payments.iter()) {
 +              match event {
 +                      &Event::PaymentReceived { ref payment_hash, .. } => {
 +                              assert_eq!(*payment_hash, *hash);
 +                      },
 +                      _ => panic!("Unexpected event"),
 +              };
 +      }
 +
 +      for (preimage, _) in payments.drain(..) {
-       send_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1000000);
++              claim_payment(&nodes[1], &[&nodes[2]], preimage, 100_000);
 +      }
 +
-       claim_payment(&nodes[0], &vec!(&nodes[3], &nodes[4])[..], payment_preimage);
++      send_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1000000, 1_000_000);
 +}
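 +
 +// A minimal sketch (editor's illustration; the field names are assumed, the real
 +// check lives in Channel's send path) of the counting rule this test exercises:
 +// HTLCs queued in the holding cell must count against the outbound limit just
 +// like HTLCs already in the commitment transaction:
 +//
 +//      if committed_outbound_htlcs + holding_cell_htlcs >= their_max_accepted_htlcs {
 +//              return Err("Cannot push more than their max accepted HTLCs");
 +//      }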
 +
 +#[test]
 +fn duplicate_htlc_test() {
 +      // Test that we accept duplicate payment_hash HTLCs across the network and that
 +      // claiming/failing them are all separate and don't affect each other
 +      let mut nodes = create_network(6, &[None, None, None, None, None, None]);
 +
 +      // Create some initial channels to route via 3 to 4/5 from 0/1/2
 +      create_announced_chan_between_nodes(&nodes, 0, 3, LocalFeatures::new(), LocalFeatures::new());
 +      create_announced_chan_between_nodes(&nodes, 1, 3, LocalFeatures::new(), LocalFeatures::new());
 +      create_announced_chan_between_nodes(&nodes, 2, 3, LocalFeatures::new(), LocalFeatures::new());
 +      create_announced_chan_between_nodes(&nodes, 3, 4, LocalFeatures::new(), LocalFeatures::new());
 +      create_announced_chan_between_nodes(&nodes, 3, 5, LocalFeatures::new(), LocalFeatures::new());
 +
 +      let (payment_preimage, payment_hash) = route_payment(&nodes[0], &vec!(&nodes[3], &nodes[4])[..], 1000000);
 +
 +      *nodes[0].network_payment_count.borrow_mut() -= 1;
 +      assert_eq!(route_payment(&nodes[1], &vec!(&nodes[3])[..], 1000000).0, payment_preimage);
 +
 +      *nodes[0].network_payment_count.borrow_mut() -= 1;
 +      assert_eq!(route_payment(&nodes[2], &vec!(&nodes[3], &nodes[5])[..], 1000000).0, payment_preimage);
 +
-       claim_payment(&nodes[1], &vec!(&nodes[3])[..], payment_preimage);
++      claim_payment(&nodes[0], &vec!(&nodes[3], &nodes[4])[..], payment_preimage, 1_000_000);
 +      fail_payment(&nodes[2], &vec!(&nodes[3], &nodes[5])[..], payment_hash);
-       send_payment(&nodes[0], &vec!(&nodes[1])[..], 8000000);
++      claim_payment(&nodes[1], &vec!(&nodes[3])[..], payment_preimage, 1_000_000);
 +}
 +
 +#[test]
 +fn test_duplicate_htlc_different_direction_onchain() {
 +      // Test that ChannelMonitor doesn't generate 2 preimage txn
 +      // when we have 2 HTLCs with the same preimage that cross a node
 +      // in opposite directions.
 +      let nodes = create_network(2, &[None, None]);
 +
 +      let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1, LocalFeatures::new(), LocalFeatures::new());
 +
 +      // balancing
-       nodes[0].node.claim_funds(payment_preimage);
++      send_payment(&nodes[0], &vec!(&nodes[1])[..], 8000000, 8_000_000);
 +
 +      let (payment_preimage, payment_hash) = route_payment(&nodes[0], &vec!(&nodes[1])[..], 900_000);
 +
 +      let route = nodes[1].router.get_route(&nodes[0].node.get_our_node_id(), None, &Vec::new(), 800_000, TEST_FINAL_CLTV).unwrap();
 +      send_along_route_with_hash(&nodes[1], route, &vec!(&nodes[0])[..], 800_000, payment_hash);
 +
 +      // Provide preimage to node 0 by claiming payment
-               send_payment(&nodes[0], &vec![&nodes[1], &nodes[2]][..], recv_value_0);
++      nodes[0].node.claim_funds(payment_preimage, 800_000);
 +      check_added_monitors!(nodes[0], 1);
 +
 +      // Broadcast node 1 commitment txn
 +      let remote_txn = nodes[1].node.channel_state.lock().unwrap().by_id.get(&chan_1.2).unwrap().last_local_commitment_txn.clone();
 +
 +      assert_eq!(remote_txn[0].output.len(), 4); // 1 local, 1 remote, 1 htlc inbound, 1 htlc outbound
 +      let mut has_both_htlcs = 0; // check htlcs match ones committed
 +      for outp in remote_txn[0].output.iter() {
 +              if outp.value == 800_000 / 1000 {
 +                      has_both_htlcs += 1;
 +              } else if outp.value == 900_000 / 1000 {
 +                      has_both_htlcs += 1;
 +              }
 +      }
 +      assert_eq!(has_both_htlcs, 2);
 +
 +      let header = BlockHeader { version: 0x2000_0000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
 +
 +      nodes[0].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![remote_txn[0].clone()] }, 1);
 +
 +      // Check we only broadcast 1 timeout tx
 +      let claim_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
 +      let htlc_pair = if claim_txn[0].output[0].value == 800_000 / 1000 { (claim_txn[0].clone(), claim_txn[1].clone()) } else { (claim_txn[1].clone(), claim_txn[0].clone()) };
 +      assert_eq!(claim_txn.len(), 6);
 +      assert_eq!(htlc_pair.0.input.len(), 1);
 +      assert_eq!(htlc_pair.0.input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT); // HTLC 1 <--> 0, preimage tx
 +      check_spends!(htlc_pair.0, remote_txn[0].clone());
 +      assert_eq!(htlc_pair.1.input.len(), 1);
 +      assert_eq!(htlc_pair.1.input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT); // HTLC 0 <--> 1, timeout tx
 +      check_spends!(htlc_pair.1, remote_txn[0].clone());
 +
 +      let events = nodes[0].node.get_and_clear_pending_msg_events();
 +      assert_eq!(events.len(), 2);
 +      for e in events {
 +              match e {
 +                      MessageSendEvent::BroadcastChannelUpdate { .. } => {},
 +                      MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, .. } } => {
 +                              assert!(update_add_htlcs.is_empty());
 +                              assert!(update_fail_htlcs.is_empty());
 +                              assert_eq!(update_fulfill_htlcs.len(), 1);
 +                              assert!(update_fail_malformed_htlcs.is_empty());
 +                              assert_eq!(nodes[1].node.get_our_node_id(), *node_id);
 +                      },
 +                      _ => panic!("Unexpected event"),
 +              }
 +      }
 +}
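 +
 +// Note that commitment transaction outputs are denominated in satoshis while
 +// HTLC amounts are in millisatoshis, hence the `800_000 / 1000` and
 +// `900_000 / 1000` comparisons above.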
 +
 +fn do_channel_reserve_test(test_recv: bool) {
 +      use ln::msgs::LightningError;
 +
 +      let mut nodes = create_network(3, &[None, None, None]);
 +      let chan_1 = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1900, 1001, LocalFeatures::new(), LocalFeatures::new());
 +      let chan_2 = create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 1900, 1001, LocalFeatures::new(), LocalFeatures::new());
 +
 +      let mut stat01 = get_channel_value_stat!(nodes[0], chan_1.2);
 +      let mut stat11 = get_channel_value_stat!(nodes[1], chan_1.2);
 +
 +      let mut stat12 = get_channel_value_stat!(nodes[1], chan_2.2);
 +      let mut stat22 = get_channel_value_stat!(nodes[2], chan_2.2);
 +
 +      macro_rules! get_route_and_payment_hash {
 +              ($recv_value: expr) => {{
 +                      let route = nodes[0].router.get_route(&nodes.last().unwrap().node.get_our_node_id(), None, &Vec::new(), $recv_value, TEST_FINAL_CLTV).unwrap();
 +                      let (payment_preimage, payment_hash) = get_payment_preimage_hash!(nodes[0]);
 +                      (route, payment_hash, payment_preimage)
 +              }}
 +      };
 +
 +      macro_rules! expect_forward {
 +              ($node: expr) => {{
 +                      let mut events = $node.node.get_and_clear_pending_msg_events();
 +                      assert_eq!(events.len(), 1);
 +                      check_added_monitors!($node, 1);
 +                      let payment_event = SendEvent::from_event(events.remove(0));
 +                      payment_event
 +              }}
 +      }
 +
 +      let feemsat = 239; // somehow we know?
 +      let total_fee_msat = (nodes.len() - 2) as u64 * 239;
 +
 +      let recv_value_0 = stat01.their_max_htlc_value_in_flight_msat - total_fee_msat;
 +
 +      // attempt to send amt_msat > their_max_htlc_value_in_flight_msat
 +      {
 +              let (route, our_payment_hash, _) = get_route_and_payment_hash!(recv_value_0 + 1);
 +              assert!(route.hops.iter().rev().skip(1).all(|h| h.fee_msat == feemsat));
 +              let err = nodes[0].node.send_payment(route, our_payment_hash).err().unwrap();
 +              match err {
 +                      APIError::ChannelUnavailable{err} => assert_eq!(err, "Cannot send value that would put us over the max HTLC value in flight our peer will accept"),
 +                      _ => panic!("Unknown error variants"),
 +              }
 +      }
 +
 +      let mut htlc_id = 0;
 +      // channel reserve is bigger than their_max_htlc_value_in_flight_msat so loop to deplete
 +      // nodes[0]'s wealth
 +      loop {
 +              let amt_msat = recv_value_0 + total_fee_msat;
 +              if stat01.value_to_self_msat - amt_msat < stat01.channel_reserve_msat {
 +                      break;
 +              }
-       claim_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), our_payment_preimage_1);
-       claim_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), our_payment_preimage_21);
-       claim_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), our_payment_preimage_22);
++              send_payment(&nodes[0], &vec![&nodes[1], &nodes[2]][..], recv_value_0, recv_value_0);
 +              htlc_id += 1;
 +
 +              let (stat01_, stat11_, stat12_, stat22_) = (
 +                      get_channel_value_stat!(nodes[0], chan_1.2),
 +                      get_channel_value_stat!(nodes[1], chan_1.2),
 +                      get_channel_value_stat!(nodes[1], chan_2.2),
 +                      get_channel_value_stat!(nodes[2], chan_2.2),
 +              );
 +
 +              assert_eq!(stat01_.value_to_self_msat, stat01.value_to_self_msat - amt_msat);
 +              assert_eq!(stat11_.value_to_self_msat, stat11.value_to_self_msat + amt_msat);
 +              assert_eq!(stat12_.value_to_self_msat, stat12.value_to_self_msat - (amt_msat - feemsat));
 +              assert_eq!(stat22_.value_to_self_msat, stat22.value_to_self_msat + (amt_msat - feemsat));
 +              stat01 = stat01_; stat11 = stat11_; stat12 = stat12_; stat22 = stat22_;
 +      }
 +
 +      {
 +              let recv_value = stat01.value_to_self_msat - stat01.channel_reserve_msat - total_fee_msat;
 +              // attempt to trigger a channel_reserve violation
 +              let (route, our_payment_hash, _) = get_route_and_payment_hash!(recv_value + 1);
 +              let err = nodes[0].node.send_payment(route.clone(), our_payment_hash).err().unwrap();
 +              match err {
 +                      APIError::ChannelUnavailable{err} => assert_eq!(err, "Cannot send value that would put us over their reserve value"),
 +                      _ => panic!("Unknown error variants"),
 +              }
 +      }
 +
 +      // adding pending output
 +      let recv_value_1 = (stat01.value_to_self_msat - stat01.channel_reserve_msat - total_fee_msat)/2;
 +      let amt_msat_1 = recv_value_1 + total_fee_msat;
 +
 +      let (route_1, our_payment_hash_1, our_payment_preimage_1) = get_route_and_payment_hash!(recv_value_1);
 +      let payment_event_1 = {
 +              nodes[0].node.send_payment(route_1, our_payment_hash_1).unwrap();
 +              check_added_monitors!(nodes[0], 1);
 +
 +              let mut events = nodes[0].node.get_and_clear_pending_msg_events();
 +              assert_eq!(events.len(), 1);
 +              SendEvent::from_event(events.remove(0))
 +      };
 +      nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event_1.msgs[0]).unwrap();
 +
 +      // channel reserve test with htlc pending output > 0
 +      let recv_value_2 = stat01.value_to_self_msat - amt_msat_1 - stat01.channel_reserve_msat - total_fee_msat;
 +      {
 +              let (route, our_payment_hash, _) = get_route_and_payment_hash!(recv_value_2 + 1);
 +              match nodes[0].node.send_payment(route, our_payment_hash).err().unwrap() {
 +                      APIError::ChannelUnavailable{err} => assert_eq!(err, "Cannot send value that would put us over their reserve value"),
 +                      _ => panic!("Unknown error variants"),
 +              }
 +      }
 +
 +      {
 +              // channel_reserve test on the nodes[1] side
 +              let (route, our_payment_hash, _) = get_route_and_payment_hash!(recv_value_2 + 1);
 +
 +              // Need to manually create the update_add_htlc message to get around the channel reserve check in send_htlc()
 +              let secp_ctx = Secp256k1::new();
 +              let session_priv = SecretKey::from_slice(&{
 +                      let mut session_key = [0; 32];
 +                      let mut rng = thread_rng();
 +                      rng.fill_bytes(&mut session_key);
 +                      session_key
 +              }).expect("RNG is bad!");
 +
 +              let cur_height = nodes[0].node.latest_block_height.load(Ordering::Acquire) as u32 + 1;
 +              let onion_keys = onion_utils::construct_onion_keys(&secp_ctx, &route, &session_priv).unwrap();
 +              let (onion_payloads, htlc_msat, htlc_cltv) = onion_utils::build_onion_payloads(&route, cur_height).unwrap();
 +              let onion_packet = onion_utils::construct_onion_packet(onion_payloads, onion_keys, &our_payment_hash);
 +              let msg = msgs::UpdateAddHTLC {
 +                      channel_id: chan_1.2,
 +                      htlc_id,
 +                      amount_msat: htlc_msat,
 +                      payment_hash: our_payment_hash,
 +                      cltv_expiry: htlc_cltv,
 +                      onion_routing_packet: onion_packet,
 +              };
 +
 +              if test_recv {
 +                      let err = nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &msg).err().unwrap();
 +                      match err {
 +                              LightningError{err, .. } => assert_eq!(err, "Remote HTLC add would put them over their reserve value"),
 +                      }
 +                      // The over-reserve HTLC add is a protocol violation, so nodes[1] closes the
 +                      // channel (making the rest of this test case moot) and is left with only its
 +                      // channel to nodes[2].
 +                      assert_eq!(nodes[1].node.list_channels().len(), 1);
 +                      check_closed_broadcast!(nodes[1]);
 +                      return;
 +              }
 +      }
 +
 +      // split the rest to test holding cell
 +      let recv_value_21 = recv_value_2/2;
 +      let recv_value_22 = recv_value_2 - recv_value_21 - total_fee_msat;
 +      {
 +              let stat = get_channel_value_stat!(nodes[0], chan_1.2);
 +              assert_eq!(stat.value_to_self_msat - (stat.pending_outbound_htlcs_amount_msat + recv_value_21 + recv_value_22 + total_fee_msat + total_fee_msat), stat.channel_reserve_msat);
 +      }
 +
 +      // now see if they go through on both sides
 +      let (route_21, our_payment_hash_21, our_payment_preimage_21) = get_route_and_payment_hash!(recv_value_21);
 +      // but this one will get stuck in the holding cell
 +      nodes[0].node.send_payment(route_21, our_payment_hash_21).unwrap();
 +      check_added_monitors!(nodes[0], 0);
 +      let events = nodes[0].node.get_and_clear_pending_events();
 +      assert_eq!(events.len(), 0);
 +
 +      // test with outbound holding cell amount > 0
 +      {
 +              let (route, our_payment_hash, _) = get_route_and_payment_hash!(recv_value_22+1);
 +              match nodes[0].node.send_payment(route, our_payment_hash).err().unwrap() {
 +                      APIError::ChannelUnavailable{err} => assert_eq!(err, "Cannot send value that would put us over their reserve value"),
 +                      _ => panic!("Unknown error variants"),
 +              }
 +      }
 +
 +      let (route_22, our_payment_hash_22, our_payment_preimage_22) = get_route_and_payment_hash!(recv_value_22);
 +      // this will also get stuck in the holding cell
 +      nodes[0].node.send_payment(route_22, our_payment_hash_22).unwrap();
 +      check_added_monitors!(nodes[0], 0);
 +      assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
 +      assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
 +
 +      // flush the pending htlc
 +      nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event_1.commitment_msg).unwrap();
 +      let (as_revoke_and_ack, as_commitment_signed) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
 +      check_added_monitors!(nodes[1], 1);
 +
 +      nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &as_revoke_and_ack).unwrap();
 +      check_added_monitors!(nodes[0], 1);
 +      let commitment_update_2 = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
 +
 +      nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &as_commitment_signed).unwrap();
 +      let bs_revoke_and_ack = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
 +      // No commitment_signed so get_event_msg's assert(len == 1) passes
 +      check_added_monitors!(nodes[0], 1);
 +
 +      nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &bs_revoke_and_ack).unwrap();
 +      assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
 +      check_added_monitors!(nodes[1], 1);
 +
 +      expect_pending_htlcs_forwardable!(nodes[1]);
 +
 +      let ref payment_event_11 = expect_forward!(nodes[1]);
 +      nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &payment_event_11.msgs[0]).unwrap();
 +      commitment_signed_dance!(nodes[2], nodes[1], payment_event_11.commitment_msg, false);
 +
 +      expect_pending_htlcs_forwardable!(nodes[2]);
 +      expect_payment_received!(nodes[2], our_payment_hash_1, recv_value_1);
 +
 +      // flush the htlcs in the holding cell
 +      assert_eq!(commitment_update_2.update_add_htlcs.len(), 2);
 +      nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &commitment_update_2.update_add_htlcs[0]).unwrap();
 +      nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &commitment_update_2.update_add_htlcs[1]).unwrap();
 +      commitment_signed_dance!(nodes[1], nodes[0], &commitment_update_2.commitment_signed, false);
 +      expect_pending_htlcs_forwardable!(nodes[1]);
 +
 +      let ref payment_event_3 = expect_forward!(nodes[1]);
 +      assert_eq!(payment_event_3.msgs.len(), 2);
 +      nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &payment_event_3.msgs[0]).unwrap();
 +      nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &payment_event_3.msgs[1]).unwrap();
 +
 +      commitment_signed_dance!(nodes[2], nodes[1], &payment_event_3.commitment_msg, false);
 +      expect_pending_htlcs_forwardable!(nodes[2]);
 +
 +      let events = nodes[2].node.get_and_clear_pending_events();
 +      assert_eq!(events.len(), 2);
 +      match events[0] {
 +              Event::PaymentReceived { ref payment_hash, amt } => {
 +                      assert_eq!(our_payment_hash_21, *payment_hash);
 +                      assert_eq!(recv_value_21, amt);
 +              },
 +              _ => panic!("Unexpected event"),
 +      }
 +      match events[1] {
 +              Event::PaymentReceived { ref payment_hash, amt } => {
 +                      assert_eq!(our_payment_hash_22, *payment_hash);
 +                      assert_eq!(recv_value_22, amt);
 +              },
 +              _ => panic!("Unexpected event"),
 +      }
 +
-       assert!(nodes[1].node.claim_funds(payment_preimage_1));
++      claim_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), our_payment_preimage_1, recv_value_1);
++      claim_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), our_payment_preimage_21, recv_value_21);
++      claim_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), our_payment_preimage_22, recv_value_22);
 +
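 +      // Final accounting, worked out from the flows above: each of the three payments
 +      // moved recv_value_i + total_fee_msat out of nodes[0]'s balance, which should
 +      // leave nodes[0] sitting exactly at its channel reserve.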
 +      let expected_value_to_self = stat01.value_to_self_msat - (recv_value_1 + total_fee_msat) - (recv_value_21 + total_fee_msat) - (recv_value_22 + total_fee_msat);
 +      let stat0 = get_channel_value_stat!(nodes[0], chan_1.2);
 +      assert_eq!(stat0.value_to_self_msat, expected_value_to_self);
 +      assert_eq!(stat0.value_to_self_msat, stat0.channel_reserve_msat);
 +
 +      let stat2 = get_channel_value_stat!(nodes[2], chan_2.2);
 +      assert_eq!(stat2.value_to_self_msat, stat22.value_to_self_msat + recv_value_1 + recv_value_21 + recv_value_22);
 +}
 +
 +#[test]
 +fn channel_reserve_test() {
 +      do_channel_reserve_test(false);
 +      do_channel_reserve_test(true);
 +}
 +
 +#[test]
 +fn channel_reserve_in_flight_removes() {
 +      // In cases where one side claims an HTLC, it thinks it has additional available funds that it
 +      // can send to its counterparty, but due to update ordering, the other side may not yet have
 +      // considered those HTLCs fully removed.
 +      // This tests that we don't count HTLCs which will not be included in the next remote
 +      // commitment transaction towards the reserve value (as it implies no commitment transaction
 +      // will be generated which violates the remote reserve value).
 +      // This was broken previously, and discovered by the chanmon_fail_consistency fuzz test.
 +      // To test this we:
 +      //  * route two HTLCs from A to B (note that, at a high level, this test is checking that, when
 +      //    you consider the values of both of these HTLCs, B may not send an HTLC back to A, but if
 +      //    you only consider the value of the first HTLC, it may),
 +      //  * start routing a third HTLC from A to B,
 +      //  * claim the first two HTLCs (though B will generate an update_fulfill for one, and put
 +      //    the other claim in its holding cell, as it immediately goes into AwaitingRAA),
 +      //  * deliver the first fulfill from B
 +      //  * deliver the update_add and an RAA from A, resulting in B freeing the second holding cell
 +      //    claim,
 +      //  * deliver A's response CS and RAA.
 +      //    This results in A having the second HTLC in AwaitingRemovedRemoteRevoke, but B having
 +      //    removed it fully. B now has the push_msat plus the first two HTLCs in value.
 +      //  * Now B happily sends another HTLC, potentially violating its reserve value from A's point
 +      //    of view (if A counts the AwaitingRemovedRemoteRevoke HTLC).
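 +      // Loosely, the exchange just described looks like (claims/adds only, fees omitted):
 +      //   B -> A: update_fulfill(1) + CS        (the claim of 2 waits in B's holding cell)
 +      //   A -> B: update_add(3) + CS, then RAA  (the RAA frees B's held claim of 2)
 +      //   B -> A: update_fulfill(2) + CS
 +      //   A -> B: CS + RAA                      (B fully removes 2; A still awaits an RAA)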
 +      let mut nodes = create_network(2, &[None, None]);
 +      let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1, LocalFeatures::new(), LocalFeatures::new());
 +
 +      let b_chan_values = get_channel_value_stat!(nodes[1], chan_1.2);
 +      // Route the first two HTLCs.
 +      let (payment_preimage_1, _) = route_payment(&nodes[0], &[&nodes[1]], b_chan_values.channel_reserve_msat - b_chan_values.value_to_self_msat - 10000);
 +      let (payment_preimage_2, _) = route_payment(&nodes[0], &[&nodes[1]], 20000);
 +
 +      // Start routing the third HTLC (this is just used to get everyone in the right state).
 +      let (payment_preimage_3, payment_hash_3) = get_payment_preimage_hash!(nodes[0]);
 +      let send_1 = {
 +              let route = nodes[0].router.get_route(&nodes[1].node.get_our_node_id(), None, &[], 100000, TEST_FINAL_CLTV).unwrap();
 +              nodes[0].node.send_payment(route, payment_hash_3).unwrap();
 +              check_added_monitors!(nodes[0], 1);
 +              let mut events = nodes[0].node.get_and_clear_pending_msg_events();
 +              assert_eq!(events.len(), 1);
 +              SendEvent::from_event(events.remove(0))
 +      };
 +
 +      // Now claim both of the first two HTLCs on B's end, putting B in AwaitingRAA and generating an
 +      // initial fulfill/CS.
-       assert!(nodes[1].node.claim_funds(payment_preimage_2));
++      assert!(nodes[1].node.claim_funds(payment_preimage_1, b_chan_values.channel_reserve_msat - b_chan_values.value_to_self_msat - 10000));
 +      check_added_monitors!(nodes[1], 1);
 +      let bs_removes = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
 +
 +      // This claim goes in B's holding cell, allowing us to have a pending B->A RAA which does not
 +      // remove the second HTLC when we send the HTLC back from B to A.
-       claim_payment(&nodes[1], &[&nodes[0]], payment_preimage_4);
-       claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_3);
++      assert!(nodes[1].node.claim_funds(payment_preimage_2, 20000));
 +      check_added_monitors!(nodes[1], 1);
 +      assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
 +
 +      nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_removes.update_fulfill_htlcs[0]).unwrap();
 +      nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_removes.commitment_signed).unwrap();
 +      check_added_monitors!(nodes[0], 1);
 +      let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
 +      expect_payment_sent!(nodes[0], payment_preimage_1);
 +
 +      nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &send_1.msgs[0]).unwrap();
 +      nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &send_1.commitment_msg).unwrap();
 +      check_added_monitors!(nodes[1], 1);
 +      // B is already AwaitingRAA, so can't generate a CS here
 +      let bs_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
 +
 +      nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa).unwrap();
 +      check_added_monitors!(nodes[1], 1);
 +      let bs_cs = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
 +
 +      nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_raa).unwrap();
 +      check_added_monitors!(nodes[0], 1);
 +      let as_cs = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
 +
 +      nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_cs.commitment_signed).unwrap();
 +      check_added_monitors!(nodes[1], 1);
 +      let bs_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
 +
 +      // The second HTLC is removed, but as A is in AwaitingRAA it can't generate a CS here, so the
 +      // RAA that B generated above doesn't fully resolve the second HTLC from A's point of view.
 +      // However, the RAA A generates here *does* fully resolve the HTLC from B's point of view (as A
 +      // can no longer broadcast a commitment transaction with it and B has the preimage so can go
 +      // on-chain as necessary).
 +      nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_cs.update_fulfill_htlcs[0]).unwrap();
 +      nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_cs.commitment_signed).unwrap();
 +      check_added_monitors!(nodes[0], 1);
 +      let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
 +      expect_payment_sent!(nodes[0], payment_preimage_2);
 +
 +      nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa).unwrap();
 +      check_added_monitors!(nodes[1], 1);
 +      assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
 +
 +      expect_pending_htlcs_forwardable!(nodes[1]);
 +      expect_payment_received!(nodes[1], payment_hash_3, 100000);
 +
 +      // Note that as this RAA was generated before the delivery of the update_fulfill it shouldn't
 +      // resolve the second HTLC from A's point of view.
 +      nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_raa).unwrap();
 +      check_added_monitors!(nodes[0], 1);
 +      let as_cs = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
 +
 +      // Now that B doesn't have the second RAA anymore, but A still does, send a payment from B back
 +      // to A to ensure that A doesn't count the almost-removed HTLC in update_add processing.
 +      let (payment_preimage_4, payment_hash_4) = get_payment_preimage_hash!(nodes[1]);
 +      let send_2 = {
 +              let route = nodes[1].router.get_route(&nodes[0].node.get_our_node_id(), None, &[], 10000, TEST_FINAL_CLTV).unwrap();
 +              nodes[1].node.send_payment(route, payment_hash_4).unwrap();
 +              check_added_monitors!(nodes[1], 1);
 +              let mut events = nodes[1].node.get_and_clear_pending_msg_events();
 +              assert_eq!(events.len(), 1);
 +              SendEvent::from_event(events.remove(0))
 +      };
 +
 +      nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &send_2.msgs[0]).unwrap();
 +      nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &send_2.commitment_msg).unwrap();
 +      check_added_monitors!(nodes[0], 1);
 +      let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
 +
 +      // Now just resolve all the outstanding messages/HTLCs for completeness...
 +
 +      nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_cs.commitment_signed).unwrap();
 +      check_added_monitors!(nodes[1], 1);
 +      let bs_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
 +
 +      nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa).unwrap();
 +      check_added_monitors!(nodes[1], 1);
 +
 +      nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_raa).unwrap();
 +      check_added_monitors!(nodes[0], 1);
 +      let as_cs = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
 +
 +      nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_cs.commitment_signed).unwrap();
 +      check_added_monitors!(nodes[1], 1);
 +      let bs_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
 +
 +      nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_raa).unwrap();
 +      check_added_monitors!(nodes[0], 1);
 +
 +      expect_pending_htlcs_forwardable!(nodes[0]);
 +      expect_payment_received!(nodes[0], payment_hash_4, 10000);
 +
-       send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2], &nodes[3], &nodes[4])[..], 8000000);
-       send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2], &nodes[3], &nodes[4])[..], 8000000);
-       send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2], &nodes[3], &nodes[4])[..], 8000000);
-       send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2], &nodes[3], &nodes[4])[..], 8000000);
++      claim_payment(&nodes[1], &[&nodes[0]], payment_preimage_4, 10_000);
++      claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_3, 100_000);
 +}
 +
 +#[test]
 +fn channel_monitor_network_test() {
 +      // Simple test which builds a network of ChannelManagers, connects them to each other, and
 +      // tests that ChannelMonitor is able to recover from various states.
 +      let nodes = create_network(5, &[None, None, None, None, None]);
 +
 +      // Create some initial channels
 +      let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1, LocalFeatures::new(), LocalFeatures::new());
 +      let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2, LocalFeatures::new(), LocalFeatures::new());
 +      let chan_3 = create_announced_chan_between_nodes(&nodes, 2, 3, LocalFeatures::new(), LocalFeatures::new());
 +      let chan_4 = create_announced_chan_between_nodes(&nodes, 3, 4, LocalFeatures::new(), LocalFeatures::new());
 +
 +      // Rebalance the network a bit by relaying one payment through all the channels...
-               ($node: expr, $prev_node: expr, $preimage: expr) => {
++      send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2], &nodes[3], &nodes[4])[..], 8000000, 8_000_000);
++      send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2], &nodes[3], &nodes[4])[..], 8000000, 8_000_000);
++      send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2], &nodes[3], &nodes[4])[..], 8000000, 8_000_000);
++      send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2], &nodes[3], &nodes[4])[..], 8000000, 8_000_000);
 +
 +      // Simple case with no pending HTLCs:
 +      nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), true);
 +      {
 +              let mut node_txn = test_txn_broadcast(&nodes[1], &chan_1, None, HTLCType::NONE);
 +              let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
 +              nodes[0].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![node_txn.drain(..).next().unwrap()] }, 1);
 +              test_txn_broadcast(&nodes[0], &chan_1, None, HTLCType::NONE);
 +      }
 +      get_announce_close_broadcast_events(&nodes, 0, 1);
 +      assert_eq!(nodes[0].node.list_channels().len(), 0);
 +      assert_eq!(nodes[1].node.list_channels().len(), 1);
 +
 +      // One pending HTLC is discarded by the force-close:
 +      let payment_preimage_1 = route_payment(&nodes[1], &vec!(&nodes[2], &nodes[3])[..], 3000000).0;
 +
 +      // Simple case of one pending HTLC to HTLC-Timeout
 +      nodes[1].node.peer_disconnected(&nodes[2].node.get_our_node_id(), true);
 +      {
 +              let mut node_txn = test_txn_broadcast(&nodes[1], &chan_2, None, HTLCType::TIMEOUT);
 +              let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
 +              nodes[2].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![node_txn.drain(..).next().unwrap()] }, 1);
 +              test_txn_broadcast(&nodes[2], &chan_2, None, HTLCType::NONE);
 +      }
 +      get_announce_close_broadcast_events(&nodes, 1, 2);
 +      assert_eq!(nodes[1].node.list_channels().len(), 0);
 +      assert_eq!(nodes[2].node.list_channels().len(), 1);
 +
 +      macro_rules! claim_funds {
-                               assert!($node.node.claim_funds($preimage));
++              ($node: expr, $prev_node: expr, $preimage: expr, $amount: expr) => {
 +                      {
-               claim_funds!(nodes[3], nodes[2], payment_preimage_1);
++                              assert!($node.node.claim_funds($preimage, $amount));
 +                              check_added_monitors!($node, 1);
 +
 +                              let events = $node.node.get_and_clear_pending_msg_events();
 +                              assert_eq!(events.len(), 1);
 +                              match events[0] {
 +                                      MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fail_htlcs, .. } } => {
 +                                              assert!(update_add_htlcs.is_empty());
 +                                              assert!(update_fail_htlcs.is_empty());
 +                                              assert_eq!(*node_id, $prev_node.node.get_our_node_id());
 +                                      },
 +                                      _ => panic!("Unexpected event"),
 +                              };
 +                      }
 +              }
 +      }
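 +
 +      // Unlike claim_payment, claim_funds! only checks that the claim produces an
 +      // update_fulfill destined for the previous hop, without delivering it; it is
 +      // used below because that hop has already disconnected.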
 +
 +      // nodes[3] gets the preimage, but nodes[2] already disconnected, resulting in a nodes[2]
 +      // HTLC-Timeout and a nodes[3] claim against it (+ its own announces)
 +      nodes[2].node.peer_disconnected(&nodes[3].node.get_our_node_id(), true);
 +      {
 +              let node_txn = test_txn_broadcast(&nodes[2], &chan_3, None, HTLCType::TIMEOUT);
 +
 +              // Claim the payment on nodes[3], giving it knowledge of the preimage
-               claim_funds!(nodes[4], nodes[3], payment_preimage_2);
++              claim_funds!(nodes[3], nodes[2], payment_preimage_1, 3_000_000);
 +
 +              let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
 +              nodes[3].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![node_txn[0].clone()] }, 1);
 +
 +              check_preimage_claim(&nodes[3], &node_txn);
 +      }
 +      get_announce_close_broadcast_events(&nodes, 2, 3);
 +      assert_eq!(nodes[2].node.list_channels().len(), 0);
 +      assert_eq!(nodes[3].node.list_channels().len(), 1);
 +
 +      { // Cheat and reset nodes[4]'s height to 1
 +              let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
 +              nodes[4].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![] }, 1);
 +      }
 +
 +      assert_eq!(nodes[3].node.latest_block_height.load(Ordering::Acquire), 1);
 +      assert_eq!(nodes[4].node.latest_block_height.load(Ordering::Acquire), 1);
 +      // One pending HTLC to time out:
 +      let payment_preimage_2 = route_payment(&nodes[3], &vec!(&nodes[4])[..], 3000000).0;
 +      // CLTV expires at TEST_FINAL_CLTV + 1 (current height) + 1 (added in send_payment for
 +      // buffer space).
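 +      // Illustratively: the HTLC therefore expires at height TEST_FINAL_CLTV + 2, and
 +      // the loop below connects blocks through
 +      // TEST_FINAL_CLTV + 2 + LATENCY_GRACE_PERIOD_BLOCKS, far enough past expiry for
 +      // nodes[3] to force-close and broadcast its HTLC-Timeout.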
 +
 +      {
 +              let mut header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
 +              nodes[3].chain_monitor.block_connected_checked(&header, 2, &Vec::new()[..], &[0; 0]);
 +              for i in 3..TEST_FINAL_CLTV + 2 + LATENCY_GRACE_PERIOD_BLOCKS + 1 {
 +                      header = BlockHeader { version: 0x20000000, prev_blockhash: header.bitcoin_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
 +                      nodes[3].chain_monitor.block_connected_checked(&header, i, &Vec::new()[..], &[0; 0]);
 +              }
 +
 +              let node_txn = test_txn_broadcast(&nodes[3], &chan_4, None, HTLCType::TIMEOUT);
 +
 +              // Claim the payment on nodes[4], giving it knowledge of the preimage
-       claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage_3);
++              claim_funds!(nodes[4], nodes[3], payment_preimage_2, 3_000_000);
 +
 +              header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
 +              nodes[4].chain_monitor.block_connected_checked(&header, 2, &Vec::new()[..], &[0; 0]);
 +              for i in 3..TEST_FINAL_CLTV + 2 - CLTV_CLAIM_BUFFER + 1 {
 +                      header = BlockHeader { version: 0x20000000, prev_blockhash: header.bitcoin_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
 +                      nodes[4].chain_monitor.block_connected_checked(&header, i, &Vec::new()[..], &[0; 0]);
 +              }
 +
 +              test_txn_broadcast(&nodes[4], &chan_4, None, HTLCType::SUCCESS);
 +
 +              header = BlockHeader { version: 0x20000000, prev_blockhash: header.bitcoin_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
 +              nodes[4].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![node_txn[0].clone()] }, TEST_FINAL_CLTV - 5);
 +
 +              check_preimage_claim(&nodes[4], &node_txn);
 +      }
 +      get_announce_close_broadcast_events(&nodes, 3, 4);
 +      assert_eq!(nodes[3].node.list_channels().len(), 0);
 +      assert_eq!(nodes[4].node.list_channels().len(), 0);
 +}
 +
 +#[test]
 +fn test_justice_tx() {
 +      // Test justice txn built on revoked HTLC-Success tx, against both sides
 +
 +      let mut alice_config = UserConfig::new();
 +      alice_config.channel_options.announced_channel = true;
 +      alice_config.peer_channel_config_limits.force_announced_channel_preference = false;
 +      alice_config.own_channel_config.our_to_self_delay = 6 * 24 * 5;
 +      let mut bob_config = UserConfig::new();
 +      bob_config.channel_options.announced_channel = true;
 +      bob_config.peer_channel_config_limits.force_announced_channel_preference = false;
 +      bob_config.own_channel_config.our_to_self_delay = 6 * 24 * 3;
 +      let nodes = create_network(2, &[Some(alice_config), Some(bob_config)]);
 +      // Create some new channels:
 +      let chan_5 = create_announced_chan_between_nodes(&nodes, 0, 1, LocalFeatures::new(), LocalFeatures::new());
 +
 +      // A pending HTLC which will be revoked:
 +      let payment_preimage_3 = route_payment(&nodes[0], &vec!(&nodes[1])[..], 3000000).0;
 +      // Get the will-be-revoked local txn from nodes[0]
 +      let revoked_local_txn = nodes[0].node.channel_state.lock().unwrap().by_id.iter().next().unwrap().1.last_local_commitment_txn.clone();
 +      assert_eq!(revoked_local_txn.len(), 2); // First commitment tx, then HTLC tx
 +      assert_eq!(revoked_local_txn[0].input.len(), 1);
 +      assert_eq!(revoked_local_txn[0].input[0].previous_output.txid, chan_5.3.txid());
 +      assert_eq!(revoked_local_txn[0].output.len(), 2); // Only HTLC and output back to 0 are present
 +      assert_eq!(revoked_local_txn[1].input.len(), 1);
 +      assert_eq!(revoked_local_txn[1].input[0].previous_output.txid, revoked_local_txn[0].txid());
 +      assert_eq!(revoked_local_txn[1].input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT); // HTLC-Timeout
 +      // Revoke the old state
-       claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage_4);
++      claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage_3, 3_000_000);
 +
 +      {
 +              let mut header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
 +              nodes[1].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![revoked_local_txn[0].clone()] }, 1);
 +              {
 +                      let mut node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
 +                      assert_eq!(node_txn.len(), 3);
 +                      assert_eq!(node_txn.pop().unwrap(), node_txn[0]); // An outpoint registration will result in a 2nd block_connected
 +                      assert_eq!(node_txn[0].input.len(), 2); // We should claim the revoked output and the HTLC output
 +
 +                      check_spends!(node_txn[0], revoked_local_txn[0].clone());
 +                      node_txn.swap_remove(0);
 +              }
 +              test_txn_broadcast(&nodes[1], &chan_5, None, HTLCType::NONE);
 +
 +              nodes[0].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![revoked_local_txn[0].clone()] }, 1);
 +              let node_txn = test_txn_broadcast(&nodes[0], &chan_5, Some(revoked_local_txn[0].clone()), HTLCType::TIMEOUT);
 +              header = BlockHeader { version: 0x20000000, prev_blockhash: header.bitcoin_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
 +              nodes[1].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![node_txn[1].clone()] }, 1);
 +              test_revoked_htlc_claim_txn_broadcast(&nodes[1], node_txn[1].clone());
 +      }
 +      get_announce_close_broadcast_events(&nodes, 0, 1);
 +
 +      assert_eq!(nodes[0].node.list_channels().len(), 0);
 +      assert_eq!(nodes[1].node.list_channels().len(), 0);
 +
 +      // We test the justice tx built by A on B's revoked HTLC-Success tx
 +      // Create some new channels:
 +      let chan_6 = create_announced_chan_between_nodes(&nodes, 0, 1, LocalFeatures::new(), LocalFeatures::new());
 +
 +      // A pending HTLC which will be revoked:
 +      let payment_preimage_4 = route_payment(&nodes[0], &vec!(&nodes[1])[..], 3000000).0;
 +      // Get the will-be-revoked local txn from B
 +      let revoked_local_txn = nodes[1].node.channel_state.lock().unwrap().by_id.iter().next().unwrap().1.last_local_commitment_txn.clone();
 +      assert_eq!(revoked_local_txn.len(), 1); // Only commitment tx
 +      assert_eq!(revoked_local_txn[0].input.len(), 1);
 +      assert_eq!(revoked_local_txn[0].input[0].previous_output.txid, chan_6.3.txid());
 +      assert_eq!(revoked_local_txn[0].output.len(), 2); // Only HTLC and output back to A are present
 +      // Revoke the old state
-       send_payment(&nodes[0], &vec!(&nodes[1])[..], 5000000);
++      claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage_4, 3_000_000);
 +      {
 +              let mut header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
 +              nodes[0].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![revoked_local_txn[0].clone()] }, 1);
 +              {
 +                      let mut node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
 +                      assert_eq!(node_txn.len(), 3);
 +                      assert_eq!(node_txn.pop().unwrap(), node_txn[0]); // An outpoint registration will result in a 2nd block_connected
 +                      assert_eq!(node_txn[0].input.len(), 1); // We claim the received HTLC output
 +
 +                      check_spends!(node_txn[0], revoked_local_txn[0].clone());
 +                      node_txn.swap_remove(0);
 +              }
 +              test_txn_broadcast(&nodes[0], &chan_6, None, HTLCType::NONE);
 +
 +              nodes[1].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![revoked_local_txn[0].clone()] }, 1);
 +              let node_txn = test_txn_broadcast(&nodes[1], &chan_6, Some(revoked_local_txn[0].clone()), HTLCType::SUCCESS);
 +              header = BlockHeader { version: 0x20000000, prev_blockhash: header.bitcoin_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
 +              nodes[0].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![node_txn[1].clone()] }, 1);
 +              test_revoked_htlc_claim_txn_broadcast(&nodes[0], node_txn[1].clone());
 +      }
 +      get_announce_close_broadcast_events(&nodes, 0, 1);
 +      assert_eq!(nodes[0].node.list_channels().len(), 0);
 +      assert_eq!(nodes[1].node.list_channels().len(), 0);
 +}
 +
 +#[test]
 +fn revoked_output_claim() {
 +      // Simple test to ensure a node will claim a revoked output when a stale remote commitment
 +      // transaction is broadcast by its counterparty
 +      let nodes = create_network(2, &[None, None]);
 +      let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1, LocalFeatures::new(), LocalFeatures::new());
 +      // nodes[0] is going to revoke an old state, thus nodes[1] should be able to claim the revoked output
 +      let revoked_local_txn = nodes[0].node.channel_state.lock().unwrap().by_id.get(&chan_1.2).unwrap().last_local_commitment_txn.clone();
 +      assert_eq!(revoked_local_txn.len(), 1);
 +      // Only output is the full channel value back to nodes[0]:
 +      assert_eq!(revoked_local_txn[0].output.len(), 1);
 +      // Send a payment through, updating everyone's latest commitment txn
-       send_payment(&nodes[0], &vec!(&nodes[1])[..], 8000000);
++      send_payment(&nodes[0], &vec!(&nodes[1])[..], 5000000, 5_000_000);
 +
 +      // Inform nodes[1] that nodes[0] broadcast a stale tx
 +      let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
 +      nodes[1].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![revoked_local_txn[0].clone()] }, 1);
 +      let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
 +      assert_eq!(node_txn.len(), 3); // nodes[1] will broadcast justice tx twice, and its own local state once
 +
 +      assert_eq!(node_txn[0], node_txn[2]);
 +
 +      check_spends!(node_txn[0], revoked_local_txn[0].clone());
 +      check_spends!(node_txn[1], chan_1.3.clone());
 +
 +      // Inform nodes[0] that a watchtower cheated on its behalf, so it will force-close the chan
 +      nodes[0].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![revoked_local_txn[0].clone()] }, 1);
 +      get_announce_close_broadcast_events(&nodes, 0, 1);
 +}
 +
 +#[test]
 +fn claim_htlc_outputs_shared_tx() {
 +      // A node revoked its old state; the HTLCs haven't timed out yet, so claim them in a shared justice tx
 +      let nodes = create_network(2, &[None, None]);
 +
 +      // Create some new channel:
 +      let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1, LocalFeatures::new(), LocalFeatures::new());
 +
 +      // Rebalance the network to generate HTLCs in both directions
-       claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage_1);
++      send_payment(&nodes[0], &vec!(&nodes[1])[..], 8000000, 8_000_000);
 +      // nodes[0] is going to revoke an old state, thus nodes[1] should be able to claim both offered/received HTLC outputs on top of the commitment tx
 +      let payment_preimage_1 = route_payment(&nodes[0], &vec!(&nodes[1])[..], 3000000).0;
 +      let (_payment_preimage_2, payment_hash_2) = route_payment(&nodes[1], &vec!(&nodes[0])[..], 3000000);
 +
 +      // Get the will-be-revoked local txn from node[0]
 +      let revoked_local_txn = nodes[0].node.channel_state.lock().unwrap().by_id.get(&chan_1.2).unwrap().last_local_commitment_txn.clone();
 +      assert_eq!(revoked_local_txn.len(), 2); // commitment tx + 1 HTLC-Timeout tx
 +      assert_eq!(revoked_local_txn[0].input.len(), 1);
 +      assert_eq!(revoked_local_txn[0].input[0].previous_output.txid, chan_1.3.txid());
 +      assert_eq!(revoked_local_txn[1].input.len(), 1);
 +      assert_eq!(revoked_local_txn[1].input[0].previous_output.txid, revoked_local_txn[0].txid());
 +      assert_eq!(revoked_local_txn[1].input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT); // HTLC-Timeout
 +      check_spends!(revoked_local_txn[1], revoked_local_txn[0].clone());
 +
 +      // Revoke the old state
-       send_payment(&nodes[0], &vec!(&nodes[1])[..], 8000000);
++      claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage_1, 3_000_000);
 +
 +      {
 +              let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
 +              nodes[0].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![revoked_local_txn[0].clone()] }, 1);
 +              nodes[1].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![revoked_local_txn[0].clone()] }, 1);
 +              connect_blocks(&nodes[1].chain_monitor, ANTI_REORG_DELAY - 1, 1, true, header.bitcoin_hash());
 +
 +              let events = nodes[1].node.get_and_clear_pending_events();
 +              assert_eq!(events.len(), 1);
 +              match events[0] {
 +                      Event::PaymentFailed { payment_hash, .. } => {
 +                              assert_eq!(payment_hash, payment_hash_2);
 +                      },
 +                      _ => panic!("Unexpected event"),
 +              }
 +
 +              let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
 +              assert_eq!(node_txn.len(), 4);
 +
 +              assert_eq!(node_txn[0].input.len(), 3); // Claim the revoked output + both revoked HTLC outputs
 +              check_spends!(node_txn[0], revoked_local_txn[0].clone());
 +
 +              assert_eq!(node_txn[0], node_txn[3]); // justice tx is duplicated due to block re-scanning
 +
 +              let mut witness_lens = BTreeSet::new();
 +              witness_lens.insert(node_txn[0].input[0].witness.last().unwrap().len());
 +              witness_lens.insert(node_txn[0].input[1].witness.last().unwrap().len());
 +              witness_lens.insert(node_txn[0].input[2].witness.last().unwrap().len());
 +              assert_eq!(witness_lens.len(), 3);
 +              assert_eq!(*witness_lens.iter().skip(0).next().unwrap(), 77); // revoked to_local
 +              assert_eq!(*witness_lens.iter().skip(1).next().unwrap(), OFFERED_HTLC_SCRIPT_WEIGHT); // revoked offered HTLC
 +              assert_eq!(*witness_lens.iter().skip(2).next().unwrap(), ACCEPTED_HTLC_SCRIPT_WEIGHT); // revoked received HTLC
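 +              // (witness_lens is a BTreeSet, so the skip(n) reads above rely on ascending
 +              // witness-script lengths: the 77-byte revoked to_local script sorts before
 +              // either HTLC witness script.)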
 +
 +              // Next nodes[1] broadcasts its current local tx state:
 +              assert_eq!(node_txn[1].input.len(), 1);
 +              assert_eq!(node_txn[1].input[0].previous_output.txid, chan_1.3.txid()); // Spending the funding tx's unique txout; tx broadcast by ChannelManager
 +
 +              assert_eq!(node_txn[2].input.len(), 1);
 +              let witness_script = node_txn[2].clone().input[0].witness.pop().unwrap();
 +              assert_eq!(witness_script.len(), OFFERED_HTLC_SCRIPT_WEIGHT); // Spending an offered htlc output
 +              assert_eq!(node_txn[2].input[0].previous_output.txid, node_txn[1].txid());
 +              assert_ne!(node_txn[2].input[0].previous_output.txid, node_txn[0].input[0].previous_output.txid);
 +              assert_ne!(node_txn[2].input[0].previous_output.txid, node_txn[0].input[1].previous_output.txid);
 +      }
 +      get_announce_close_broadcast_events(&nodes, 0, 1);
 +      assert_eq!(nodes[0].node.list_channels().len(), 0);
 +      assert_eq!(nodes[1].node.list_channels().len(), 0);
 +}
 +
 +#[test]
 +fn claim_htlc_outputs_single_tx() {
 +      // A node revoked its old state; the HTLCs have timed out, so claim each of them in a separate justice tx
 +      let nodes = create_network(2, &[None, None]);
 +
 +      let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1, LocalFeatures::new(), LocalFeatures::new());
 +
 +      // Rebalance the network to generate HTLCs in both directions
-       claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage_1);
++      send_payment(&nodes[0], &vec!(&nodes[1])[..], 8000000, 8_000_000);
 +      // nodes[0] is going to revoke an old state, thus nodes[1] should be able to claim both offered/received HTLC outputs on top of the commitment tx, but this
 +      // time as two different claim transactions, as we're going to time out the HTLCs given a high current height
 +      let payment_preimage_1 = route_payment(&nodes[0], &vec!(&nodes[1])[..], 3000000).0;
 +      let (_payment_preimage_2, payment_hash_2) = route_payment(&nodes[1], &vec!(&nodes[0])[..], 3000000);
 +
 +      // Get the will-be-revoked local txn from node[0]
 +      let revoked_local_txn = nodes[0].node.channel_state.lock().unwrap().by_id.get(&chan_1.2).unwrap().last_local_commitment_txn.clone();
 +
 +      // Revoke the old state
-       send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 8000000);
-       send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 8000000);
++      claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage_1, 3_000_000);
 +
 +      {
 +              let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
 +              nodes[0].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![revoked_local_txn[0].clone()] }, 200);
 +              nodes[1].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![revoked_local_txn[0].clone()] }, 200);
 +              connect_blocks(&nodes[1].chain_monitor, ANTI_REORG_DELAY - 1, 200, true, header.bitcoin_hash());
 +
 +              let events = nodes[1].node.get_and_clear_pending_events();
 +              assert_eq!(events.len(), 1);
 +              match events[0] {
 +                      Event::PaymentFailed { payment_hash, .. } => {
 +                              assert_eq!(payment_hash, payment_hash_2);
 +                      },
 +                      _ => panic!("Unexpected event"),
 +              }
 +
 +              let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
 +              assert_eq!(node_txn.len(), 22); // ChannelManager : 2, ChannelMonitor : 5 (1 standard revoked output, 2 revocation htlc tx, 1 local commitment tx + 1 htlc timeout tx) * 2 (block-rescan) + 5 * (1 local commitment tx + 1 htlc timeout tx)
 +
 +              assert_eq!(node_txn[0], node_txn[7]);
 +              assert_eq!(node_txn[1], node_txn[8]);
 +              assert_eq!(node_txn[2], node_txn[9]);
 +              assert_eq!(node_txn[3], node_txn[10]);
 +              assert_eq!(node_txn[4], node_txn[11]);
 +              assert_eq!(node_txn[3], node_txn[5]); //local commitment tx + htlc timeout tx broadcasted by ChannelManger
 +              assert_eq!(node_txn[4], node_txn[6]);
 +
 +              for i in 12..22 {
 +                      if i % 2 == 0 { assert_eq!(node_txn[3], node_txn[i]); } else { assert_eq!(node_txn[4], node_txn[i]); }
 +              }
 +
 +              assert_eq!(node_txn[0].input.len(), 1);
 +              assert_eq!(node_txn[1].input.len(), 1);
 +              assert_eq!(node_txn[2].input.len(), 1);
 +
 +              fn get_txout(out_point: &BitcoinOutPoint, tx: &Transaction) -> Option<TxOut> {
 +                      if out_point.txid == tx.txid() {
 +                              tx.output.get(out_point.vout as usize).cloned()
 +                      } else {
 +                              None
 +                      }
 +              }
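 +              // verify() script-checks each input against the output it spends, resolved
 +              // through the closure; since get_txout only answers for outpoints of
 +              // revoked_local_txn[0], this asserts all three claims spend the revoked tx.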
 +              node_txn[0].verify(|out|get_txout(out, &revoked_local_txn[0])).unwrap();
 +              node_txn[1].verify(|out|get_txout(out, &revoked_local_txn[0])).unwrap();
 +              node_txn[2].verify(|out|get_txout(out, &revoked_local_txn[0])).unwrap();
 +
 +              let mut witness_lens = BTreeSet::new();
 +              witness_lens.insert(node_txn[0].input[0].witness.last().unwrap().len());
 +              witness_lens.insert(node_txn[1].input[0].witness.last().unwrap().len());
 +              witness_lens.insert(node_txn[2].input[0].witness.last().unwrap().len());
 +              assert_eq!(witness_lens.len(), 3);
 +              assert_eq!(*witness_lens.iter().skip(0).next().unwrap(), 77); // revoked to_local
 +              assert_eq!(*witness_lens.iter().skip(1).next().unwrap(), OFFERED_HTLC_SCRIPT_WEIGHT); // revoked offered HTLC
 +              assert_eq!(*witness_lens.iter().skip(2).next().unwrap(), ACCEPTED_HTLC_SCRIPT_WEIGHT); // revoked received HTLC
 +
 +              assert_eq!(node_txn[3].input.len(), 1);
 +              check_spends!(node_txn[3], chan_1.3.clone());
 +
 +              assert_eq!(node_txn[4].input.len(), 1);
 +              let witness_script = node_txn[4].input[0].witness.last().unwrap();
 +              assert_eq!(witness_script.len(), OFFERED_HTLC_SCRIPT_WEIGHT); // Spending an offered htlc output
 +              assert_eq!(node_txn[4].input[0].previous_output.txid, node_txn[3].txid());
 +              assert_ne!(node_txn[4].input[0].previous_output.txid, node_txn[0].input[0].previous_output.txid);
 +              assert_ne!(node_txn[4].input[0].previous_output.txid, node_txn[1].input[0].previous_output.txid);
 +      }
 +      get_announce_close_broadcast_events(&nodes, 0, 1);
 +      assert_eq!(nodes[0].node.list_channels().len(), 0);
 +      assert_eq!(nodes[1].node.list_channels().len(), 0);
 +}
 +
 +#[test]
 +fn test_htlc_on_chain_success() {
 +      // Test that in case of a unilateral close onchain, we detect the state of the outputs thanks to
 +      // ChainWatchInterface and pass the preimage backwards accordingly. So here we test that ChannelManager is
 +      // broadcasting the right events to the other nodes in the payment path.
 +      // We test with two HTLCs simultaneously as that was not handled correctly in the past.
 +      // A --------------------> B ----------------------> C (preimage)
 +      // First, C should claim the HTLC outputs via HTLC-Success when its own latest local
 +      // commitment transaction was broadcast.
 +      // Then, B should learn the preimage from said transactions, attempting to claim backwards
 +      // towards A.
 +      // B should be able to claim via preimage if A then broadcasts its local tx.
 +      // Finally, when A sees B's latest local commitment transaction it should be able to claim
 +      // the HTLC outputs via the preimage it learned (which, once confirmed should generate a
 +      // PaymentSent event).
 +
 +      let nodes = create_network(3, &[None, None, None]);
 +
 +      // Create some initial channels
 +      let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1, LocalFeatures::new(), LocalFeatures::new());
 +      let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2, LocalFeatures::new(), LocalFeatures::new());
 +
 +      // Rebalance the network a bit by relaying one payment through all the channels...
-       nodes[2].node.claim_funds(our_payment_preimage);
-       nodes[2].node.claim_funds(our_payment_preimage_2);
++      send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 8000000, 8_000_000);
++      send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 8000000, 8_000_000);
 +
 +      let (our_payment_preimage, _payment_hash) = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), 3000000);
 +      let (our_payment_preimage_2, _payment_hash_2) = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), 3000000);
 +      let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42};
 +
 +      // Broadcast legit commitment tx from C on B's chain
 +      // Broadcast the HTLC-Success transaction by C, spending the received output of C's commitment tx, on B's chain
 +      let commitment_tx = nodes[2].node.channel_state.lock().unwrap().by_id.get(&chan_2.2).unwrap().last_local_commitment_txn.clone();
 +      assert_eq!(commitment_tx.len(), 1);
 +      check_spends!(commitment_tx[0], chan_2.3.clone());
-       send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 8000000);
-       send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 8000000);
++      nodes[2].node.claim_funds(our_payment_preimage, 3_000_000);
++      nodes[2].node.claim_funds(our_payment_preimage_2, 3_000_000);
 +      check_added_monitors!(nodes[2], 2);
 +      let updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
 +      assert!(updates.update_add_htlcs.is_empty());
 +      assert!(updates.update_fail_htlcs.is_empty());
 +      assert!(updates.update_fail_malformed_htlcs.is_empty());
 +      assert_eq!(updates.update_fulfill_htlcs.len(), 1);
 +
 +      nodes[2].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![commitment_tx[0].clone()]}, 1);
 +      check_closed_broadcast!(nodes[2]);
 +      let node_txn = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); // ChannelManager : 1 (commitment tx), ChannelMonitor : 4 (2*2 * HTLC-Success tx)
 +      assert_eq!(node_txn.len(), 5);
 +      assert_eq!(node_txn[0], node_txn[3]);
 +      assert_eq!(node_txn[1], node_txn[4]);
 +      assert_eq!(node_txn[2], commitment_tx[0]);
 +      check_spends!(node_txn[0], commitment_tx[0].clone());
 +      check_spends!(node_txn[1], commitment_tx[0].clone());
 +      assert_eq!(node_txn[0].input[0].witness.clone().last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
 +      assert_eq!(node_txn[1].input[0].witness.clone().last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
 +      assert!(node_txn[0].output[0].script_pubkey.is_v0_p2wsh()); // revokeable output
 +      assert!(node_txn[1].output[0].script_pubkey.is_v0_p2wsh()); // revokeable output
 +      assert_eq!(node_txn[0].lock_time, 0);
 +      assert_eq!(node_txn[1].lock_time, 0);
 +
 +      // Verify that B's ChannelManager is able to extract the preimage from the HTLC-Success tx and pass it backwards
 +      nodes[1].chain_monitor.block_connected_with_filtering(&Block { header, txdata: node_txn}, 1);
 +      let events = nodes[1].node.get_and_clear_pending_msg_events();
 +      {
 +              let mut added_monitors = nodes[1].chan_monitor.added_monitors.lock().unwrap();
 +              assert_eq!(added_monitors.len(), 2);
 +              assert_eq!(added_monitors[0].0.txid, chan_1.3.txid());
 +              assert_eq!(added_monitors[1].0.txid, chan_1.3.txid());
 +              added_monitors.clear();
 +      }
 +      assert_eq!(events.len(), 2);
 +      match events[0] {
 +              MessageSendEvent::BroadcastChannelUpdate { .. } => {},
 +              _ => panic!("Unexpected event"),
 +      }
 +      match events[1] {
 +              MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fail_htlcs, ref update_fulfill_htlcs, ref update_fail_malformed_htlcs, .. } } => {
 +                      assert!(update_add_htlcs.is_empty());
 +                      assert!(update_fail_htlcs.is_empty());
 +                      assert_eq!(update_fulfill_htlcs.len(), 1);
 +                      assert!(update_fail_malformed_htlcs.is_empty());
 +                      assert_eq!(nodes[0].node.get_our_node_id(), *node_id);
 +              },
 +              _ => panic!("Unexpected event"),
 +      };
 +      macro_rules! check_tx_local_broadcast {
 +              ($node: expr, $htlc_offered: expr, $commitment_tx: expr, $chan_tx: expr) => { {
 +                      // ChannelManager : 3 (commitment tx, 2*HTLC-Timeout tx), ChannelMonitor : 2 (timeout tx) * 2 (block-rescan)
 +                      let mut node_txn = $node.tx_broadcaster.txn_broadcasted.lock().unwrap();
 +                      assert_eq!(node_txn.len(), 7);
 +                      assert_eq!(node_txn[0], node_txn[5]);
 +                      assert_eq!(node_txn[1], node_txn[6]);
 +                      check_spends!(node_txn[0], $commitment_tx.clone());
 +                      check_spends!(node_txn[1], $commitment_tx.clone());
 +                      assert_ne!(node_txn[0].lock_time, 0);
 +                      assert_ne!(node_txn[1].lock_time, 0);
 +                      if $htlc_offered {
 +                              assert_eq!(node_txn[0].input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
 +                              assert_eq!(node_txn[1].input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
 +                              assert!(node_txn[0].output[0].script_pubkey.is_v0_p2wsh()); // revokeable output
 +                              assert!(node_txn[1].output[0].script_pubkey.is_v0_p2wsh()); // revokeable output
 +                      } else {
 +                              assert_eq!(node_txn[0].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
 +                              assert_eq!(node_txn[1].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
 +                              assert!(node_txn[0].output[0].script_pubkey.is_v0_p2wpkh()); // direct payment
 +                              assert!(node_txn[1].output[0].script_pubkey.is_v0_p2wpkh()); // direct payment
 +                      }
 +                      check_spends!(node_txn[2], $chan_tx.clone());
 +                      check_spends!(node_txn[3], node_txn[2].clone());
 +                      check_spends!(node_txn[4], node_txn[2].clone());
 +                      assert_eq!(node_txn[2].input[0].witness.last().unwrap().len(), 71);
 +                      assert_eq!(node_txn[3].input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
 +                      assert_eq!(node_txn[4].input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
 +                      assert!(node_txn[3].output[0].script_pubkey.is_v0_p2wsh()); // revokeable output
 +                      assert!(node_txn[4].output[0].script_pubkey.is_v0_p2wsh()); // revokeable output
 +                      assert_ne!(node_txn[3].lock_time, 0);
 +                      assert_ne!(node_txn[4].lock_time, 0);
 +                      node_txn.clear();
 +              } }
 +      }
 +      // nodes[1] now broadcasts its own local state as a fallback, suggesting an alternate
 +      // commitment transaction with corresponding HTLC-Timeout transactions, as well as a
 +      // timeout-claim of the output that nodes[2] just claimed via success.
 +      check_tx_local_broadcast!(nodes[1], false, commitment_tx[0], chan_2.3);
 +
 +      // Broadcast legit commitment tx from A on B's chain
 +      // Broadcast preimage tx by B on the offered output from A's commitment tx, on A's chain
 +      let commitment_tx = nodes[0].node.channel_state.lock().unwrap().by_id.get(&chan_1.2).unwrap().last_local_commitment_txn.clone();
 +      check_spends!(commitment_tx[0], chan_1.3.clone());
 +      nodes[1].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![commitment_tx[0].clone()]}, 1);
 +      check_closed_broadcast!(nodes[1]);
 +      let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); // ChannelManager : 1 (commitment tx), ChannelMonitor : 1 (HTLC-Success) * 2 (block-rescan)
 +      assert_eq!(node_txn.len(), 3);
 +      assert_eq!(node_txn[0], node_txn[2]);
 +      check_spends!(node_txn[0], commitment_tx[0].clone());
 +      assert_eq!(node_txn[0].input.len(), 2);
 +      assert_eq!(node_txn[0].input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
 +      assert_eq!(node_txn[0].input[1].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
 +      assert_eq!(node_txn[0].lock_time, 0);
 +      assert!(node_txn[0].output[0].script_pubkey.is_v0_p2wpkh()); // direct payment
 +      check_spends!(node_txn[1], chan_1.3.clone());
 +      assert_eq!(node_txn[1].input[0].witness.clone().last().unwrap().len(), 71);
 +      // We don't bother to check that B can claim the HTLC output on its commitment tx here as
 +      // we already checked the same situation with A.
 +
 +      // Verify that A's ChannelManager is able to extract the preimage from the preimage tx and generate PaymentSent
 +      nodes[0].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![commitment_tx[0].clone(), node_txn[0].clone()] }, 1);
 +      check_closed_broadcast!(nodes[0]);
 +      let events = nodes[0].node.get_and_clear_pending_events();
 +      assert_eq!(events.len(), 2);
 +      let mut first_claimed = false;
 +      for event in events {
 +              match event {
 +                      Event::PaymentSent { payment_preimage } => {
 +                              if payment_preimage == our_payment_preimage {
 +                                      assert!(!first_claimed);
 +                                      first_claimed = true;
 +                              } else {
 +                                      assert_eq!(payment_preimage, our_payment_preimage_2);
 +                              }
 +                      },
 +                      _ => panic!("Unexpected event"),
 +              }
 +      }
 +      check_tx_local_broadcast!(nodes[0], true, commitment_tx[0], chan_1.3);
 +}
 +
 +#[test]
 +fn test_htlc_on_chain_timeout() {
 +      // Test that in case of a unilateral close onchain, we detect the state of the outputs thanks to
 +      // ChainWatchInterface and time out the HTLC backwards accordingly. So here we test that ChannelManager is
 +      // broadcasting the right events to the other nodes in the payment path.
 +      // A ------------------> B ----------------------> C (timeout)
 +      //    B's commitment tx                 C's commitment tx
 +      //            \                                  \
 +      //         B's HTLC timeout tx               B's timeout tx
 +
 +      let nodes = create_network(3, &[None, None, None]);
 +
 +      // Create some initial channels
 +      let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1, LocalFeatures::new(), LocalFeatures::new());
 +      let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2, LocalFeatures::new(), LocalFeatures::new());
 +
 +      // Rebalance the network a bit by relaying one payment through all the channels...
-       claim_payment(&nodes[0], &[&nodes[1], &nodes[2]], payment_preimage);
++      send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 8000000, 8_000_000);
++      send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 8000000, 8_000_000);
 +
 +      let (_payment_preimage, payment_hash) = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), 3000000);
 +      let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42};
 +
 +      // Broadcast legit commitment tx from C on B's chain
 +      let commitment_tx = nodes[2].node.channel_state.lock().unwrap().by_id.get(&chan_2.2).unwrap().last_local_commitment_txn.clone();
 +      check_spends!(commitment_tx[0], chan_2.3.clone());
 +      nodes[2].node.fail_htlc_backwards(&payment_hash);
 +      check_added_monitors!(nodes[2], 0);
 +      expect_pending_htlcs_forwardable!(nodes[2]);
 +      check_added_monitors!(nodes[2], 1);
 +
 +      let events = nodes[2].node.get_and_clear_pending_msg_events();
 +      assert_eq!(events.len(), 1);
 +      match events[0] {
 +              MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, .. } } => {
 +                      assert!(update_add_htlcs.is_empty());
 +                      assert!(!update_fail_htlcs.is_empty());
 +                      assert!(update_fulfill_htlcs.is_empty());
 +                      assert!(update_fail_malformed_htlcs.is_empty());
 +                      assert_eq!(nodes[1].node.get_our_node_id(), *node_id);
 +              },
 +              _ => panic!("Unexpected event"),
 +      };
 +      nodes[2].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![commitment_tx[0].clone()]}, 1);
 +      check_closed_broadcast!(nodes[2]);
 +      let node_txn = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); // ChannelManager : 1 (commitment tx)
 +      assert_eq!(node_txn.len(), 1);
 +      check_spends!(node_txn[0], chan_2.3.clone());
 +      assert_eq!(node_txn[0].input[0].witness.last().unwrap().len(), 71);
 +
 +      // Broadcast the timeout transaction by B on the received output from C's commitment tx, on B's chain
 +      // Verify that B's ChannelManager is able to detect that the HTLC has timed out via its own tx and react backwards in consequence
 +      nodes[1].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![commitment_tx[0].clone()]}, 200);
 +      let timeout_tx;
 +      {
 +              let mut node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
 +              assert_eq!(node_txn.len(), 8); // ChannelManager : 2 (commitment tx, HTLC-Timeout tx), ChannelMonitor : 6 (HTLC-Timeout tx, commitment tx, timeout tx) * 2 (block-rescan)
 +              assert_eq!(node_txn[0], node_txn[5]);
 +              assert_eq!(node_txn[1], node_txn[6]);
 +              assert_eq!(node_txn[2], node_txn[7]);
 +              check_spends!(node_txn[0], commitment_tx[0].clone());
 +              assert_eq!(node_txn[0].clone().input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
 +              check_spends!(node_txn[1], chan_2.3.clone());
 +              check_spends!(node_txn[2], node_txn[1].clone());
 +              assert_eq!(node_txn[1].clone().input[0].witness.last().unwrap().len(), 71);
 +              assert_eq!(node_txn[2].clone().input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
 +              check_spends!(node_txn[3], chan_2.3.clone());
 +              check_spends!(node_txn[4], node_txn[3].clone());
 +              assert_eq!(node_txn[3].input[0].witness.clone().last().unwrap().len(), 71);
 +              assert_eq!(node_txn[4].input[0].witness.clone().last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
 +              timeout_tx = node_txn[0].clone();
 +              node_txn.clear();
 +      }
 +
 +      nodes[1].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![timeout_tx]}, 1);
 +      connect_blocks(&nodes[1].chain_monitor, ANTI_REORG_DELAY - 1, 1, true, header.bitcoin_hash());
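 +      // The HTLC failure is deferred until ANTI_REORG_DELAY confirmations so that a short reorg
 +      // cannot reverse it; only then does nodes[1] fail the HTLC backwards.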
 +      check_added_monitors!(nodes[1], 0);
 +      check_closed_broadcast!(nodes[1]);
 +
 +      expect_pending_htlcs_forwardable!(nodes[1]);
 +      check_added_monitors!(nodes[1], 1);
 +      let events = nodes[1].node.get_and_clear_pending_msg_events();
 +      assert_eq!(events.len(), 1);
 +      match events[0] {
 +              MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fail_htlcs, ref update_fulfill_htlcs, ref update_fail_malformed_htlcs, .. } } => {
 +                      assert!(update_add_htlcs.is_empty());
 +                      assert!(!update_fail_htlcs.is_empty());
 +                      assert!(update_fulfill_htlcs.is_empty());
 +                      assert!(update_fail_malformed_htlcs.is_empty());
 +                      assert_eq!(nodes[0].node.get_our_node_id(), *node_id);
 +              },
 +              _ => panic!("Unexpected event"),
 +      };
 +      let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); // Here nodes[1] detects its own htlc_timeout_tx, so no new tx is generated
 +      assert_eq!(node_txn.len(), 0);
 +
 +      // Broadcast legit commitment tx from B on A's chain
 +      let commitment_tx = nodes[1].node.channel_state.lock().unwrap().by_id.get(&chan_1.2).unwrap().last_local_commitment_txn.clone();
 +      check_spends!(commitment_tx[0], chan_1.3.clone());
 +
 +      nodes[0].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![commitment_tx[0].clone()]}, 200);
 +      check_closed_broadcast!(nodes[0]);
 +      let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); // ChannelManager : 2 (commitment tx, HTLC-Timeout tx), ChannelMonitor : 2 (timeout tx) * 2 block-rescan
 +      assert_eq!(node_txn.len(), 4);
 +      assert_eq!(node_txn[0], node_txn[3]);
 +      check_spends!(node_txn[0], commitment_tx[0].clone());
 +      assert_eq!(node_txn[0].clone().input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
 +      check_spends!(node_txn[1], chan_1.3.clone());
 +      check_spends!(node_txn[2], node_txn[1].clone());
 +      assert_eq!(node_txn[1].clone().input[0].witness.last().unwrap().len(), 71);
 +      assert_eq!(node_txn[2].clone().input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
 +}
 +
 +#[test]
 +fn test_simple_commitment_revoked_fail_backward() {
 +      // Test that in case of a revoked commitment tx, we detect the resolution of output by justice tx
 +      // and fail backward accordingly.
 +
 +      let nodes = create_network(3, &[None, None, None]);
 +
 +      // Create some initial channels
 +      create_announced_chan_between_nodes(&nodes, 0, 1, LocalFeatures::new(), LocalFeatures::new());
 +      let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2, LocalFeatures::new(), LocalFeatures::new());
 +
 +      let (payment_preimage, _payment_hash) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 3000000);
 +      // Get the will-be-revoked local txn from nodes[2]
 +      let revoked_local_txn = nodes[2].node.channel_state.lock().unwrap().by_id.get(&chan_2.2).unwrap().last_local_commitment_txn.clone();
 +      // Revoke the old state
-       claim_payment(&nodes[0], &[&nodes[1], &nodes[2]], payment_preimage);
++      claim_payment(&nodes[0], &[&nodes[1], &nodes[2]], payment_preimage, 3_000_000);
 +
 +      route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 3000000);
 +
 +      let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42};
 +      nodes[1].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![revoked_local_txn[0].clone()] }, 1);
 +      connect_blocks(&nodes[1].chain_monitor, ANTI_REORG_DELAY - 1, 1, true, header.bitcoin_hash());
 +      check_added_monitors!(nodes[1], 0);
 +      check_closed_broadcast!(nodes[1]);
 +
 +      expect_pending_htlcs_forwardable!(nodes[1]);
 +      check_added_monitors!(nodes[1], 1);
 +      let events = nodes[1].node.get_and_clear_pending_msg_events();
 +      assert_eq!(events.len(), 1);
 +      match events[0] {
 +              MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fail_htlcs, ref update_fulfill_htlcs, ref update_fail_malformed_htlcs, ref commitment_signed, .. } } => {
 +                      assert!(update_add_htlcs.is_empty());
 +                      assert_eq!(update_fail_htlcs.len(), 1);
 +                      assert!(update_fulfill_htlcs.is_empty());
 +                      assert!(update_fail_malformed_htlcs.is_empty());
 +                      assert_eq!(nodes[0].node.get_our_node_id(), *node_id);
 +
 +                      nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &update_fail_htlcs[0]).unwrap();
 +                      commitment_signed_dance!(nodes[0], nodes[1], commitment_signed, false, true);
 +
 +                      let events = nodes[0].node.get_and_clear_pending_msg_events();
 +                      assert_eq!(events.len(), 1);
 +                      match events[0] {
 +                              MessageSendEvent::PaymentFailureNetworkUpdate { .. } => {},
 +                              _ => panic!("Unexpected event"),
 +                      }
 +                      let events = nodes[0].node.get_and_clear_pending_events();
 +                      assert_eq!(events.len(), 1);
 +                      match events[0] {
 +                              Event::PaymentFailed { .. } => {},
 +                              _ => panic!("Unexpected event"),
 +                      }
 +              },
 +              _ => panic!("Unexpected event"),
 +      }
 +}
 +
 +fn do_test_commitment_revoked_fail_backward_exhaustive(deliver_bs_raa: bool, use_dust: bool, no_to_remote: bool) {
 +      // Test that if our counterparty broadcasts a revoked commitment transaction we fail all
 +      // pending HTLCs on that channel backwards even if the HTLCs aren't present in our latest
 +      // commitment transaction anymore.
 +      // To do this, we have the peer which will broadcast a revoked commitment transaction send
 +      // a number of update_fail/commitment_signed updates without ever sending the RAA in
 +      // response to our commitment_signed. This is somewhat misbehavior-y, though not
 +      // technically disallowed, and we should probably handle it reasonably.
 +      // Note that this is pretty exhaustive as an outbound HTLC which we haven't yet
 +      // failed/fulfilled backwards must be in at least one of the latest two remote commitment
 +      // transactions:
 +      // * Once we move it out of our holding cell/add it, we will immediately include it in a
 +      //   commitment_signed (implying it will be in the latest remote commitment transaction).
 +      // * Once they remove it, we will send a (the first) commitment_signed without the HTLC,
 +      //   and once they revoke the previous commitment transaction (allowing us to send a new
 +      //   commitment_signed) we will be free to fail/fulfill the HTLC backwards.
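 +      // Below we set up three such HTLCs and withhold nodes[2]'s first RAA, so none of the
 +      // failures has been committed backwards to nodes[0] when the revoked commitment hits the chain.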
 +      let mut nodes = create_network(3, &[None, None, None]);
 +
 +      // Create some initial channels
 +      create_announced_chan_between_nodes(&nodes, 0, 1, LocalFeatures::new(), LocalFeatures::new());
 +      let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2, LocalFeatures::new(), LocalFeatures::new());
 +
 +      let (payment_preimage, _payment_hash) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], if no_to_remote { 10_000 } else { 3_000_000 });
 +      // Get the will-be-revoked local txn from nodes[2]
 +      let revoked_local_txn = nodes[2].node.channel_state.lock().unwrap().by_id.get(&chan_2.2).unwrap().last_local_commitment_txn.clone();
 +      assert_eq!(revoked_local_txn[0].output.len(), if no_to_remote { 1 } else { 2 });
 +      // Revoke the old state
-       claim_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), payment_preimage_1);
++      claim_payment(&nodes[0], &[&nodes[1], &nodes[2]], payment_preimage, if no_to_remote { 10_000 } else { 3_000_000 });
 +
 +      let value = if use_dust {
 +              // The dust limit applied to HTLC outputs considers the fee of the HTLC transaction as
 +              // well, so HTLCs at exactly the dust limit will not be included in commitment txn.
 +              nodes[2].node.channel_state.lock().unwrap().by_id.get(&chan_2.2).unwrap().our_dust_limit_satoshis * 1000
 +      } else { 3000000 };
 +
 +      let (_, first_payment_hash) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], value);
 +      let (_, second_payment_hash) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], value);
 +      let (_, third_payment_hash) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], value);
 +
 +      assert!(nodes[2].node.fail_htlc_backwards(&first_payment_hash));
 +      expect_pending_htlcs_forwardable!(nodes[2]);
 +      check_added_monitors!(nodes[2], 1);
 +      let updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
 +      assert!(updates.update_add_htlcs.is_empty());
 +      assert!(updates.update_fulfill_htlcs.is_empty());
 +      assert!(updates.update_fail_malformed_htlcs.is_empty());
 +      assert_eq!(updates.update_fail_htlcs.len(), 1);
 +      assert!(updates.update_fee.is_none());
 +      nodes[1].node.handle_update_fail_htlc(&nodes[2].node.get_our_node_id(), &updates.update_fail_htlcs[0]).unwrap();
 +      let bs_raa = commitment_signed_dance!(nodes[1], nodes[2], updates.commitment_signed, false, true, false, true);
 +      // Drop the last RAA from 3 -> 2
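 +      // Withholding this RAA keeps nodes[1] waiting on nodes[2], so the first HTLC's failure is
 +      // never relayed to nodes[0] before the force-close.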
 +
 +      assert!(nodes[2].node.fail_htlc_backwards(&second_payment_hash));
 +      expect_pending_htlcs_forwardable!(nodes[2]);
 +      check_added_monitors!(nodes[2], 1);
 +      let updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
 +      assert!(updates.update_add_htlcs.is_empty());
 +      assert!(updates.update_fulfill_htlcs.is_empty());
 +      assert!(updates.update_fail_malformed_htlcs.is_empty());
 +      assert_eq!(updates.update_fail_htlcs.len(), 1);
 +      assert!(updates.update_fee.is_none());
 +      nodes[1].node.handle_update_fail_htlc(&nodes[2].node.get_our_node_id(), &updates.update_fail_htlcs[0]).unwrap();
 +      nodes[1].node.handle_commitment_signed(&nodes[2].node.get_our_node_id(), &updates.commitment_signed).unwrap();
 +      check_added_monitors!(nodes[1], 1);
 +      // Note that nodes[1] is in AwaitingRAA, so won't send a CS
 +      let as_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[2].node.get_our_node_id());
 +      nodes[2].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &as_raa).unwrap();
 +      check_added_monitors!(nodes[2], 1);
 +
 +      assert!(nodes[2].node.fail_htlc_backwards(&third_payment_hash));
 +      expect_pending_htlcs_forwardable!(nodes[2]);
 +      check_added_monitors!(nodes[2], 1);
 +      let updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
 +      assert!(updates.update_add_htlcs.is_empty());
 +      assert!(updates.update_fulfill_htlcs.is_empty());
 +      assert!(updates.update_fail_malformed_htlcs.is_empty());
 +      assert_eq!(updates.update_fail_htlcs.len(), 1);
 +      assert!(updates.update_fee.is_none());
 +      nodes[1].node.handle_update_fail_htlc(&nodes[2].node.get_our_node_id(), &updates.update_fail_htlcs[0]).unwrap();
 +      // At this point first_payment_hash has dropped out of the latest two commitment
 +      // transactions that nodes[1] is tracking...
 +      nodes[1].node.handle_commitment_signed(&nodes[2].node.get_our_node_id(), &updates.commitment_signed).unwrap();
 +      check_added_monitors!(nodes[1], 1);
 +      // Note that nodes[1] is (still) in AwaitingRAA, so won't send a CS
 +      let as_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[2].node.get_our_node_id());
 +      nodes[2].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &as_raa).unwrap();
 +      check_added_monitors!(nodes[2], 1);
 +
 +      // Add a fourth HTLC, this one will get sequestered away in nodes[1]'s holding cell waiting
 +      // on nodes[2]'s RAA.
 +      let route = nodes[1].router.get_route(&nodes[2].node.get_our_node_id(), None, &Vec::new(), 1000000, TEST_FINAL_CLTV).unwrap();
 +      let (_, fourth_payment_hash) = get_payment_preimage_hash!(nodes[0]);
 +      nodes[1].node.send_payment(route, fourth_payment_hash).unwrap();
 +      assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
 +      assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
 +      check_added_monitors!(nodes[1], 0);
 +
 +      if deliver_bs_raa {
 +              nodes[1].node.handle_revoke_and_ack(&nodes[2].node.get_our_node_id(), &bs_raa).unwrap();
 +              // One monitor for the new revocation preimage, no second one as we won't generate a new
 +              // commitment transaction for nodes[0] until process_pending_htlc_forwards().
 +              check_added_monitors!(nodes[1], 1);
 +              let events = nodes[1].node.get_and_clear_pending_events();
 +              assert_eq!(events.len(), 1);
 +              match events[0] {
 +                      Event::PendingHTLCsForwardable { .. } => { },
 +                      _ => panic!("Unexpected event"),
 +              };
 +              // Deliberately don't process the pending fail-back so they all fail back at once after
 +              // block connection just like the !deliver_bs_raa case
 +      }
 +
 +      let mut failed_htlcs = HashSet::new();
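 +      // failed_htlcs collects the hashes of the HTLCs failed back to nodes[0]; all three are
 +      // verified at the end of the test.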
 +      assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
 +
 +      let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42};
 +      nodes[1].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![revoked_local_txn[0].clone()] }, 1);
 +      connect_blocks(&nodes[1].chain_monitor, ANTI_REORG_DELAY - 1, 1, true, header.bitcoin_hash());
 +
 +      let events = nodes[1].node.get_and_clear_pending_events();
 +      assert_eq!(events.len(), if deliver_bs_raa { 1 } else { 2 });
 +      match events[0] {
 +              Event::PaymentFailed { ref payment_hash, .. } => {
 +                      assert_eq!(*payment_hash, fourth_payment_hash);
 +              },
 +              _ => panic!("Unexpected event"),
 +      }
 +      if !deliver_bs_raa {
 +              match events[1] {
 +                      Event::PendingHTLCsForwardable { .. } => { },
 +                      _ => panic!("Unexpected event"),
 +              };
 +      }
 +      nodes[1].node.process_pending_htlc_forwards();
 +      check_added_monitors!(nodes[1], 1);
 +
 +      let events = nodes[1].node.get_and_clear_pending_msg_events();
 +      assert_eq!(events.len(), if deliver_bs_raa { 3 } else { 2 });
 +      match events[if deliver_bs_raa { 1 } else { 0 }] {
 +              MessageSendEvent::BroadcastChannelUpdate { msg: msgs::ChannelUpdate { .. } } => {},
 +              _ => panic!("Unexpected event"),
 +      }
 +      if deliver_bs_raa {
 +              match events[0] {
 +                      MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fail_htlcs, ref update_fulfill_htlcs, ref update_fail_malformed_htlcs, .. } } => {
 +                              assert_eq!(nodes[2].node.get_our_node_id(), *node_id);
 +                              assert_eq!(update_add_htlcs.len(), 1);
 +                              assert!(update_fulfill_htlcs.is_empty());
 +                              assert!(update_fail_htlcs.is_empty());
 +                              assert!(update_fail_malformed_htlcs.is_empty());
 +                      },
 +                      _ => panic!("Unexpected event"),
 +              }
 +      }
 +      match events[if deliver_bs_raa { 2 } else { 1 }] {
 +              MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fail_htlcs, ref update_fulfill_htlcs, ref update_fail_malformed_htlcs, ref commitment_signed, .. } } => {
 +                      assert!(update_add_htlcs.is_empty());
 +                      assert_eq!(update_fail_htlcs.len(), 3);
 +                      assert!(update_fulfill_htlcs.is_empty());
 +                      assert!(update_fail_malformed_htlcs.is_empty());
 +                      assert_eq!(nodes[0].node.get_our_node_id(), *node_id);
 +
 +                      nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &update_fail_htlcs[0]).unwrap();
 +                      nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &update_fail_htlcs[1]).unwrap();
 +                      nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &update_fail_htlcs[2]).unwrap();
 +
 +                      commitment_signed_dance!(nodes[0], nodes[1], commitment_signed, false, true);
 +
 +                      let events = nodes[0].node.get_and_clear_pending_msg_events();
 +                      // If we delivered B's RAA we got an unknown preimage error, not something
 +                      // that we should update our routing table for.
 +                      assert_eq!(events.len(), if deliver_bs_raa { 2 } else { 3 });
 +                      for event in events {
 +                              match event {
 +                                      MessageSendEvent::PaymentFailureNetworkUpdate { .. } => {},
 +                                      _ => panic!("Unexpected event"),
 +                              }
 +                      }
 +                      let events = nodes[0].node.get_and_clear_pending_events();
 +                      assert_eq!(events.len(), 3);
 +                      match events[0] {
 +                              Event::PaymentFailed { ref payment_hash, .. } => {
 +                                      assert!(failed_htlcs.insert(payment_hash.0));
 +                              },
 +                              _ => panic!("Unexpected event"),
 +                      }
 +                      match events[1] {
 +                              Event::PaymentFailed { ref payment_hash, .. } => {
 +                                      assert!(failed_htlcs.insert(payment_hash.0));
 +                              },
 +                              _ => panic!("Unexpected event"),
 +                      }
 +                      match events[2] {
 +                              Event::PaymentFailed { ref payment_hash, .. } => {
 +                                      assert!(failed_htlcs.insert(payment_hash.0));
 +                              },
 +                              _ => panic!("Unexpected event"),
 +                      }
 +              },
 +              _ => panic!("Unexpected event"),
 +      }
 +
 +      assert!(failed_htlcs.contains(&first_payment_hash.0));
 +      assert!(failed_htlcs.contains(&second_payment_hash.0));
 +      assert!(failed_htlcs.contains(&third_payment_hash.0));
 +}
 +
 +#[test]
 +fn test_commitment_revoked_fail_backward_exhaustive_a() {
 +      do_test_commitment_revoked_fail_backward_exhaustive(false, true, false);
 +      do_test_commitment_revoked_fail_backward_exhaustive(true, true, false);
 +      do_test_commitment_revoked_fail_backward_exhaustive(false, false, false);
 +      do_test_commitment_revoked_fail_backward_exhaustive(true, false, false);
 +}
 +
 +#[test]
 +fn test_commitment_revoked_fail_backward_exhaustive_b() {
 +      do_test_commitment_revoked_fail_backward_exhaustive(false, true, true);
 +      do_test_commitment_revoked_fail_backward_exhaustive(true, true, true);
 +      do_test_commitment_revoked_fail_backward_exhaustive(false, false, true);
 +      do_test_commitment_revoked_fail_backward_exhaustive(true, false, true);
 +}
 +
 +#[test]
 +fn test_htlc_ignore_latest_remote_commitment() {
 +      // Test that HTLC transactions spending the latest remote commitment transaction are simply
 +      // ignored if we cannot claim them. This originally tickled an invalid unwrap().
 +      let nodes = create_network(2, &[None, None]);
 +      create_announced_chan_between_nodes(&nodes, 0, 1, LocalFeatures::new(), LocalFeatures::new());
 +
 +      route_payment(&nodes[0], &[&nodes[1]], 10000000);
 +      nodes[0].node.force_close_channel(&nodes[0].node.list_channels()[0].channel_id);
 +      check_closed_broadcast!(nodes[0]);
 +
 +      let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
 +      assert_eq!(node_txn.len(), 2);
 +
 +      let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
 +      nodes[1].chain_monitor.block_connected_checked(&header, 1, &[&node_txn[0], &node_txn[1]], &[1; 2]);
 +      check_closed_broadcast!(nodes[1]);
 +
 +      // Duplicate the block_connected call since this may happen due to other listeners
 +      // registering new transactions
 +      nodes[1].chain_monitor.block_connected_checked(&header, 1, &[&node_txn[0], &node_txn[1]], &[1; 2]);
 +}
 +
 +#[test]
 +fn test_force_close_fail_back() {
 +      // Check which HTLCs are failed-backwards on channel force-closure
 +      let mut nodes = create_network(3, &[None, None, None]);
 +      create_announced_chan_between_nodes(&nodes, 0, 1, LocalFeatures::new(), LocalFeatures::new());
 +      create_announced_chan_between_nodes(&nodes, 1, 2, LocalFeatures::new(), LocalFeatures::new());
 +
 +      let route = nodes[0].router.get_route(&nodes[2].node.get_our_node_id(), None, &Vec::new(), 1000000, 42).unwrap();
 +
 +      let (our_payment_preimage, our_payment_hash) = get_payment_preimage_hash!(nodes[0]);
 +
 +      let mut payment_event = {
 +              nodes[0].node.send_payment(route, our_payment_hash).unwrap();
 +              check_added_monitors!(nodes[0], 1);
 +
 +              let mut events = nodes[0].node.get_and_clear_pending_msg_events();
 +              assert_eq!(events.len(), 1);
 +              SendEvent::from_event(events.remove(0))
 +      };
 +
 +      nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]).unwrap();
 +      commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
 +
 +      expect_pending_htlcs_forwardable!(nodes[1]);
 +
 +      let mut events_2 = nodes[1].node.get_and_clear_pending_msg_events();
 +      assert_eq!(events_2.len(), 1);
 +      payment_event = SendEvent::from_event(events_2.remove(0));
 +      assert_eq!(payment_event.msgs.len(), 1);
 +
 +      check_added_monitors!(nodes[1], 1);
 +      nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &payment_event.msgs[0]).unwrap();
 +      nodes[2].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &payment_event.commitment_msg).unwrap();
 +      check_added_monitors!(nodes[2], 1);
 +      let (_, _) = get_revoke_commit_msgs!(nodes[2], nodes[1].node.get_our_node_id());
 +
 +      // nodes[2] now has the latest commitment transaction, but hasn't revoked its previous
 +      // state or updated nodes[1]'s state. Now force-close and broadcast that commitment/HTLC
 +      // transaction and ensure nodes[1] doesn't fail-backwards (this was originally a bug!).
 +
 +      nodes[2].node.force_close_channel(&payment_event.commitment_msg.channel_id);
 +      check_closed_broadcast!(nodes[2]);
 +      let tx = {
 +              let mut node_txn = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap();
 +              // Note that we don't bother broadcasting the HTLC-Success transaction here, as we
 +              // have no use for it unless nodes[2] somehow learns the preimage; otherwise the
 +              // funds will go back to nodes[1] upon timeout.
 +              assert_eq!(node_txn.len(), 1);
 +              node_txn.remove(0)
 +      };
 +
 +      let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
 +      nodes[1].chain_monitor.block_connected_checked(&header, 1, &[&tx], &[1]);
 +
 +      // Note no UpdateHTLCs event here from nodes[1] to nodes[0]!
 +      check_closed_broadcast!(nodes[1]);
 +
 +      // Now check that if we add the preimage to ChannelMonitor it broadcasts our HTLC-Success..
 +      {
 +              let mut monitors = nodes[2].chan_monitor.simple_monitor.monitors.lock().unwrap();
 +              monitors.get_mut(&OutPoint::new(Sha256dHash::from_slice(&payment_event.commitment_msg.channel_id[..]).unwrap(), 0)).unwrap()
 +                      .provide_payment_preimage(&our_payment_hash, &our_payment_preimage);
 +      }
 +      nodes[2].chain_monitor.block_connected_checked(&header, 1, &[&tx], &[1]);
 +      let node_txn = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap();
 +      assert_eq!(node_txn.len(), 1);
 +      assert_eq!(node_txn[0].input.len(), 1);
 +      assert_eq!(node_txn[0].input[0].previous_output.txid, tx.txid());
 +      assert_eq!(node_txn[0].lock_time, 0); // Must be an HTLC-Success
 +      assert_eq!(node_txn[0].input[0].witness.len(), 5); // Must be an HTLC-Success
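 +      // (Per BOLT 3, an HTLC-Timeout transaction would instead set lock_time to the HTLC's cltv_expiry.)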
 +
 +      check_spends!(node_txn[0], tx);
 +}
 +
 +#[test]
 +fn test_unconf_chan() {
 +      // After creating a chan between nodes, we disconnect all blocks previously seen to force a channel close on nodes[0]'s side
 +      let nodes = create_network(2, &[None, None]);
 +      create_announced_chan_between_nodes(&nodes, 0, 1, LocalFeatures::new(), LocalFeatures::new());
 +
 +      let channel_state = nodes[0].node.channel_state.lock().unwrap();
 +      assert_eq!(channel_state.by_id.len(), 1);
 +      assert_eq!(channel_state.short_to_id.len(), 1);
 +      mem::drop(channel_state); // Release the lock; block_disconnected below needs to re-acquire it
 +
 +      let mut headers = Vec::new();
 +      let mut header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
 +      headers.push(header.clone());
 +      for _i in 2..100 {
 +              header = BlockHeader { version: 0x20000000, prev_blockhash: header.bitcoin_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
 +              headers.push(header.clone());
 +      }
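 +      // Disconnect all 99 blocks in reverse order; un-confirming the funding transaction forces
 +      // the channel closed on nodes[0]'s side.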
 +      let mut height = 99;
 +      while !headers.is_empty() {
 +              nodes[0].node.block_disconnected(&headers.pop().unwrap(), height);
 +              height -= 1;
 +      }
 +      check_closed_broadcast!(nodes[0]);
 +      let channel_state = nodes[0].node.channel_state.lock().unwrap();
 +      assert_eq!(channel_state.by_id.len(), 0);
 +      assert_eq!(channel_state.short_to_id.len(), 0);
 +}
 +
 +#[test]
 +fn test_simple_peer_disconnect() {
 +      // Test that we can reconnect when there are no lost messages
 +      let nodes = create_network(3, &[None, None, None]);
 +      create_announced_chan_between_nodes(&nodes, 0, 1, LocalFeatures::new(), LocalFeatures::new());
 +      create_announced_chan_between_nodes(&nodes, 1, 2, LocalFeatures::new(), LocalFeatures::new());
 +
 +      nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
 +      nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
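 +      // reconnect_nodes' tuple arguments describe what each side must re-deliver on reconnect
 +      // (funding_locked, in-flight HTLC updates, pending RAAs); see functional_test_utils.rs.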
 +      reconnect_nodes(&nodes[0], &nodes[1], (true, true), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
 +
 +      let payment_preimage_1 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 1000000).0;
 +      let payment_hash_2 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 1000000).1;
 +      fail_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), payment_hash_2);
-       claim_payment_along_route(&nodes[0], &vec!(&nodes[1], &nodes[2]), true, payment_preimage_3);
++      claim_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), payment_preimage_1, 1_000_000);
 +
 +      nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
 +      nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
 +      reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
 +
 +      let payment_preimage_3 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 1000000).0;
 +      let payment_preimage_4 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 1000000).0;
 +      let payment_hash_5 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 1000000).1;
 +      let payment_hash_6 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 1000000).1;
 +
 +      nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
 +      nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
 +
-       claim_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), payment_preimage_4);
++      claim_payment_along_route(&nodes[0], &vec!(&nodes[1], &nodes[2]), true, payment_preimage_3, 1_000_000);
 +      fail_payment_along_route(&nodes[0], &[&nodes[1], &nodes[2]], true, payment_hash_5);
 +
 +      reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (1, 0), (1, 0), (false, false));
 +      {
 +              let events = nodes[0].node.get_and_clear_pending_events();
 +              assert_eq!(events.len(), 2);
 +              match events[0] {
 +                      Event::PaymentSent { payment_preimage } => {
 +                              assert_eq!(payment_preimage, payment_preimage_3);
 +                      },
 +                      _ => panic!("Unexpected event"),
 +              }
 +              match events[1] {
 +                      Event::PaymentFailed { payment_hash, rejected_by_dest, .. } => {
 +                              assert_eq!(payment_hash, payment_hash_5);
 +                              assert!(rejected_by_dest);
 +                      },
 +                      _ => panic!("Unexpected event"),
 +              }
 +      }
 +
-       nodes[1].node.claim_funds(payment_preimage_1);
++      claim_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), payment_preimage_4, 1_000_000);
 +      fail_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), payment_hash_6);
 +}
 +
 +fn do_test_drop_messages_peer_disconnect(messages_delivered: u8) {
 +      // Test that we can reconnect when in-flight HTLC updates get dropped
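 +      // messages_delivered: 0 => funding_locked itself is still pending on nodes[1]; 1 => no
 +      // payment messages delivered; 2..=6 => one more message each of update_add / CS / RAA /
 +      // CS / RAA delivered before the disconnect.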
 +      let mut nodes = create_network(2, &[None, None]);
 +      if messages_delivered == 0 {
 +              create_chan_between_nodes_with_value_a(&nodes[0], &nodes[1], 100000, 10001, LocalFeatures::new(), LocalFeatures::new());
 +              // nodes[1] doesn't receive the funding_locked message (it'll be re-sent on reconnect)
 +      } else {
 +              create_announced_chan_between_nodes(&nodes, 0, 1, LocalFeatures::new(), LocalFeatures::new());
 +      }
 +
 +      let route = nodes[0].router.get_route(&nodes[1].node.get_our_node_id(), Some(&nodes[0].node.list_usable_channels()), &Vec::new(), 1000000, TEST_FINAL_CLTV).unwrap();
 +      let (payment_preimage_1, payment_hash_1) = get_payment_preimage_hash!(nodes[0]);
 +
 +      let payment_event = {
 +              nodes[0].node.send_payment(route.clone(), payment_hash_1).unwrap();
 +              check_added_monitors!(nodes[0], 1);
 +
 +              let mut events = nodes[0].node.get_and_clear_pending_msg_events();
 +              assert_eq!(events.len(), 1);
 +              SendEvent::from_event(events.remove(0))
 +      };
 +      assert_eq!(nodes[1].node.get_our_node_id(), payment_event.node_id);
 +
 +      if messages_delivered < 2 {
 +              // Drop the payment_event messages, and let them get re-generated in reconnect_nodes!
 +      } else {
 +              nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]).unwrap();
 +              if messages_delivered >= 3 {
 +                      nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event.commitment_msg).unwrap();
 +                      check_added_monitors!(nodes[1], 1);
 +                      let (bs_revoke_and_ack, bs_commitment_signed) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
 +
 +                      if messages_delivered >= 4 {
 +                              nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_revoke_and_ack).unwrap();
 +                              assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
 +                              check_added_monitors!(nodes[0], 1);
 +
 +                              if messages_delivered >= 5 {
 +                                      nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_commitment_signed).unwrap();
 +                                      let as_revoke_and_ack = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
 +                                      // No commitment_signed so get_event_msg's assert(len == 1) passes
 +                                      check_added_monitors!(nodes[0], 1);
 +
 +                                      if messages_delivered >= 6 {
 +                                              nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_revoke_and_ack).unwrap();
 +                                              assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
 +                                              check_added_monitors!(nodes[1], 1);
 +                                      }
 +                              }
 +                      }
 +              }
 +      }
 +
 +      nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
 +      nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
 +      if messages_delivered < 3 {
 +              // Even if the funding_locked messages get exchanged, as long as nothing further was
 +              // received on either side, both sides will need to resend them.
 +              reconnect_nodes(&nodes[0], &nodes[1], (true, true), (0, 1), (0, 0), (0, 0), (0, 0), (false, false));
 +      } else if messages_delivered == 3 {
 +              // nodes[0] still wants its RAA + commitment_signed
 +              reconnect_nodes(&nodes[0], &nodes[1], (false, false), (-1, 0), (0, 0), (0, 0), (0, 0), (true, false));
 +      } else if messages_delivered == 4 {
 +              // nodes[0] still wants its commitment_signed
 +              reconnect_nodes(&nodes[0], &nodes[1], (false, false), (-1, 0), (0, 0), (0, 0), (0, 0), (false, false));
 +      } else if messages_delivered == 5 {
 +              // nodes[1] still wants its final RAA
 +              reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (false, true));
 +      } else if messages_delivered == 6 {
 +              // Everything was delivered...
 +              reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
 +      }
 +
 +      let events_1 = nodes[1].node.get_and_clear_pending_events();
 +      assert_eq!(events_1.len(), 1);
 +      match events_1[0] {
 +              Event::PendingHTLCsForwardable { .. } => { },
 +              _ => panic!("Unexpected event"),
 +      };
 +
 +      nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
 +      nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
 +      reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
 +
 +      nodes[1].node.process_pending_htlc_forwards();
 +
 +      let events_2 = nodes[1].node.get_and_clear_pending_events();
 +      assert_eq!(events_2.len(), 1);
 +      match events_2[0] {
 +              Event::PaymentReceived { ref payment_hash, amt } => {
 +                      assert_eq!(payment_hash_1, *payment_hash);
 +                      assert_eq!(amt, 1000000);
 +              },
 +              _ => panic!("Unexpected event"),
 +      }
 +
-       claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_2);
++      nodes[1].node.claim_funds(payment_preimage_1, 1_000_000);
 +      check_added_monitors!(nodes[1], 1);
 +
 +      let events_3 = nodes[1].node.get_and_clear_pending_msg_events();
 +      assert_eq!(events_3.len(), 1);
 +      let (update_fulfill_htlc, commitment_signed) = match events_3[0] {
 +              MessageSendEvent::UpdateHTLCs { ref node_id, ref updates } => {
 +                      assert_eq!(*node_id, nodes[0].node.get_our_node_id());
 +                      assert!(updates.update_add_htlcs.is_empty());
 +                      assert!(updates.update_fail_htlcs.is_empty());
 +                      assert_eq!(updates.update_fulfill_htlcs.len(), 1);
 +                      assert!(updates.update_fail_malformed_htlcs.is_empty());
 +                      assert!(updates.update_fee.is_none());
 +                      (updates.update_fulfill_htlcs[0].clone(), updates.commitment_signed.clone())
 +              },
 +              _ => panic!("Unexpected event"),
 +      };
 +
 +      if messages_delivered >= 1 {
 +              nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &update_fulfill_htlc).unwrap();
 +
 +              let events_4 = nodes[0].node.get_and_clear_pending_events();
 +              assert_eq!(events_4.len(), 1);
 +              match events_4[0] {
 +                      Event::PaymentSent { ref payment_preimage } => {
 +                              assert_eq!(payment_preimage_1, *payment_preimage);
 +                      },
 +                      _ => panic!("Unexpected event"),
 +              }
 +
 +              if messages_delivered >= 2 {
 +                      nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &commitment_signed).unwrap();
 +                      check_added_monitors!(nodes[0], 1);
 +                      let (as_revoke_and_ack, as_commitment_signed) = get_revoke_commit_msgs!(nodes[0], nodes[1].node.get_our_node_id());
 +
 +                      if messages_delivered >= 3 {
 +                              nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_revoke_and_ack).unwrap();
 +                              assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
 +                              check_added_monitors!(nodes[1], 1);
 +
 +                              if messages_delivered >= 4 {
 +                                      nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_commitment_signed).unwrap();
 +                                      let bs_revoke_and_ack = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
 +                                      // No commitment_signed so get_event_msg's assert(len == 1) passes
 +                                      check_added_monitors!(nodes[1], 1);
 +
 +                                      if messages_delivered >= 5 {
 +                                              nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_revoke_and_ack).unwrap();
 +                                              assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
 +                                              check_added_monitors!(nodes[0], 1);
 +                                      }
 +                              }
 +                      }
 +              }
 +      }
 +
 +      nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
 +      nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
 +      if messages_delivered < 2 {
 +              reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (1, 0), (0, 0), (0, 0), (false, false));
 +              //TODO: Deduplicate PaymentSent events, then enable this if:
 +              //if messages_delivered < 1 {
 +                      let events_4 = nodes[0].node.get_and_clear_pending_events();
 +                      assert_eq!(events_4.len(), 1);
 +                      match events_4[0] {
 +                              Event::PaymentSent { ref payment_preimage } => {
 +                                      assert_eq!(payment_preimage_1, *payment_preimage);
 +                              },
 +                              _ => panic!("Unexpected event"),
 +                      }
 +              //}
 +      } else if messages_delivered == 2 {
 +              // nodes[0] still wants its RAA + commitment_signed
 +              reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, -1), (0, 0), (0, 0), (0, 0), (false, true));
 +      } else if messages_delivered == 3 {
 +              // nodes[0] still wants its commitment_signed
 +              reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, -1), (0, 0), (0, 0), (0, 0), (false, false));
 +      } else if messages_delivered == 4 {
 +              // nodes[1] still wants its final RAA
 +              reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (true, false));
 +      } else if messages_delivered == 5 {
 +              // Everything was delivered...
 +              reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
 +      }
 +
 +      nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
 +      nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
 +      reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
 +
 +      // Channel should still work fine...
 +      let payment_preimage_2 = send_along_route(&nodes[0], route, &[&nodes[1]], 1000000).0;
-       claim_payment(&nodes[0], &[&nodes[1]], payment_preimage);
++      claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_2, 1_000_000);
 +}
 +
 +#[test]
 +fn test_drop_messages_peer_disconnect_a() {
 +      do_test_drop_messages_peer_disconnect(0);
 +      do_test_drop_messages_peer_disconnect(1);
 +      do_test_drop_messages_peer_disconnect(2);
 +      do_test_drop_messages_peer_disconnect(3);
 +}
 +
 +#[test]
 +fn test_drop_messages_peer_disconnect_b() {
 +      do_test_drop_messages_peer_disconnect(4);
 +      do_test_drop_messages_peer_disconnect(5);
 +      do_test_drop_messages_peer_disconnect(6);
 +}
 +
 +#[test]
 +fn test_funding_peer_disconnect() {
 +      // Test that we can lock in our funding tx while disconnected
 +      let nodes = create_network(2, &[None, None]);
 +      let tx = create_chan_between_nodes_with_value_init(&nodes[0], &nodes[1], 100000, 10001, LocalFeatures::new(), LocalFeatures::new());
 +
 +      nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
 +      nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
 +
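 +      // tx.version doubles as the channel id here: the test helpers set the funding tx's version
 +      // to the channel's index (see create_chan_between_nodes_with_value_init).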
 +      confirm_transaction(&nodes[0].chain_monitor, &tx, tx.version);
 +      let events_1 = nodes[0].node.get_and_clear_pending_msg_events();
 +      assert_eq!(events_1.len(), 1);
 +      match events_1[0] {
 +              MessageSendEvent::SendFundingLocked { ref node_id, msg: _ } => {
 +                      assert_eq!(*node_id, nodes[1].node.get_our_node_id());
 +              },
 +              _ => panic!("Unexpected event"),
 +      }
 +
 +      reconnect_nodes(&nodes[0], &nodes[1], (false, true), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
 +
 +      nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
 +      nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
 +
 +      confirm_transaction(&nodes[1].chain_monitor, &tx, tx.version);
 +      let events_2 = nodes[1].node.get_and_clear_pending_msg_events();
 +      assert_eq!(events_2.len(), 2);
 +      match events_2[0] {
 +              MessageSendEvent::SendFundingLocked { ref node_id, msg: _ } => {
 +                      assert_eq!(*node_id, nodes[0].node.get_our_node_id());
 +              },
 +              _ => panic!("Unexpected event"),
 +      }
 +      match events_2[1] {
 +              MessageSendEvent::SendAnnouncementSignatures { ref node_id, msg: _ } => {
 +                      assert_eq!(*node_id, nodes[0].node.get_our_node_id());
 +              },
 +              _ => panic!("Unexpected event"),
 +      }
 +
 +      reconnect_nodes(&nodes[0], &nodes[1], (true, true), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
 +
 +      // TODO: We shouldn't need to manually pass list_usable_channels here once we support
 +      // rebroadcasting announcement_signatures upon reconnect.
 +
 +      let route = nodes[0].router.get_route(&nodes[1].node.get_our_node_id(), Some(&nodes[0].node.list_usable_channels()), &Vec::new(), 1000000, TEST_FINAL_CLTV).unwrap();
 +      let (payment_preimage, _) = send_along_route(&nodes[0], route, &[&nodes[1]], 1000000);
-       assert!(nodes[1].node.claim_funds(payment_preimage_1));
++      claim_payment(&nodes[0], &[&nodes[1]], payment_preimage, 1_000_000);
 +}
 +
 +#[test]
 +fn test_drop_messages_peer_disconnect_dual_htlc() {
 +      // Test that we can handle reconnecting when both sides of a channel have pending
 +      // commitment_updates when we disconnect.
 +      let mut nodes = create_network(2, &[None, None]);
 +      create_announced_chan_between_nodes(&nodes, 0, 1, LocalFeatures::new(), LocalFeatures::new());
 +
 +      let (payment_preimage_1, _) = route_payment(&nodes[0], &[&nodes[1]], 1000000);
 +
 +      // Now try to send a second payment which will fail to send
 +      let route = nodes[0].router.get_route(&nodes[1].node.get_our_node_id(), None, &Vec::new(), 1000000, TEST_FINAL_CLTV).unwrap();
 +      let (payment_preimage_2, payment_hash_2) = get_payment_preimage_hash!(nodes[0]);
 +
 +      nodes[0].node.send_payment(route.clone(), payment_hash_2).unwrap();
 +      check_added_monitors!(nodes[0], 1);
 +
 +      let events_1 = nodes[0].node.get_and_clear_pending_msg_events();
 +      assert_eq!(events_1.len(), 1);
 +      match events_1[0] {
 +              MessageSendEvent::UpdateHTLCs { .. } => {},
 +              _ => panic!("Unexpected event"),
 +      }
 +
-       claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_2);
++      assert!(nodes[1].node.claim_funds(payment_preimage_1, 1_000_000));
 +      check_added_monitors!(nodes[1], 1);
 +
 +      let events_2 = nodes[1].node.get_and_clear_pending_msg_events();
 +      assert_eq!(events_2.len(), 1);
 +      match events_2[0] {
 +              MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, ref commitment_signed } } => {
 +                      assert_eq!(*node_id, nodes[0].node.get_our_node_id());
 +                      assert!(update_add_htlcs.is_empty());
 +                      assert_eq!(update_fulfill_htlcs.len(), 1);
 +                      assert!(update_fail_htlcs.is_empty());
 +                      assert!(update_fail_malformed_htlcs.is_empty());
 +                      assert!(update_fee.is_none());
 +
 +                      nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &update_fulfill_htlcs[0]).unwrap();
 +                      let events_3 = nodes[0].node.get_and_clear_pending_events();
 +                      assert_eq!(events_3.len(), 1);
 +                      match events_3[0] {
 +                              Event::PaymentSent { ref payment_preimage } => {
 +                                      assert_eq!(*payment_preimage, payment_preimage_1);
 +                              },
 +                              _ => panic!("Unexpected event"),
 +                      }
 +
 +                      nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), commitment_signed).unwrap();
 +                      let _ = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
 +                      // No commitment_signed so get_event_msg's assert(len == 1) passes
 +                      check_added_monitors!(nodes[0], 1);
 +              },
 +              _ => panic!("Unexpected event"),
 +      }
 +
 +      nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
 +      nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
 +
 +      nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id());
 +      let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]);
 +      assert_eq!(reestablish_1.len(), 1);
 +      nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id());
 +      let reestablish_2 = get_chan_reestablish_msgs!(nodes[1], nodes[0]);
 +      assert_eq!(reestablish_2.len(), 1);
 +
 +      nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &reestablish_2[0]).unwrap();
 +      let as_resp = handle_chan_reestablish_msgs!(nodes[0], nodes[1]);
 +      nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &reestablish_1[0]).unwrap();
 +      let bs_resp = handle_chan_reestablish_msgs!(nodes[1], nodes[0]);
 +
 +      assert!(as_resp.0.is_none());
 +      assert!(bs_resp.0.is_none());
 +
 +      assert!(bs_resp.1.is_none());
 +      assert!(bs_resp.2.is_none());
 +
 +      assert!(as_resp.3 == RAACommitmentOrder::CommitmentFirst);
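 +      // as_resp.1 is nodes[0]'s re-sent RAA and as_resp.2 its re-sent commitment update;
 +      // CommitmentFirst means the commitment update must be (re-)delivered before the RAA.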
 +
 +      assert_eq!(as_resp.2.as_ref().unwrap().update_add_htlcs.len(), 1);
 +      assert!(as_resp.2.as_ref().unwrap().update_fulfill_htlcs.is_empty());
 +      assert!(as_resp.2.as_ref().unwrap().update_fail_htlcs.is_empty());
 +      assert!(as_resp.2.as_ref().unwrap().update_fail_malformed_htlcs.is_empty());
 +      assert!(as_resp.2.as_ref().unwrap().update_fee.is_none());
 +      nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &as_resp.2.as_ref().unwrap().update_add_htlcs[0]).unwrap();
 +      nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_resp.2.as_ref().unwrap().commitment_signed).unwrap();
 +      let bs_revoke_and_ack = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
 +      // No commitment_signed so get_event_msg's assert(len == 1) passes
 +      check_added_monitors!(nodes[1], 1);
 +
 +      nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), as_resp.1.as_ref().unwrap()).unwrap();
 +      let bs_second_commitment_signed = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
 +      assert!(bs_second_commitment_signed.update_add_htlcs.is_empty());
 +      assert!(bs_second_commitment_signed.update_fulfill_htlcs.is_empty());
 +      assert!(bs_second_commitment_signed.update_fail_htlcs.is_empty());
 +      assert!(bs_second_commitment_signed.update_fail_malformed_htlcs.is_empty());
 +      assert!(bs_second_commitment_signed.update_fee.is_none());
 +      check_added_monitors!(nodes[1], 1);
 +
 +      nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_revoke_and_ack).unwrap();
 +      let as_commitment_signed = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
 +      assert!(as_commitment_signed.update_add_htlcs.is_empty());
 +      assert!(as_commitment_signed.update_fulfill_htlcs.is_empty());
 +      assert!(as_commitment_signed.update_fail_htlcs.is_empty());
 +      assert!(as_commitment_signed.update_fail_malformed_htlcs.is_empty());
 +      assert!(as_commitment_signed.update_fee.is_none());
 +      check_added_monitors!(nodes[0], 1);
 +
 +      nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_second_commitment_signed.commitment_signed).unwrap();
 +      let as_revoke_and_ack = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
 +      // No commitment_signed so get_event_msg's assert(len == 1) passes
 +      check_added_monitors!(nodes[0], 1);
 +
 +      nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_commitment_signed.commitment_signed).unwrap();
 +      let bs_second_revoke_and_ack = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
 +      // No commitment_signed so get_event_msg's assert(len == 1) passes
 +      check_added_monitors!(nodes[1], 1);
 +
 +      nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_revoke_and_ack).unwrap();
 +      assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
 +      check_added_monitors!(nodes[1], 1);
 +
 +      expect_pending_htlcs_forwardable!(nodes[1]);
 +
 +      let events_5 = nodes[1].node.get_and_clear_pending_events();
 +      assert_eq!(events_5.len(), 1);
 +      match events_5[0] {
 +              Event::PaymentReceived { ref payment_hash, amt: _ } => {
 +                      assert_eq!(payment_hash_2, *payment_hash);
 +              },
 +              _ => panic!("Unexpected event"),
 +      }
 +
 +      nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_second_revoke_and_ack).unwrap();
 +      assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
 +      check_added_monitors!(nodes[0], 1);
 +
-       send_payment(&nodes[0], &[&nodes[1]], 1000000);
++      claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_2, 1_000_000);
 +}
 +
 +#[test]
 +fn test_invalid_channel_announcement() {
 +      // Test the BOLT 7 channel_announcement msg requirements enforced on the receiving node; gather the data needed to build custom channel_announcement msgs
 +      let secp_ctx = Secp256k1::new();
 +      let nodes = create_network(2, &[None, None]);
 +
 +      let chan_announcement = create_chan_between_nodes(&nodes[0], &nodes[1], LocalFeatures::new(), LocalFeatures::new());
 +
 +      let a_channel_lock = nodes[0].node.channel_state.lock().unwrap();
 +      let b_channel_lock = nodes[1].node.channel_state.lock().unwrap();
 +      let as_chan = a_channel_lock.by_id.get(&chan_announcement.3).unwrap();
 +      let bs_chan = b_channel_lock.by_id.get(&chan_announcement.3).unwrap();
 +
 +      let _ = nodes[0].router.handle_htlc_fail_channel_update(&msgs::HTLCFailChannelUpdate::ChannelClosed { short_channel_id : as_chan.get_short_channel_id().unwrap(), is_permanent: false } );
 +
 +      let as_bitcoin_key = PublicKey::from_secret_key(&secp_ctx, &as_chan.get_local_keys().funding_key);
 +      let bs_bitcoin_key = PublicKey::from_secret_key(&secp_ctx, &bs_chan.get_local_keys().funding_key);
 +
 +      let as_network_key = nodes[0].node.get_our_node_id();
 +      let bs_network_key = nodes[1].node.get_our_node_id();
 +
 +      let were_node_one = as_bitcoin_key.serialize()[..] < bs_bitcoin_key.serialize()[..];
 +
 +      let mut chan_announcement;
 +
 +      macro_rules! dummy_unsigned_msg {
 +              () => {
 +                      msgs::UnsignedChannelAnnouncement {
 +                              features: msgs::GlobalFeatures::new(),
 +                              chain_hash: genesis_block(Network::Testnet).header.bitcoin_hash(),
 +                              short_channel_id: as_chan.get_short_channel_id().unwrap(),
 +                              node_id_1: if were_node_one { as_network_key } else { bs_network_key },
 +                              node_id_2: if were_node_one { bs_network_key } else { as_network_key },
 +                              bitcoin_key_1: if were_node_one { as_bitcoin_key } else { bs_bitcoin_key },
 +                              bitcoin_key_2: if were_node_one { bs_bitcoin_key } else { as_bitcoin_key },
 +                              excess_data: Vec::new(),
 +                              }
 +              }
 +      }
 +
 +      macro_rules! sign_msg {
 +              ($unsigned_msg: expr) => {
 +                      let msghash = Message::from_slice(&Sha256dHash::hash(&$unsigned_msg.encode()[..])[..]).unwrap();
 +                      let as_bitcoin_sig = secp_ctx.sign(&msghash, &as_chan.get_local_keys().funding_key);
 +                      let bs_bitcoin_sig = secp_ctx.sign(&msghash, &bs_chan.get_local_keys().funding_key);
 +                      let as_node_sig = secp_ctx.sign(&msghash, &nodes[0].keys_manager.get_node_secret());
 +                      let bs_node_sig = secp_ctx.sign(&msghash, &nodes[1].keys_manager.get_node_secret());
 +                      chan_announcement = msgs::ChannelAnnouncement {
 +                              node_signature_1 : if were_node_one { as_node_sig } else { bs_node_sig},
 +                              node_signature_2 : if were_node_one { bs_node_sig } else { as_node_sig},
 +                              bitcoin_signature_1: if were_node_one { as_bitcoin_sig } else { bs_bitcoin_sig },
 +                              bitcoin_signature_2 : if were_node_one { bs_bitcoin_sig } else { as_bitcoin_sig },
 +                              contents: $unsigned_msg
 +                      }
 +              }
 +      }
 +
 +      let unsigned_msg = dummy_unsigned_msg!();
 +      sign_msg!(unsigned_msg);
 +      assert_eq!(nodes[0].router.handle_channel_announcement(&chan_announcement).unwrap(), true);
 +      let _ = nodes[0].router.handle_htlc_fail_channel_update(&msgs::HTLCFailChannelUpdate::ChannelClosed { short_channel_id : as_chan.get_short_channel_id().unwrap(), is_permanent: false } );
 +
 +      // Configured with Network::Testnet
 +      let mut unsigned_msg = dummy_unsigned_msg!();
 +      unsigned_msg.chain_hash = genesis_block(Network::Bitcoin).header.bitcoin_hash();
 +      sign_msg!(unsigned_msg);
 +      assert!(nodes[0].router.handle_channel_announcement(&chan_announcement).is_err());
 +
 +      let mut unsigned_msg = dummy_unsigned_msg!();
 +      unsigned_msg.chain_hash = Sha256dHash::hash(&[1,2,3,4,5,6,7,8,9]);
 +      sign_msg!(unsigned_msg);
 +      assert!(nodes[0].router.handle_channel_announcement(&chan_announcement).is_err());
 +}
 +
 +#[test]
 +fn test_no_txn_manager_serialize_deserialize() {
 +      let mut nodes = create_network(2, &[None, None]);
 +
 +      let tx = create_chan_between_nodes_with_value_init(&nodes[0], &nodes[1], 100000, 10001, LocalFeatures::new(), LocalFeatures::new());
 +
 +      nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
 +
 +      let nodes_0_serialized = nodes[0].node.encode();
 +      let mut chan_0_monitor_serialized = test_utils::TestVecWriter(Vec::new());
 +      nodes[0].chan_monitor.simple_monitor.monitors.lock().unwrap().iter().next().unwrap().1.write_for_disk(&mut chan_0_monitor_serialized).unwrap();
 +
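 +      // Simulate a restart: stand up a fresh TestChannelMonitor, then reload the
 +      // ChannelMonitor and ChannelManager from the bytes serialized above.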
 +      nodes[0].chan_monitor = Arc::new(test_utils::TestChannelMonitor::new(nodes[0].chain_monitor.clone(), nodes[0].tx_broadcaster.clone(), Arc::new(test_utils::TestLogger::new()), Arc::new(test_utils::TestFeeEstimator { sat_per_kw: 253 })));
 +      let mut chan_0_monitor_read = &chan_0_monitor_serialized.0[..];
 +      let (_, chan_0_monitor) = <(Sha256dHash, ChannelMonitor)>::read(&mut chan_0_monitor_read, Arc::new(test_utils::TestLogger::new())).unwrap();
 +      assert!(chan_0_monitor_read.is_empty());
 +
 +      let mut nodes_0_read = &nodes_0_serialized[..];
 +      let config = UserConfig::new();
 +      let keys_manager = Arc::new(test_utils::TestKeysInterface::new(&nodes[0].node_seed, Network::Testnet, Arc::new(test_utils::TestLogger::new())));
 +      let (_, nodes_0_deserialized) = {
 +              let mut channel_monitors = HashMap::new();
 +              channel_monitors.insert(chan_0_monitor.get_funding_txo().unwrap(), &chan_0_monitor);
 +              <(Sha256dHash, ChannelManager)>::read(&mut nodes_0_read, ChannelManagerReadArgs {
 +                      default_config: config,
 +                      keys_manager,
 +                      fee_estimator: Arc::new(test_utils::TestFeeEstimator { sat_per_kw: 253 }),
 +                      monitor: nodes[0].chan_monitor.clone(),
 +                      chain_monitor: nodes[0].chain_monitor.clone(),
 +                      tx_broadcaster: nodes[0].tx_broadcaster.clone(),
 +                      logger: Arc::new(test_utils::TestLogger::new()),
 +                      channel_monitors: &channel_monitors,
 +              }).unwrap()
 +      };
 +      assert!(nodes_0_read.is_empty());
 +
 +      assert!(nodes[0].chan_monitor.add_update_monitor(chan_0_monitor.get_funding_txo().unwrap(), chan_0_monitor).is_ok());
 +      nodes[0].node = Arc::new(nodes_0_deserialized);
 +      let nodes_0_as_listener: Arc<ChainListener> = nodes[0].node.clone();
 +      nodes[0].chain_monitor.register_listener(Arc::downgrade(&nodes_0_as_listener));
 +      assert_eq!(nodes[0].node.list_channels().len(), 1);
 +      check_added_monitors!(nodes[0], 1);
 +
 +      nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id());
 +      let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]);
 +      nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id());
 +      let reestablish_2 = get_chan_reestablish_msgs!(nodes[1], nodes[0]);
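 +      // With no channel state lost, handling each channel_reestablish should queue no
 +      // further messages; the funding-confirmation flow then resumes below.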
 +
 +      nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &reestablish_1[0]).unwrap();
 +      assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
 +      nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &reestablish_2[0]).unwrap();
 +      assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
 +
 +      let (funding_locked, _) = create_chan_between_nodes_with_value_confirm(&nodes[0], &nodes[1], &tx);
 +      let (announcement, as_update, bs_update) = create_chan_between_nodes_with_value_b(&nodes[0], &nodes[1], &funding_locked);
 +      for node in nodes.iter() {
 +              assert!(node.router.handle_channel_announcement(&announcement).unwrap());
 +              node.router.handle_channel_update(&as_update).unwrap();
 +              node.router.handle_channel_update(&bs_update).unwrap();
 +      }
 +
-       claim_payment(&nodes[0], &[&nodes[1]], our_payment_preimage);
++      send_payment(&nodes[0], &[&nodes[1]], 1000000, 1_000_000);
 +}
 +
 +#[test]
 +fn test_simple_manager_serialize_deserialize() {
 +      let mut nodes = create_network(2, &[None, None]);
 +      create_announced_chan_between_nodes(&nodes, 0, 1, LocalFeatures::new(), LocalFeatures::new());
 +
 +      let (our_payment_preimage, _) = route_payment(&nodes[0], &[&nodes[1]], 1000000);
 +      let (_, our_payment_hash) = route_payment(&nodes[0], &[&nodes[1]], 1000000);
 +
 +      nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
 +
 +      let nodes_0_serialized = nodes[0].node.encode();
 +      let mut chan_0_monitor_serialized = test_utils::TestVecWriter(Vec::new());
 +      nodes[0].chan_monitor.simple_monitor.monitors.lock().unwrap().iter().next().unwrap().1.write_for_disk(&mut chan_0_monitor_serialized).unwrap();
 +
 +      nodes[0].chan_monitor = Arc::new(test_utils::TestChannelMonitor::new(nodes[0].chain_monitor.clone(), nodes[0].tx_broadcaster.clone(), Arc::new(test_utils::TestLogger::new()), Arc::new(test_utils::TestFeeEstimator { sat_per_kw: 253 })));
 +      let mut chan_0_monitor_read = &chan_0_monitor_serialized.0[..];
 +      let (_, chan_0_monitor) = <(Sha256dHash, ChannelMonitor)>::read(&mut chan_0_monitor_read, Arc::new(test_utils::TestLogger::new())).unwrap();
 +      assert!(chan_0_monitor_read.is_empty());
 +
 +      let mut nodes_0_read = &nodes_0_serialized[..];
 +      let keys_manager = Arc::new(test_utils::TestKeysInterface::new(&nodes[0].node_seed, Network::Testnet, Arc::new(test_utils::TestLogger::new())));
 +      let (_, nodes_0_deserialized) = {
 +              let mut channel_monitors = HashMap::new();
 +              channel_monitors.insert(chan_0_monitor.get_funding_txo().unwrap(), &chan_0_monitor);
 +              <(Sha256dHash, ChannelManager)>::read(&mut nodes_0_read, ChannelManagerReadArgs {
 +                      default_config: UserConfig::new(),
 +                      keys_manager,
 +                      fee_estimator: Arc::new(test_utils::TestFeeEstimator { sat_per_kw: 253 }),
 +                      monitor: nodes[0].chan_monitor.clone(),
 +                      chain_monitor: nodes[0].chain_monitor.clone(),
 +                      tx_broadcaster: nodes[0].tx_broadcaster.clone(),
 +                      logger: Arc::new(test_utils::TestLogger::new()),
 +                      channel_monitors: &channel_monitors,
 +              }).unwrap()
 +      };
 +      assert!(nodes_0_read.is_empty());
 +
 +      assert!(nodes[0].chan_monitor.add_update_monitor(chan_0_monitor.get_funding_txo().unwrap(), chan_0_monitor).is_ok());
 +      nodes[0].node = Arc::new(nodes_0_deserialized);
 +      check_added_monitors!(nodes[0], 1);
 +
 +      reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
 +
 +      fail_payment(&nodes[0], &[&nodes[1]], our_payment_hash);
-       claim_payment(&nodes[2], &[&nodes[0], &nodes[1]], our_payment_preimage);
++      claim_payment(&nodes[0], &[&nodes[1]], our_payment_preimage, 1_000_000);
 +}
 +
 +#[test]
 +fn test_manager_serialize_deserialize_inconsistent_monitor() {
 +      // Test deserializing a ChannelManager with an out-of-date ChannelMonitor
 +      let mut nodes = create_network(4, &[None, None, None, None]);
 +      create_announced_chan_between_nodes(&nodes, 0, 1, LocalFeatures::new(), LocalFeatures::new());
 +      create_announced_chan_between_nodes(&nodes, 2, 0, LocalFeatures::new(), LocalFeatures::new());
 +      let (_, _, channel_id, funding_tx) = create_announced_chan_between_nodes(&nodes, 0, 3, LocalFeatures::new(), LocalFeatures::new());
 +
 +      let (our_payment_preimage, _) = route_payment(&nodes[2], &[&nodes[0], &nodes[1]], 1000000);
 +
 +      // Serialize the ChannelManager here; the monitors we keep up-to-date and serialize after the payment below
 +      let nodes_0_serialized = nodes[0].node.encode();
 +
 +      route_payment(&nodes[0], &[&nodes[3]], 1000000);
 +      nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
 +      nodes[2].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
 +      nodes[3].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
 +
 +      // Now serialize the ChannelMonitors (which are now out-of-sync with the ChannelManager
 +      // for the channel with nodes[3])
 +      let mut node_0_monitors_serialized = Vec::new();
 +      for monitor in nodes[0].chan_monitor.simple_monitor.monitors.lock().unwrap().iter() {
 +              let mut writer = test_utils::TestVecWriter(Vec::new());
 +              monitor.1.write_for_disk(&mut writer).unwrap();
 +              node_0_monitors_serialized.push(writer.0);
 +      }
 +
 +      nodes[0].chan_monitor = Arc::new(test_utils::TestChannelMonitor::new(nodes[0].chain_monitor.clone(), nodes[0].tx_broadcaster.clone(), Arc::new(test_utils::TestLogger::new()), Arc::new(test_utils::TestFeeEstimator { sat_per_kw: 253 })));
 +      let mut node_0_monitors = Vec::new();
 +      for serialized in node_0_monitors_serialized.iter() {
 +              let mut read = &serialized[..];
 +              let (_, monitor) = <(Sha256dHash, ChannelMonitor)>::read(&mut read, Arc::new(test_utils::TestLogger::new())).unwrap();
 +              assert!(read.is_empty());
 +              node_0_monitors.push(monitor);
 +      }
 +
 +      let mut nodes_0_read = &nodes_0_serialized[..];
 +      let keys_manager = Arc::new(test_utils::TestKeysInterface::new(&nodes[0].node_seed, Network::Testnet, Arc::new(test_utils::TestLogger::new())));
 +      let (_, nodes_0_deserialized) = <(Sha256dHash, ChannelManager)>::read(&mut nodes_0_read, ChannelManagerReadArgs {
 +              default_config: UserConfig::new(),
 +              keys_manager,
 +              fee_estimator: Arc::new(test_utils::TestFeeEstimator { sat_per_kw: 253 }),
 +              monitor: nodes[0].chan_monitor.clone(),
 +              chain_monitor: nodes[0].chain_monitor.clone(),
 +              tx_broadcaster: nodes[0].tx_broadcaster.clone(),
 +              logger: Arc::new(test_utils::TestLogger::new()),
 +              channel_monitors: &node_0_monitors.iter().map(|monitor| { (monitor.get_funding_txo().unwrap(), monitor) }).collect(),
 +      }).unwrap();
 +      assert!(nodes_0_read.is_empty());
 +
 +      { // Channel close should result in a commitment tx and an HTLC tx
 +              let txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
 +              assert_eq!(txn.len(), 2);
 +              assert_eq!(txn[0].input[0].previous_output.txid, funding_tx.txid());
 +              assert_eq!(txn[1].input[0].previous_output.txid, txn[0].txid());
 +      }
 +
 +      for monitor in node_0_monitors.drain(..) {
 +              assert!(nodes[0].chan_monitor.add_update_monitor(monitor.get_funding_txo().unwrap(), monitor).is_ok());
 +              check_added_monitors!(nodes[0], 1);
 +      }
 +      nodes[0].node = Arc::new(nodes_0_deserialized);
 +
 +      // nodes[1] and nodes[2] have no lost state with nodes[0]...
 +      reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
 +      reconnect_nodes(&nodes[0], &nodes[2], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
 +      //... and we can even still claim the payment!
-       claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage);
++      claim_payment(&nodes[2], &[&nodes[0], &nodes[1]], our_payment_preimage, 1_000_000);
 +
 +      nodes[3].node.peer_connected(&nodes[0].node.get_our_node_id());
 +      let reestablish = get_event_msg!(nodes[3], MessageSendEvent::SendChannelReestablish, nodes[0].node.get_our_node_id());
 +      nodes[0].node.peer_connected(&nodes[3].node.get_our_node_id());
 +      if let Err(msgs::LightningError { action: msgs::ErrorAction::SendErrorMessage { msg }, .. }) = nodes[0].node.handle_channel_reestablish(&nodes[3].node.get_our_node_id(), &reestablish) {
 +              assert_eq!(msg.channel_id, channel_id);
 +      } else { panic!("Unexpected result"); }
 +}
 +
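 +// Drains a node's SpendableOutputs events and builds a simple one-input spend of each
 +// descriptor it finds (P2WPKH, CSV-delayed P2WSH, and static outputs re-derived from the
 +// node seed at $der_idx), returning the spend transactions for inspection.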
 +macro_rules! check_spendable_outputs {
 +      ($node: expr, $der_idx: expr) => {
 +              {
 +                      let events = $node.chan_monitor.simple_monitor.get_and_clear_pending_events();
 +                      let mut txn = Vec::new();
 +                      for event in events {
 +                              match event {
 +                                      Event::SpendableOutputs { ref outputs } => {
 +                                              for outp in outputs {
 +                                                      match *outp {
 +                                                              SpendableOutputDescriptor::DynamicOutputP2WPKH { ref outpoint, ref key, ref output } => {
 +                                                                      let input = TxIn {
 +                                                                              previous_output: outpoint.clone(),
 +                                                                              script_sig: Script::new(),
 +                                                                              sequence: 0,
 +                                                                              witness: Vec::new(),
 +                                                                      };
 +                                                                      let outp = TxOut {
 +                                                                              script_pubkey: Builder::new().push_opcode(opcodes::all::OP_RETURN).into_script(),
 +                                                                              value: output.value,
 +                                                                      };
 +                                                                      let mut spend_tx = Transaction {
 +                                                                              version: 2,
 +                                                                              lock_time: 0,
 +                                                                              input: vec![input],
 +                                                                              output: vec![outp],
 +                                                                      };
 +                                                                      let secp_ctx = Secp256k1::new();
 +                                                                      let remotepubkey = PublicKey::from_secret_key(&secp_ctx, &key);
 +                                                                      let witness_script = Address::p2pkh(&::bitcoin::PublicKey{compressed: true, key: remotepubkey}, Network::Testnet).script_pubkey();
 +                                                                      let sighash = Message::from_slice(&bip143::SighashComponents::new(&spend_tx).sighash_all(&spend_tx.input[0], &witness_script, output.value)[..]).unwrap();
 +                                                                      let remotesig = secp_ctx.sign(&sighash, key);
 +                                                                      spend_tx.input[0].witness.push(remotesig.serialize_der().to_vec());
 +                                                                      spend_tx.input[0].witness[0].push(SigHashType::All as u8);
 +                                                                      spend_tx.input[0].witness.push(remotepubkey.serialize().to_vec());
 +                                                                      txn.push(spend_tx);
 +                                                              },
 +                                                              SpendableOutputDescriptor::DynamicOutputP2WSH { ref outpoint, ref key, ref witness_script, ref to_self_delay, ref output } => {
 +                                                                      let input = TxIn {
 +                                                                              previous_output: outpoint.clone(),
 +                                                                              script_sig: Script::new(),
 +                                                                              sequence: *to_self_delay as u32,
 +                                                                              witness: Vec::new(),
 +                                                                      };
 +                                                                      let outp = TxOut {
 +                                                                              script_pubkey: Builder::new().push_opcode(opcodes::all::OP_RETURN).into_script(),
 +                                                                              value: output.value,
 +                                                                      };
 +                                                                      let mut spend_tx = Transaction {
 +                                                                              version: 2,
 +                                                                              lock_time: 0,
 +                                                                              input: vec![input],
 +                                                                              output: vec![outp],
 +                                                                      };
 +                                                                      let secp_ctx = Secp256k1::new();
 +                                                                      let sighash = Message::from_slice(&bip143::SighashComponents::new(&spend_tx).sighash_all(&spend_tx.input[0], witness_script, output.value)[..]).unwrap();
 +                                                                      let local_delaysig = secp_ctx.sign(&sighash, key);
 +                                                                      spend_tx.input[0].witness.push(local_delaysig.serialize_der().to_vec());
 +                                                                      spend_tx.input[0].witness[0].push(SigHashType::All as u8);
 +                                                                      spend_tx.input[0].witness.push(vec!(0));
 +                                                                      spend_tx.input[0].witness.push(witness_script.clone().into_bytes());
 +                                                                      txn.push(spend_tx);
 +                                                              },
 +                                                              SpendableOutputDescriptor::StaticOutput { ref outpoint, ref output } => {
 +                                                                      let secp_ctx = Secp256k1::new();
 +                                                                      let input = TxIn {
 +                                                                              previous_output: outpoint.clone(),
 +                                                                              script_sig: Script::new(),
 +                                                                              sequence: 0,
 +                                                                              witness: Vec::new(),
 +                                                                      };
 +                                                                      let outp = TxOut {
 +                                                                              script_pubkey: Builder::new().push_opcode(opcodes::all::OP_RETURN).into_script(),
 +                                                                              value: output.value,
 +                                                                      };
 +                                                                      let mut spend_tx = Transaction {
 +                                                                              version: 2,
 +                                                                              lock_time: 0,
 +                                                                              input: vec![input],
 +                                                                              output: vec![outp.clone()],
 +                                                                      };
 +                                                                      let secret = {
 +                                                                              match ExtendedPrivKey::new_master(Network::Testnet, &$node.node_seed) {
 +                                                                                      Ok(master_key) => {
 +                                                                                              match master_key.ckd_priv(&secp_ctx, ChildNumber::from_hardened_idx($der_idx).expect("key space exhausted")) {
 +                                                                                                      Ok(key) => key,
 +                                                                                                      Err(_) => panic!("Your RNG is busted"),
 +                                                                                              }
 +                                                                                      }
 +                                                                                      Err(_) => panic!("Your rng is busted"),
 +                                                                              }
 +                                                                      };
 +                                                                      let pubkey = ExtendedPubKey::from_private(&secp_ctx, &secret).public_key;
 +                                                                      let witness_script = Address::p2pkh(&pubkey, Network::Testnet).script_pubkey();
 +                                                                      let sighash = Message::from_slice(&bip143::SighashComponents::new(&spend_tx).sighash_all(&spend_tx.input[0], &witness_script, output.value)[..]).unwrap();
 +                                                                      let sig = secp_ctx.sign(&sighash, &secret.private_key.key);
 +                                                                      spend_tx.input[0].witness.push(sig.serialize_der().to_vec());
 +                                                                      spend_tx.input[0].witness[0].push(SigHashType::All as u8);
 +                                                                      spend_tx.input[0].witness.push(pubkey.key.serialize().to_vec());
 +                                                                      txn.push(spend_tx);
 +                                                              },
 +                                                      }
 +                                              }
 +                                      },
 +                                      _ => panic!("Unexpected event"),
 +                              };
 +                      }
 +                      txn
 +              }
 +      }
 +}
 +
 +#[test]
 +fn test_claim_sizeable_push_msat() {
 +      // Incidentally test SpendableOutputs event generation due to detection of the to_local output on the commitment tx
 +      let nodes = create_network(2, &[None, None]);
 +
 +      let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 99000000, LocalFeatures::new(), LocalFeatures::new());
 +      nodes[1].node.force_close_channel(&chan.2);
 +      check_closed_broadcast!(nodes[1]);
 +      let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
 +      assert_eq!(node_txn.len(), 1);
 +      check_spends!(node_txn[0], chan.3.clone());
 +      assert_eq!(node_txn[0].output.len(), 2); // We can't force trimming of the to_remote output, as channel_reserve_satoshis blocks us from doing so at channel opening
 +
 +      let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
 +      nodes[1].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![node_txn[0].clone()] }, 0);
 +      let spend_txn = check_spendable_outputs!(nodes[1], 1);
 +      assert_eq!(spend_txn.len(), 1);
 +      check_spends!(spend_txn[0], node_txn[0].clone());
 +}
 +
 +#[test]
 +fn test_claim_on_remote_sizeable_push_msat() {
 +      // Same test as the previous one, but on the remote commitment tx, as per_commitment_point registration changes depending on whether you're the funder or fundee and the
 +      // to_remote output is encumbered by a P2WPKH
 +
 +      let nodes = create_network(2, &[None, None]);
 +
 +      let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 99000000, LocalFeatures::new(), LocalFeatures::new());
 +      nodes[0].node.force_close_channel(&chan.2);
 +      check_closed_broadcast!(nodes[0]);
 +
 +      let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
 +      assert_eq!(node_txn.len(), 1);
 +      check_spends!(node_txn[0], chan.3.clone());
 +      assert_eq!(node_txn[0].output.len(), 2); // We can't force trimming of the to_remote output, as channel_reserve_satoshis blocks us from doing so at channel opening
 +
 +      let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
 +      nodes[1].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![node_txn[0].clone()] }, 0);
 +      check_closed_broadcast!(nodes[1]);
 +      let spend_txn = check_spendable_outputs!(nodes[1], 1);
 +      assert_eq!(spend_txn.len(), 2);
 +      assert_eq!(spend_txn[0], spend_txn[1]);
 +      check_spends!(spend_txn[0], node_txn[0].clone());
 +}
 +
 +#[test]
 +fn test_claim_on_remote_revoked_sizeable_push_msat() {
 +      // Same test as the previous one, but on the remote revoked commitment tx, as per_commitment_point registration changes depending on whether you're the funder or fundee and the
 +      // to_remote output is encumbered by a P2WPKH
 +
 +      let nodes = create_network(2, &[None, None]);
 +
 +      let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 59000000, LocalFeatures::new(), LocalFeatures::new());
 +      let payment_preimage = route_payment(&nodes[0], &vec!(&nodes[1])[..], 3000000).0;
 +      let revoked_local_txn = nodes[0].node.channel_state.lock().unwrap().by_id.get(&chan.2).unwrap().last_local_commitment_txn.clone();
 +      assert_eq!(revoked_local_txn[0].input.len(), 1);
 +      assert_eq!(revoked_local_txn[0].input[0].previous_output.txid, chan.3.txid());
 +
-       assert!(nodes[1].node.claim_funds(payment_preimage));
++      claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage, 3_000_000);
 +      let  header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
 +      nodes[1].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![revoked_local_txn[0].clone()] }, 1);
 +      check_closed_broadcast!(nodes[1]);
 +
 +      let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
 +      let spend_txn = check_spendable_outputs!(nodes[1], 1);
 +      assert_eq!(spend_txn.len(), 4);
 +      assert_eq!(spend_txn[0], spend_txn[2]); // to_remote output on revoked remote commitment_tx
 +      check_spends!(spend_txn[0], revoked_local_txn[0].clone());
 +      assert_eq!(spend_txn[1], spend_txn[3]); // to_local output on local commitment tx
 +      check_spends!(spend_txn[1], node_txn[0].clone());
 +}
 +
 +#[test]
 +fn test_static_spendable_outputs_preimage_tx() {
 +      let nodes = create_network(2, &[None, None]);
 +
 +      // Create some initial channels
 +      let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1, LocalFeatures::new(), LocalFeatures::new());
 +
 +      let payment_preimage = route_payment(&nodes[0], &vec!(&nodes[1])[..], 3000000).0;
 +
 +      let commitment_tx = nodes[0].node.channel_state.lock().unwrap().by_id.get(&chan_1.2).unwrap().last_local_commitment_txn.clone();
 +      assert_eq!(commitment_tx[0].input.len(), 1);
 +      assert_eq!(commitment_tx[0].input[0].previous_output.txid, chan_1.3.txid());
 +
 +      // Settle A's commitment tx on B's chain
 +      let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
-       claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage);
++      assert!(nodes[1].node.claim_funds(payment_preimage, 3_000_000));
 +      check_added_monitors!(nodes[1], 1);
 +      nodes[1].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![commitment_tx[0].clone()] }, 1);
 +      let events = nodes[1].node.get_and_clear_pending_msg_events();
 +      match events[0] {
 +              MessageSendEvent::UpdateHTLCs { .. } => {},
 +              _ => panic!("Unexpected event"),
 +      }
 +      match events[1] {
 +              MessageSendEvent::BroadcastChannelUpdate { .. } => {},
 +              _ => panic!("Unexepected event"),
 +      }
 +
 +      // Check B's monitor was able to send back output descriptor event for preimage tx on A's commitment tx
 +      let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap(); // ChannelManager : 1 (local commitment tx), ChannelMonitor: 2 (1 preimage tx) * 2 (block-rescan)
 +      check_spends!(node_txn[0], commitment_tx[0].clone());
 +      assert_eq!(node_txn[0], node_txn[2]);
 +      assert_eq!(node_txn[0].input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
 +      check_spends!(node_txn[1], chan_1.3.clone());
 +
 +      let spend_txn = check_spendable_outputs!(nodes[1], 1);
 +      assert_eq!(spend_txn.len(), 2);
 +      assert_eq!(spend_txn[0], spend_txn[1]);
 +      check_spends!(spend_txn[0], node_txn[0].clone());
 +}
 +
 +#[test]
 +fn test_static_spendable_outputs_justice_tx_revoked_commitment_tx() {
 +      let nodes = create_network(2, &[None, None]);
 +
 +      // Create some initial channels
 +      let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1, LocalFeatures::new(), LocalFeatures::new());
 +
 +      let payment_preimage = route_payment(&nodes[0], &vec!(&nodes[1])[..], 3000000).0;
 +      let revoked_local_txn = nodes[0].node.channel_state.lock().unwrap().by_id.iter().next().unwrap().1.last_local_commitment_txn.clone();
 +      assert_eq!(revoked_local_txn[0].input.len(), 1);
 +      assert_eq!(revoked_local_txn[0].input[0].previous_output.txid, chan_1.3.txid());
 +
-       claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage);
++      claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage, 3_000_000);
 +
 +      let  header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
 +      nodes[1].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![revoked_local_txn[0].clone()] }, 1);
 +      check_closed_broadcast!(nodes[1]);
 +
 +      let mut node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
 +      assert_eq!(node_txn.len(), 3);
 +      assert_eq!(node_txn.pop().unwrap(), node_txn[0]);
 +      assert_eq!(node_txn[0].input.len(), 2);
 +      check_spends!(node_txn[0], revoked_local_txn[0].clone());
 +
 +      let spend_txn = check_spendable_outputs!(nodes[1], 1);
 +      assert_eq!(spend_txn.len(), 2);
 +      assert_eq!(spend_txn[0], spend_txn[1]);
 +      check_spends!(spend_txn[0], node_txn[0].clone());
 +}
 +
 +#[test]
 +fn test_static_spendable_outputs_justice_tx_revoked_htlc_timeout_tx() {
 +      let nodes = create_network(2, &[None, None]);
 +
 +      // Create some initial channels
 +      let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1, LocalFeatures::new(), LocalFeatures::new());
 +
 +      let payment_preimage = route_payment(&nodes[0], &vec!(&nodes[1])[..], 3000000).0;
 +      let revoked_local_txn = nodes[0].node.channel_state.lock().unwrap().by_id.get(&chan_1.2).unwrap().last_local_commitment_txn.clone();
 +      assert_eq!(revoked_local_txn[0].input.len(), 1);
 +      assert_eq!(revoked_local_txn[0].input[0].previous_output.txid, chan_1.3.txid());
 +
-       claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage);
++      claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage, 3_000_000);
 +
 +      let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
 +      // A will generate HTLC-Timeout from revoked commitment tx
 +      nodes[0].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![revoked_local_txn[0].clone()] }, 1);
 +      check_closed_broadcast!(nodes[0]);
 +
 +      let revoked_htlc_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
 +      assert_eq!(revoked_htlc_txn.len(), 3);
 +      assert_eq!(revoked_htlc_txn[0], revoked_htlc_txn[2]);
 +      assert_eq!(revoked_htlc_txn[0].input.len(), 1);
 +      assert_eq!(revoked_htlc_txn[0].input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
 +      check_spends!(revoked_htlc_txn[0], revoked_local_txn[0].clone());
 +      check_spends!(revoked_htlc_txn[1], chan_1.3.clone());
 +
 +      // B will generate justice tx from A's revoked commitment/HTLC tx
 +      nodes[1].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![revoked_local_txn[0].clone(), revoked_htlc_txn[0].clone()] }, 1);
 +      check_closed_broadcast!(nodes[1]);
 +
 +      let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
 +      assert_eq!(node_txn.len(), 4);
 +      assert_eq!(node_txn[3].input.len(), 1);
 +      check_spends!(node_txn[3], revoked_htlc_txn[0].clone());
 +
 +      // Check B's ChannelMonitor was able to generate the right spendable output descriptor
 +      let spend_txn = check_spendable_outputs!(nodes[1], 1);
 +      assert_eq!(spend_txn.len(), 3);
 +      assert_eq!(spend_txn[0], spend_txn[1]);
 +      check_spends!(spend_txn[0], node_txn[0].clone());
 +      check_spends!(spend_txn[2], node_txn[3].clone());
 +}
 +
 +#[test]
 +fn test_static_spendable_outputs_justice_tx_revoked_htlc_success_tx() {
 +      let nodes = create_network(2, &[None, None]);
 +
 +      // Create some initial channels
 +      let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1, LocalFeatures::new(), LocalFeatures::new());
 +
 +      let payment_preimage = route_payment(&nodes[0], &vec!(&nodes[1])[..], 3000000).0;
 +      let revoked_local_txn = nodes[1].node.channel_state.lock().unwrap().by_id.get(&chan_1.2).unwrap().last_local_commitment_txn.clone();
 +      assert_eq!(revoked_local_txn[0].input.len(), 1);
 +      assert_eq!(revoked_local_txn[0].input[0].previous_output.txid, chan_1.3.txid());
 +
-       send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 8000000);
-       send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 8000000);
++      claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage, 3_000_000);
 +
 +      let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
 +      // B will generate HTLC-Success from revoked commitment tx
 +      nodes[1].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![revoked_local_txn[0].clone()] }, 1);
 +      check_closed_broadcast!(nodes[1]);
 +      let revoked_htlc_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
 +
 +      assert_eq!(revoked_htlc_txn.len(), 3);
 +      assert_eq!(revoked_htlc_txn[0], revoked_htlc_txn[2]);
 +      assert_eq!(revoked_htlc_txn[0].input.len(), 1);
 +      assert_eq!(revoked_htlc_txn[0].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
 +      check_spends!(revoked_htlc_txn[0], revoked_local_txn[0].clone());
 +
 +      // A will generate justice tx from B's revoked commitment/HTLC tx
 +      nodes[0].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![revoked_local_txn[0].clone(), revoked_htlc_txn[0].clone()] }, 1);
 +      check_closed_broadcast!(nodes[0]);
 +
 +      let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
 +      assert_eq!(node_txn.len(), 4);
 +      assert_eq!(node_txn[3].input.len(), 1);
 +      check_spends!(node_txn[3], revoked_htlc_txn[0].clone());
 +
 +      // Check A's ChannelMonitor was able to generate the right spendable output descriptor
 +      let spend_txn = check_spendable_outputs!(nodes[0], 1);
 +      assert_eq!(spend_txn.len(), 5);
 +      assert_eq!(spend_txn[0], spend_txn[2]);
 +      assert_eq!(spend_txn[1], spend_txn[3]);
 +      check_spends!(spend_txn[0], revoked_local_txn[0].clone()); // spending to_remote output from revoked local tx
 +      check_spends!(spend_txn[1], node_txn[2].clone()); // spending justice tx output from revoked local tx htlc received output
 +      check_spends!(spend_txn[4], node_txn[3].clone()); // spending justice tx output on htlc success tx
 +}
 +
 +#[test]
 +fn test_onchain_to_onchain_claim() {
 +      // Test that in case of channel closure, we detect the state of the output via
 +      // ChainWatchInterface and claim the HTLC on the downstream peer's remote commitment tx.
 +      // First, have C claim an HTLC against its own latest commitment transaction.
 +      // Then, broadcast these to B, which should update the monitor downstream on the A<->B
 +      // channel.
 +      // Finally, check that B will claim the HTLC output if A's latest commitment transaction
 +      // gets broadcast.
 +
 +      let nodes = create_network(3, &[None, None, None]);
 +
 +      // Create some initial channels
 +      let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1, LocalFeatures::new(), LocalFeatures::new());
 +      let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2, LocalFeatures::new(), LocalFeatures::new());
 +
 +      // Rebalance the network a bit by relaying payments through all the channels ...
-       nodes[2].node.claim_funds(payment_preimage);
++      send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 8000000, 8_000_000);
++      send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 8000000, 8_000_000);
 +
 +      let (payment_preimage, _payment_hash) = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), 3000000);
 +      let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42};
 +      let commitment_tx = nodes[2].node.channel_state.lock().unwrap().by_id.get(&chan_2.2).unwrap().last_local_commitment_txn.clone();
 +      check_spends!(commitment_tx[0], chan_2.3.clone());
-       nodes[2].node.claim_funds(our_payment_preimage);
++      nodes[2].node.claim_funds(payment_preimage, 3_000_000);
 +      check_added_monitors!(nodes[2], 1);
 +      let updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
 +      assert!(updates.update_add_htlcs.is_empty());
 +      assert!(updates.update_fail_htlcs.is_empty());
 +      assert_eq!(updates.update_fulfill_htlcs.len(), 1);
 +      assert!(updates.update_fail_malformed_htlcs.is_empty());
 +
 +      nodes[2].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![commitment_tx[0].clone()]}, 1);
 +      check_closed_broadcast!(nodes[2]);
 +
 +      let c_txn = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); // ChannelManager : 2 (commitment tx, HTLC-Success tx), ChannelMonitor : 1 (HTLC-Success tx)
 +      assert_eq!(c_txn.len(), 3);
 +      assert_eq!(c_txn[0], c_txn[2]);
 +      assert_eq!(commitment_tx[0], c_txn[1]);
 +      check_spends!(c_txn[1], chan_2.3.clone());
 +      check_spends!(c_txn[2], c_txn[1].clone());
 +      assert_eq!(c_txn[1].input[0].witness.clone().last().unwrap().len(), 71);
 +      assert_eq!(c_txn[2].input[0].witness.clone().last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
 +      assert!(c_txn[0].output[0].script_pubkey.is_v0_p2wsh()); // revokeable output
 +      assert_eq!(c_txn[0].lock_time, 0); // Success tx
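 +      // Per BOLT 3, HTLC-Success txs use lock_time 0 while HTLC-Timeout txs set lock_time
 +      // to the HTLC's cltv_expiry; the lock_time checks here and below rely on that.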
 +
 +      // Broadcast C's commitment tx and HTLC-Success on B's chain; B should be able to extract the preimage and update the downstream monitor
 +      nodes[1].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![c_txn[1].clone(), c_txn[2].clone()]}, 1);
 +      {
 +              let mut b_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
 +              assert_eq!(b_txn.len(), 4);
 +              assert_eq!(b_txn[0], b_txn[3]);
 +              check_spends!(b_txn[1], chan_2.3); // B local commitment tx, issued by ChannelManager
 +              check_spends!(b_txn[2], b_txn[1].clone()); // HTLC-Timeout on B local commitment tx, issued by ChannelManager
 +              assert_eq!(b_txn[2].input[0].witness.clone().last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
 +              assert!(b_txn[2].output[0].script_pubkey.is_v0_p2wsh()); // revokeable output
 +              assert_ne!(b_txn[2].lock_time, 0); // Timeout tx
 +              check_spends!(b_txn[0], c_txn[1].clone()); // timeout tx on C remote commitment tx, issued by ChannelMonitor, * 2 due to block rescan
 +              assert_eq!(b_txn[0].input[0].witness.clone().last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
 +              assert!(b_txn[0].output[0].script_pubkey.is_v0_p2wpkh()); // direct payment
 +              assert_ne!(b_txn[2].lock_time, 0); // Timeout tx
 +              b_txn.clear();
 +      }
 +      let msg_events = nodes[1].node.get_and_clear_pending_msg_events();
 +      check_added_monitors!(nodes[1], 1);
 +      match msg_events[0] {
 +              MessageSendEvent::BroadcastChannelUpdate {  .. } => {},
 +              _ => panic!("Unexpected event"),
 +      }
 +      match msg_events[1] {
 +              MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, .. } } => {
 +                      assert!(update_add_htlcs.is_empty());
 +                      assert!(update_fail_htlcs.is_empty());
 +                      assert_eq!(update_fulfill_htlcs.len(), 1);
 +                      assert!(update_fail_malformed_htlcs.is_empty());
 +                      assert_eq!(nodes[0].node.get_our_node_id(), *node_id);
 +              },
 +              _ => panic!("Unexpected event"),
 +      };
 +      // Broadcast A's commitment tx on B's chain to see if we are able to claim inbound HTLC with our HTLC-Success tx
 +      let commitment_tx = nodes[0].node.channel_state.lock().unwrap().by_id.get(&chan_1.2).unwrap().last_local_commitment_txn.clone();
 +      nodes[1].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![commitment_tx[0].clone()]}, 1);
 +      let b_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
 +      assert_eq!(b_txn.len(), 3);
 +      check_spends!(b_txn[1], chan_1.3); // Local commitment tx, issued by ChannelManager
 +      assert_eq!(b_txn[0], b_txn[2]); // HTLC-Success tx, issued by ChannelMonitor, * 2 due to block rescan
 +      check_spends!(b_txn[0], commitment_tx[0].clone());
 +      assert_eq!(b_txn[0].input[0].witness.clone().last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
 +      assert!(b_txn[0].output[0].script_pubkey.is_v0_p2wpkh()); // direct payment
 +      assert_eq!(b_txn[2].lock_time, 0); // Success tx
 +
 +      check_closed_broadcast!(nodes[1]);
 +}
 +
 +#[test]
 +fn test_duplicate_payment_hash_one_failure_one_success() {
 +      // Topology : A --> B --> C
 +      // We route two payments with the same hash between B and C; one will time out, the other will be claimed successfully
 +      let mut nodes = create_network(3, &[None, None, None]);
 +
 +      create_announced_chan_between_nodes(&nodes, 0, 1, LocalFeatures::new(), LocalFeatures::new());
 +      let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2, LocalFeatures::new(), LocalFeatures::new());
 +
 +      let (our_payment_preimage, duplicate_payment_hash) = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 900000);
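 +      // The test helpers derive each payment preimage from network_payment_count, so
 +      // winding the counter back one makes the next route_payment reuse the same hash.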
 +      *nodes[0].network_payment_count.borrow_mut() -= 1;
 +      assert_eq!(route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 900000).1, duplicate_payment_hash);
 +
 +      let commitment_txn = nodes[2].node.channel_state.lock().unwrap().by_id.get(&chan_2.2).unwrap().last_local_commitment_txn.clone();
 +      assert_eq!(commitment_txn[0].input.len(), 1);
 +      check_spends!(commitment_txn[0], chan_2.3.clone());
 +
 +      let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
 +      nodes[1].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![commitment_txn[0].clone()] }, 1);
 +      check_closed_broadcast!(nodes[1]);
 +
 +      let htlc_timeout_tx;
 +      { // Extract one of the two HTLC-Timeout transactions
 +              let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
 +              assert_eq!(node_txn.len(), 7);
 +              assert_eq!(node_txn[0], node_txn[5]);
 +              assert_eq!(node_txn[1], node_txn[6]);
 +              check_spends!(node_txn[0], commitment_txn[0].clone());
 +              assert_eq!(node_txn[0].input.len(), 1);
 +              check_spends!(node_txn[1], commitment_txn[0].clone());
 +              assert_eq!(node_txn[1].input.len(), 1);
 +              assert_ne!(node_txn[0].input[0], node_txn[1].input[0]);
 +              check_spends!(node_txn[2], chan_2.3.clone());
 +              check_spends!(node_txn[3], node_txn[2].clone());
 +              check_spends!(node_txn[4], node_txn[2].clone());
 +              htlc_timeout_tx = node_txn[1].clone();
 +      }
 +
-       nodes[1].node.claim_funds(payment_preimage);
++      nodes[2].node.claim_funds(our_payment_preimage, 900_000);
 +      nodes[2].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![commitment_txn[0].clone()] }, 1);
 +      check_added_monitors!(nodes[2], 2);
 +      let events = nodes[2].node.get_and_clear_pending_msg_events();
 +      match events[0] {
 +              MessageSendEvent::UpdateHTLCs { .. } => {},
 +              _ => panic!("Unexpected event"),
 +      }
 +      match events[1] {
 +              MessageSendEvent::BroadcastChannelUpdate { .. } => {},
 +              _ => panic!("Unexepected event"),
 +      }
 +      let htlc_success_txn: Vec<_> = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
 +      assert_eq!(htlc_success_txn.len(), 5);
 +      check_spends!(htlc_success_txn[2], chan_2.3.clone());
 +      assert_eq!(htlc_success_txn[0], htlc_success_txn[3]);
 +      assert_eq!(htlc_success_txn[0].input.len(), 1);
 +      assert_eq!(htlc_success_txn[0].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
 +      assert_eq!(htlc_success_txn[1], htlc_success_txn[4]);
 +      assert_eq!(htlc_success_txn[1].input.len(), 1);
 +      assert_eq!(htlc_success_txn[1].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
 +      assert_ne!(htlc_success_txn[0].input[0], htlc_success_txn[1].input[0]);
 +      check_spends!(htlc_success_txn[0], commitment_txn[0].clone());
 +      check_spends!(htlc_success_txn[1], commitment_txn[0].clone());
 +
 +      nodes[1].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![htlc_timeout_tx] }, 200);
 +      connect_blocks(&nodes[1].chain_monitor, ANTI_REORG_DELAY - 1, 200, true, header.bitcoin_hash());
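 +      // The monitor waits ANTI_REORG_DELAY confirmations before treating the on-chain HTLC
 +      // resolution as final, so connect enough blocks for the failure to be passed backwards.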
 +      expect_pending_htlcs_forwardable!(nodes[1]);
 +      let htlc_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
 +      assert!(htlc_updates.update_add_htlcs.is_empty());
 +      assert_eq!(htlc_updates.update_fail_htlcs.len(), 1);
 +      assert_eq!(htlc_updates.update_fail_htlcs[0].htlc_id, 1);
 +      assert!(htlc_updates.update_fulfill_htlcs.is_empty());
 +      assert!(htlc_updates.update_fail_malformed_htlcs.is_empty());
 +      check_added_monitors!(nodes[1], 1);
 +
 +      nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &htlc_updates.update_fail_htlcs[0]).unwrap();
 +      assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
 +      {
 +              commitment_signed_dance!(nodes[0], nodes[1], &htlc_updates.commitment_signed, false, true);
 +              let events = nodes[0].node.get_and_clear_pending_msg_events();
 +              assert_eq!(events.len(), 1);
 +              match events[0] {
 +                      MessageSendEvent::PaymentFailureNetworkUpdate { update: msgs::HTLCFailChannelUpdate::ChannelClosed { .. }  } => {
 +                      },
 +                      _ => { panic!("Unexpected event"); }
 +              }
 +      }
 +      let events = nodes[0].node.get_and_clear_pending_events();
 +      match events[0] {
 +              Event::PaymentFailed { ref payment_hash, .. } => {
 +                      assert_eq!(*payment_hash, duplicate_payment_hash);
 +              }
 +              _ => panic!("Unexpected event"),
 +      }
 +
 +      // Resolve the 2nd HTLC by broadcasting C's HTLC-Success tx on B's chain
 +      nodes[1].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![htlc_success_txn[0].clone()] }, 200);
 +      let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
 +      assert!(updates.update_add_htlcs.is_empty());
 +      assert!(updates.update_fail_htlcs.is_empty());
 +      assert_eq!(updates.update_fulfill_htlcs.len(), 1);
 +      assert_eq!(updates.update_fulfill_htlcs[0].htlc_id, 0);
 +      assert!(updates.update_fail_malformed_htlcs.is_empty());
 +      check_added_monitors!(nodes[1], 1);
 +
 +      nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]).unwrap();
 +      commitment_signed_dance!(nodes[0], nodes[1], &updates.commitment_signed, false);
 +
 +      let events = nodes[0].node.get_and_clear_pending_events();
 +      match events[0] {
 +              Event::PaymentSent { ref payment_preimage } => {
 +                      assert_eq!(*payment_preimage, our_payment_preimage);
 +              }
 +              _ => panic!("Unexpected event"),
 +      }
 +}
 +
 +#[test]
 +fn test_dynamic_spendable_outputs_local_htlc_success_tx() {
 +      let nodes = create_network(2, &[None, None]);
 +
 +      // Create some initial channels
 +      let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1, LocalFeatures::new(), LocalFeatures::new());
 +
 +      let payment_preimage = route_payment(&nodes[0], &vec!(&nodes[1])[..], 9000000).0;
 +      let local_txn = nodes[1].node.channel_state.lock().unwrap().by_id.get(&chan_1.2).unwrap().last_local_commitment_txn.clone();
 +      assert_eq!(local_txn[0].input.len(), 1);
 +      check_spends!(local_txn[0], chan_1.3.clone());
 +
 +      // Give B knowledge of the preimage so it can generate a local HTLC-Success Tx
-       send_payment(&nodes[0], &[&nodes[2], &nodes[3], &nodes[4]], 500000);
-       send_payment(&nodes[1], &[&nodes[2], &nodes[3], &nodes[5]], 500000);
++      nodes[1].node.claim_funds(payment_preimage, 9_000_000);
 +      check_added_monitors!(nodes[1], 1);
 +      let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
 +      nodes[1].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![local_txn[0].clone()] }, 1);
 +      let events = nodes[1].node.get_and_clear_pending_msg_events();
 +      match events[0] {
 +              MessageSendEvent::UpdateHTLCs { .. } => {},
 +              _ => panic!("Unexpected event"),
 +      }
 +      match events[1] {
 +              MessageSendEvent::BroadcastChannelUpdate { .. } => {},
 +              _ => panic!("Unexepected event"),
 +      }
 +      let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
 +      assert_eq!(node_txn[0].input.len(), 1);
 +      assert_eq!(node_txn[0].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
 +      check_spends!(node_txn[0], local_txn[0].clone());
 +
 +      // Verify that B is able to spend its own HTLC-Success tx thanks to spendable output event given back by its ChannelMonitor
 +      let spend_txn = check_spendable_outputs!(nodes[1], 1);
 +      assert_eq!(spend_txn.len(), 2);
 +      check_spends!(spend_txn[0], node_txn[0].clone());
 +      check_spends!(spend_txn[1], node_txn[2].clone());
 +}
 +
 +fn do_test_fail_backwards_unrevoked_remote_announce(deliver_last_raa: bool, announce_latest: bool) {
 +      // Test that we fail backwards the full set of HTLCs we need to when remote broadcasts an
 +      // unrevoked commitment transaction.
 +      // This includes HTLCs which were below the dust threshold as well as HTLCs which were awaiting
 +      // a remote RAA before they could be failed backwards (and combinations thereof).
 +      // We also test duplicate-hash HTLCs by adding two nodes on each side of the target nodes which
 +      // use the same payment hashes.
 +      // Thus, we use a six-node network:
 +      //
 +      // A \         / E
 +      //    - C - D -
 +      // B /         \ F
 +      // And test where C fails back to A/B when D announces its latest commitment transaction
 +      let nodes = create_network(6, &[None, None, None, None, None, None]);
 +
 +      create_announced_chan_between_nodes(&nodes, 0, 2, LocalFeatures::new(), LocalFeatures::new());
 +      create_announced_chan_between_nodes(&nodes, 1, 2, LocalFeatures::new(), LocalFeatures::new());
 +      let chan = create_announced_chan_between_nodes(&nodes, 2, 3, LocalFeatures::new(), LocalFeatures::new());
 +      create_announced_chan_between_nodes(&nodes, 3, 4, LocalFeatures::new(), LocalFeatures::new());
 +      create_announced_chan_between_nodes(&nodes, 3, 5, LocalFeatures::new(), LocalFeatures::new());
 +
 +      // Rebalance and check output sanity...
-       send_payment(&nodes[0], &vec!(&nodes[1])[..], 8000000);
++      send_payment(&nodes[0], &[&nodes[2], &nodes[3], &nodes[4]], 500000, 500_000);
++      send_payment(&nodes[1], &[&nodes[2], &nodes[3], &nodes[5]], 500000, 500_000);
 +      assert_eq!(nodes[3].node.channel_state.lock().unwrap().by_id.get(&chan.2).unwrap().last_local_commitment_txn[0].output.len(), 2);
 +
 +      let ds_dust_limit = nodes[3].node.channel_state.lock().unwrap().by_id.get(&chan.2).unwrap().our_dust_limit_satoshis;
 +      // 0th HTLC:
 +      let (_, payment_hash_1) = route_payment(&nodes[0], &[&nodes[2], &nodes[3], &nodes[4]], ds_dust_limit*1000); // not added < dust limit + HTLC tx fee
 +      // 1st HTLC:
 +      let (_, payment_hash_2) = route_payment(&nodes[0], &[&nodes[2], &nodes[3], &nodes[4]], ds_dust_limit*1000); // not added < dust limit + HTLC tx fee
 +      let route = nodes[1].router.get_route(&nodes[5].node.get_our_node_id(), None, &Vec::new(), ds_dust_limit*1000, TEST_FINAL_CLTV).unwrap();
 +      // 2nd HTLC:
 +      send_along_route_with_hash(&nodes[1], route.clone(), &[&nodes[2], &nodes[3], &nodes[5]], ds_dust_limit*1000, payment_hash_1); // not added < dust limit + HTLC tx fee
 +      // 3rd HTLC:
 +      send_along_route_with_hash(&nodes[1], route, &[&nodes[2], &nodes[3], &nodes[5]], ds_dust_limit*1000, payment_hash_2); // not added < dust limit + HTLC tx fee
 +      // 4th HTLC:
 +      let (_, payment_hash_3) = route_payment(&nodes[0], &[&nodes[2], &nodes[3], &nodes[4]], 1000000);
 +      // 5th HTLC:
 +      let (_, payment_hash_4) = route_payment(&nodes[0], &[&nodes[2], &nodes[3], &nodes[4]], 1000000);
 +      let route = nodes[1].router.get_route(&nodes[5].node.get_our_node_id(), None, &Vec::new(), 1000000, TEST_FINAL_CLTV).unwrap();
 +      // 6th HTLC:
 +      send_along_route_with_hash(&nodes[1], route.clone(), &[&nodes[2], &nodes[3], &nodes[5]], 1000000, payment_hash_3);
 +      // 7th HTLC:
 +      send_along_route_with_hash(&nodes[1], route, &[&nodes[2], &nodes[3], &nodes[5]], 1000000, payment_hash_4);
 +
 +      // 8th HTLC:
 +      let (_, payment_hash_5) = route_payment(&nodes[0], &[&nodes[2], &nodes[3], &nodes[4]], 1000000);
 +      // 9th HTLC:
 +      let route = nodes[1].router.get_route(&nodes[5].node.get_our_node_id(), None, &Vec::new(), ds_dust_limit*1000, TEST_FINAL_CLTV).unwrap();
 +      send_along_route_with_hash(&nodes[1], route, &[&nodes[2], &nodes[3], &nodes[5]], ds_dust_limit*1000, payment_hash_5); // not added < dust limit + HTLC tx fee
 +
 +      // 10th HTLC:
 +      let (_, payment_hash_6) = route_payment(&nodes[0], &[&nodes[2], &nodes[3], &nodes[4]], ds_dust_limit*1000); // not added < dust limit + HTLC tx fee
 +      // 11th HTLC:
 +      let route = nodes[1].router.get_route(&nodes[5].node.get_our_node_id(), None, &Vec::new(), 1000000, TEST_FINAL_CLTV).unwrap();
 +      send_along_route_with_hash(&nodes[1], route, &[&nodes[2], &nodes[3], &nodes[5]], 1000000, payment_hash_6);
 +
 +      // Double-check that six of the new HTLCs were added
 +      // We now have six HTLCs pending over the dust limit and six HTLCs under the dust limit (ie,
 +      // with the to_local and to_remote outputs, the commitment transaction has 8 outputs; the 6
 +      // dust HTLCs are not included).
 +      assert_eq!(nodes[3].node.channel_state.lock().unwrap().by_id.get(&chan.2).unwrap().last_local_commitment_txn.len(), 1);
 +      assert_eq!(nodes[3].node.channel_state.lock().unwrap().by_id.get(&chan.2).unwrap().last_local_commitment_txn[0].output.len(), 8);
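 +      // (Of the twelve HTLCs routed above, the 0th-3rd, 9th, and 10th are below D's dust limit;
 +      // the 4th-8th and 11th are above it, giving the six non-dust HTLC outputs counted here.)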
 +
 +      // Now fail back three of the over-dust-limit and three of the under-dust-limit payments in one go.
 +      // Fail 0th below-dust, 4th above-dust, 8th above-dust, 10th below-dust HTLCs
 +      assert!(nodes[4].node.fail_htlc_backwards(&payment_hash_1));
 +      assert!(nodes[4].node.fail_htlc_backwards(&payment_hash_3));
 +      assert!(nodes[4].node.fail_htlc_backwards(&payment_hash_5));
 +      assert!(nodes[4].node.fail_htlc_backwards(&payment_hash_6));
 +      check_added_monitors!(nodes[4], 0);
 +      expect_pending_htlcs_forwardable!(nodes[4]);
 +      check_added_monitors!(nodes[4], 1);
 +
 +      let four_removes = get_htlc_update_msgs!(nodes[4], nodes[3].node.get_our_node_id());
 +      nodes[3].node.handle_update_fail_htlc(&nodes[4].node.get_our_node_id(), &four_removes.update_fail_htlcs[0]).unwrap();
 +      nodes[3].node.handle_update_fail_htlc(&nodes[4].node.get_our_node_id(), &four_removes.update_fail_htlcs[1]).unwrap();
 +      nodes[3].node.handle_update_fail_htlc(&nodes[4].node.get_our_node_id(), &four_removes.update_fail_htlcs[2]).unwrap();
 +      nodes[3].node.handle_update_fail_htlc(&nodes[4].node.get_our_node_id(), &four_removes.update_fail_htlcs[3]).unwrap();
 +      commitment_signed_dance!(nodes[3], nodes[4], four_removes.commitment_signed, false);
 +
 +      // Fail 3rd below-dust and 7th above-dust HTLCs
 +      assert!(nodes[5].node.fail_htlc_backwards(&payment_hash_2));
 +      assert!(nodes[5].node.fail_htlc_backwards(&payment_hash_4));
 +      check_added_monitors!(nodes[5], 0);
 +      expect_pending_htlcs_forwardable!(nodes[5]);
 +      check_added_monitors!(nodes[5], 1);
 +
 +      let two_removes = get_htlc_update_msgs!(nodes[5], nodes[3].node.get_our_node_id());
 +      nodes[3].node.handle_update_fail_htlc(&nodes[5].node.get_our_node_id(), &two_removes.update_fail_htlcs[0]).unwrap();
 +      nodes[3].node.handle_update_fail_htlc(&nodes[5].node.get_our_node_id(), &two_removes.update_fail_htlcs[1]).unwrap();
 +      commitment_signed_dance!(nodes[3], nodes[5], two_removes.commitment_signed, false);
 +
 +      let ds_prev_commitment_tx = nodes[3].node.channel_state.lock().unwrap().by_id.get(&chan.2).unwrap().last_local_commitment_txn.clone();
 +
 +      expect_pending_htlcs_forwardable!(nodes[3]);
 +      check_added_monitors!(nodes[3], 1);
 +      let six_removes = get_htlc_update_msgs!(nodes[3], nodes[2].node.get_our_node_id());
 +      nodes[2].node.handle_update_fail_htlc(&nodes[3].node.get_our_node_id(), &six_removes.update_fail_htlcs[0]).unwrap();
 +      nodes[2].node.handle_update_fail_htlc(&nodes[3].node.get_our_node_id(), &six_removes.update_fail_htlcs[1]).unwrap();
 +      nodes[2].node.handle_update_fail_htlc(&nodes[3].node.get_our_node_id(), &six_removes.update_fail_htlcs[2]).unwrap();
 +      nodes[2].node.handle_update_fail_htlc(&nodes[3].node.get_our_node_id(), &six_removes.update_fail_htlcs[3]).unwrap();
 +      nodes[2].node.handle_update_fail_htlc(&nodes[3].node.get_our_node_id(), &six_removes.update_fail_htlcs[4]).unwrap();
 +      nodes[2].node.handle_update_fail_htlc(&nodes[3].node.get_our_node_id(), &six_removes.update_fail_htlcs[5]).unwrap();
 +      if deliver_last_raa {
 +              commitment_signed_dance!(nodes[2], nodes[3], six_removes.commitment_signed, false);
 +      } else {
 +              let _cs_last_raa = commitment_signed_dance!(nodes[2], nodes[3], six_removes.commitment_signed, false, true, false, true);
 +      }
 +
 +      // D's latest commitment transaction now contains 1st + 2nd + 9th HTLCs (implicitly, they're
 +      // below the dust limit) and the 5th + 6th + 11th HTLCs. It has failed back the 0th, 3rd, 4th,
 +      // 7th, 8th, and 10th, but as we haven't yet delivered the final RAA to C, the fails haven't
 +      // propagated back to A/B yet (and D has two unrevoked commitment transactions).
 +      //
 +      // We now broadcast the latest commitment transaction, which *should* result in failures for
 +      // the 0th, 1st, 2nd, 3rd, 4th, 7th, 8th, 9th, and 10th HTLCs, ie all the below-dust HTLCs and
 +      // the non-broadcast above-dust HTLCs.
 +      //
 +      // Alternatively, we may broadcast the previous commitment transaction, which should only
 +      // result in failures for the below-dust HTLCs, ie the 0th, 1st, 2nd, 3rd, 9th, and 10th HTLCs.
 +      let ds_last_commitment_tx = nodes[3].node.channel_state.lock().unwrap().by_id.get(&chan.2).unwrap().last_local_commitment_txn.clone();
 +
 +      let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
 +      if announce_latest {
 +              nodes[2].chain_monitor.block_connected_checked(&header, 1, &[&ds_last_commitment_tx[0]], &[1; 1]);
 +      } else {
 +              nodes[2].chain_monitor.block_connected_checked(&header, 1, &[&ds_prev_commitment_tx[0]], &[1; 1]);
 +      }
 +      connect_blocks(&nodes[2].chain_monitor, ANTI_REORG_DELAY - 1, 1, true, header.bitcoin_hash());
 +      check_closed_broadcast!(nodes[2]);
 +      expect_pending_htlcs_forwardable!(nodes[2]);
 +      check_added_monitors!(nodes[2], 2);
 +
 +      let cs_msgs = nodes[2].node.get_and_clear_pending_msg_events();
 +      assert_eq!(cs_msgs.len(), 2);
 +      let mut a_done = false;
 +      for msg in cs_msgs {
 +              match msg {
 +                      MessageSendEvent::UpdateHTLCs { ref node_id, ref updates } => {
 +                              // Both under-dust HTLCs and the above-dust HTLCs that we had already failed
 +                              // backwards should be failed back here.
 +                              let target = if *node_id == nodes[0].node.get_our_node_id() {
 +                                      // If announce_latest, expect 0th, 1st, 4th, 8th, 10th HTLCs, else only 0th, 1st, 10th below-dust HTLCs
 +                                      for htlc in &updates.update_fail_htlcs {
 +                                              assert!(htlc.htlc_id == 1 || htlc.htlc_id == 2 || htlc.htlc_id == 6 || if announce_latest { htlc.htlc_id == 3 || htlc.htlc_id == 5 } else { false });
 +                                      }
 +                                      assert_eq!(updates.update_fail_htlcs.len(), if announce_latest { 5 } else { 3 });
 +                                      assert!(!a_done);
 +                                      a_done = true;
 +                                      &nodes[0]
 +                              } else {
 +                                      // If announce_latest, expect 2nd, 3rd, 7th, 9th HTLCs, else only 2nd, 3rd, 9th below-dust HTLCs
 +                                      for htlc in &updates.update_fail_htlcs {
 +                                              assert!(htlc.htlc_id == 1 || htlc.htlc_id == 2 || htlc.htlc_id == 5 || if announce_latest { htlc.htlc_id == 4 } else { false });
 +                                      }
 +                                      assert_eq!(*node_id, nodes[1].node.get_our_node_id());
 +                                      assert_eq!(updates.update_fail_htlcs.len(), if announce_latest { 4 } else { 3 });
 +                                      &nodes[1]
 +                              };
 +                              target.node.handle_update_fail_htlc(&nodes[2].node.get_our_node_id(), &updates.update_fail_htlcs[0]).unwrap();
 +                              target.node.handle_update_fail_htlc(&nodes[2].node.get_our_node_id(), &updates.update_fail_htlcs[1]).unwrap();
 +                              target.node.handle_update_fail_htlc(&nodes[2].node.get_our_node_id(), &updates.update_fail_htlcs[2]).unwrap();
 +                              if announce_latest {
 +                                      target.node.handle_update_fail_htlc(&nodes[2].node.get_our_node_id(), &updates.update_fail_htlcs[3]).unwrap();
 +                                      if *node_id == nodes[0].node.get_our_node_id() {
 +                                              target.node.handle_update_fail_htlc(&nodes[2].node.get_our_node_id(), &updates.update_fail_htlcs[4]).unwrap();
 +                                      }
 +                              }
 +                              commitment_signed_dance!(target, nodes[2], updates.commitment_signed, false, true);
 +                      },
 +                      _ => panic!("Unexpected event"),
 +              }
 +      }
 +
 +      let as_events = nodes[0].node.get_and_clear_pending_events();
 +      assert_eq!(as_events.len(), if announce_latest { 5 } else { 3 });
 +      let mut as_failds = HashSet::new();
 +      for event in as_events.iter() {
 +              if let &Event::PaymentFailed { ref payment_hash, ref rejected_by_dest, .. } = event {
 +                      assert!(as_failds.insert(*payment_hash));
 +                      if *payment_hash != payment_hash_2 {
 +                              assert_eq!(*rejected_by_dest, deliver_last_raa);
 +                      } else {
 +                              assert!(!rejected_by_dest);
 +                      }
 +              } else { panic!("Unexpected event"); }
 +      }
 +      assert!(as_failds.contains(&payment_hash_1));
 +      assert!(as_failds.contains(&payment_hash_2));
 +      if announce_latest {
 +              assert!(as_failds.contains(&payment_hash_3));
 +              assert!(as_failds.contains(&payment_hash_5));
 +      }
 +      assert!(as_failds.contains(&payment_hash_6));
 +
 +      let bs_events = nodes[1].node.get_and_clear_pending_events();
 +      assert_eq!(bs_events.len(), if announce_latest { 4 } else { 3 });
 +      let mut bs_failds = HashSet::new();
 +      for event in bs_events.iter() {
 +              if let &Event::PaymentFailed { ref payment_hash, ref rejected_by_dest, .. } = event {
 +                      assert!(bs_failds.insert(*payment_hash));
 +                      if *payment_hash != payment_hash_1 && *payment_hash != payment_hash_5 {
 +                              assert_eq!(*rejected_by_dest, deliver_last_raa);
 +                      } else {
 +                              assert!(!rejected_by_dest);
 +                      }
 +              } else { panic!("Unexpected event"); }
 +      }
 +      assert!(bs_failds.contains(&payment_hash_1));
 +      assert!(bs_failds.contains(&payment_hash_2));
 +      if announce_latest {
 +              assert!(bs_failds.contains(&payment_hash_4));
 +      }
 +      assert!(bs_failds.contains(&payment_hash_5));
 +
 +      // For each HTLC which was not failed-back by normal process (ie deliver_last_raa), we should
 +      // get a PaymentFailureNetworkUpdate. A should have gotten 4 HTLCs which were failed-back due
 +      // to unknown-preimage-etc, B should have gotten 2. Thus, in the
 +      // announce_latest && deliver_last_raa case, we should have 5-4=1 and 4-2=2
 +      // PaymentFailureNetworkUpdates.
 +      let as_msg_events = nodes[0].node.get_and_clear_pending_msg_events();
 +      assert_eq!(as_msg_events.len(), if deliver_last_raa { 1 } else if !announce_latest { 3 } else { 5 });
 +      let bs_msg_events = nodes[1].node.get_and_clear_pending_msg_events();
 +      assert_eq!(bs_msg_events.len(), if deliver_last_raa { 2 } else if !announce_latest { 3 } else { 4 });
 +      for event in as_msg_events.iter().chain(bs_msg_events.iter()) {
 +              match event {
 +                      &MessageSendEvent::PaymentFailureNetworkUpdate { .. } => {},
 +                      _ => panic!("Unexpected event"),
 +              }
 +      }
 +}
 +
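 +// The three wrappers below cover the sensible (deliver_last_raa, announce_latest) combinations;
 +// see the note in test_fail_backwards_previous_remote_announce on the fourth.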
 +#[test]
 +fn test_fail_backwards_latest_remote_announce_a() {
 +      do_test_fail_backwards_unrevoked_remote_announce(false, true);
 +}
 +
 +#[test]
 +fn test_fail_backwards_latest_remote_announce_b() {
 +      do_test_fail_backwards_unrevoked_remote_announce(true, true);
 +}
 +
 +#[test]
 +fn test_fail_backwards_previous_remote_announce() {
 +      do_test_fail_backwards_unrevoked_remote_announce(false, false);
 +      // Note that true, false doesn't make sense as it implies we announce a revoked state, which is
 +      // tested for in test_commitment_revoked_fail_backward_exhaustive()
 +}
 +
 +#[test]
 +fn test_dynamic_spendable_outputs_local_htlc_timeout_tx() {
 +      let nodes = create_network(2, &[None, None]);
 +
 +      // Create some initial channels
 +      let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1, LocalFeatures::new(), LocalFeatures::new());
 +
 +      route_payment(&nodes[0], &vec!(&nodes[1])[..], 9000000).0;
 +      let local_txn = nodes[0].node.channel_state.lock().unwrap().by_id.get(&chan_1.2).unwrap().last_local_commitment_txn.clone();
 +      assert_eq!(local_txn[0].input.len(), 1);
 +      check_spends!(local_txn[0], chan_1.3.clone());
 +
 +      // Timeout HTLC on A's chain and so it can generate a HTLC-Timeout tx
 +      let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
 +      nodes[0].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![local_txn[0].clone()] }, 200);
 +      check_closed_broadcast!(nodes[0]);
 +
 +      let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
 +      assert_eq!(node_txn[0].input.len(), 1);
 +      assert_eq!(node_txn[0].input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
 +      check_spends!(node_txn[0], local_txn[0].clone());
 +
 +      // Verify that A is able to spend its own HTLC-Timeout tx thanks to the spendable-output event given back by its ChannelMonitor
 +      let spend_txn = check_spendable_outputs!(nodes[0], 1);
 +      assert_eq!(spend_txn.len(), 8);
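 +      // The eight entries are just two distinct sweeps, each appearing four times: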
 +      assert_eq!(spend_txn[0], spend_txn[2]);
 +      assert_eq!(spend_txn[0], spend_txn[4]);
 +      assert_eq!(spend_txn[0], spend_txn[6]);
 +      assert_eq!(spend_txn[1], spend_txn[3]);
 +      assert_eq!(spend_txn[1], spend_txn[5]);
 +      assert_eq!(spend_txn[1], spend_txn[7]);
 +      check_spends!(spend_txn[0], local_txn[0].clone());
 +      check_spends!(spend_txn[1], node_txn[0].clone());
 +}
 +
 +#[test]
 +fn test_static_output_closing_tx() {
 +      let nodes = create_network(2, &[None, None]);
 +
 +      let chan = create_announced_chan_between_nodes(&nodes, 0, 1, LocalFeatures::new(), LocalFeatures::new());
 +
-       assert!(nodes[1].node.claim_funds(our_payment_preimage));
++      send_payment(&nodes[0], &vec!(&nodes[1])[..], 8000000, 8_000_000);
 +      let closing_tx = close_channel(&nodes[0], &nodes[1], &chan.2, chan.3, true).2;
 +
 +      let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
 +      nodes[0].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![closing_tx.clone()] }, 1);
 +      let spend_txn = check_spendable_outputs!(nodes[0], 2);
 +      assert_eq!(spend_txn.len(), 1);
 +      check_spends!(spend_txn[0], closing_tx.clone());
 +
 +      nodes[1].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![closing_tx.clone()] }, 1);
 +      let spend_txn = check_spendable_outputs!(nodes[1], 2);
 +      assert_eq!(spend_txn.len(), 1);
 +      check_spends!(spend_txn[0], closing_tx);
 +}
 +
 +fn do_htlc_claim_local_commitment_only(use_dust: bool) {
 +      let nodes = create_network(2, &[None, None]);
 +      let chan = create_announced_chan_between_nodes(&nodes, 0, 1, LocalFeatures::new(), LocalFeatures::new());
 +
 +      let (our_payment_preimage, _) = route_payment(&nodes[0], &[&nodes[1]], if use_dust { 50000 } else { 3000000 });
 +
 +      // Claim the payment, but don't deliver A's commitment_signed, resulting in the HTLC only being
 +      // present in B's local commitment transaction, but none of A's commitment transactions.
-       send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 40000);
++      assert!(nodes[1].node.claim_funds(our_payment_preimage, if use_dust { 50_000 } else { 3_000_000 }));
 +      check_added_monitors!(nodes[1], 1);
 +
 +      let bs_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
 +      nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_updates.update_fulfill_htlcs[0]).unwrap();
 +      let events = nodes[0].node.get_and_clear_pending_events();
 +      assert_eq!(events.len(), 1);
 +      match events[0] {
 +              Event::PaymentSent { payment_preimage } => {
 +                      assert_eq!(payment_preimage, our_payment_preimage);
 +              },
 +              _ => panic!("Unexpected event"),
 +      }
 +
 +      nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_updates.commitment_signed).unwrap();
 +      check_added_monitors!(nodes[0], 1);
 +      let as_updates = get_revoke_commit_msgs!(nodes[0], nodes[1].node.get_our_node_id());
 +      nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_updates.0).unwrap();
 +      check_added_monitors!(nodes[1], 1);
 +
 +      let mut header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
 +      for i in 1..TEST_FINAL_CLTV - CLTV_CLAIM_BUFFER + CHAN_CONFIRM_DEPTH + 1 {
 +              nodes[1].chain_monitor.block_connected_checked(&header, i, &Vec::new(), &Vec::new());
 +              header.prev_blockhash = header.bitcoin_hash();
 +      }
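 +      // The claimable HTLC now only exists in B's local commitment transaction, so once it gets
 +      // within CLTV_CLAIM_BUFFER blocks of expiry B has to broadcast that commitment and claim
 +      // on-chain (via an HTLC-Success tx unless the HTLC was dust).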
 +      test_txn_broadcast(&nodes[1], &chan, None, if use_dust { HTLCType::NONE } else { HTLCType::SUCCESS });
 +      check_closed_broadcast!(nodes[1]);
 +}
 +
 +fn do_htlc_claim_current_remote_commitment_only(use_dust: bool) {
 +      let mut nodes = create_network(2, &[None, None]);
 +      let chan = create_announced_chan_between_nodes(&nodes, 0, 1, LocalFeatures::new(), LocalFeatures::new());
 +
 +      let route = nodes[0].router.get_route(&nodes[1].node.get_our_node_id(), None, &Vec::new(), if use_dust { 50000 } else { 3000000 }, TEST_FINAL_CLTV).unwrap();
 +      let (_, payment_hash) = get_payment_preimage_hash!(nodes[0]);
 +      nodes[0].node.send_payment(route, payment_hash).unwrap();
 +      check_added_monitors!(nodes[0], 1);
 +
 +      let _as_update = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
 +
 +      // As far as A is concerned, the HTLC is now present only in the latest remote commitment
 +      // transaction; however, it is not in A's latest local commitment, so A can simply broadcast
 +      // that to "time out" the HTLC.
 +
 +      let mut header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
 +      for i in 1..TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS + CHAN_CONFIRM_DEPTH + 1 {
 +              nodes[0].chain_monitor.block_connected_checked(&header, i, &Vec::new(), &Vec::new());
 +              header.prev_blockhash = header.bitcoin_hash();
 +      }
 +      test_txn_broadcast(&nodes[0], &chan, None, HTLCType::NONE);
 +      check_closed_broadcast!(nodes[0]);
 +}
 +
 +fn do_htlc_claim_previous_remote_commitment_only(use_dust: bool, check_revoke_no_close: bool) {
 +      let nodes = create_network(3, &[None, None, None]);
 +      let chan = create_announced_chan_between_nodes(&nodes, 0, 1, LocalFeatures::new(), LocalFeatures::new());
 +
 +      // Fail the payment, but don't deliver A's final RAA, resulting in the HTLC only being present
 +      // in B's previous (unrevoked) commitment transaction, but none of A's commitment transactions.
 +      // Also optionally test that we *don't* fail the channel in case the commitment transaction was
 +      // actually revoked.
 +      let htlc_value = if use_dust { 50000 } else { 3000000 };
 +      let (_, our_payment_hash) = route_payment(&nodes[0], &[&nodes[1]], htlc_value);
 +      assert!(nodes[1].node.fail_htlc_backwards(&our_payment_hash));
 +      expect_pending_htlcs_forwardable!(nodes[1]);
 +      check_added_monitors!(nodes[1], 1);
 +
 +      let bs_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
 +      nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &bs_updates.update_fail_htlcs[0]).unwrap();
 +      nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_updates.commitment_signed).unwrap();
 +      check_added_monitors!(nodes[0], 1);
 +      let as_updates = get_revoke_commit_msgs!(nodes[0], nodes[1].node.get_our_node_id());
 +      nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_updates.0).unwrap();
 +      check_added_monitors!(nodes[1], 1);
 +      nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_updates.1).unwrap();
 +      check_added_monitors!(nodes[1], 1);
 +      let bs_revoke_and_ack = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
 +
 +      if check_revoke_no_close {
 +              nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_revoke_and_ack).unwrap();
 +              check_added_monitors!(nodes[0], 1);
 +      }
 +
 +      let mut header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
 +      for i in 1..TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS + CHAN_CONFIRM_DEPTH + 1 {
 +              nodes[0].chain_monitor.block_connected_checked(&header, i, &Vec::new(), &Vec::new());
 +              header.prev_blockhash = header.bitcoin_hash();
 +      }
 +      if !check_revoke_no_close {
 +              test_txn_broadcast(&nodes[0], &chan, None, HTLCType::NONE);
 +              check_closed_broadcast!(nodes[0]);
 +      } else {
 +              let events = nodes[0].node.get_and_clear_pending_events();
 +              assert_eq!(events.len(), 1);
 +              match events[0] {
 +                      Event::PaymentFailed { payment_hash, rejected_by_dest, .. } => {
 +                              assert_eq!(payment_hash, our_payment_hash);
 +                              assert!(rejected_by_dest);
 +                      },
 +                      _ => panic!("Unexpected event"),
 +              }
 +      }
 +}
 +
 +// Test that we close channels on-chain when broadcastable HTLCs reach their timeout window.
 +// There are only a few cases to test here:
 +//  * it's not really normative behavior, but we test that below-dust HTLCs "included" in
 +//    broadcastable commitment transactions result in channel closure,
 +//  * it's included in an unrevoked-but-previous remote commitment transaction,
 +//  * it's included in the latest remote or local commitment transactions.
 +// We test each of the three possible commitment transactions individually and use both dust and
 +// non-dust HTLCs.
 +// Note that we don't bother testing both outbound and inbound HTLC failures for each case, and we
 +// assume they are handled the same across all six cases, as both outbound and inbound failures are
 +// tested for at least one of the cases in other tests.
 +#[test]
 +fn htlc_claim_single_commitment_only_a() {
 +      do_htlc_claim_local_commitment_only(true);
 +      do_htlc_claim_local_commitment_only(false);
 +
 +      do_htlc_claim_current_remote_commitment_only(true);
 +      do_htlc_claim_current_remote_commitment_only(false);
 +}
 +
 +#[test]
 +fn htlc_claim_single_commitment_only_b() {
 +      do_htlc_claim_previous_remote_commitment_only(true, false);
 +      do_htlc_claim_previous_remote_commitment_only(false, false);
 +      do_htlc_claim_previous_remote_commitment_only(true, true);
 +      do_htlc_claim_previous_remote_commitment_only(false, true);
 +}
 +
 +fn run_onion_failure_test<F1,F2>(_name: &str, test_case: u8, nodes: &Vec<Node>, route: &Route, payment_hash: &PaymentHash, callback_msg: F1, callback_node: F2, expected_retryable: bool, expected_error_code: Option<u16>, expected_channel_update: Option<HTLCFailChannelUpdate>)
 +      where F1: for <'a> FnMut(&'a mut msgs::UpdateAddHTLC),
 +                              F2: FnMut(),
 +{
 +      run_onion_failure_test_with_fail_intercept(_name, test_case, nodes, route, payment_hash, callback_msg, |_|{}, callback_node, expected_retryable, expected_error_code, expected_channel_update);
 +}
 +
 +// test_case
 +// 0: node1 fails backward
 +// 1: final node fails backward
 +// 2: payment completed but the user rejects the payment
 +// 3: final node fails backward (but tamper onion payloads from node0)
 +// 100: trigger error in the intermediate node and tamper returning fail_htlc
 +// 200: trigger error in the final node and tamper returning fail_htlc
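 +// For example, in case 100 callback_msg tampers the update_add (0 => 1) to trigger an error at
 +// node1, and callback_fail then rewrites node1's returned update_fail_htlc before node0 sees it.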
 +fn run_onion_failure_test_with_fail_intercept<F1,F2,F3>(_name: &str, test_case: u8, nodes: &Vec<Node>, route: &Route, payment_hash: &PaymentHash, mut callback_msg: F1, mut callback_fail: F2, mut callback_node: F3, expected_retryable: bool, expected_error_code: Option<u16>, expected_channel_update: Option<HTLCFailChannelUpdate>)
 +      where F1: for <'a> FnMut(&'a mut msgs::UpdateAddHTLC),
 +                              F2: for <'a> FnMut(&'a mut msgs::UpdateFailHTLC),
 +                              F3: FnMut(),
 +{
 +
 +      // reset block height
 +      let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
 +      for ix in 0..nodes.len() {
 +              nodes[ix].chain_monitor.block_connected_checked(&header, 1, &Vec::new()[..], &[0; 0]);
 +      }
 +
 +      macro_rules! expect_event {
 +              ($node: expr, $event_type: path) => {{
 +                      let events = $node.node.get_and_clear_pending_events();
 +                      assert_eq!(events.len(), 1);
 +                      match events[0] {
 +                              $event_type { .. } => {},
 +                              _ => panic!("Unexpected event"),
 +                      }
 +              }}
 +      }
 +
 +      macro_rules! expect_htlc_forward {
 +              ($node: expr) => {{
 +                      expect_event!($node, Event::PendingHTLCsForwardable);
 +                      $node.node.process_pending_htlc_forwards();
 +              }}
 +      }
 +
 +      // 0 ~~> 2 send payment
 +      nodes[0].node.send_payment(route.clone(), payment_hash.clone()).unwrap();
 +      check_added_monitors!(nodes[0], 1);
 +      let update_0 = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
 +      // tamper update_add (0 => 1)
 +      let mut update_add_0 = update_0.update_add_htlcs[0].clone();
 +      if test_case == 0 || test_case == 3 || test_case == 100 {
 +              callback_msg(&mut update_add_0);
 +              callback_node();
 +      }
 +      // 0 => 1 update_add & CS
 +      nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &update_add_0).unwrap();
 +      commitment_signed_dance!(nodes[1], nodes[0], &update_0.commitment_signed, false, true);
 +
 +      let update_1_0 = match test_case {
 +              0|100 => { // intermediate node failure; fail backward to 0
 +                      let update_1_0 = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
 +                      assert!(update_1_0.update_fail_htlcs.len()+update_1_0.update_fail_malformed_htlcs.len()==1 && (update_1_0.update_fail_htlcs.len()==1 || update_1_0.update_fail_malformed_htlcs.len()==1));
 +                      update_1_0
 +              },
 +              1|2|3|200 => { // final node failure; forwarding to 2
 +                      assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
 +                      // forwarding on 1
 +                      if test_case != 200 {
 +                              callback_node();
 +                      }
 +                      expect_htlc_forward!(&nodes[1]);
 +
 +                      let update_1 = get_htlc_update_msgs!(nodes[1], nodes[2].node.get_our_node_id());
 +                      check_added_monitors!(&nodes[1], 1);
 +                      assert_eq!(update_1.update_add_htlcs.len(), 1);
 +                      // tamper update_add (1 => 2)
 +                      let mut update_add_1 = update_1.update_add_htlcs[0].clone();
 +                      if test_case != 3 && test_case != 200 {
 +                              callback_msg(&mut update_add_1);
 +                      }
 +
 +                      // 1 => 2
 +                      nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &update_add_1).unwrap();
 +                      commitment_signed_dance!(nodes[2], nodes[1], update_1.commitment_signed, false, true);
 +
 +                      if test_case == 2 || test_case == 200 {
 +                              expect_htlc_forward!(&nodes[2]);
 +                              expect_event!(&nodes[2], Event::PaymentReceived);
 +                              callback_node();
 +                              expect_pending_htlcs_forwardable!(nodes[2]);
 +                      }
 +
 +                      let update_2_1 = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
 +                      if test_case == 2 || test_case == 200 {
 +                              check_added_monitors!(&nodes[2], 1);
 +                      }
 +                      assert!(update_2_1.update_fail_htlcs.len() == 1);
 +
 +                      let mut fail_msg = update_2_1.update_fail_htlcs[0].clone();
 +                      if test_case == 200 {
 +                              callback_fail(&mut fail_msg);
 +                      }
 +
 +                      // 2 => 1
 +                      nodes[1].node.handle_update_fail_htlc(&nodes[2].node.get_our_node_id(), &fail_msg).unwrap();
 +                      commitment_signed_dance!(nodes[1], nodes[2], update_2_1.commitment_signed, true);
 +
 +                      // backward fail on 1
 +                      let update_1_0 = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
 +                      assert!(update_1_0.update_fail_htlcs.len() == 1);
 +                      update_1_0
 +              },
 +              _ => unreachable!(),
 +      };
 +
 +      // 1 => 0 commitment_signed_dance
 +      if update_1_0.update_fail_htlcs.len() > 0 {
 +              let mut fail_msg = update_1_0.update_fail_htlcs[0].clone();
 +              if test_case == 100 {
 +                      callback_fail(&mut fail_msg);
 +              }
 +              nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &fail_msg).unwrap();
 +      } else {
 +              nodes[0].node.handle_update_fail_malformed_htlc(&nodes[1].node.get_our_node_id(), &update_1_0.update_fail_malformed_htlcs[0]).unwrap();
 +      };
 +
 +      commitment_signed_dance!(nodes[0], nodes[1], update_1_0.commitment_signed, false, true);
 +
 +      let events = nodes[0].node.get_and_clear_pending_events();
 +      assert_eq!(events.len(), 1);
 +      if let &Event::PaymentFailed { payment_hash:_, ref rejected_by_dest, ref error_code } = &events[0] {
 +              assert_eq!(*rejected_by_dest, !expected_retryable);
 +              assert_eq!(*error_code, expected_error_code);
 +      } else {
 +              panic!("Unexpected event");
 +      }
 +
 +      let events = nodes[0].node.get_and_clear_pending_msg_events();
 +      if expected_channel_update.is_some() {
 +              assert_eq!(events.len(), 1);
 +              match events[0] {
 +                      MessageSendEvent::PaymentFailureNetworkUpdate { ref update } => {
 +                              match update {
 +                                      &HTLCFailChannelUpdate::ChannelUpdateMessage { .. } => {
 +                                              if let HTLCFailChannelUpdate::ChannelUpdateMessage { .. } = expected_channel_update.unwrap() {} else {
 +                                                      panic!("channel_update not found!");
 +                                              }
 +                                      },
 +                                      &HTLCFailChannelUpdate::ChannelClosed { ref short_channel_id, ref is_permanent } => {
 +                                              if let HTLCFailChannelUpdate::ChannelClosed { short_channel_id: ref expected_short_channel_id, is_permanent: ref expected_is_permanent } = expected_channel_update.unwrap() {
 +                                                      assert!(*short_channel_id == *expected_short_channel_id);
 +                                                      assert!(*is_permanent == *expected_is_permanent);
 +                                              } else {
 +                                                      panic!("Unexpected message event");
 +                                              }
 +                                      },
 +                                      &HTLCFailChannelUpdate::NodeFailure { ref node_id, ref is_permanent } => {
 +                                              if let HTLCFailChannelUpdate::NodeFailure { node_id: ref expected_node_id, is_permanent: ref expected_is_permanent } = expected_channel_update.unwrap() {
 +                                                      assert!(*node_id == *expected_node_id);
 +                                                      assert!(*is_permanent == *expected_is_permanent);
 +                                              } else {
 +                                                      panic!("Unexpected message event");
 +                                              }
 +                                      },
 +                              }
 +                      },
 +                      _ => panic!("Unexpected message event"),
 +              }
 +      } else {
 +              assert_eq!(events.len(), 0);
 +      }
 +}
 +
 +impl msgs::ChannelUpdate {
 +      fn dummy() -> msgs::ChannelUpdate {
 +              use secp256k1::ffi::Signature as FFISignature;
 +              use secp256k1::Signature;
 +              msgs::ChannelUpdate {
 +                      signature: Signature::from(FFISignature::new()),
 +                      contents: msgs::UnsignedChannelUpdate {
 +                              chain_hash: Sha256dHash::hash(&vec![0u8][..]),
 +                              short_channel_id: 0,
 +                              timestamp: 0,
 +                              flags: 0,
 +                              cltv_expiry_delta: 0,
 +                              htlc_minimum_msat: 0,
 +                              fee_base_msat: 0,
 +                              fee_proportional_millionths: 0,
 +                              excess_data: vec![],
 +                      }
 +              }
 +      }
 +}
 +
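 +// dummy() gives test_onion_failure a stand-in channel_update to embed in UPDATE-flagged failure
 +// reasons (via encode_with_len()) and to match expected ChannelUpdateMessage events against.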
 +#[test]
 +fn test_onion_failure() {
 +      use ln::msgs::ChannelUpdate;
 +      use ln::channelmanager::CLTV_FAR_FAR_AWAY;
 +      use secp256k1;
 +
 +      const BADONION: u16 = 0x8000;
 +      const PERM: u16 = 0x4000;
 +      const NODE: u16 = 0x2000;
 +      const UPDATE: u16 = 0x1000;
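 +      // (BOLT 4 failure codes OR these flags with a code number; e.g. PERM|NODE|2 is
 +      // 0x4000 | 0x2000 | 2 == 0x6002.)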
 +
 +      let mut nodes = create_network(3, &[None, None, None]);
 +      for node in nodes.iter() {
 +              *node.keys_manager.override_session_priv.lock().unwrap() = Some(SecretKey::from_slice(&[3; 32]).unwrap());
 +      }
 +      let channels = [create_announced_chan_between_nodes(&nodes, 0, 1, LocalFeatures::new(), LocalFeatures::new()), create_announced_chan_between_nodes(&nodes, 1, 2, LocalFeatures::new(), LocalFeatures::new())];
 +      let (_, payment_hash) = get_payment_preimage_hash!(nodes[0]);
 +      let route = nodes[0].router.get_route(&nodes[2].node.get_our_node_id(), None, &Vec::new(), 40000, TEST_FINAL_CLTV).unwrap();
 +      // positive case
-       send_payment(&nodes[0], &vec!(&nodes[1])[..], max_in_flight);
++      send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 40000, 40_000);
 +
 +      // intermediate node failure
 +      run_onion_failure_test("invalid_realm", 0, &nodes, &route, &payment_hash, |msg| {
 +              let session_priv = SecretKey::from_slice(&[3; 32]).unwrap();
 +              let cur_height = nodes[0].node.latest_block_height.load(Ordering::Acquire) as u32 + 1;
 +              let onion_keys = onion_utils::construct_onion_keys(&Secp256k1::new(), &route, &session_priv).unwrap();
 +              let (mut onion_payloads, _htlc_msat, _htlc_cltv) = onion_utils::build_onion_payloads(&route, cur_height).unwrap();
 +              onion_payloads[0].realm = 3;
 +              msg.onion_routing_packet = onion_utils::construct_onion_packet(onion_payloads, onion_keys, &payment_hash);
 +      }, ||{}, true, Some(PERM|1), Some(msgs::HTLCFailChannelUpdate::ChannelClosed{short_channel_id: channels[1].0.contents.short_channel_id, is_permanent: true}));//XXX incremented channels idx here
 +
 +      // final node failure
 +      run_onion_failure_test("invalid_realm", 3, &nodes, &route, &payment_hash, |msg| {
 +              let session_priv = SecretKey::from_slice(&[3; 32]).unwrap();
 +              let cur_height = nodes[0].node.latest_block_height.load(Ordering::Acquire) as u32 + 1;
 +              let onion_keys = onion_utils::construct_onion_keys(&Secp256k1::new(), &route, &session_priv).unwrap();
 +              let (mut onion_payloads, _htlc_msat, _htlc_cltv) = onion_utils::build_onion_payloads(&route, cur_height).unwrap();
 +              onion_payloads[1].realm = 3;
 +              msg.onion_routing_packet = onion_utils::construct_onion_packet(onion_payloads, onion_keys, &payment_hash);
 +      }, ||{}, false, Some(PERM|1), Some(msgs::HTLCFailChannelUpdate::ChannelClosed{short_channel_id: channels[1].0.contents.short_channel_id, is_permanent: true}));
 +
 +      // The following three failure types, run via run_onion_failure_test_with_fail_intercept(),
 +      // only test the origin node receiving simulated fail messages.
 +      // intermediate node failure
 +      run_onion_failure_test_with_fail_intercept("temporary_node_failure", 100, &nodes, &route, &payment_hash, |msg| {
 +              // trigger error
 +              msg.amount_msat -= 1;
 +      }, |msg| {
 +              // and tamper returning error message
 +              let session_priv = SecretKey::from_slice(&[3; 32]).unwrap();
 +              let onion_keys = onion_utils::construct_onion_keys(&Secp256k1::new(), &route, &session_priv).unwrap();
 +              msg.reason = onion_utils::build_first_hop_failure_packet(&onion_keys[0].shared_secret[..], NODE|2, &[0;0]);
 +      }, ||{}, true, Some(NODE|2), Some(msgs::HTLCFailChannelUpdate::NodeFailure{node_id: route.hops[0].pubkey, is_permanent: false}));
 +
 +      // final node failure
 +      run_onion_failure_test_with_fail_intercept("temporary_node_failure", 200, &nodes, &route, &payment_hash, |_msg| {}, |msg| {
 +              // and tamper returning error message
 +              let session_priv = SecretKey::from_slice(&[3; 32]).unwrap();
 +              let onion_keys = onion_utils::construct_onion_keys(&Secp256k1::new(), &route, &session_priv).unwrap();
 +              msg.reason = onion_utils::build_first_hop_failure_packet(&onion_keys[1].shared_secret[..], NODE|2, &[0;0]);
 +      }, ||{
 +              nodes[2].node.fail_htlc_backwards(&payment_hash);
 +      }, true, Some(NODE|2), Some(msgs::HTLCFailChannelUpdate::NodeFailure{node_id: route.hops[1].pubkey, is_permanent: false}));
 +
 +      // intermediate node failure
 +      run_onion_failure_test_with_fail_intercept("permanent_node_failure", 100, &nodes, &route, &payment_hash, |msg| {
 +              msg.amount_msat -= 1;
 +      }, |msg| {
 +              let session_priv = SecretKey::from_slice(&[3; 32]).unwrap();
 +              let onion_keys = onion_utils::construct_onion_keys(&Secp256k1::new(), &route, &session_priv).unwrap();
 +              msg.reason = onion_utils::build_first_hop_failure_packet(&onion_keys[0].shared_secret[..], PERM|NODE|2, &[0;0]);
 +      }, ||{}, true, Some(PERM|NODE|2), Some(msgs::HTLCFailChannelUpdate::NodeFailure{node_id: route.hops[0].pubkey, is_permanent: true}));
 +
 +      // final node failure
 +      run_onion_failure_test_with_fail_intercept("permanent_node_failure", 200, &nodes, &route, &payment_hash, |_msg| {}, |msg| {
 +              let session_priv = SecretKey::from_slice(&[3; 32]).unwrap();
 +              let onion_keys = onion_utils::construct_onion_keys(&Secp256k1::new(), &route, &session_priv).unwrap();
 +              msg.reason = onion_utils::build_first_hop_failure_packet(&onion_keys[1].shared_secret[..], PERM|NODE|2, &[0;0]);
 +      }, ||{
 +              nodes[2].node.fail_htlc_backwards(&payment_hash);
 +      }, false, Some(PERM|NODE|2), Some(msgs::HTLCFailChannelUpdate::NodeFailure{node_id: route.hops[1].pubkey, is_permanent: true}));
 +
 +      // intermediate node failure
 +      run_onion_failure_test_with_fail_intercept("required_node_feature_missing", 100, &nodes, &route, &payment_hash, |msg| {
 +              msg.amount_msat -= 1;
 +      }, |msg| {
 +              let session_priv = SecretKey::from_slice(&[3; 32]).unwrap();
 +              let onion_keys = onion_utils::construct_onion_keys(&Secp256k1::new(), &route, &session_priv).unwrap();
 +              msg.reason = onion_utils::build_first_hop_failure_packet(&onion_keys[0].shared_secret[..], PERM|NODE|3, &[0;0]);
 +      }, ||{
 +              nodes[2].node.fail_htlc_backwards(&payment_hash);
 +      }, true, Some(PERM|NODE|3), Some(msgs::HTLCFailChannelUpdate::NodeFailure{node_id: route.hops[0].pubkey, is_permanent: true}));
 +
 +      // final node failure
 +      run_onion_failure_test_with_fail_intercept("required_node_feature_missing", 200, &nodes, &route, &payment_hash, |_msg| {}, |msg| {
 +              let session_priv = SecretKey::from_slice(&[3; 32]).unwrap();
 +              let onion_keys = onion_utils::construct_onion_keys(&Secp256k1::new(), &route, &session_priv).unwrap();
 +              msg.reason = onion_utils::build_first_hop_failure_packet(&onion_keys[1].shared_secret[..], PERM|NODE|3, &[0;0]);
 +      }, ||{
 +              nodes[2].node.fail_htlc_backwards(&payment_hash);
 +      }, false, Some(PERM|NODE|3), Some(msgs::HTLCFailChannelUpdate::NodeFailure{node_id: route.hops[1].pubkey, is_permanent: true}));
 +
 +      run_onion_failure_test("invalid_onion_version", 0, &nodes, &route, &payment_hash, |msg| { msg.onion_routing_packet.version = 1; }, ||{}, true,
 +              Some(BADONION|PERM|4), None);
 +
 +      run_onion_failure_test("invalid_onion_hmac", 0, &nodes, &route, &payment_hash, |msg| { msg.onion_routing_packet.hmac = [3; 32]; }, ||{}, true,
 +              Some(BADONION|PERM|5), None);
 +
 +      run_onion_failure_test("invalid_onion_key", 0, &nodes, &route, &payment_hash, |msg| { msg.onion_routing_packet.public_key = Err(secp256k1::Error::InvalidPublicKey);}, ||{}, true,
 +              Some(BADONION|PERM|6), None);
 +
 +      run_onion_failure_test_with_fail_intercept("temporary_channel_failure", 100, &nodes, &route, &payment_hash, |msg| {
 +              msg.amount_msat -= 1;
 +      }, |msg| {
 +              let session_priv = SecretKey::from_slice(&[3; 32]).unwrap();
 +              let onion_keys = onion_utils::construct_onion_keys(&Secp256k1::new(), &route, &session_priv).unwrap();
 +              msg.reason = onion_utils::build_first_hop_failure_packet(&onion_keys[0].shared_secret[..], UPDATE|7, &ChannelUpdate::dummy().encode_with_len()[..]);
 +      }, ||{}, true, Some(UPDATE|7), Some(msgs::HTLCFailChannelUpdate::ChannelUpdateMessage{msg: ChannelUpdate::dummy()}));
 +
 +      run_onion_failure_test_with_fail_intercept("permanent_channel_failure", 100, &nodes, &route, &payment_hash, |msg| {
 +              msg.amount_msat -= 1;
 +      }, |msg| {
 +              let session_priv = SecretKey::from_slice(&[3; 32]).unwrap();
 +              let onion_keys = onion_utils::construct_onion_keys(&Secp256k1::new(), &route, &session_priv).unwrap();
 +              msg.reason = onion_utils::build_first_hop_failure_packet(&onion_keys[0].shared_secret[..], PERM|8, &[0;0]);
 +              // short_channel_id from the processing node
 +      }, ||{}, true, Some(PERM|8), Some(msgs::HTLCFailChannelUpdate::ChannelClosed{short_channel_id: channels[1].0.contents.short_channel_id, is_permanent: true}));
 +
 +      run_onion_failure_test_with_fail_intercept("required_channel_feature_missing", 100, &nodes, &route, &payment_hash, |msg| {
 +              msg.amount_msat -= 1;
 +      }, |msg| {
 +              let session_priv = SecretKey::from_slice(&[3; 32]).unwrap();
 +              let onion_keys = onion_utils::construct_onion_keys(&Secp256k1::new(), &route, &session_priv).unwrap();
 +              msg.reason = onion_utils::build_first_hop_failure_packet(&onion_keys[0].shared_secret[..], PERM|9, &[0;0]);
 +              // short_channel_id from the processing node
 +      }, ||{}, true, Some(PERM|9), Some(msgs::HTLCFailChannelUpdate::ChannelClosed{short_channel_id: channels[1].0.contents.short_channel_id, is_permanent: true}));
 +
 +      let mut bogus_route = route.clone();
 +      bogus_route.hops[1].short_channel_id -= 1;
 +      run_onion_failure_test("unknown_next_peer", 0, &nodes, &bogus_route, &payment_hash, |_| {}, ||{}, true, Some(PERM|10),
 +        Some(msgs::HTLCFailChannelUpdate::ChannelClosed{short_channel_id: bogus_route.hops[1].short_channel_id, is_permanent:true}));
 +
 +      let amt_to_forward = nodes[1].node.channel_state.lock().unwrap().by_id.get(&channels[1].2).unwrap().get_their_htlc_minimum_msat() - 1;
 +      let mut bogus_route = route.clone();
 +      let route_len = bogus_route.hops.len();
 +      bogus_route.hops[route_len-1].fee_msat = amt_to_forward;
 +      run_onion_failure_test("amount_below_minimum", 0, &nodes, &bogus_route, &payment_hash, |_| {}, ||{}, true, Some(UPDATE|11), Some(msgs::HTLCFailChannelUpdate::ChannelUpdateMessage{msg: ChannelUpdate::dummy()}));
 +
 +      //TODO: with new config API, we will be able to generate both valid and
 +      //invalid channel_update cases.
 +      run_onion_failure_test("fee_insufficient", 0, &nodes, &route, &payment_hash, |msg| {
 +              msg.amount_msat -= 1;
 +      }, || {}, true, Some(UPDATE|12), Some(msgs::HTLCFailChannelUpdate::ChannelClosed { short_channel_id: channels[0].0.contents.short_channel_id, is_permanent: true}));
 +
 +      run_onion_failure_test("incorrect_cltv_expiry", 0, &nodes, &route, &payment_hash, |msg| {
 +              // need to violate: cltv_expiry - cltv_expiry_delta >= outgoing_cltv_value
 +              msg.cltv_expiry -= 1;
 +      }, || {}, true, Some(UPDATE|13), Some(msgs::HTLCFailChannelUpdate::ChannelClosed { short_channel_id: channels[0].0.contents.short_channel_id, is_permanent: true}));
 +
 +      run_onion_failure_test("expiry_too_soon", 0, &nodes, &route, &payment_hash, |msg| {
 +              let height = msg.cltv_expiry - CLTV_CLAIM_BUFFER - LATENCY_GRACE_PERIOD_BLOCKS + 1;
 +              let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
 +              nodes[1].chain_monitor.block_connected_checked(&header, height, &Vec::new()[..], &[0; 0]);
 +      }, ||{}, true, Some(UPDATE|14), Some(msgs::HTLCFailChannelUpdate::ChannelUpdateMessage{msg: ChannelUpdate::dummy()}));
 +
 +      run_onion_failure_test("unknown_payment_hash", 2, &nodes, &route, &payment_hash, |_| {}, || {
 +              nodes[2].node.fail_htlc_backwards(&payment_hash);
 +      }, false, Some(PERM|15), None);
 +
 +      run_onion_failure_test("final_expiry_too_soon", 1, &nodes, &route, &payment_hash, |msg| {
 +              let height = msg.cltv_expiry - CLTV_CLAIM_BUFFER - LATENCY_GRACE_PERIOD_BLOCKS + 1;
 +              let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
 +              nodes[2].chain_monitor.block_connected_checked(&header, height, &Vec::new()[..], &[0; 0]);
 +      }, || {}, true, Some(17), None);
 +
 +      run_onion_failure_test("final_incorrect_cltv_expiry", 1, &nodes, &route, &payment_hash, |_| {}, || {
 +              for (_, pending_forwards) in nodes[1].node.channel_state.lock().unwrap().borrow_parts().forward_htlcs.iter_mut() {
 +                      for f in pending_forwards.iter_mut() {
 +                              match f {
 +                                      &mut HTLCForwardInfo::AddHTLC { ref mut forward_info, .. } =>
 +                                              forward_info.outgoing_cltv_value += 1,
 +                                      _ => {},
 +                              }
 +                      }
 +              }
 +      }, true, Some(18), None);
 +
 +      run_onion_failure_test("final_incorrect_htlc_amount", 1, &nodes, &route, &payment_hash, |_| {}, || {
 +              // violate amt_to_forward > msg.amount_msat
 +              for (_, pending_forwards) in nodes[1].node.channel_state.lock().unwrap().borrow_parts().forward_htlcs.iter_mut() {
 +                      for f in pending_forwards.iter_mut() {
 +                              match f {
 +                                      &mut HTLCForwardInfo::AddHTLC { ref mut forward_info, .. } =>
 +                                              forward_info.amt_to_forward -= 1,
 +                                      _ => {},
 +                              }
 +                      }
 +              }
 +      }, true, Some(19), None);
 +
 +      run_onion_failure_test("channel_disabled", 0, &nodes, &route, &payment_hash, |_| {}, || {
 +              // simulate a disconnection of the channel between nodes[1] and nodes[2]
 +              nodes[1].node.peer_disconnected(&nodes[2].node.get_our_node_id(), false);
 +              nodes[2].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
 +      }, true, Some(UPDATE|20), Some(msgs::HTLCFailChannelUpdate::ChannelUpdateMessage{msg: ChannelUpdate::dummy()}));
 +      reconnect_nodes(&nodes[1], &nodes[2], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
 +
 +      run_onion_failure_test("expiry_too_far", 0, &nodes, &route, &payment_hash, |msg| {
 +              let session_priv = SecretKey::from_slice(&[3; 32]).unwrap();
 +              let mut route = route.clone();
 +              let height = 1;
 +              route.hops[1].cltv_expiry_delta += CLTV_FAR_FAR_AWAY + route.hops[0].cltv_expiry_delta + 1;
 +              let onion_keys = onion_utils::construct_onion_keys(&Secp256k1::new(), &route, &session_priv).unwrap();
 +              let (onion_payloads, _, htlc_cltv) = onion_utils::build_onion_payloads(&route, height).unwrap();
 +              let onion_packet = onion_utils::construct_onion_packet(onion_payloads, onion_keys, &payment_hash);
 +              msg.cltv_expiry = htlc_cltv;
 +              msg.onion_routing_packet = onion_packet;
 +      }, ||{}, true, Some(21), None);
 +}
 +
 +#[test]
 +#[should_panic]
 +fn bolt2_open_channel_sending_node_checks_part1() { //This test needs to be on its own as we are catching a panic
 +      let nodes = create_network(2, &[None, None]);
 +      //Force duplicate channel ids
 +      for node in nodes.iter() {
 +              *node.keys_manager.override_channel_id_priv.lock().unwrap() = Some([0; 32]);
 +      }
 +
 +      // BOLT #2 spec: Sending node must ensure temporary_channel_id is unique from any other channel ID with the same peer.
 +      let channel_value_satoshis=10000;
 +      let push_msat=10001;
 +      nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), channel_value_satoshis, push_msat, 42).unwrap();
 +      let node0_to_1_send_open_channel = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
 +      nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), LocalFeatures::new(), &node0_to_1_send_open_channel).unwrap();
 +
 +      //Create a second channel with a channel_id collision
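 +      // (override_channel_id_priv above forces both channels to use the same id, so this second
 +      // create_channel trips the duplicate-id case this #[should_panic] test expects.)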
 +      assert!(nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), channel_value_satoshis, push_msat, 42).is_err());
 +}
 +
 +#[test]
 +fn bolt2_open_channel_sending_node_checks_part2() {
 +      let nodes = create_network(2, &[None, None]);
 +
 +      // BOLT #2 spec: Sending node must set funding_satoshis to less than 2^24 satoshis
 +      let channel_value_satoshis=1 << 24; // 2^24 sat (Rust's ^ is XOR, not exponentiation)
 +      let push_msat=10001;
 +      assert!(nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), channel_value_satoshis, push_msat, 42).is_err());
 +
 +      // BOLT #2 spec: Sending node must set push_msat to equal or less than 1000 * funding_satoshis
 +      let channel_value_satoshis=10000;
 +      // Test when push_msat is one more than 1000 * funding_satoshis.
 +      let push_msat=1000*channel_value_satoshis+1;
 +      assert!(nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), channel_value_satoshis, push_msat, 42).is_err());
 +
 +	// BOLT #2 spec: Sending node must set channel_reserve_satoshis greater than or equal to dust_limit_satoshis
 +      let channel_value_satoshis=10000;
 +      let push_msat=10001;
 +      assert!(nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), channel_value_satoshis, push_msat, 42).is_ok()); //Create a valid channel
 +      let node0_to_1_send_open_channel = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
 +      assert!(node0_to_1_send_open_channel.channel_reserve_satoshis>=node0_to_1_send_open_channel.dust_limit_satoshis);
 +
 +      // BOLT #2 spec: Sending node must set undefined bits in channel_flags to 0
 +	// Only the least-significant bit of channel_flags is currently defined, so channel_flags can only take one of two values: 0 or 1
 +      assert!(node0_to_1_send_open_channel.channel_flags<=1);
 +
 +      // BOLT #2 spec: Sending node should set to_self_delay sufficient to ensure the sender can irreversibly spend a commitment transaction output, in case of misbehaviour by the receiver.
 +      assert!(BREAKDOWN_TIMEOUT>0);
 +      assert!(node0_to_1_send_open_channel.to_self_delay==BREAKDOWN_TIMEOUT);
 +
 +      // BOLT #2 spec: Sending node must ensure the chain_hash value identifies the chain it wishes to open the channel within.
 +      let chain_hash=genesis_block(Network::Testnet).header.bitcoin_hash();
 +      assert_eq!(node0_to_1_send_open_channel.chain_hash,chain_hash);
 +
 +      // BOLT #2 spec: Sending node must set funding_pubkey, revocation_basepoint, htlc_basepoint, payment_basepoint, and delayed_payment_basepoint to valid DER-encoded, compressed, secp256k1 pubkeys.
 +      assert!(PublicKey::from_slice(&node0_to_1_send_open_channel.funding_pubkey.serialize()).is_ok());
 +      assert!(PublicKey::from_slice(&node0_to_1_send_open_channel.revocation_basepoint.serialize()).is_ok());
 +      assert!(PublicKey::from_slice(&node0_to_1_send_open_channel.htlc_basepoint.serialize()).is_ok());
 +      assert!(PublicKey::from_slice(&node0_to_1_send_open_channel.payment_basepoint.serialize()).is_ok());
 +      assert!(PublicKey::from_slice(&node0_to_1_send_open_channel.delayed_payment_basepoint.serialize()).is_ok());
 +}
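 +
 +// Editor's sketch of the BOLT 2 sender-side open_channel checks exercised above, as a
 +// self-contained predicate; parameter names are hypothetical and this is not the
 +// library's actual validation code.
 +fn sketch_open_channel_params_ok(funding_satoshis: u64, push_msat: u64,
 +		dust_limit_satoshis: u64, channel_reserve_satoshis: u64) -> bool {
 +	funding_satoshis < (1 << 24)                               // funding_satoshis must be < 2^24
 +		&& push_msat <= 1000 * funding_satoshis            // push_msat <= 1000 * funding_satoshis
 +		&& channel_reserve_satoshis >= dust_limit_satoshis // reserve >= dust limit
 +}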
 +
 +// BOLT 2 Requirements for the Sender when constructing and sending an update_add_htlc message.
 +// BOLT 2 Requirement: MUST NOT offer amount_msat it cannot pay for in the remote commitment transaction at the current feerate_per_kw (see "Updating Fees") while maintaining its channel reserve.
 +//TODO: This does not appear to be explicitly enforced when sending an HTLC, but as the fee aspects of the BOLT specs are in flux, this is left as a TODO.
 +
 +#[test]
 +fn test_update_add_htlc_bolt2_sender_value_below_minimum_msat() {
 +      //BOLT2 Requirement: MUST offer amount_msat greater than 0.
 +      //BOLT2 Requirement: MUST NOT offer amount_msat below the receiving node's htlc_minimum_msat (same validation check catches both of these)
 +      let mut nodes = create_network(2, &[None, None]);
 +      let _chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000, LocalFeatures::new(), LocalFeatures::new());
 +      let mut route = nodes[0].router.get_route(&nodes[1].node.get_our_node_id(), None, &[], 100000, TEST_FINAL_CLTV).unwrap();
 +      let (_, our_payment_hash) = get_payment_preimage_hash!(nodes[0]);
 +
 +      route.hops[0].fee_msat = 0;
 +
 +      let err = nodes[0].node.send_payment(route, our_payment_hash);
 +
 +      if let Err(APIError::ChannelUnavailable{err}) = err {
 +              assert_eq!(err, "Cannot send less than their minimum HTLC value");
 +      } else {
 +              assert!(false);
 +      }
 +}
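 +
 +// Sketch of the sender-side amount check implied by the test above (assumed logic,
 +// not the library's exact code): zero-value HTLCs and HTLCs below the peer's
 +// htlc_minimum_msat are both caught by the same validation.
 +fn sketch_amount_sendable(amount_msat: u64, their_htlc_minimum_msat: u64) -> bool {
 +	amount_msat != 0 && amount_msat >= their_htlc_minimum_msat
 +}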
 +
 +#[test]
 +fn test_update_add_htlc_bolt2_sender_cltv_expiry_too_high() {
 +      //BOLT 2 Requirement: MUST set cltv_expiry less than 500000000.
 +      //It is enforced when constructing a route.
 +      let mut nodes = create_network(2, &[None, None]);
 +      let _chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 0, LocalFeatures::new(), LocalFeatures::new());
 +      let route = nodes[0].router.get_route(&nodes[1].node.get_our_node_id(), None, &[], 100000000, 500000001).unwrap();
 +      let (_, our_payment_hash) = get_payment_preimage_hash!(nodes[0]);
 +
 +      let err = nodes[0].node.send_payment(route, our_payment_hash);
 +
 +      if let Err(APIError::RouteError{err}) = err {
 +              assert_eq!(err, "Channel CLTV overflowed?!");
 +      } else {
 +              assert!(false);
 +      }
 +}
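 +
 +// Sketch of the cltv_expiry ceiling from BOLT 2: 500000000 is where locktime values stop
 +// being interpreted as block heights and start being interpreted as unix timestamps.
 +fn sketch_cltv_expiry_ok(cltv_expiry: u32) -> bool {
 +	cltv_expiry < 500_000_000
 +}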
 +
 +#[test]
 +fn test_update_add_htlc_bolt2_sender_exceed_max_htlc_num_and_htlc_id_increment() {
 +      //BOLT 2 Requirement: if result would be offering more than the remote's max_accepted_htlcs HTLCs, in the remote commitment transaction: MUST NOT add an HTLC.
 +      //BOLT 2 Requirement: for the first HTLC it offers MUST set id to 0.
 +      //BOLT 2 Requirement: MUST increase the value of id by 1 for each successive offer.
 +      let mut nodes = create_network(2, &[None, None]);
 +      let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 0, LocalFeatures::new(), LocalFeatures::new());
 +      let max_accepted_htlcs = nodes[1].node.channel_state.lock().unwrap().by_id.get(&chan.2).unwrap().their_max_accepted_htlcs as u64;
 +
 +      for i in 0..max_accepted_htlcs {
 +              let route = nodes[0].router.get_route(&nodes[1].node.get_our_node_id(), None, &[], 100000, TEST_FINAL_CLTV).unwrap();
 +              let (_, our_payment_hash) = get_payment_preimage_hash!(nodes[0]);
 +              let payment_event = {
 +                      nodes[0].node.send_payment(route, our_payment_hash).unwrap();
 +                      check_added_monitors!(nodes[0], 1);
 +
 +                      let mut events = nodes[0].node.get_and_clear_pending_msg_events();
 +                      assert_eq!(events.len(), 1);
 +                      if let MessageSendEvent::UpdateHTLCs { node_id: _, updates: msgs::CommitmentUpdate{ update_add_htlcs: ref htlcs, .. }, } = events[0] {
 +                              assert_eq!(htlcs[0].htlc_id, i);
 +                      } else {
 +                              assert!(false);
 +                      }
 +                      SendEvent::from_event(events.remove(0))
 +              };
 +              nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]).unwrap();
 +              check_added_monitors!(nodes[1], 0);
 +              commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
 +
 +              expect_pending_htlcs_forwardable!(nodes[1]);
 +              expect_payment_received!(nodes[1], our_payment_hash, 100000);
 +      }
 +      let route = nodes[0].router.get_route(&nodes[1].node.get_our_node_id(), None, &[], 100000, TEST_FINAL_CLTV).unwrap();
 +      let (_, our_payment_hash) = get_payment_preimage_hash!(nodes[0]);
 +      let err = nodes[0].node.send_payment(route, our_payment_hash);
 +
 +      if let Err(APIError::ChannelUnavailable{err}) = err {
 +              assert_eq!(err, "Cannot push more than their max accepted HTLCs");
 +      } else {
 +              assert!(false);
 +      }
 +}
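 +
 +// Hypothetical sketch of the offer-side bookkeeping the test above exercises: ids start
 +// at 0, increase by one per successive offer, and offers stop at the peer's
 +// max_accepted_htlcs. This models the rule only; the real state lives inside Channel.
 +struct SketchHtlcOfferer { next_htlc_id: u64, outbound_htlcs: u64, their_max_accepted_htlcs: u64 }
 +impl SketchHtlcOfferer {
 +	fn offer(&mut self) -> Result<u64, &'static str> {
 +		if self.outbound_htlcs >= self.their_max_accepted_htlcs {
 +			return Err("Cannot push more than their max accepted HTLCs");
 +		}
 +		let id = self.next_htlc_id; // first HTLC offered gets id 0
 +		self.next_htlc_id += 1;     // each successive offer increases id by 1
 +		self.outbound_htlcs += 1;
 +		Ok(id)
 +	}
 +}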
 +
 +#[test]
 +fn test_update_add_htlc_bolt2_sender_exceed_max_htlc_value_in_flight() {
 +      //BOLT 2 Requirement: if the sum of total offered HTLCs would exceed the remote's max_htlc_value_in_flight_msat: MUST NOT add an HTLC.
 +      let mut nodes = create_network(2, &[None, None]);
 +      let channel_value = 100000;
 +      let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, channel_value, 0, LocalFeatures::new(), LocalFeatures::new());
 +      let max_in_flight = get_channel_value_stat!(nodes[0], chan.2).their_max_htlc_value_in_flight_msat;
 +
-       send_payment(&nodes[0], &[&nodes[1]], max_in_flight);
++      send_payment(&nodes[0], &vec!(&nodes[1])[..], max_in_flight, max_in_flight);
 +
 +      let route = nodes[0].router.get_route(&nodes[1].node.get_our_node_id(), None, &[], max_in_flight+1, TEST_FINAL_CLTV).unwrap();
 +      let (_, our_payment_hash) = get_payment_preimage_hash!(nodes[0]);
 +      let err = nodes[0].node.send_payment(route, our_payment_hash);
 +
 +      if let Err(APIError::ChannelUnavailable{err}) = err {
 +              assert_eq!(err, "Cannot send value that would put us over the max HTLC value in flight our peer will accept");
 +      } else {
 +              assert!(false);
 +      }
 +
-       nodes[1].node.claim_funds(our_payment_preimage);
++      send_payment(&nodes[0], &[&nodes[1]], max_in_flight, max_in_flight);
 +}
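 +
 +// Sketch of the in-flight cap checked above, under the assumption that the sender tracks
 +// the msat sum of its pending offered HTLCs; not the library's actual accounting.
 +fn sketch_fits_in_flight(pending_offered_msat: u64, amount_msat: u64,
 +		their_max_htlc_value_in_flight_msat: u64) -> bool {
 +	pending_offered_msat + amount_msat <= their_max_htlc_value_in_flight_msat
 +}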
 +
 +// BOLT 2 Requirements for the Receiver when handling an update_add_htlc message.
 +#[test]
 +fn test_update_add_htlc_bolt2_receiver_check_amount_received_more_than_min() {
 +      //BOLT2 Requirement: receiving an amount_msat equal to 0, OR less than its own htlc_minimum_msat -> SHOULD fail the channel.
 +      let mut nodes = create_network(2, &[None, None]);
 +      let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000, LocalFeatures::new(), LocalFeatures::new());
 +      let htlc_minimum_msat: u64;
 +      {
 +              let chan_lock = nodes[0].node.channel_state.lock().unwrap();
 +              let channel = chan_lock.by_id.get(&chan.2).unwrap();
 +              htlc_minimum_msat = channel.get_our_htlc_minimum_msat();
 +      }
 +      let route = nodes[0].router.get_route(&nodes[1].node.get_our_node_id(), None, &[], htlc_minimum_msat, TEST_FINAL_CLTV).unwrap();
 +      let (_, our_payment_hash) = get_payment_preimage_hash!(nodes[0]);
 +      nodes[0].node.send_payment(route, our_payment_hash).unwrap();
 +      check_added_monitors!(nodes[0], 1);
 +      let mut updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
 +      updates.update_add_htlcs[0].amount_msat = htlc_minimum_msat-1;
 +      let err = nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);
 +      if let Err(msgs::LightningError{err, action: msgs::ErrorAction::SendErrorMessage {..}}) = err {
 +              assert_eq!(err, "Remote side tried to send less than our minimum HTLC value");
 +      } else {
 +              assert!(false);
 +      }
 +      assert!(nodes[1].node.list_channels().is_empty());
 +      check_closed_broadcast!(nodes[1]);
 +}
 +
 +#[test]
 +fn test_update_add_htlc_bolt2_receiver_sender_can_afford_amount_sent() {
 +      //BOLT2 Requirement: receiving an amount_msat that the sending node cannot afford at the current feerate_per_kw (while maintaining its channel reserve): SHOULD fail the channel
 +      let mut nodes = create_network(2, &[None, None]);
 +      let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000, LocalFeatures::new(), LocalFeatures::new());
 +
 +      let their_channel_reserve = get_channel_value_stat!(nodes[0], chan.2).channel_reserve_msat;
 +
 +      let route = nodes[0].router.get_route(&nodes[1].node.get_our_node_id(), None, &[], 5000000-their_channel_reserve, TEST_FINAL_CLTV).unwrap();
 +      let (_, our_payment_hash) = get_payment_preimage_hash!(nodes[0]);
 +      nodes[0].node.send_payment(route, our_payment_hash).unwrap();
 +      check_added_monitors!(nodes[0], 1);
 +      let mut updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
 +
 +      updates.update_add_htlcs[0].amount_msat = 5000000-their_channel_reserve+1;
 +      let err = nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);
 +
 +      if let Err(msgs::LightningError{err, action: msgs::ErrorAction::SendErrorMessage {..}}) = err {
 +              assert_eq!(err, "Remote HTLC add would put them over their reserve value");
 +      } else {
 +              assert!(false);
 +      }
 +
 +      assert!(nodes[1].node.list_channels().is_empty());
 +      check_closed_broadcast!(nodes[1]);
 +}
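 +
 +// Rough sketch of the affordability rule tested above, assuming a simplified balance
 +// model with no fee accounting (the real check also charges the funder the commitment
 +// tx fee at the current feerate_per_kw).
 +fn sketch_sender_can_afford(sender_balance_msat: u64, amount_msat: u64,
 +		channel_reserve_msat: u64) -> bool {
 +	sender_balance_msat >= amount_msat + channel_reserve_msat
 +}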
 +
 +#[test]
 +fn test_update_add_htlc_bolt2_receiver_check_max_htlc_limit() {
 +      //BOLT 2 Requirement: if a sending node adds more than its max_accepted_htlcs HTLCs to its local commitment transaction: SHOULD fail the channel
 +      //BOLT 2 Requirement: MUST allow multiple HTLCs with the same payment_hash.
 +      let mut nodes = create_network(2, &[None, None]);
 +      let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000, LocalFeatures::new(), LocalFeatures::new());
 +      let route = nodes[0].router.get_route(&nodes[1].node.get_our_node_id(), None, &[], 3999999, TEST_FINAL_CLTV).unwrap();
 +      let (_, our_payment_hash) = get_payment_preimage_hash!(nodes[0]);
 +
 +      let session_priv = SecretKey::from_slice(&{
 +              let mut session_key = [0; 32];
 +              let mut rng = thread_rng();
 +              rng.fill_bytes(&mut session_key);
 +              session_key
 +      }).expect("RNG is bad!");
 +
 +      let cur_height = nodes[0].node.latest_block_height.load(Ordering::Acquire) as u32 + 1;
 +      let onion_keys = onion_utils::construct_onion_keys(&Secp256k1::signing_only(), &route, &session_priv).unwrap();
 +      let (onion_payloads, _htlc_msat, htlc_cltv) = onion_utils::build_onion_payloads(&route, cur_height).unwrap();
 +      let onion_packet = onion_utils::construct_onion_packet(onion_payloads, onion_keys, &our_payment_hash);
 +
 +      let mut msg = msgs::UpdateAddHTLC {
 +              channel_id: chan.2,
 +              htlc_id: 0,
 +              amount_msat: 1000,
 +              payment_hash: our_payment_hash,
 +              cltv_expiry: htlc_cltv,
 +              onion_routing_packet: onion_packet.clone(),
 +      };
 +
 +      for i in 0..super::channel::OUR_MAX_HTLCS {
 +              msg.htlc_id = i as u64;
 +              nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &msg).unwrap();
 +      }
 +      msg.htlc_id = (super::channel::OUR_MAX_HTLCS) as u64;
 +      let err = nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &msg);
 +
 +      if let Err(msgs::LightningError{err, action: msgs::ErrorAction::SendErrorMessage {..}}) = err {
 +              assert_eq!(err, "Remote tried to push more than our max accepted HTLCs");
 +      } else {
 +              assert!(false);
 +      }
 +
 +      assert!(nodes[1].node.list_channels().is_empty());
 +      check_closed_broadcast!(nodes[1]);
 +}
 +
 +#[test]
 +fn test_update_add_htlc_bolt2_receiver_check_max_in_flight_msat() {
 +	//BOLT2 Requirement: if a sending node adds more than its max_htlc_value_in_flight_msat worth of offered HTLCs to its local commitment transaction: SHOULD fail the channel
 +      let mut nodes = create_network(2, &[None, None]);
 +      let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 1000000, LocalFeatures::new(), LocalFeatures::new());
 +      let route = nodes[0].router.get_route(&nodes[1].node.get_our_node_id(), None, &[], 1000000, TEST_FINAL_CLTV).unwrap();
 +      let (_, our_payment_hash) = get_payment_preimage_hash!(nodes[0]);
 +      nodes[0].node.send_payment(route, our_payment_hash).unwrap();
 +      check_added_monitors!(nodes[0], 1);
 +      let mut updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
 +      updates.update_add_htlcs[0].amount_msat = get_channel_value_stat!(nodes[1], chan.2).their_max_htlc_value_in_flight_msat + 1;
 +      let err = nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);
 +
 +      if let Err(msgs::LightningError{err, action: msgs::ErrorAction::SendErrorMessage {..}}) = err {
 +              assert_eq!(err,"Remote HTLC add would put them over our max HTLC value");
 +      } else {
 +              assert!(false);
 +      }
 +
 +      assert!(nodes[1].node.list_channels().is_empty());
 +      check_closed_broadcast!(nodes[1]);
 +}
 +
 +#[test]
 +fn test_update_add_htlc_bolt2_receiver_check_cltv_expiry() {
 +      //BOLT2 Requirement: if sending node sets cltv_expiry to greater or equal to 500000000: SHOULD fail the channel.
 +      let mut nodes = create_network(2, &[None, None]);
 +      create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000, LocalFeatures::new(), LocalFeatures::new());
 +      let route = nodes[0].router.get_route(&nodes[1].node.get_our_node_id(), None, &[], 3999999, TEST_FINAL_CLTV).unwrap();
 +      let (_, our_payment_hash) = get_payment_preimage_hash!(nodes[0]);
 +      nodes[0].node.send_payment(route, our_payment_hash).unwrap();
 +      check_added_monitors!(nodes[0], 1);
 +      let mut updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
 +      updates.update_add_htlcs[0].cltv_expiry = 500000000;
 +      let err = nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);
 +
 +      if let Err(msgs::LightningError{err, action: msgs::ErrorAction::SendErrorMessage {..}}) = err {
 +              assert_eq!(err,"Remote provided CLTV expiry in seconds instead of block height");
 +      } else {
 +              assert!(false);
 +      }
 +
 +      assert!(nodes[1].node.list_channels().is_empty());
 +      check_closed_broadcast!(nodes[1]);
 +}
 +
 +#[test]
 +fn test_update_add_htlc_bolt2_receiver_check_repeated_id_ignore() {
 +      //BOLT 2 requirement: if the sender did not previously acknowledge the commitment of that HTLC: MUST ignore a repeated id value after a reconnection.
 +	// We test this by first checking that repeated HTLCs pass commitment signature checks
 +      // after disconnect and that non-sequential htlc_ids result in a channel failure.
 +      let mut nodes = create_network(2, &[None, None]);
 +      create_announced_chan_between_nodes(&nodes, 0, 1, LocalFeatures::new(), LocalFeatures::new());
 +      let route = nodes[0].router.get_route(&nodes[1].node.get_our_node_id(), None, &[], 1000000, TEST_FINAL_CLTV).unwrap();
 +      let (_, our_payment_hash) = get_payment_preimage_hash!(nodes[0]);
 +      nodes[0].node.send_payment(route, our_payment_hash).unwrap();
 +      check_added_monitors!(nodes[0], 1);
 +      let updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
 +      nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]).unwrap();
 +
 +      //Disconnect and Reconnect
 +      nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
 +      nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
 +      nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id());
 +      let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]);
 +      assert_eq!(reestablish_1.len(), 1);
 +      nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id());
 +      let reestablish_2 = get_chan_reestablish_msgs!(nodes[1], nodes[0]);
 +      assert_eq!(reestablish_2.len(), 1);
 +      nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &reestablish_2[0]).unwrap();
 +      handle_chan_reestablish_msgs!(nodes[0], nodes[1]);
 +      nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &reestablish_1[0]).unwrap();
 +      handle_chan_reestablish_msgs!(nodes[1], nodes[0]);
 +
 +      //Resend HTLC
 +      nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]).unwrap();
 +      assert_eq!(updates.commitment_signed.htlc_signatures.len(), 1);
 +      nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &updates.commitment_signed).unwrap();
 +      check_added_monitors!(nodes[1], 1);
 +      let _bs_responses = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
 +
 +      let err = nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);
 +      if let Err(msgs::LightningError{err, action: msgs::ErrorAction::SendErrorMessage {..}}) = err {
 +              assert_eq!(err, "Remote skipped HTLC ID");
 +      } else {
 +              assert!(false);
 +      }
 +
 +      assert!(nodes[1].node.list_channels().is_empty());
 +      check_closed_broadcast!(nodes[1]);
 +}
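 +
 +// Heavily simplified sketch of the id rule tested above: before the HTLC is committed, a
 +// retransmitted id is tolerated (pre-commitment resend after reconnection); once committed,
 +// any id other than the next expected one fails the channel. An assumed model, not the
 +// library's per-HTLC state machine.
 +fn sketch_check_htlc_id(received_id: u64, next_expected_id: u64, committed: bool) -> Result<(), &'static str> {
 +	if received_id == next_expected_id { return Ok(()); }
 +	if !committed && received_id + 1 == next_expected_id { return Ok(()); } // pre-commit resend
 +	Err("Remote skipped HTLC ID")
 +}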
 +
 +#[test]
 +fn test_update_fulfill_htlc_bolt2_update_fulfill_htlc_before_commitment() {
 +	//BOLT 2 Requirement: until the corresponding HTLC is irrevocably committed in both sides' commitment transactions: MUST NOT send an update_fulfill_htlc, update_fail_htlc, or update_fail_malformed_htlc.
 +
 +      let mut nodes = create_network(2, &[None, None]);
 +      let chan = create_announced_chan_between_nodes(&nodes, 0, 1, LocalFeatures::new(), LocalFeatures::new());
 +
 +      let route = nodes[0].router.get_route(&nodes[1].node.get_our_node_id(), None, &[], 1000000, TEST_FINAL_CLTV).unwrap();
 +      let (our_payment_preimage, our_payment_hash) = get_payment_preimage_hash!(nodes[0]);
 +      nodes[0].node.send_payment(route, our_payment_hash).unwrap();
 +      check_added_monitors!(nodes[0], 1);
 +      let updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
 +      nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]).unwrap();
 +
 +      let update_msg = msgs::UpdateFulfillHTLC{
 +              channel_id: chan.2,
 +              htlc_id: 0,
 +              payment_preimage: our_payment_preimage,
 +      };
 +
 +      let err = nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &update_msg);
 +
 +      if let Err(msgs::LightningError{err, action: msgs::ErrorAction::SendErrorMessage {..}}) = err {
 +              assert_eq!(err, "Remote tried to fulfill/fail HTLC before it had been committed");
 +      } else {
 +              assert!(false);
 +      }
 +
 +      assert!(nodes[0].node.list_channels().is_empty());
 +      check_closed_broadcast!(nodes[0]);
 +}
 +
 +#[test]
 +fn test_update_fulfill_htlc_bolt2_update_fail_htlc_before_commitment() {
 +	//BOLT 2 Requirement: until the corresponding HTLC is irrevocably committed in both sides' commitment transactions: MUST NOT send an update_fulfill_htlc, update_fail_htlc, or update_fail_malformed_htlc.
 +
 +      let mut nodes = create_network(2, &[None, None]);
 +      let chan = create_announced_chan_between_nodes(&nodes, 0, 1, LocalFeatures::new(), LocalFeatures::new());
 +
 +      let route = nodes[0].router.get_route(&nodes[1].node.get_our_node_id(), None, &[], 1000000, TEST_FINAL_CLTV).unwrap();
 +      let (_, our_payment_hash) = get_payment_preimage_hash!(nodes[0]);
 +      nodes[0].node.send_payment(route, our_payment_hash).unwrap();
 +      check_added_monitors!(nodes[0], 1);
 +      let updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
 +      nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]).unwrap();
 +
 +      let update_msg = msgs::UpdateFailHTLC{
 +              channel_id: chan.2,
 +              htlc_id: 0,
 +              reason: msgs::OnionErrorPacket { data: Vec::new()},
 +      };
 +
 +      let err = nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &update_msg);
 +
 +      if let Err(msgs::LightningError{err, action: msgs::ErrorAction::SendErrorMessage {..}}) = err {
 +              assert_eq!(err, "Remote tried to fulfill/fail HTLC before it had been committed");
 +      } else {
 +              assert!(false);
 +      }
 +
 +      assert!(nodes[0].node.list_channels().is_empty());
 +      check_closed_broadcast!(nodes[0]);
 +}
 +
 +#[test]
 +fn test_update_fulfill_htlc_bolt2_update_fail_malformed_htlc_before_commitment() {
 +	//BOLT 2 Requirement: until the corresponding HTLC is irrevocably committed in both sides' commitment transactions: MUST NOT send an update_fulfill_htlc, update_fail_htlc, or update_fail_malformed_htlc.
 +
 +      let mut nodes = create_network(2, &[None, None]);
 +      let chan = create_announced_chan_between_nodes(&nodes, 0, 1, LocalFeatures::new(), LocalFeatures::new());
 +
 +      let route = nodes[0].router.get_route(&nodes[1].node.get_our_node_id(), None, &[], 1000000, TEST_FINAL_CLTV).unwrap();
 +      let (_, our_payment_hash) = get_payment_preimage_hash!(nodes[0]);
 +      nodes[0].node.send_payment(route, our_payment_hash).unwrap();
 +      check_added_monitors!(nodes[0], 1);
 +      let updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
 +      nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]).unwrap();
 +
 +      let update_msg = msgs::UpdateFailMalformedHTLC{
 +              channel_id: chan.2,
 +              htlc_id: 0,
 +              sha256_of_onion: [1; 32],
 +              failure_code: 0x8000,
 +      };
 +
 +      let err = nodes[0].node.handle_update_fail_malformed_htlc(&nodes[1].node.get_our_node_id(), &update_msg);
 +
 +      if let Err(msgs::LightningError{err, action: msgs::ErrorAction::SendErrorMessage {..}}) = err {
 +              assert_eq!(err, "Remote tried to fulfill/fail HTLC before it had been committed");
 +      } else {
 +              assert!(false);
 +      }
 +
 +      assert!(nodes[0].node.list_channels().is_empty());
 +      check_closed_broadcast!(nodes[0]);
 +}
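 +
 +// Sketch of the state gate the three tests above share: fulfill/fail/fail_malformed are
 +// only legal once the HTLC is irrevocably committed on both sides. The enum here is an
 +// assumed simplification of the real per-HTLC state machine.
 +enum SketchHtlcState { RemoteAnnounced, Committed }
 +fn sketch_can_resolve(state: &SketchHtlcState) -> Result<(), &'static str> {
 +	match state {
 +		SketchHtlcState::Committed => Ok(()),
 +		_ => Err("Remote tried to fulfill/fail HTLC before it had been committed"),
 +	}
 +}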
 +
 +#[test]
 +fn test_update_fulfill_htlc_bolt2_incorrect_htlc_id() {
 +      //BOLT 2 Requirement: A receiving node: if the id does not correspond to an HTLC in its current commitment transaction MUST fail the channel.
 +
 +      let nodes = create_network(2, &[None, None]);
 +      create_announced_chan_between_nodes(&nodes, 0, 1, LocalFeatures::new(), LocalFeatures::new());
 +
 +      let our_payment_preimage = route_payment(&nodes[0], &[&nodes[1]], 100000).0;
 +
-       nodes[1].node.claim_funds(our_payment_preimage);
++      nodes[1].node.claim_funds(our_payment_preimage, 100_000);
 +      check_added_monitors!(nodes[1], 1);
 +
 +      let events = nodes[1].node.get_and_clear_pending_msg_events();
 +      assert_eq!(events.len(), 1);
 +      let mut update_fulfill_msg: msgs::UpdateFulfillHTLC = {
 +              match events[0] {
 +                      MessageSendEvent::UpdateHTLCs { node_id: _ , updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, .. } } => {
 +                              assert!(update_add_htlcs.is_empty());
 +                              assert_eq!(update_fulfill_htlcs.len(), 1);
 +                              assert!(update_fail_htlcs.is_empty());
 +                              assert!(update_fail_malformed_htlcs.is_empty());
 +                              assert!(update_fee.is_none());
 +                              update_fulfill_htlcs[0].clone()
 +                      },
 +                      _ => panic!("Unexpected event"),
 +              }
 +      };
 +
 +      update_fulfill_msg.htlc_id = 1;
 +
 +      let err = nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &update_fulfill_msg);
 +      if let Err(msgs::LightningError{err, action: msgs::ErrorAction::SendErrorMessage {..}}) = err {
 +              assert_eq!(err, "Remote tried to fulfill/fail an HTLC we couldn't find");
 +      } else {
 +              assert!(false);
 +      }
 +
 +      assert!(nodes[0].node.list_channels().is_empty());
 +      check_closed_broadcast!(nodes[0]);
 +}
 +
 +#[test]
 +fn test_update_fulfill_htlc_bolt2_wrong_preimage() {
 +      //BOLT 2 Requirement: A receiving node: if the payment_preimage value in update_fulfill_htlc doesn't SHA256 hash to the corresponding HTLC payment_hash MUST fail the channel.
 +
 +      let nodes = create_network(2, &[None, None]);
 +      create_announced_chan_between_nodes(&nodes, 0, 1, LocalFeatures::new(), LocalFeatures::new());
 +
 +      let our_payment_preimage = route_payment(&nodes[0], &[&nodes[1]], 100000).0;
 +
-       send_payment(&nodes[0], &vec!(&nodes[1])[..], 8000000);
++      nodes[1].node.claim_funds(our_payment_preimage, 100_000);
 +      check_added_monitors!(nodes[1], 1);
 +
 +      let events = nodes[1].node.get_and_clear_pending_msg_events();
 +      assert_eq!(events.len(), 1);
 +      let mut update_fulfill_msg: msgs::UpdateFulfillHTLC = {
 +              match events[0] {
 +                      MessageSendEvent::UpdateHTLCs { node_id: _ , updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, .. } } => {
 +                              assert!(update_add_htlcs.is_empty());
 +                              assert_eq!(update_fulfill_htlcs.len(), 1);
 +                              assert!(update_fail_htlcs.is_empty());
 +                              assert!(update_fail_malformed_htlcs.is_empty());
 +                              assert!(update_fee.is_none());
 +                              update_fulfill_htlcs[0].clone()
 +                      },
 +                      _ => panic!("Unexpected event"),
 +              }
 +      };
 +
 +      update_fulfill_msg.payment_preimage = PaymentPreimage([1; 32]);
 +
 +      let err = nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &update_fulfill_msg);
 +      if let Err(msgs::LightningError{err, action: msgs::ErrorAction::SendErrorMessage {..}}) = err {
 +              assert_eq!(err, "Remote tried to fulfill HTLC with an incorrect preimage");
 +      } else {
 +              assert!(false);
 +      }
 +
 +      assert!(nodes[0].node.list_channels().is_empty());
 +      check_closed_broadcast!(nodes[0]);
 +}
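 +
 +// Sketch of the preimage check the receiver performs, using the bitcoin_hashes sha256
 +// already imported at the top of this file; illustrative, not the library's exact call site.
 +fn sketch_preimage_matches(preimage: &[u8; 32], payment_hash: &[u8; 32]) -> bool {
 +	use bitcoin_hashes::{sha256, Hash};
 +	sha256::Hash::hash(&preimage[..]).into_inner() == *payment_hash
 +}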
 +
 +
 +#[test]
 +fn test_update_fulfill_htlc_bolt2_missing_badonion_bit_for_malformed_htlc_message() {
 +      //BOLT 2 Requirement: A receiving node: if the BADONION bit in failure_code is not set for update_fail_malformed_htlc MUST fail the channel.
 +
 +      let mut nodes = create_network(2, &[None, None]);
 +      create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 1000000, LocalFeatures::new(), LocalFeatures::new());
 +      let route = nodes[0].router.get_route(&nodes[1].node.get_our_node_id(), None, &[], 1000000, TEST_FINAL_CLTV).unwrap();
 +      let (_, our_payment_hash) = get_payment_preimage_hash!(nodes[0]);
 +      nodes[0].node.send_payment(route, our_payment_hash).unwrap();
 +      check_added_monitors!(nodes[0], 1);
 +
 +      let mut updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
 +      updates.update_add_htlcs[0].onion_routing_packet.version = 1; //Produce a malformed HTLC message
 +
 +      nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]).unwrap();
 +      check_added_monitors!(nodes[1], 0);
 +      commitment_signed_dance!(nodes[1], nodes[0], updates.commitment_signed, false, true);
 +
 +      let events = nodes[1].node.get_and_clear_pending_msg_events();
 +
 +      let mut update_msg: msgs::UpdateFailMalformedHTLC = {
 +              match events[0] {
 +                      MessageSendEvent::UpdateHTLCs { node_id: _ , updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, .. } } => {
 +                              assert!(update_add_htlcs.is_empty());
 +                              assert!(update_fulfill_htlcs.is_empty());
 +                              assert!(update_fail_htlcs.is_empty());
 +                              assert_eq!(update_fail_malformed_htlcs.len(), 1);
 +                              assert!(update_fee.is_none());
 +                              update_fail_malformed_htlcs[0].clone()
 +                      },
 +                      _ => panic!("Unexpected event"),
 +              }
 +      };
 +      update_msg.failure_code &= !0x8000;
 +      let err = nodes[0].node.handle_update_fail_malformed_htlc(&nodes[1].node.get_our_node_id(), &update_msg);
 +      if let Err(msgs::LightningError{err, action: msgs::ErrorAction::SendErrorMessage {..}}) = err {
 +              assert_eq!(err, "Got update_fail_malformed_htlc with BADONION not set");
 +      } else {
 +              assert!(false);
 +      }
 +
 +      assert!(nodes[0].node.list_channels().is_empty());
 +      check_closed_broadcast!(nodes[0]);
 +}
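 +
 +// The BADONION bit check sketched as a predicate: 0x8000 is the BADONION flag from
 +// BOLT 4, and an update_fail_malformed_htlc without it must fail the channel.
 +const SKETCH_BADONION: u16 = 0x8000;
 +fn sketch_fail_malformed_code_ok(failure_code: u16) -> bool {
 +	failure_code & SKETCH_BADONION != 0
 +}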
 +
 +#[test]
 +fn test_update_fulfill_htlc_bolt2_after_malformed_htlc_message_must_forward_update_fail_htlc() {
 +      //BOLT 2 Requirement: a receiving node which has an outgoing HTLC canceled by update_fail_malformed_htlc:
 +      //    * MUST return an error in the update_fail_htlc sent to the link which originally sent the HTLC, using the failure_code given and setting the data to sha256_of_onion.
 +
 +      let mut nodes = create_network(3, &[None, None, None]);
 +      create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 1000000, LocalFeatures::new(), LocalFeatures::new());
 +      create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 1000000, 1000000, LocalFeatures::new(), LocalFeatures::new());
 +
 +      let route = nodes[0].router.get_route(&nodes[2].node.get_our_node_id(), None, &Vec::new(), 100000, TEST_FINAL_CLTV).unwrap();
 +      let (_, our_payment_hash) = get_payment_preimage_hash!(nodes[0]);
 +
 +      //First hop
 +      let mut payment_event = {
 +              nodes[0].node.send_payment(route, our_payment_hash).unwrap();
 +              check_added_monitors!(nodes[0], 1);
 +              let mut events = nodes[0].node.get_and_clear_pending_msg_events();
 +              assert_eq!(events.len(), 1);
 +              SendEvent::from_event(events.remove(0))
 +      };
 +      nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]).unwrap();
 +      check_added_monitors!(nodes[1], 0);
 +      commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
 +      expect_pending_htlcs_forwardable!(nodes[1]);
 +      let mut events_2 = nodes[1].node.get_and_clear_pending_msg_events();
 +      assert_eq!(events_2.len(), 1);
 +      check_added_monitors!(nodes[1], 1);
 +      payment_event = SendEvent::from_event(events_2.remove(0));
 +      assert_eq!(payment_event.msgs.len(), 1);
 +
 +      //Second Hop
 +      payment_event.msgs[0].onion_routing_packet.version = 1; //Produce a malformed HTLC message
 +      nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &payment_event.msgs[0]).unwrap();
 +      check_added_monitors!(nodes[2], 0);
 +      commitment_signed_dance!(nodes[2], nodes[1], payment_event.commitment_msg, false, true);
 +
 +      let events_3 = nodes[2].node.get_and_clear_pending_msg_events();
 +      assert_eq!(events_3.len(), 1);
 +      let update_msg : (msgs::UpdateFailMalformedHTLC, msgs::CommitmentSigned) = {
 +              match events_3[0] {
 +                      MessageSendEvent::UpdateHTLCs { node_id: _ , updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, ref commitment_signed } } => {
 +                              assert!(update_add_htlcs.is_empty());
 +                              assert!(update_fulfill_htlcs.is_empty());
 +                              assert!(update_fail_htlcs.is_empty());
 +                              assert_eq!(update_fail_malformed_htlcs.len(), 1);
 +                              assert!(update_fee.is_none());
 +                              (update_fail_malformed_htlcs[0].clone(), commitment_signed.clone())
 +                      },
 +                      _ => panic!("Unexpected event"),
 +              }
 +      };
 +
 +      nodes[1].node.handle_update_fail_malformed_htlc(&nodes[2].node.get_our_node_id(), &update_msg.0).unwrap();
 +
 +      check_added_monitors!(nodes[1], 0);
 +      commitment_signed_dance!(nodes[1], nodes[2], update_msg.1, false, true);
 +      expect_pending_htlcs_forwardable!(nodes[1]);
 +      let events_4 = nodes[1].node.get_and_clear_pending_msg_events();
 +      assert_eq!(events_4.len(), 1);
 +
 +	//Confirm that handling the update_fail_malformed_htlc message produces an update_fail_htlc message to be forwarded back along the route
 +      match events_4[0] {
 +              MessageSendEvent::UpdateHTLCs { node_id: _ , updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, .. } } => {
 +                      assert!(update_add_htlcs.is_empty());
 +                      assert!(update_fulfill_htlcs.is_empty());
 +                      assert_eq!(update_fail_htlcs.len(), 1);
 +                      assert!(update_fail_malformed_htlcs.is_empty());
 +                      assert!(update_fee.is_none());
 +              },
 +              _ => panic!("Unexpected event"),
 +      };
 +
 +      check_added_monitors!(nodes[1], 1);
 +}
 +
 +fn do_test_failure_delay_dust_htlc_local_commitment(announce_latest: bool) {
 +	// Dust-HTLC failure updates must be delayed until the failure-trigger tx (in this case the local commitment tx) reaches ANTI_REORG_DELAY
 +	// We can have at most two valid local commitment txn, so both cases must be covered, and both txn must be checked to get them all,
 +	// as an HTLC could have been removed from the latest local commitment tx but still be valid until we get the remote RAA
 +
 +      let nodes = create_network(2, &[None, None]);
 +	let chan = create_announced_chan_between_nodes(&nodes, 0, 1, LocalFeatures::new(), LocalFeatures::new());
 +
 +      let bs_dust_limit = nodes[1].node.channel_state.lock().unwrap().by_id.get(&chan.2).unwrap().our_dust_limit_satoshis;
 +
 +      // We route 2 dust-HTLCs between A and B
 +      let (_, payment_hash_1) = route_payment(&nodes[0], &[&nodes[1]], bs_dust_limit*1000);
 +      let (_, payment_hash_2) = route_payment(&nodes[0], &[&nodes[1]], bs_dust_limit*1000);
 +      route_payment(&nodes[0], &[&nodes[1]], 1000000);
 +
 +      // Cache one local commitment tx as previous
 +      let as_prev_commitment_tx = nodes[0].node.channel_state.lock().unwrap().by_id.get(&chan.2).unwrap().last_local_commitment_txn.clone();
 +
 +      // Fail one HTLC to prune it in the will-be-latest-local commitment tx
 +      assert!(nodes[1].node.fail_htlc_backwards(&payment_hash_2));
 +      check_added_monitors!(nodes[1], 0);
 +      expect_pending_htlcs_forwardable!(nodes[1]);
 +      check_added_monitors!(nodes[1], 1);
 +
 +      let remove = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
 +      nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &remove.update_fail_htlcs[0]).unwrap();
 +      nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &remove.commitment_signed).unwrap();
 +      check_added_monitors!(nodes[0], 1);
 +
 +	// Cache one local commitment tx as latest
 +      let as_last_commitment_tx = nodes[0].node.channel_state.lock().unwrap().by_id.get(&chan.2).unwrap().last_local_commitment_txn.clone();
 +
 +      let events = nodes[0].node.get_and_clear_pending_msg_events();
 +      match events[0] {
 +              MessageSendEvent::SendRevokeAndACK { node_id, .. } => {
 +                      assert_eq!(node_id, nodes[1].node.get_our_node_id());
 +              },
 +              _ => panic!("Unexpected event"),
 +      }
 +      match events[1] {
 +              MessageSendEvent::UpdateHTLCs { node_id, .. } => {
 +                      assert_eq!(node_id, nodes[1].node.get_our_node_id());
 +              },
 +              _ => panic!("Unexpected event"),
 +      }
 +
 +      assert_ne!(as_prev_commitment_tx, as_last_commitment_tx);
 +	// Fail the 2 dust-HTLCs, moving their failure into the maturation buffer (htlc_updated_waiting_threshold_conf)
 +      let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
 +      if announce_latest {
 +              nodes[0].chain_monitor.block_connected_checked(&header, 1, &[&as_last_commitment_tx[0]], &[1; 1]);
 +      } else {
 +              nodes[0].chain_monitor.block_connected_checked(&header, 1, &[&as_prev_commitment_tx[0]], &[1; 1]);
 +      }
 +
 +      let events = nodes[0].node.get_and_clear_pending_msg_events();
 +      assert_eq!(events.len(), 1);
 +      match events[0] {
 +              MessageSendEvent::BroadcastChannelUpdate { .. } => {},
 +              _ => panic!("Unexpected event"),
 +      }
 +
 +      assert_eq!(nodes[0].node.get_and_clear_pending_events().len(), 0);
 +      connect_blocks(&nodes[0].chain_monitor, ANTI_REORG_DELAY - 1, 1, true,  header.bitcoin_hash());
 +      let events = nodes[0].node.get_and_clear_pending_events();
 +	// Only 2 PaymentFailed events should show up; the over-dust HTLC has to be failed by the timeout tx
 +      assert_eq!(events.len(), 2);
 +      let mut first_failed = false;
 +      for event in events {
 +              match event {
 +                      Event::PaymentFailed { payment_hash, .. } => {
 +                              if payment_hash == payment_hash_1 {
 +                                      assert!(!first_failed);
 +                                      first_failed = true;
 +                              } else {
 +                                      assert_eq!(payment_hash, payment_hash_2);
 +                              }
 +                      }
 +                      _ => panic!("Unexpected event"),
 +              }
 +      }
 +}
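 +
 +// Sketch of the maturation idea tested above, assuming ANTI_REORG_DELAY confirmations
 +// are required on the failure-trigger tx before the PaymentFailed events surface; the
 +// arithmetic is illustrative, not the monitor's actual buffer.
 +fn sketch_failure_matured(trigger_conf_height: u32, current_height: u32, anti_reorg_delay: u32) -> bool {
 +	current_height + 1 >= trigger_conf_height + anti_reorg_delay
 +}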
 +
 +#[test]
 +fn test_failure_delay_dust_htlc_local_commitment() {
 +      do_test_failure_delay_dust_htlc_local_commitment(true);
 +      do_test_failure_delay_dust_htlc_local_commitment(false);
 +}
 +
 +#[test]
 +fn test_no_failure_dust_htlc_local_commitment() {
 +	// The transaction filters for failing back dust HTLCs based on local commitment txn info have been
 +	// prone to error; we test here that a dummy transaction doesn't fail them.
 +
 +      let nodes = create_network(2, &[None, None]);
 +      let chan = create_announced_chan_between_nodes(&nodes, 0, 1, LocalFeatures::new(), LocalFeatures::new());
 +
 +      // Rebalance a bit
-       claim_payment(&nodes[0], &vec!(&nodes[1])[..], preimage_1);
-       claim_payment(&nodes[1], &vec!(&nodes[0])[..], preimage_2);
++      send_payment(&nodes[0], &vec!(&nodes[1])[..], 8000000, 8_000_000);
 +
 +      let as_dust_limit = nodes[0].node.channel_state.lock().unwrap().by_id.get(&chan.2).unwrap().our_dust_limit_satoshis;
 +      let bs_dust_limit = nodes[1].node.channel_state.lock().unwrap().by_id.get(&chan.2).unwrap().our_dust_limit_satoshis;
 +
 +      // We route 2 dust-HTLCs between A and B
 +      let (preimage_1, _) = route_payment(&nodes[0], &[&nodes[1]], bs_dust_limit*1000);
 +      let (preimage_2, _) = route_payment(&nodes[1], &[&nodes[0]], as_dust_limit*1000);
 +
 +      // Build a dummy invalid transaction trying to spend a commitment tx
 +      let input = TxIn {
 +              previous_output: BitcoinOutPoint { txid: chan.3.txid(), vout: 0 },
 +              script_sig: Script::new(),
 +              sequence: 0,
 +              witness: Vec::new(),
 +      };
 +
 +      let outp = TxOut {
 +              script_pubkey: Builder::new().push_opcode(opcodes::all::OP_RETURN).into_script(),
 +              value: 10000,
 +      };
 +
 +      let dummy_tx = Transaction {
 +              version: 2,
 +              lock_time: 0,
 +              input: vec![input],
 +              output: vec![outp]
 +      };
 +
 +      let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
 +      nodes[0].chan_monitor.simple_monitor.block_connected(&header, 1, &[&dummy_tx], &[1;1]);
 +      assert_eq!(nodes[0].node.get_and_clear_pending_events().len(), 0);
 +      assert_eq!(nodes[0].node.get_and_clear_pending_msg_events().len(), 0);
 +	// We broadcast a few more blocks to check everything is all right
 +      connect_blocks(&nodes[0].chain_monitor, 20, 1, true,  header.bitcoin_hash());
 +      assert_eq!(nodes[0].node.get_and_clear_pending_events().len(), 0);
 +      assert_eq!(nodes[0].node.get_and_clear_pending_msg_events().len(), 0);
 +
-               claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage_3);
++      claim_payment(&nodes[0], &vec!(&nodes[1])[..], preimage_1, bs_dust_limit*1000);
++      claim_payment(&nodes[1], &vec!(&nodes[0])[..], preimage_2, as_dust_limit*1000);
 +}
 +
 +fn do_test_sweep_outbound_htlc_failure_update(revoked: bool, local: bool) {
 +      // Outbound HTLC-failure updates must be cancelled if we get a reorg before we reach ANTI_REORG_DELAY.
 +      // Broadcast of revoked remote commitment tx, trigger failure-update of dust/non-dust HTLCs
 +      // Broadcast of remote commitment tx, trigger failure-update of dust-HTLCs
 +	// Broadcast of timeout tx on remote commitment tx, trigger failure-update of non-dust HTLCs
 +      // Broadcast of local commitment tx, trigger failure-update of dust-HTLCs
 +      // Broadcast of HTLC-timeout tx on local commitment tx, trigger failure-update of non-dust HTLCs
 +
 +      let nodes = create_network(3, &[None, None, None]);
 +      let chan = create_announced_chan_between_nodes(&nodes, 0, 1, LocalFeatures::new(), LocalFeatures::new());
 +
 +      let bs_dust_limit = nodes[1].node.channel_state.lock().unwrap().by_id.get(&chan.2).unwrap().our_dust_limit_satoshis;
 +
 +      let (_payment_preimage_1, dust_hash) = route_payment(&nodes[0], &[&nodes[1]], bs_dust_limit*1000);
 +      let (_payment_preimage_2, non_dust_hash) = route_payment(&nodes[0], &[&nodes[1]], 1000000);
 +
 +      let as_commitment_tx = nodes[0].node.channel_state.lock().unwrap().by_id.get(&chan.2).unwrap().last_local_commitment_txn.clone();
 +      let bs_commitment_tx = nodes[1].node.channel_state.lock().unwrap().by_id.get(&chan.2).unwrap().last_local_commitment_txn.clone();
 +
 +      // We revoked bs_commitment_tx
 +      if revoked {
 +              let (payment_preimage_3, _) = route_payment(&nodes[0], &[&nodes[1]], 1000000);
-       send_payment(&nodes[0], &vec!(&nodes[1])[..], 8000000);
-       send_payment(&nodes[0], &vec!(&nodes[1])[..], 8000000);
++              claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage_3, 1_000_000);
 +      }
 +
 +      let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
 +      let mut timeout_tx = Vec::new();
 +      if local {
 +              // We fail dust-HTLC 1 by broadcast of local commitment tx
 +              nodes[0].chain_monitor.block_connected_checked(&header, 1, &[&as_commitment_tx[0]], &[1; 1]);
 +              let events = nodes[0].node.get_and_clear_pending_msg_events();
 +              assert_eq!(events.len(), 1);
 +              match events[0] {
 +                      MessageSendEvent::BroadcastChannelUpdate { .. } => {},
 +                      _ => panic!("Unexpected event"),
 +              }
 +              assert_eq!(nodes[0].node.get_and_clear_pending_events().len(), 0);
 +              timeout_tx.push(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap()[0].clone());
 +              let parent_hash  = connect_blocks(&nodes[0].chain_monitor, ANTI_REORG_DELAY - 1, 2, true, header.bitcoin_hash());
 +              let events = nodes[0].node.get_and_clear_pending_events();
 +              assert_eq!(events.len(), 1);
 +              match events[0] {
 +                      Event::PaymentFailed { payment_hash, .. } => {
 +                              assert_eq!(payment_hash, dust_hash);
 +                      },
 +                      _ => panic!("Unexpected event"),
 +              }
 +              assert_eq!(timeout_tx[0].input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
 +              // We fail non-dust-HTLC 2 by broadcast of local HTLC-timeout tx on local commitment tx
 +              let header_2 = BlockHeader { version: 0x20000000, prev_blockhash: parent_hash, merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
 +              assert_eq!(nodes[0].node.get_and_clear_pending_events().len(), 0);
 +              nodes[0].chain_monitor.block_connected_checked(&header_2, 7, &[&timeout_tx[0]], &[1; 1]);
 +              let header_3 = BlockHeader { version: 0x20000000, prev_blockhash: header_2.bitcoin_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
 +              connect_blocks(&nodes[0].chain_monitor, ANTI_REORG_DELAY - 1, 8, true, header_3.bitcoin_hash());
 +              let events = nodes[0].node.get_and_clear_pending_events();
 +              assert_eq!(events.len(), 1);
 +              match events[0] {
 +                      Event::PaymentFailed { payment_hash, .. } => {
 +                              assert_eq!(payment_hash, non_dust_hash);
 +                      },
 +                      _ => panic!("Unexpected event"),
 +              }
 +      } else {
 +              // We fail dust-HTLC 1 by broadcast of remote commitment tx. If revoked, fail also non-dust HTLC
 +              nodes[0].chain_monitor.block_connected_checked(&header, 1, &[&bs_commitment_tx[0]], &[1; 1]);
 +              assert_eq!(nodes[0].node.get_and_clear_pending_events().len(), 0);
 +              let events = nodes[0].node.get_and_clear_pending_msg_events();
 +              assert_eq!(events.len(), 1);
 +              match events[0] {
 +                      MessageSendEvent::BroadcastChannelUpdate { .. } => {},
 +                      _ => panic!("Unexpected event"),
 +              }
 +              timeout_tx.push(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap()[0].clone());
 +              let parent_hash  = connect_blocks(&nodes[0].chain_monitor, ANTI_REORG_DELAY - 1, 2, true, header.bitcoin_hash());
 +              let header_2 = BlockHeader { version: 0x20000000, prev_blockhash: parent_hash, merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
 +              if !revoked {
 +                      let events = nodes[0].node.get_and_clear_pending_events();
 +                      assert_eq!(events.len(), 1);
 +                      match events[0] {
 +                              Event::PaymentFailed { payment_hash, .. } => {
 +                                      assert_eq!(payment_hash, dust_hash);
 +                              },
 +                              _ => panic!("Unexpected event"),
 +                      }
 +                      assert_eq!(timeout_tx[0].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
 +                      // We fail non-dust-HTLC 2 by broadcast of local timeout tx on remote commitment tx
 +                      nodes[0].chain_monitor.block_connected_checked(&header_2, 7, &[&timeout_tx[0]], &[1; 1]);
 +                      assert_eq!(nodes[0].node.get_and_clear_pending_events().len(), 0);
 +                      let header_3 = BlockHeader { version: 0x20000000, prev_blockhash: header_2.bitcoin_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
 +                      connect_blocks(&nodes[0].chain_monitor, ANTI_REORG_DELAY - 1, 8, true, header_3.bitcoin_hash());
 +                      let events = nodes[0].node.get_and_clear_pending_events();
 +                      assert_eq!(events.len(), 1);
 +                      match events[0] {
 +                              Event::PaymentFailed { payment_hash, .. } => {
 +                                      assert_eq!(payment_hash, non_dust_hash);
 +                              },
 +                              _ => panic!("Unexpected event"),
 +                      }
 +              } else {
 +                      // If revoked, both dust & non-dust HTLCs should have been failed after ANTI_REORG_DELAY confs of revoked
 +                      // commitment tx
 +                      let events = nodes[0].node.get_and_clear_pending_events();
 +                      assert_eq!(events.len(), 2);
 +                      let first;
 +                      match events[0] {
 +                              Event::PaymentFailed { payment_hash, .. } => {
 +                                      if payment_hash == dust_hash { first = true; }
 +                                      else { first = false; }
 +                              },
 +                              _ => panic!("Unexpected event"),
 +                      }
 +                      match events[1] {
 +                              Event::PaymentFailed { payment_hash, .. } => {
 +                                      if first { assert_eq!(payment_hash, non_dust_hash); }
 +                                      else { assert_eq!(payment_hash, dust_hash); }
 +                              },
 +                              _ => panic!("Unexpected event"),
 +                      }
 +              }
 +      }
 +}
 +
 +#[test]
 +fn test_sweep_outbound_htlc_failure_update() {
 +      do_test_sweep_outbound_htlc_failure_update(false, true);
 +      do_test_sweep_outbound_htlc_failure_update(false, false);
 +      do_test_sweep_outbound_htlc_failure_update(true, false);
 +}
 +
 +#[test]
 +fn test_upfront_shutdown_script() {
 +	// BOLT 2: option upfront shutdown script; if a peer commits to its closing script at channel opening,
 +	// enforce it when the shutdown message arrives
 +
 +      let mut config = UserConfig::new();
 +      config.channel_options.announced_channel = true;
 +      config.peer_channel_config_limits.force_announced_channel_preference = false;
 +      config.channel_options.commit_upfront_shutdown_pubkey = false;
 +      let nodes = create_network(3, &[None, Some(config), None]);
 +
 +	// We test that, if a peer commits upfront to a script and it changes at closing, we refuse to sign
 +      let flags = LocalFeatures::new();
 +      let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 2, 1000000, 1000000, flags.clone(), flags.clone());
 +      nodes[0].node.close_channel(&OutPoint::new(chan.3.txid(), 0).to_channel_id()).unwrap();
 +      let mut node_0_shutdown = get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[2].node.get_our_node_id());
 +      node_0_shutdown.scriptpubkey = Builder::new().push_opcode(opcodes::all::OP_RETURN).into_script().to_p2sh();
 +	// Test that we enforce the upfront_scriptpubkey: by providing a different one at closing, we disconnect the peer
 +      if let Err(error) = nodes[2].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &node_0_shutdown) {
 +              match error.action {
 +                      ErrorAction::SendErrorMessage { msg } => {
 +                              assert_eq!(msg.data,"Got shutdown request with a scriptpubkey which did not match their previous scriptpubkey");
 +                      },
 +                      _ => { assert!(false); }
 +              }
 +      } else { assert!(false); }
 +      let events = nodes[2].node.get_and_clear_pending_msg_events();
 +      assert_eq!(events.len(), 1);
 +      match events[0] {
 +              MessageSendEvent::BroadcastChannelUpdate { .. } => {},
 +              _ => panic!("Unexpected event"),
 +      }
 +
 +	// We test that, if a peer commits upfront to a script and it doesn't change at closing, we sign
 +      let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 2, 1000000, 1000000, flags.clone(), flags.clone());
 +      nodes[0].node.close_channel(&OutPoint::new(chan.3.txid(), 0).to_channel_id()).unwrap();
 +      let node_0_shutdown = get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[2].node.get_our_node_id());
 +      if let Ok(_) = nodes[2].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &node_0_shutdown) {}
 +      else { assert!(false) }
 +      let events = nodes[2].node.get_and_clear_pending_msg_events();
 +      assert_eq!(events.len(), 1);
 +      match events[0] {
 +              MessageSendEvent::SendShutdown { node_id, .. } => { assert_eq!(node_id, nodes[0].node.get_our_node_id()) }
 +              _ => panic!("Unexpected event"),
 +      }
 +
 +	// We test that, in case of a non-signaling peer, we don't enforce the committed script at channel opening
 +      let mut flags_no = LocalFeatures::new();
 +      flags_no.unset_upfront_shutdown_script();
 +      let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 1000000, flags_no, flags.clone());
 +      nodes[0].node.close_channel(&OutPoint::new(chan.3.txid(), 0).to_channel_id()).unwrap();
 +      let mut node_1_shutdown = get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id());
 +      node_1_shutdown.scriptpubkey = Builder::new().push_opcode(opcodes::all::OP_RETURN).into_script().to_p2sh();
 +      if let Ok(_) = nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &node_1_shutdown) {}
 +      else { assert!(false) }
 +      let events = nodes[1].node.get_and_clear_pending_msg_events();
 +      assert_eq!(events.len(), 1);
 +      match events[0] {
 +              MessageSendEvent::SendShutdown { node_id, .. } => { assert_eq!(node_id, nodes[0].node.get_our_node_id()) }
 +              _ => panic!("Unexpected event"),
 +      }
 +
 +      // We test that if the user opts out, we provide a zero-length script at channel opening and we are able
 +      // to close the channel smoothly; here the opt-out comes from the channel initiator
 +      let chan = create_announced_chan_between_nodes_with_value(&nodes, 1, 0, 1000000, 1000000, flags.clone(), flags.clone());
 +      nodes[1].node.close_channel(&OutPoint::new(chan.3.txid(), 0).to_channel_id()).unwrap();
 +      let mut node_0_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id());
 +      node_0_shutdown.scriptpubkey = Builder::new().push_opcode(opcodes::all::OP_RETURN).into_script().to_p2sh();
 +      if let Ok(_) = nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &node_0_shutdown) {}
 +      else { assert!(false) }
 +      let events = nodes[0].node.get_and_clear_pending_msg_events();
 +      assert_eq!(events.len(), 1);
 +      match events[0] {
 +              MessageSendEvent::SendShutdown { node_id, .. } => { assert_eq!(node_id, nodes[1].node.get_our_node_id()) }
 +              _ => panic!("Unexpected event"),
 +      }
 +
 +      // We test that if the user opts out, we provide a zero-length script at channel opening and we are able
 +      // to close the channel smoothly; here the opt-out comes from the channel acceptor
 +      let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 1000000, flags.clone(), flags.clone());
 +      nodes[1].node.close_channel(&OutPoint::new(chan.3.txid(), 0).to_channel_id()).unwrap();
 +      let mut node_0_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id());
 +      node_0_shutdown.scriptpubkey = Builder::new().push_opcode(opcodes::all::OP_RETURN).into_script().to_p2sh();
 +      if let Ok(_) = nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &node_0_shutdown) {}
 +      else { assert!(false) }
 +      let events = nodes[0].node.get_and_clear_pending_msg_events();
 +      assert_eq!(events.len(), 2);
 +      match events[0] {
 +              MessageSendEvent::SendShutdown { node_id, .. } => { assert_eq!(node_id, nodes[1].node.get_our_node_id()) }
 +              _ => panic!("Unexpected event"),
 +      }
 +      match events[1] {
 +              MessageSendEvent::SendClosingSigned { node_id, .. } => { assert_eq!(node_id, nodes[1].node.get_our_node_id()) }
 +              _ => panic!("Unexpected event"),
 +      }
 +}
 +
 +#[test]
 +fn test_user_configurable_csv_delay() {
 +      // We test that our channel constructors yield errors when passed an absurd CSV delay
 +
 +      let mut low_our_to_self_config = UserConfig::new();
 +      low_our_to_self_config.own_channel_config.our_to_self_delay = 6;
 +      let mut high_their_to_self_config = UserConfig::new();
 +      high_their_to_self_config.peer_channel_config_limits.their_to_self_delay = 100;
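 +      // low_our_to_self_config asks for a dangerously short delay on our own funds, while
 +      // high_their_to_self_config is willing to accept a peer-demanded delay of up to 100 blocks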
 +      let nodes = create_network(2, &[Some(high_their_to_self_config.clone()), None]);
 +
 +      // We test that config.own_channel_config.our_to_self_delay >= BREAKDOWN_TIMEOUT is enforced in Channel::new_outbound()
 +      let keys_manager: Arc<KeysInterface> = Arc::new(KeysManager::new(&nodes[0].node_seed, Network::Testnet, Arc::new(test_utils::TestLogger::new()), 10, 20));
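 +      // (the trailing arguments to KeysManager::new are a starting time in secs/nanos, used to keep derived keys unique across restarts)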
 +      if let Err(error) = Channel::new_outbound(&test_utils::TestFeeEstimator { sat_per_kw: 253 }, &keys_manager, nodes[1].node.get_our_node_id(), 1000000, 1000000, 0, Arc::new(test_utils::TestLogger::new()), &low_our_to_self_config) {
 +              match error {
 +                      APIError::APIMisuseError { err } => { assert_eq!(err, "Configured with an unreasonable our_to_self_delay putting user funds at risks"); },
 +                      _ => panic!("Unexpected event"),
 +              }
 +      } else { assert!(false) }
 +
 +      // We test that config.own_channel_config.our_to_self_delay >= BREAKDOWN_TIMEOUT is enforced in Channel::new_from_req()
 +      nodes[1].node.create_channel(nodes[0].node.get_our_node_id(), 1000000, 1000000, 42).unwrap();
 +      let mut open_channel = get_event_msg!(nodes[1], MessageSendEvent::SendOpenChannel, nodes[0].node.get_our_node_id());
 +      open_channel.to_self_delay = 200;
 +      if let Err(error) = Channel::new_from_req(&test_utils::TestFeeEstimator { sat_per_kw: 253 }, &keys_manager, nodes[1].node.get_our_node_id(), LocalFeatures::new(), &open_channel, 0, Arc::new(test_utils::TestLogger::new()), &low_our_to_self_config) {
 +              match error {
 +                      ChannelError::Close(err) => { assert_eq!(err, "Configured with an unreasonable our_to_self_delay putting user funds at risks"); },
 +                      _ => panic!("Unexpected event"),
 +              }
 +      } else { assert!(false); }
 +
 +      // We test that msg.to_self_delay <= config.peer_channel_config_limits.their_to_self_delay is enforced in Channel::accept_channel()
 +      nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 1000000, 1000000, 42).unwrap();
 +      nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), LocalFeatures::new(), &get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id())).unwrap();
 +      let mut accept_channel = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
 +      accept_channel.to_self_delay = 200;
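 +      // 200 exceeds the 100-block their_to_self_delay limit configured for node 0 above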
 +      if let Err(error) = nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), LocalFeatures::new(), &accept_channel) {
 +              match error.action {
 +                      ErrorAction::SendErrorMessage { msg } => {
 +                              assert_eq!(msg.data,"They wanted our payments to be delayed by a needlessly long period");
 +                      },
 +                      _ => { assert!(false); }
 +              }
 +      } else { assert!(false); }
 +
 +      // We test that msg.to_self_delay <= config.peer_channel_config_limits.their_to_self_delay is enforced in Channel::new_from_req()
 +      nodes[1].node.create_channel(nodes[0].node.get_our_node_id(), 1000000, 1000000, 42).unwrap();
 +      let mut open_channel = get_event_msg!(nodes[1], MessageSendEvent::SendOpenChannel, nodes[0].node.get_our_node_id());
 +      open_channel.to_self_delay = 200;
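 +      // again 200 exceeds the 100-block limit, this time checked against high_their_to_self_config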
 +      if let Err(error) = Channel::new_from_req(&test_utils::TestFeeEstimator { sat_per_kw: 253 }, &keys_manager, nodes[1].node.get_our_node_id(), LocalFeatures::new(), &open_channel, 0, Arc::new(test_utils::TestLogger::new()), &high_their_to_self_config) {
 +              match error {
 +                      ChannelError::Close(err) => { assert_eq!(err, "They wanted our payments to be delayed by a needlessly long period"); },
 +                      _ => panic!("Unexpected event"),
 +              }
 +      } else { assert!(false); }
 +}
 +
 +#[test]
 +fn test_data_loss_protect() {
 +      // We want to be sure that:
 +      // * we don't broadcast our local commitment tx in case we have fallen behind
 +      // * we close the channel upon detecting that the other side has fallen behind
 +      // * we are able to claim our own outputs thanks to the remote's my_current_per_commitment_point
 +      let mut nodes = create_network(2, &[None, None]);
 +
 +      let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 1000000, LocalFeatures::new(), LocalFeatures::new());
 +
 +      // Cache node A state before any channel update
 +      let previous_node_state = nodes[0].node.encode();
 +      let mut previous_chan_monitor_state = test_utils::TestVecWriter(Vec::new());
 +      nodes[0].chan_monitor.simple_monitor.monitors.lock().unwrap().iter().next().unwrap().1.write_for_disk(&mut previous_chan_monitor_state).unwrap();
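 +      // write_for_disk gives us the serialized monitor snapshot we restore from below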
 +
++      send_payment(&nodes[0], &vec!(&nodes[1])[..], 8000000, 8_000_000);
++      send_payment(&nodes[0], &vec!(&nodes[1])[..], 8000000, 8_000_000);
 +
 +      nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
 +      nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
 +
 +      // Restore node A from previous state
 +      let logger: Arc<Logger> = Arc::new(test_utils::TestLogger::with_id(format!("node {}", 0)));
 +      let chan_monitor = <(Sha256dHash, ChannelMonitor)>::read(&mut ::std::io::Cursor::new(previous_chan_monitor_state.0), Arc::clone(&logger)).unwrap().1;
 +      let chain_monitor = Arc::new(ChainWatchInterfaceUtil::new(Network::Testnet, Arc::clone(&logger)));
 +      let tx_broadcaster = Arc::new(test_utils::TestBroadcaster{txn_broadcasted: Mutex::new(Vec::new())});
 +      let feeest = Arc::new(test_utils::TestFeeEstimator { sat_per_kw: 253 });
 +      let monitor = Arc::new(test_utils::TestChannelMonitor::new(chain_monitor.clone(), tx_broadcaster.clone(), logger.clone(), feeest.clone()));
 +      let mut channel_monitors = HashMap::new();
 +      channel_monitors.insert(OutPoint { txid: chan.3.txid(), index: 0 }, &chan_monitor);
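 +      // ChannelManagerReadArgs expects deserialized monitors keyed by their funding outpoint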
 +      let node_state_0 = <(Sha256dHash, ChannelManager)>::read(&mut ::std::io::Cursor::new(previous_node_state), ChannelManagerReadArgs {
 +              keys_manager: Arc::new(keysinterface::KeysManager::new(&nodes[0].node_seed, Network::Testnet, Arc::clone(&logger), 42, 21)),
 +              fee_estimator: feeest.clone(),
 +              monitor: monitor.clone(),
 +              chain_monitor: chain_monitor.clone(),
 +              logger: Arc::clone(&logger),
 +              tx_broadcaster,
 +              default_config: UserConfig::new(),
 +              channel_monitors: &channel_monitors
 +      }).unwrap().1;
 +      nodes[0].node = Arc::new(node_state_0);
 +      assert!(monitor.add_update_monitor(OutPoint { txid: chan.3.txid(), index: 0 }, chan_monitor.clone()).is_ok());
 +      nodes[0].chan_monitor = monitor;
 +      nodes[0].chain_monitor = chain_monitor;
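 +      // the add_update_monitor call above should have registered exactly one monitor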
 +      check_added_monitors!(nodes[0], 1);
 +
 +      nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id());
 +      nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id());
 +
 +      let reestablish_0 = get_chan_reestablish_msgs!(nodes[1], nodes[0]);
 +
 +      // Check that we update our monitor after learning the per_commitment_point from B
 +      if let Err(err) = nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &reestablish_0[0])  {
 +              match err.action {
 +                      ErrorAction::SendErrorMessage { msg } => {
 +                              assert_eq!(msg.data, "We have fallen behind - we have received proof that if we broadcast remote is going to claim our funds - we can't do any automated broadcasting");
 +                      },
 +                      _ => panic!("Unexpected event!"),
 +              }
 +      } else { assert!(false); }
 +      check_added_monitors!(nodes[0], 1);
 +
 +      {
 +              let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
 +              assert_eq!(node_txn.len(), 0);
 +      }
 +
 +      let mut reestablish_1 = Vec::with_capacity(1);
 +      for msg in nodes[0].node.get_and_clear_pending_msg_events() {
 +              if let MessageSendEvent::SendChannelReestablish { ref node_id, ref msg } = msg {
 +                      assert_eq!(*node_id, nodes[1].node.get_our_node_id());
 +                      reestablish_1.push(msg.clone());
 +              } else if let MessageSendEvent::BroadcastChannelUpdate { .. } = msg {
 +              } else {
 +                      panic!("Unexpected event")
 +              }
 +      }
 +
 +      // Check that B closes the channel upon detecting that A has fallen behind
 +      if let Err(err) = nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &reestablish_1[0]) {
 +              match err.action {
 +                      ErrorAction::SendErrorMessage { msg } => {
 +                              assert_eq!(msg.data, "Peer attempted to reestablish channel with a very old local commitment transaction"); },
 +                      _ => panic!("Unexpected event!"),
 +              }
 +      } else { assert!(false); }
 +
 +      let events = nodes[1].node.get_and_clear_pending_msg_events();
 +      assert_eq!(events.len(), 1);
 +      match events[0] {
 +              MessageSendEvent::BroadcastChannelUpdate { .. } => {},
 +              _ => panic!("Unexpected event"),
 +      }
 +
 +      // Check that A is able to claim the to_remote output once B's commitment tx confirms
 +      let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
 +      assert_eq!(node_txn.len(), 1);
 +      check_spends!(node_txn[0], chan.3.clone());
 +      assert_eq!(node_txn[0].output.len(), 2);
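 +      // Connect B's commitment tx on A's chain view so A's monitor can surface a SpendableOutputs claim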
 +      let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42};
 +      nodes[0].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![node_txn[0].clone()]}, 1);
 +      let spend_txn = check_spendable_outputs!(nodes[0], 1);
 +      assert_eq!(spend_txn.len(), 1);
 +      check_spends!(spend_txn[0], node_txn[0].clone());
 +}
++
++#[test]
++fn test_check_htlc_underpaying() {
++      // Send a payment through A -> B, but A maliciously
++      // sends a probe payment (i.e. less than the expected value)
++      // to B; B should refuse the payment.
++
++      let nodes = create_network(2, &[None, None]);
++
++      // Create some initial channels
++      create_announced_chan_between_nodes(&nodes, 0, 1, LocalFeatures::new(), LocalFeatures::new());
++
++      let (payment_preimage, _) = route_payment(&nodes[0], &[&nodes[1]], 10_000);
++
++      // Node B expects a payment of 100_000 but receives 10_000;
++      // fail the HTLC as if we didn't know the preimage.
++      nodes[1].node.claim_funds(payment_preimage, 100_000);
++      nodes[1].node.process_pending_htlc_forwards();
++
++      let events = nodes[1].node.get_and_clear_pending_msg_events();
++      assert_eq!(events.len(), 1);
++      let (update_fail_htlc, commitment_signed) = match events[0] {
++              MessageSendEvent::UpdateHTLCs { node_id: _ , updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, ref commitment_signed } } => {
++                      assert!(update_add_htlcs.is_empty());
++                      assert!(update_fulfill_htlcs.is_empty());
++                      assert_eq!(update_fail_htlcs.len(), 1);
++                      assert!(update_fail_malformed_htlcs.is_empty());
++                      assert!(update_fee.is_none());
++                      (update_fail_htlcs[0].clone(), commitment_signed)
++              },
++              _ => panic!("Unexpected event"),
++      };
++      check_added_monitors!(nodes[1], 1);
++
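++      // Deliver the failure to A and run the full commitment_signed/RAA handshake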
++      nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &update_fail_htlc).unwrap();
++      commitment_signed_dance!(nodes[0], nodes[1], commitment_signed, false, true);
++
++      let events = nodes[0].node.get_and_clear_pending_events();
++      assert_eq!(events.len(), 1);
++      if let &Event::PaymentFailed { payment_hash: _, ref rejected_by_dest, ref error_code } = &events[0] {
++              assert_eq!(*rejected_by_dest, true);
++              assert_eq!(error_code.unwrap(), 0x4000|15);
++      } else {
++              panic!("Unexpected event");
++      }
++      nodes[1].node.get_and_clear_pending_events();
++}