Merge pull request #310 from ariard/2019-02-clarify-send-htlc-policy
authorMatt Corallo <649246+TheBlueMatt@users.noreply.github.com>
Fri, 2 Aug 2019 19:30:41 +0000 (19:30 +0000)
committerGitHub <noreply@github.com>
Fri, 2 Aug 2019 19:30:41 +0000 (19:30 +0000)
Clarify policy applied in send htlc error msgs

1  2 
src/ln/channel.rs
src/ln/functional_test_utils.rs
src/ln/functional_tests.rs

diff --combined src/ln/channel.rs
index 5abbd4a29d0ea71cfa69b7b60363c936ccffea29,07fb3474a5321419db09ca222fd1f10dea6d23df..9dfa4c459714e600f7a9387f7acc91c28b36b70c
@@@ -2,23 -2,22 +2,23 @@@ use bitcoin::blockdata::block::BlockHea
  use bitcoin::blockdata::script::{Script,Builder};
  use bitcoin::blockdata::transaction::{TxIn, TxOut, Transaction, SigHashType};
  use bitcoin::blockdata::opcodes;
 -use bitcoin::util::hash::{BitcoinHash, Sha256dHash};
 +use bitcoin::util::hash::BitcoinHash;
  use bitcoin::util::bip143;
  use bitcoin::consensus::encode::{self, Encodable, Decodable};
  
  use bitcoin_hashes::{Hash, HashEngine};
  use bitcoin_hashes::sha256::Hash as Sha256;
  use bitcoin_hashes::hash160::Hash as Hash160;
 +use bitcoin_hashes::sha256d::Hash as Sha256dHash;
  
  use secp256k1::key::{PublicKey,SecretKey};
  use secp256k1::{Secp256k1,Signature};
  use secp256k1;
  
  use ln::msgs;
 -use ln::msgs::{DecodeError, OptionalField};
 +use ln::msgs::{DecodeError, OptionalField, LocalFeatures};
  use ln::channelmonitor::ChannelMonitor;
 -use ln::channelmanager::{PendingHTLCStatus, HTLCSource, HTLCFailReason, HTLCFailureMsg, PendingForwardHTLCInfo, RAACommitmentOrder, PaymentPreimage, PaymentHash};
 +use ln::channelmanager::{PendingHTLCStatus, HTLCSource, HTLCFailReason, HTLCFailureMsg, PendingForwardHTLCInfo, RAACommitmentOrder, PaymentPreimage, PaymentHash, BREAKDOWN_TIMEOUT, MAX_LOCAL_BREAKDOWN_TIMEOUT};
  use ln::chan_utils::{TxCreationKeys,HTLCOutputInCommitment,HTLC_SUCCESS_TX_WEIGHT,HTLC_TIMEOUT_TX_WEIGHT};
  use ln::chan_utils;
  use chain::chaininterface::{FeeEstimator,ConfirmationTarget};
@@@ -33,6 -32,7 +33,6 @@@ use util::config::{UserConfig,ChannelCo
  use std;
  use std::default::Default;
  use std::{cmp,mem};
 -use std::time::Instant;
  use std::sync::{Arc};
  
  #[cfg(test)]
@@@ -106,19 -106,19 +106,19 @@@ enum OutboundHTLCState 
        Committed,
        /// Remote removed this (outbound) HTLC. We're waiting on their commitment_signed to finalize
        /// the change (though they'll need to revoke before we fail the payment).
 -      RemoteRemoved,
 +      RemoteRemoved(Option<HTLCFailReason>),
        /// Remote removed this and sent a commitment_signed (implying we've revoke_and_ack'ed it), but
        /// the remote side hasn't yet revoked their previous state, which we need them to do before we
        /// can do any backwards failing. Implies AwaitingRemoteRevoke.
        /// We also have not yet removed this HTLC in a commitment_signed message, and are waiting on a
        /// remote revoke_and_ack on a previous state before we can do so.
 -      AwaitingRemoteRevokeToRemove,
 +      AwaitingRemoteRevokeToRemove(Option<HTLCFailReason>),
        /// Remote removed this and sent a commitment_signed (implying we've revoke_and_ack'ed it), but
        /// the remote side hasn't yet revoked their previous state, which we need them to do before we
        /// can do any backwards failing. Implies AwaitingRemoteRevoke.
        /// We have removed this HTLC in our latest commitment_signed and are now just waiting on a
        /// revoke_and_ack to drop completely.
 -      AwaitingRemovedRemoteRevoke,
 +      AwaitingRemovedRemoteRevoke(Option<HTLCFailReason>),
  }
  
  struct OutboundHTLCOutput {
        payment_hash: PaymentHash,
        state: OutboundHTLCState,
        source: HTLCSource,
 -      /// If we're in a removed state, set if they failed, otherwise None
 -      fail_reason: Option<HTLCFailReason>,
  }
  
  /// See AwaitingRemoteRevoke ChannelState for more info
  enum HTLCUpdateAwaitingACK {
 -      AddHTLC {
 +      AddHTLC { // TODO: Time out if we're getting close to cltv_expiry
                // always outbound
                amount_msat: u64,
                cltv_expiry: u32,
                payment_hash: PaymentHash,
                source: HTLCSource,
                onion_routing_packet: msgs::OnionPacket,
 -              time_created: Instant, //TODO: Some kind of timeout thing-a-majig
        },
        ClaimHTLC {
                payment_preimage: PaymentPreimage,
@@@ -181,9 -184,9 +181,9 @@@ enum ChannelState 
        /// "disconnected" and no updates are allowed until after we've done a channel_reestablish
        /// dance.
        PeerDisconnected = (1 << 7),
 -      /// Flag which is set on ChannelFunded and FundingSent indicating the user has told us they
 -      /// failed to update our ChannelMonitor somewhere and we should pause sending any outbound
 -      /// messages until they've managed to do so.
 +      /// Flag which is set on ChannelFunded, FundingCreated, and FundingSent indicating the user has
 +      /// told us they failed to update our ChannelMonitor somewhere and we should pause sending any
 +      /// outbound messages until they've managed to do so.
        MonitorUpdateFailed = (1 << 8),
        /// Flag which implies that we have sent a commitment_signed but are awaiting the responding
        /// revoke_and_ack message. During this time period, we can't generate new commitment_signed
@@@ -235,22 -238,19 +235,22 @@@ pub(super) struct Channel 
        cur_local_commitment_transaction_number: u64,
        cur_remote_commitment_transaction_number: u64,
        value_to_self_msat: u64, // Excluding all pending_htlcs, excluding fees
 -      /// Upon receipt of a channel_reestablish we have to figure out whether to send a
 -      /// revoke_and_ack first or a commitment update first. Generally, we prefer to send
 -      /// revoke_and_ack first, but if we had a pending commitment update of our own waiting on a
 -      /// remote revoke when we received the latest commitment update from the remote we have to make
 -      /// sure that commitment update gets resent first.
 -      received_commitment_while_awaiting_raa: bool,
        pending_inbound_htlcs: Vec<InboundHTLCOutput>,
        pending_outbound_htlcs: Vec<OutboundHTLCOutput>,
        holding_cell_htlc_updates: Vec<HTLCUpdateAwaitingACK>,
  
 +      /// When resending CS/RAA messages on channel monitor restoration or on reconnect, we always
 +      /// need to ensure we resend them in the order we originally generated them. Note that because
 +      /// there can only ever be one in-flight CS and/or one in-flight RAA at any time, it is
 +      /// sufficient to simply set this to the opposite of any message we are generating as we
 +      /// generate it. ie when we generate a CS, we set this to RAAFirst as, if there is a pending
 +      /// in-flight RAA to resend, it will have been the first thing we generated, and thus we should
 +      /// send it first.
 +      resend_order: RAACommitmentOrder,
 +
 +      monitor_pending_funding_locked: bool,
        monitor_pending_revoke_and_ack: bool,
        monitor_pending_commitment_signed: bool,
 -      monitor_pending_order: Option<RAACommitmentOrder>,
        monitor_pending_forwards: Vec<(PendingForwardHTLCInfo, u64)>,
        monitor_pending_failures: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>,
  
        their_htlc_minimum_msat: u64,
        our_htlc_minimum_msat: u64,
        their_to_self_delay: u16,
 -      //implied by BREAKDOWN_TIMEOUT: our_to_self_delay: u16,
 +      our_to_self_delay: u16,
        #[cfg(test)]
        pub their_max_accepted_htlcs: u16,
        #[cfg(not(test))]
@@@ -348,6 -348,14 +348,6 @@@ pub const OUR_MAX_HTLCS: u16 = 50; //TO
  /// on ice until the funding transaction gets more confirmations, but the LN protocol doesn't
  /// really allow for this, so instead we're stuck closing it out at that point.
  const UNCONF_THRESHOLD: u32 = 6;
 -/// The amount of time we require our counterparty wait to claim their money (ie time between when
 -/// we, or our watchtower, must check for them having broadcast a theft transaction).
 -#[cfg(not(test))]
 -const BREAKDOWN_TIMEOUT: u16 = 6 * 24 * 7; //TODO?
 -#[cfg(test)]
 -pub const BREAKDOWN_TIMEOUT: u16 = 6 * 24 * 7; //TODO?
 -/// The amount of time we're willing to wait to claim money back to us
 -const MAX_LOCAL_BREAKDOWN_TIMEOUT: u16 = 6 * 24 * 14;
  /// Exposing these two constants for use in test in ChannelMonitor
  pub const COMMITMENT_TX_BASE_WEIGHT: u64 = 724;
  pub const COMMITMENT_TX_WEIGHT_PER_HTLC: u64 = 172;
@@@ -403,6 -411,13 +403,6 @@@ impl Channel 
                1000 // TODO
        }
  
 -      fn derive_minimum_depth(_channel_value_satoshis_msat: u64, _value_to_self_msat: u64) -> u32 {
 -              // Note that in order to comply with BOLT 7 announcement_signatures requirements this must
 -              // be at least 6.
 -              const CONF_TARGET: u32 = 12; //TODO: Should be much higher
 -              CONF_TARGET
 -      }
 -
        // Constructors:
        pub fn new_outbound(fee_estimator: &FeeEstimator, keys_provider: &Arc<KeysInterface>, their_node_id: PublicKey, channel_value_satoshis: u64, push_msat: u64, user_id: u64, logger: Arc<Logger>, config: &UserConfig) -> Result<Channel, APIError> {
                let chan_keys = keys_provider.get_channel_keys(false);
                if push_msat > channel_value_satoshis * 1000 {
                        return Err(APIError::APIMisuseError{err: "push value > channel value"});
                }
 +              if config.own_channel_config.our_to_self_delay < BREAKDOWN_TIMEOUT {
 +                      return Err(APIError::APIMisuseError{err: "Configured with an unreasonable our_to_self_delay putting user funds at risks"});
 +              }
  
  
                let background_feerate = fee_estimator.get_est_sat_per_1000_weight(ConfirmationTarget::Background);
  
                let secp_ctx = Secp256k1::new();
                let channel_monitor = ChannelMonitor::new(&chan_keys.revocation_base_key, &chan_keys.delayed_payment_base_key,
 -                                                        &chan_keys.htlc_base_key, &chan_keys.payment_base_key, &keys_provider.get_shutdown_pubkey(), BREAKDOWN_TIMEOUT,
 +                                                        &chan_keys.htlc_base_key, &chan_keys.payment_base_key, &keys_provider.get_shutdown_pubkey(), config.own_channel_config.our_to_self_delay,
                                                          keys_provider.get_destination_script(), logger.clone());
  
                Ok(Channel {
                        cur_local_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
                        cur_remote_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
                        value_to_self_msat: channel_value_satoshis * 1000 - push_msat,
 -                      received_commitment_while_awaiting_raa: false,
  
                        pending_inbound_htlcs: Vec::new(),
                        pending_outbound_htlcs: Vec::new(),
                        next_remote_htlc_id: 0,
                        channel_update_count: 1,
  
 +                      resend_order: RAACommitmentOrder::CommitmentFirst,
 +
 +                      monitor_pending_funding_locked: false,
                        monitor_pending_revoke_and_ack: false,
                        monitor_pending_commitment_signed: false,
 -                      monitor_pending_order: None,
                        monitor_pending_forwards: Vec::new(),
                        monitor_pending_failures: Vec::new(),
  
                        their_htlc_minimum_msat: 0,
                        our_htlc_minimum_msat: Channel::derive_our_htlc_minimum_msat(feerate),
                        their_to_self_delay: 0,
 +                      our_to_self_delay: config.own_channel_config.our_to_self_delay,
                        their_max_accepted_htlcs: 0,
                        minimum_depth: 0, // Filled in in accept_channel
  
  
        /// Creates a new channel from a remote sides' request for one.
        /// Assumes chain_hash has already been checked and corresponds with what we expect!
 -      pub fn new_from_req(fee_estimator: &FeeEstimator, keys_provider: &Arc<KeysInterface>, their_node_id: PublicKey, msg: &msgs::OpenChannel, user_id: u64, logger: Arc<Logger>, config: &UserConfig) -> Result<Channel, ChannelError> {
 +      pub fn new_from_req(fee_estimator: &FeeEstimator, keys_provider: &Arc<KeysInterface>, their_node_id: PublicKey, their_local_features: LocalFeatures, msg: &msgs::OpenChannel, user_id: u64, logger: Arc<Logger>, config: &UserConfig) -> Result<Channel, ChannelError> {
                let chan_keys = keys_provider.get_channel_keys(true);
                let mut local_config = (*config).channel_options.clone();
  
 +              if config.own_channel_config.our_to_self_delay < BREAKDOWN_TIMEOUT {
 +                      return Err(ChannelError::Close("Configured with an unreasonable our_to_self_delay putting user funds at risks"));
 +              }
 +
                // Check sanity of message fields:
                if msg.funding_satoshis >= MAX_FUNDING_SATOSHIS {
                        return Err(ChannelError::Close("funding value > 2^24"));
                }
                Channel::check_remote_fee(fee_estimator, msg.feerate_per_kw)?;
  
 -              if msg.to_self_delay > MAX_LOCAL_BREAKDOWN_TIMEOUT {
 +              if msg.to_self_delay > config.peer_channel_config_limits.their_to_self_delay || msg.to_self_delay > MAX_LOCAL_BREAKDOWN_TIMEOUT {
                        return Err(ChannelError::Close("They wanted our payments to be delayed by a needlessly long period"));
                }
                if msg.max_accepted_htlcs < 1 {
                }
  
                // Now check against optional parameters as set by config...
 -              if msg.funding_satoshis < config.channel_limits.min_funding_satoshis {
 +              if msg.funding_satoshis < config.peer_channel_config_limits.min_funding_satoshis {
                        return Err(ChannelError::Close("funding satoshis is less than the user specified limit"));
                }
 -              if msg.htlc_minimum_msat > config.channel_limits.max_htlc_minimum_msat {
 +              if msg.htlc_minimum_msat > config.peer_channel_config_limits.max_htlc_minimum_msat {
                        return Err(ChannelError::Close("htlc minimum msat is higher than the user specified limit"));
                }
 -              if msg.max_htlc_value_in_flight_msat < config.channel_limits.min_max_htlc_value_in_flight_msat {
 +              if msg.max_htlc_value_in_flight_msat < config.peer_channel_config_limits.min_max_htlc_value_in_flight_msat {
                        return Err(ChannelError::Close("max htlc value in flight msat is less than the user specified limit"));
                }
 -              if msg.channel_reserve_satoshis > config.channel_limits.max_channel_reserve_satoshis {
 +              if msg.channel_reserve_satoshis > config.peer_channel_config_limits.max_channel_reserve_satoshis {
                        return Err(ChannelError::Close("channel reserve satoshis is higher than the user specified limit"));
                }
 -              if msg.max_accepted_htlcs < config.channel_limits.min_max_accepted_htlcs {
 +              if msg.max_accepted_htlcs < config.peer_channel_config_limits.min_max_accepted_htlcs {
                        return Err(ChannelError::Close("max accepted htlcs is less than the user specified limit"));
                }
 -              if msg.dust_limit_satoshis < config.channel_limits.min_dust_limit_satoshis {
 +              if msg.dust_limit_satoshis < config.peer_channel_config_limits.min_dust_limit_satoshis {
                        return Err(ChannelError::Close("dust limit satoshis is less than the user specified limit"));
                }
 -              if msg.dust_limit_satoshis > config.channel_limits.max_dust_limit_satoshis {
 +              if msg.dust_limit_satoshis > config.peer_channel_config_limits.max_dust_limit_satoshis {
                        return Err(ChannelError::Close("dust limit satoshis is greater than the user specified limit"));
                }
  
                // Convert things into internal flags and prep our state:
  
                let their_announce = if (msg.channel_flags & 1) == 1 { true } else { false };
 -              if config.channel_limits.force_announced_channel_preference {
 +              if config.peer_channel_config_limits.force_announced_channel_preference {
                        if local_config.announced_channel != their_announce {
                                return Err(ChannelError::Close("Peer tried to open channel but their announcement preference is different from ours"));
                        }
  
                let secp_ctx = Secp256k1::new();
                let mut channel_monitor = ChannelMonitor::new(&chan_keys.revocation_base_key, &chan_keys.delayed_payment_base_key,
 -                                                            &chan_keys.htlc_base_key, &chan_keys.payment_base_key, &keys_provider.get_shutdown_pubkey(), BREAKDOWN_TIMEOUT,
 +                                                            &chan_keys.htlc_base_key, &chan_keys.payment_base_key, &keys_provider.get_shutdown_pubkey(), config.own_channel_config.our_to_self_delay,
                                                              keys_provider.get_destination_script(), logger.clone());
                channel_monitor.set_their_base_keys(&msg.htlc_basepoint, &msg.delayed_payment_basepoint);
                channel_monitor.set_their_to_self_delay(msg.to_self_delay);
  
 +              let their_shutdown_scriptpubkey = if their_local_features.supports_upfront_shutdown_script() {
 +                      match &msg.shutdown_scriptpubkey {
 +                              &OptionalField::Present(ref script) => {
 +                                      // Peer is signaling upfront_shutdown and has provided a non-accepted scriptpubkey format. We enforce it while receiving shutdown msg
 +                                      if script.is_p2pkh() || script.is_p2sh() || script.is_v0_p2wsh() || script.is_v0_p2wpkh() {
 +                                              Some(script.clone())
 +                                      // Peer is signaling upfront_shutdown and has opt-out with a 0-length script. We don't enforce anything
 +                                      } else if script.len() == 0 {
 +                                              None
 +                                      // Peer is signaling upfront_shutdown and has provided a non-accepted scriptpubkey format. Fail the channel
 +                                      } else {
 +                                              return Err(ChannelError::Close("Peer is signaling upfront_shutdown but has provided a non-accepted scriptpubkey format"));
 +                                      }
 +                              },
 +                              // Peer is signaling upfront shutdown but don't opt-out with correct mechanism (a.k.a 0-length script). Peer looks buggy, we fail the channel
 +                              &OptionalField::Absent => {
 +                                      return Err(ChannelError::Close("Peer is signaling upfront_shutdown but we don't get any script. Use 0-length script to opt-out"));
 +                              }
 +                      }
 +              } else { None };
 +
                let mut chan = Channel {
                        user_id: user_id,
                        config: local_config,
                        cur_local_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
                        cur_remote_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
                        value_to_self_msat: msg.push_msat,
 -                      received_commitment_while_awaiting_raa: false,
  
                        pending_inbound_htlcs: Vec::new(),
                        pending_outbound_htlcs: Vec::new(),
                        next_remote_htlc_id: 0,
                        channel_update_count: 1,
  
 +                      resend_order: RAACommitmentOrder::CommitmentFirst,
 +
 +                      monitor_pending_funding_locked: false,
                        monitor_pending_revoke_and_ack: false,
                        monitor_pending_commitment_signed: false,
 -                      monitor_pending_order: None,
                        monitor_pending_forwards: Vec::new(),
                        monitor_pending_failures: Vec::new(),
  
                        their_htlc_minimum_msat: msg.htlc_minimum_msat,
                        our_htlc_minimum_msat: Channel::derive_our_htlc_minimum_msat(msg.feerate_per_kw as u64),
                        their_to_self_delay: msg.to_self_delay,
 +                      our_to_self_delay: config.own_channel_config.our_to_self_delay,
                        their_max_accepted_htlcs: msg.max_accepted_htlcs,
 -                      minimum_depth: Channel::derive_minimum_depth(msg.funding_satoshis*1000, msg.push_msat),
 +                      minimum_depth: config.own_channel_config.minimum_depth,
  
                        their_funding_pubkey: Some(msg.funding_pubkey),
                        their_revocation_basepoint: Some(msg.revocation_basepoint),
                        their_prev_commitment_point: None,
                        their_node_id: their_node_id,
  
 -                      their_shutdown_scriptpubkey: None,
 +                      their_shutdown_scriptpubkey,
  
                        channel_monitor: channel_monitor,
  
                        let (include, state_name) = match htlc.state {
                                OutboundHTLCState::LocalAnnounced(_) => (generated_by_local, "LocalAnnounced"),
                                OutboundHTLCState::Committed => (true, "Committed"),
 -                              OutboundHTLCState::RemoteRemoved => (generated_by_local, "RemoteRemoved"),
 -                              OutboundHTLCState::AwaitingRemoteRevokeToRemove => (generated_by_local, "AwaitingRemoteRevokeToRemove"),
 -                              OutboundHTLCState::AwaitingRemovedRemoteRevoke => (false, "AwaitingRemovedRemoteRevoke"),
 +                              OutboundHTLCState::RemoteRemoved(_) => (generated_by_local, "RemoteRemoved"),
 +                              OutboundHTLCState::AwaitingRemoteRevokeToRemove(_) => (generated_by_local, "AwaitingRemoteRevokeToRemove"),
 +                              OutboundHTLCState::AwaitingRemovedRemoteRevoke(_) => (false, "AwaitingRemovedRemoteRevoke"),
                        };
  
                        if include {
                        } else {
                                log_trace!(self, "   ...not including outbound HTLC {} (hash {}) with value {} due to state ({})", htlc.htlc_id, log_bytes!(htlc.payment_hash.0), htlc.amount_msat, state_name);
                                match htlc.state {
 -                                      OutboundHTLCState::AwaitingRemoteRevokeToRemove|OutboundHTLCState::AwaitingRemovedRemoteRevoke => {
 -                                              if htlc.fail_reason.is_none() {
 -                                                      value_to_self_msat_offset -= htlc.amount_msat as i64;
 -                                              }
 +                                      OutboundHTLCState::AwaitingRemoteRevokeToRemove(None)|OutboundHTLCState::AwaitingRemovedRemoteRevoke(None) => {
 +                                              value_to_self_msat_offset -= htlc.amount_msat as i64;
                                        },
 -                                      OutboundHTLCState::RemoteRemoved => {
 -                                              if !generated_by_local && htlc.fail_reason.is_none() {
 +                                      OutboundHTLCState::RemoteRemoved(None) => {
 +                                              if !generated_by_local {
                                                        value_to_self_msat_offset -= htlc.amount_msat as i64;
                                                }
                                        },
                        }
                }
  
 -
                let value_to_self_msat: i64 = (self.value_to_self_msat - local_htlc_total_msat) as i64 + value_to_self_msat_offset;
 -              let value_to_remote_msat: i64 = (self.channel_value_satoshis * 1000 - self.value_to_self_msat - remote_htlc_total_msat) as i64 - value_to_self_msat_offset;
 +              assert!(value_to_self_msat >= 0);
 +              // Note that in case they have several just-awaiting-last-RAA fulfills in-progress (ie
 +              // AwaitingRemoteRevokeToRemove or AwaitingRemovedRemoteRevoke) we may have allowed them to
 +              // "violate" their reserve value by couting those against it. Thus, we have to convert
 +              // everything to i64 before subtracting as otherwise we can overflow.
 +              let value_to_remote_msat: i64 = (self.channel_value_satoshis * 1000) as i64 - (self.value_to_self_msat as i64) - (remote_htlc_total_msat as i64) - value_to_self_msat_offset;
 +              assert!(value_to_remote_msat >= 0);
  
                #[cfg(debug_assertions)]
                {
                        // Make sure that the to_self/to_remote is always either past the appropriate
                        // channel_reserve *or* it is making progress towards it.
 -                      // TODO: This should happen after fee calculation, but we don't handle that correctly
 -                      // yet!
                        let mut max_commitment_tx_output = if generated_by_local {
                                self.max_commitment_tx_output_local.lock().unwrap()
                        } else {
                let value_to_b = if local { value_to_remote } else { value_to_self };
  
                if value_to_a >= (dust_limit_satoshis as i64) {
 +                      log_trace!(self, "   ...including {} output with value {}", if local { "to_local" } else { "to_remote" }, value_to_a);
                        txouts.push((TxOut {
                                script_pubkey: chan_utils::get_revokeable_redeemscript(&keys.revocation_key,
 -                                                                                     if local { self.their_to_self_delay } else { BREAKDOWN_TIMEOUT },
 +                                                                                     if local { self.their_to_self_delay } else { self.our_to_self_delay },
                                                                                       &keys.a_delayed_payment_key).to_v0_p2wsh(),
                                value: value_to_a as u64
                        }, None));
                }
  
                if value_to_b >= (dust_limit_satoshis as i64) {
 +                      log_trace!(self, "   ...including {} output with value {}", if local { "to_remote" } else { "to_local" }, value_to_b);
                        txouts.push((TxOut {
                                script_pubkey: Builder::new().push_opcode(opcodes::all::OP_PUSHBYTES_0)
                                                             .push_slice(&Hash160::hash(&keys.b_payment_key.serialize())[..])
                        }, None));
                }
  
 -              transaction_utils::sort_outputs(&mut txouts);
 +              transaction_utils::sort_outputs(&mut txouts, |a, b| {
 +                      if let &Some(ref a_htlc) = a {
 +                              if let &Some(ref b_htlc) = b {
 +                                      a_htlc.0.cltv_expiry.cmp(&b_htlc.0.cltv_expiry)
 +                                              // Note that due to hash collisions, we have to have a fallback comparison
 +                                              // here for fuzztarget mode (otherwise at least chanmon_fail_consistency
 +                                              // may fail)!
 +                                              .then(a_htlc.0.payment_hash.0.cmp(&b_htlc.0.payment_hash.0))
 +                              // For non-HTLC outputs, if they're copying our SPK we don't really care if we
 +                              // close the channel due to mismatches - they're doing something dumb:
 +                              } else { cmp::Ordering::Equal }
 +                      } else { cmp::Ordering::Equal }
 +              });
  
                let mut outputs: Vec<TxOut> = Vec::with_capacity(txouts.len());
                let mut htlcs_included: Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)> = Vec::with_capacity(txouts.len() + included_dust_htlcs.len());
                        }, ()));
                }
  
 -              transaction_utils::sort_outputs(&mut txouts);
 +              transaction_utils::sort_outputs(&mut txouts, |_, _| { cmp::Ordering::Equal }); // Ordering doesnt matter if they used our pubkey...
  
                let mut outputs: Vec<TxOut> = Vec::new();
                for out in txouts.drain(..) {
        /// @local is used only to convert relevant internal structures which refer to remote vs local
        /// to decide value of outputs and direction of HTLCs.
        fn build_htlc_transaction(&self, prev_hash: &Sha256dHash, htlc: &HTLCOutputInCommitment, local: bool, keys: &TxCreationKeys, feerate_per_kw: u64) -> Transaction {
 -              chan_utils::build_htlc_transaction(prev_hash, feerate_per_kw, if local { self.their_to_self_delay } else { BREAKDOWN_TIMEOUT }, htlc, &keys.a_delayed_payment_key, &keys.revocation_key)
 +              chan_utils::build_htlc_transaction(prev_hash, feerate_per_kw, if local { self.their_to_self_delay } else { self.our_to_self_delay }, htlc, &keys.a_delayed_payment_key, &keys.revocation_key)
        }
  
        fn create_htlc_tx_signature(&self, tx: &Transaction, htlc: &HTLCOutputInCommitment, keys: &TxCreationKeys) -> Result<(Script, Signature, bool), ChannelError> {
  
        // Message handlers:
  
 -      pub fn accept_channel(&mut self, msg: &msgs::AcceptChannel, config: &UserConfig) -> Result<(), ChannelError> {
 +      pub fn accept_channel(&mut self, msg: &msgs::AcceptChannel, config: &UserConfig, their_local_features: LocalFeatures) -> Result<(), ChannelError> {
                // Check sanity of message fields:
                if !self.channel_outbound {
                        return Err(ChannelError::Close("Got an accept_channel message from an inbound peer"));
                if msg.htlc_minimum_msat >= (self.channel_value_satoshis - msg.channel_reserve_satoshis) * 1000 {
                        return Err(ChannelError::Close("Minimum htlc value is full channel value"));
                }
 -              if msg.to_self_delay > MAX_LOCAL_BREAKDOWN_TIMEOUT {
 +              if msg.to_self_delay > config.peer_channel_config_limits.their_to_self_delay || msg.to_self_delay > MAX_LOCAL_BREAKDOWN_TIMEOUT {
                        return Err(ChannelError::Close("They wanted our payments to be delayed by a needlessly long period"));
                }
                if msg.max_accepted_htlcs < 1 {
                }
  
                // Now check against optional parameters as set by config...
 -              if msg.htlc_minimum_msat > config.channel_limits.max_htlc_minimum_msat {
 +              if msg.htlc_minimum_msat > config.peer_channel_config_limits.max_htlc_minimum_msat {
                        return Err(ChannelError::Close("htlc minimum msat is higher than the user specified limit"));
                }
 -              if msg.max_htlc_value_in_flight_msat < config.channel_limits.min_max_htlc_value_in_flight_msat {
 +              if msg.max_htlc_value_in_flight_msat < config.peer_channel_config_limits.min_max_htlc_value_in_flight_msat {
                        return Err(ChannelError::Close("max htlc value in flight msat is less than the user specified limit"));
                }
 -              if msg.channel_reserve_satoshis > config.channel_limits.max_channel_reserve_satoshis {
 +              if msg.channel_reserve_satoshis > config.peer_channel_config_limits.max_channel_reserve_satoshis {
                        return Err(ChannelError::Close("channel reserve satoshis is higher than the user specified limit"));
                }
 -              if msg.max_accepted_htlcs < config.channel_limits.min_max_accepted_htlcs {
 +              if msg.max_accepted_htlcs < config.peer_channel_config_limits.min_max_accepted_htlcs {
                        return Err(ChannelError::Close("max accepted htlcs is less than the user specified limit"));
                }
 -              if msg.dust_limit_satoshis < config.channel_limits.min_dust_limit_satoshis {
 +              if msg.dust_limit_satoshis < config.peer_channel_config_limits.min_dust_limit_satoshis {
                        return Err(ChannelError::Close("dust limit satoshis is less than the user specified limit"));
                }
 -              if msg.dust_limit_satoshis > config.channel_limits.max_dust_limit_satoshis {
 +              if msg.dust_limit_satoshis > config.peer_channel_config_limits.max_dust_limit_satoshis {
                        return Err(ChannelError::Close("dust limit satoshis is greater than the user specified limit"));
                }
 -              if msg.minimum_depth > config.channel_limits.max_minimum_depth {
 +              if msg.minimum_depth > config.peer_channel_config_limits.max_minimum_depth {
                        return Err(ChannelError::Close("We consider the minimum depth to be unreasonably large"));
                }
  
 +              let their_shutdown_scriptpubkey = if their_local_features.supports_upfront_shutdown_script() {
 +                      match &msg.shutdown_scriptpubkey {
 +                              &OptionalField::Present(ref script) => {
 +                                      // Peer is signaling upfront_shutdown and has provided an accepted scriptpubkey format. We enforce it while receiving shutdown msg
 +                                      if script.is_p2pkh() || script.is_p2sh() || script.is_v0_p2wsh() || script.is_v0_p2wpkh() {
 +                                              Some(script.clone())
 +                                      // Peer is signaling upfront_shutdown and has opted out with a 0-length script. We don't enforce anything
 +                                      } else if script.len() == 0 {
 +                                              None
 +                                      // Peer is signaling upfront_shutdown and has provided a non-accepted scriptpubkey format. Fail the channel
 +                                      } else {
 +                                              return Err(ChannelError::Close("Peer is signaling upfront_shutdown but has provided a non-accepted scriptpubkey format"));
 +                                      }
 +                              },
 +                              // Peer is signaling upfront shutdown but hasn't opted out with the correct mechanism (i.e. a 0-length script). Peer looks buggy, so we fail the channel
 +                              &OptionalField::Absent => {
 +                                      return Err(ChannelError::Close("Peer is signaling upfront_shutdown but we don't get any script. Use 0-length script to opt-out"));
 +                              }
 +                      }
 +              } else { None };
 +
                self.channel_monitor.set_their_base_keys(&msg.htlc_basepoint, &msg.delayed_payment_basepoint);
  
                self.their_dust_limit_satoshis = msg.dust_limit_satoshis;
                self.their_delayed_payment_basepoint = Some(msg.delayed_payment_basepoint);
                self.their_htlc_basepoint = Some(msg.htlc_basepoint);
                self.their_cur_commitment_point = Some(msg.first_per_commitment_point);
 +              self.their_shutdown_scriptpubkey = their_shutdown_scriptpubkey;
  
                let obscure_factor = self.get_commitment_transaction_number_obscure_factor();
                self.channel_monitor.set_commitment_obscure_factor(obscure_factor);
                if !self.channel_outbound {
                        return Err(ChannelError::Close("Received funding_signed for an inbound channel?"));
                }
 -              if self.channel_state != ChannelState::FundingCreated as u32 {
 +              if self.channel_state & !(ChannelState::MonitorUpdateFailed as u32) != ChannelState::FundingCreated as u32 {
                        return Err(ChannelError::Close("Received funding_signed in strange state!"));
                }
                if self.channel_monitor.get_min_seen_secret() != (1 << 48) ||
                self.sign_commitment_transaction(&mut local_initial_commitment_tx, &msg.signature);
                self.channel_monitor.provide_latest_local_commitment_tx_info(local_initial_commitment_tx.clone(), local_keys, self.feerate_per_kw, Vec::new());
                self.last_local_commitment_txn = vec![local_initial_commitment_tx];
 -              self.channel_state = ChannelState::FundingSent as u32;
 +              self.channel_state = ChannelState::FundingSent as u32 | (self.channel_state & (ChannelState::MonitorUpdateFailed as u32));
                self.cur_local_commitment_transaction_number -= 1;
  
 -              Ok(self.channel_monitor.clone())
 +              if self.channel_state & (ChannelState::MonitorUpdateFailed as u32) == 0 {
 +                      Ok(self.channel_monitor.clone())
 +              } else {
 +                      Err(ChannelError::Ignore("Previous monitor update failure prevented funding_signed from allowing funding broadcast"))
 +              }
        }
  
        pub fn funding_locked(&mut self, msg: &msgs::FundingLocked) -> Result<(), ChannelError> {
                } else if non_shutdown_state == (ChannelState::FundingSent as u32 | ChannelState::OurFundingLocked as u32) {
                        self.channel_state = ChannelState::ChannelFunded as u32 | (self.channel_state & MULTI_STATE_FLAGS);
                        self.channel_update_count += 1;
 -              } else if self.channel_state & (ChannelState::ChannelFunded as u32) != 0 &&
 -                              // Note that funding_signed/funding_created will have decremented both by 1!
 -                              self.cur_local_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 1 &&
 -                              self.cur_remote_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 1 {
 +              } else if (self.channel_state & (ChannelState::ChannelFunded as u32) != 0 &&
 +                               // Note that funding_signed/funding_created will have decremented both by 1!
 +                               self.cur_local_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 1 &&
 +                               self.cur_remote_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 1) ||
 +                              // If we reconnected before sending our funding locked they may still resend theirs:
 +                              (self.channel_state & (ChannelState::FundingSent as u32 | ChannelState::TheirFundingLocked as u32) ==
 +                                                    (ChannelState::FundingSent as u32 | ChannelState::TheirFundingLocked as u32)) {
                        if self.their_cur_commitment_point != Some(msg.next_per_commitment_point) {
                                return Err(ChannelError::Close("Peer sent a reconnect funding_locked with a different point"));
                        }
                (htlc_outbound_count as u32, htlc_outbound_value_msat)
        }
  
 +      /// Get the available (ie not including pending HTLCs) inbound and outbound balance in msat.
 +      /// Doesn't bother handling the
 +      /// if-we-removed-it-already-but-haven't-fully-resolved-they-can-still-send-an-inbound-HTLC
 +      /// corner case properly.
 +      pub fn get_inbound_outbound_available_balance_msat(&self) -> (u64, u64) {
 +              // Note that we have to handle overflow due to the above case.
 +              (cmp::min(self.channel_value_satoshis as i64 * 1000 - self.value_to_self_msat as i64 - self.get_inbound_pending_htlc_stats().1 as i64, 0) as u64,
 +              cmp::min(self.value_to_self_msat as i64 - self.get_outbound_pending_htlc_stats().1 as i64, 0) as u64)
 +      }
 +
        pub fn update_add_htlc(&mut self, msg: &msgs::UpdateAddHTLC, pending_forward_state: PendingHTLCStatus) -> Result<(), ChannelError> {
                if (self.channel_state & (ChannelState::ChannelFunded as u32 | ChannelState::RemoteShutdownSent as u32)) != (ChannelState::ChannelFunded as u32) {
                        return Err(ChannelError::Close("Got add HTLC message when channel was not in an operational state"));
                if inbound_htlc_count + 1 > OUR_MAX_HTLCS as u32 {
                        return Err(ChannelError::Close("Remote tried to push more than our max accepted HTLCs"));
                }
-               //TODO: Spec is unclear if this is per-direction or in total (I assume per direction):
                // Check our_max_htlc_value_in_flight_msat
                if htlc_inbound_value_msat + msg.amount_msat > Channel::get_our_max_htlc_value_in_flight_msat(self.channel_value_satoshis) {
-                       return Err(ChannelError::Close("Remote HTLC add would put them over their max HTLC value in flight"));
+                       return Err(ChannelError::Close("Remote HTLC add would put them over our max HTLC value"));
                }
                // Check our_channel_reserve_satoshis (we're getting paid, so they have to at least meet
                // the reserve_satoshis we told them to always have as direct payment so that they lose
                // something if we punish them for broadcasting an old state).
 -              if htlc_inbound_value_msat + msg.amount_msat + self.value_to_self_msat > (self.channel_value_satoshis - Channel::get_our_channel_reserve_satoshis(self.channel_value_satoshis)) * 1000 {
 +              // Note that we don't really care about having a small/no to_remote output in our local
 +              // commitment transactions, as the purpose of the channel reserve is to ensure we can
 +              // punish *them* if they misbehave, so we discount any outbound HTLCs which will not be
 +              // present in the next commitment transaction we send them (at least for fulfilled ones,
 +              // failed ones won't modify value_to_self).
 +              // Note that we will send HTLCs which another instance of rust-lightning would think
 +              // violate the reserve value if we do not do this (as we forget inbound HTLCs from the
 +              // Channel state once they will not be present in the next received commitment
 +              // transaction).
 +              let mut removed_outbound_total_msat = 0;
 +              for ref htlc in self.pending_outbound_htlcs.iter() {
 +                      if let OutboundHTLCState::AwaitingRemoteRevokeToRemove(None) = htlc.state {
 +                              removed_outbound_total_msat += htlc.amount_msat;
 +                      } else if let OutboundHTLCState::AwaitingRemovedRemoteRevoke(None) = htlc.state {
 +                              removed_outbound_total_msat += htlc.amount_msat;
 +                      }
 +              }
 +              if htlc_inbound_value_msat + msg.amount_msat + self.value_to_self_msat > (self.channel_value_satoshis - Channel::get_our_channel_reserve_satoshis(self.channel_value_satoshis)) * 1000 + removed_outbound_total_msat {
                        return Err(ChannelError::Close("Remote HTLC add would put them over their reserve value"));
                }
                if self.next_remote_htlc_id != msg.htlc_id {
                                        OutboundHTLCState::LocalAnnounced(_) =>
                                                return Err(ChannelError::Close("Remote tried to fulfill/fail HTLC before it had been committed")),
                                        OutboundHTLCState::Committed => {
 -                                              htlc.state = OutboundHTLCState::RemoteRemoved;
 -                                              htlc.fail_reason = fail_reason;
 +                                              htlc.state = OutboundHTLCState::RemoteRemoved(fail_reason);
                                        },
 -                                      OutboundHTLCState::AwaitingRemoteRevokeToRemove | OutboundHTLCState::AwaitingRemovedRemoteRevoke | OutboundHTLCState::RemoteRemoved =>
 +                                      OutboundHTLCState::AwaitingRemoteRevokeToRemove(_) | OutboundHTLCState::AwaitingRemovedRemoteRevoke(_) | OutboundHTLCState::RemoteRemoved(_) =>
                                                return Err(ChannelError::Close("Remote tried to fulfill/fail HTLC that they'd already fulfilled/failed")),
                                }
                                return Ok(&htlc.source);
                        }
                }
  
 -              if self.channel_state & (ChannelState::MonitorUpdateFailed as u32) == 0 {
 -                      // This is a response to our post-monitor-failed unfreeze messages, so we can clear the
 -                      // monitor_pending_order requirement as we won't re-send the monitor_pending messages.
 -                      self.monitor_pending_order = None;
 -              }
 -
                self.channel_monitor.provide_latest_local_commitment_tx_info(local_commitment_tx.0, local_keys, self.feerate_per_kw, htlcs_and_sigs);
  
                for htlc in self.pending_inbound_htlcs.iter_mut() {
                        }
                }
                for htlc in self.pending_outbound_htlcs.iter_mut() {
 -                      if let OutboundHTLCState::RemoteRemoved = htlc.state {
 -                              htlc.state = OutboundHTLCState::AwaitingRemoteRevokeToRemove;
 +                      if let Some(fail_reason) = if let &mut OutboundHTLCState::RemoteRemoved(ref mut fail_reason) = &mut htlc.state {
 +                              Some(fail_reason.take())
 +                      } else { None } {
 +                              htlc.state = OutboundHTLCState::AwaitingRemoteRevokeToRemove(fail_reason);
                                need_our_commitment = true;
                        }
                }
  
                self.cur_local_commitment_transaction_number -= 1;
                self.last_local_commitment_txn = new_local_commitment_txn;
 -              self.received_commitment_while_awaiting_raa = (self.channel_state & (ChannelState::AwaitingRemoteRevoke as u32)) != 0;
 +              // Note that if we need_our_commitment & !AwaitingRemoteRevoke we'll call
 +              // send_commitment_no_status_check() next which will reset this to RAAFirst.
 +              self.resend_order = RAACommitmentOrder::CommitmentFirst;
  
                if (self.channel_state & ChannelState::MonitorUpdateFailed as u32) != 0 {
                        // In case we initially failed monitor updating without requiring a response, we need
                        // to make sure the RAA gets sent first.
 -                      if !self.monitor_pending_commitment_signed {
 -                              self.monitor_pending_order = Some(RAACommitmentOrder::RevokeAndACKFirst);
 -                      }
                        self.monitor_pending_revoke_and_ack = true;
                        if need_our_commitment && (self.channel_state & (ChannelState::AwaitingRemoteRevoke as u32)) == 0 {
                                // If we were going to send a commitment_signed after the RAA, go ahead and do all
        fn free_holding_cell_htlcs(&mut self) -> Result<Option<(msgs::CommitmentUpdate, ChannelMonitor)>, ChannelError> {
                assert_eq!(self.channel_state & ChannelState::MonitorUpdateFailed as u32, 0);
                if self.holding_cell_htlc_updates.len() != 0 || self.holding_cell_update_fee.is_some() {
 +                      log_trace!(self, "Freeing holding cell with {} HTLC updates{}", self.holding_cell_htlc_updates.len(), if self.holding_cell_update_fee.is_some() { " and a fee update" } else { "" });
 +
                        let mut htlc_updates = Vec::new();
                        mem::swap(&mut htlc_updates, &mut self.holding_cell_htlc_updates);
                        let mut update_add_htlcs = Vec::with_capacity(htlc_updates.len());
                self.their_prev_commitment_point = self.their_cur_commitment_point;
                self.their_cur_commitment_point = Some(msg.next_per_commitment_point);
                self.cur_remote_commitment_transaction_number -= 1;
 -              self.received_commitment_while_awaiting_raa = false;
 -              if self.channel_state & (ChannelState::MonitorUpdateFailed as u32) == 0 {
 -                      // This is a response to our post-monitor-failed unfreeze messages, so we can clear the
 -                      // monitor_pending_order requirement as we won't re-send the monitor_pending messages.
 -                      self.monitor_pending_order = None;
 -              }
  
                log_trace!(self, "Updating HTLCs on receipt of RAA...");
                let mut to_forward_infos = Vec::new();
                                } else { true }
                        });
                        pending_outbound_htlcs.retain(|htlc| {
 -                              if let OutboundHTLCState::AwaitingRemovedRemoteRevoke = htlc.state {
 +                              if let &OutboundHTLCState::AwaitingRemovedRemoteRevoke(ref fail_reason) = &htlc.state {
                                        log_trace!(logger, " ...removing outbound AwaitingRemovedRemoteRevoke {}", log_bytes!(htlc.payment_hash.0));
 -                                      if let Some(reason) = htlc.fail_reason.clone() { // We really want take() here, but, again, non-mut ref :(
 +                                      if let Some(reason) = fail_reason.clone() { // We really want take() here, but, again, non-mut ref :(
                                                revoked_htlcs.push((htlc.source.clone(), htlc.payment_hash, reason));
                                        } else {
                                                // They fulfilled, so we sent them money
                                if let OutboundHTLCState::LocalAnnounced(_) = htlc.state {
                                        log_trace!(logger, " ...promoting outbound LocalAnnounced {} to Committed", log_bytes!(htlc.payment_hash.0));
                                        htlc.state = OutboundHTLCState::Committed;
 -                              } else if let OutboundHTLCState::AwaitingRemoteRevokeToRemove = htlc.state {
 +                              }
 +                              if let Some(fail_reason) = if let &mut OutboundHTLCState::AwaitingRemoteRevokeToRemove(ref mut fail_reason) = &mut htlc.state {
 +                                      Some(fail_reason.take())
 +                              } else { None } {
                                        log_trace!(logger, " ...promoting outbound AwaitingRemoteRevokeToRemove {} to AwaitingRemovedRemoteRevoke", log_bytes!(htlc.payment_hash.0));
 -                                      htlc.state = OutboundHTLCState::AwaitingRemovedRemoteRevoke;
 +                                      htlc.state = OutboundHTLCState::AwaitingRemovedRemoteRevoke(fail_reason);
                                        require_commitment = true;
                                }
                        }
                                // When the monitor updating is restored we'll call get_last_commitment_update(),
                                // which does not update state, but we're definitely now awaiting a remote revoke
                                // before we can step forward any more, so set it here.
 -                              self.channel_state |= ChannelState::AwaitingRemoteRevoke as u32;
 +                              self.send_commitment_no_status_check()?;
                        }
                        self.monitor_pending_forwards.append(&mut to_forward_infos);
                        self.monitor_pending_failures.append(&mut revoked_htlcs);
                self.next_remote_htlc_id -= inbound_drop_count;
  
                for htlc in self.pending_outbound_htlcs.iter_mut() {
 -                      if let OutboundHTLCState::RemoteRemoved = htlc.state {
 +                      if let OutboundHTLCState::RemoteRemoved(_) = htlc.state {
                                // They sent us an update to remove this but haven't yet sent the corresponding
                                // commitment_signed, we need to move it back to Committed and they can re-send
                                // the update upon reconnection.
        /// Indicates that a ChannelMonitor update failed to be stored by the client and further
        /// updates are partially paused.
        /// This must be called immediately after the call which generated the ChannelMonitor update
 -      /// which failed, with the order argument set to the type of call it represented (ie a
 -      /// commitment update or a revoke_and_ack generation). The messages which were generated from
 -      /// that original call must *not* have been sent to the remote end, and must instead have been
 -      /// dropped. They will be regenerated when monitor_updating_restored is called.
 -      pub fn monitor_update_failed(&mut self, order: RAACommitmentOrder, resend_raa: bool, resend_commitment: bool, mut pending_forwards: Vec<(PendingForwardHTLCInfo, u64)>, mut pending_fails: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>) {
 +      /// which failed. The messages which were generated from that call which generated the
 +      /// monitor update failure must *not* have been sent to the remote end, and must instead
 +      /// have been dropped. They will be regenerated when monitor_updating_restored is called.
 +      pub fn monitor_update_failed(&mut self, resend_raa: bool, resend_commitment: bool, mut pending_forwards: Vec<(PendingForwardHTLCInfo, u64)>, mut pending_fails: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>) {
                assert_eq!(self.channel_state & ChannelState::MonitorUpdateFailed as u32, 0);
                self.monitor_pending_revoke_and_ack = resend_raa;
                self.monitor_pending_commitment_signed = resend_commitment;
 -              self.monitor_pending_order = Some(order);
                assert!(self.monitor_pending_forwards.is_empty());
                mem::swap(&mut pending_forwards, &mut self.monitor_pending_forwards);
                assert!(self.monitor_pending_failures.is_empty());
        /// Indicates that the latest ChannelMonitor update has been committed by the client
        /// successfully and we should restore normal operation. Returns messages which should be sent
        /// to the remote side.
 -      pub fn monitor_updating_restored(&mut self) -> (Option<msgs::RevokeAndACK>, Option<msgs::CommitmentUpdate>, RAACommitmentOrder, Vec<(PendingForwardHTLCInfo, u64)>, Vec<(HTLCSource, PaymentHash, HTLCFailReason)>) {
 +      pub fn monitor_updating_restored(&mut self) -> (Option<msgs::RevokeAndACK>, Option<msgs::CommitmentUpdate>, RAACommitmentOrder, Vec<(PendingForwardHTLCInfo, u64)>, Vec<(HTLCSource, PaymentHash, HTLCFailReason)>, bool, Option<msgs::FundingLocked>) {
                assert_eq!(self.channel_state & ChannelState::MonitorUpdateFailed as u32, ChannelState::MonitorUpdateFailed as u32);
                self.channel_state &= !(ChannelState::MonitorUpdateFailed as u32);
  
 +              let needs_broadcast_safe = self.channel_state & (ChannelState::FundingSent as u32) != 0 && self.channel_outbound;
 +
 +              // Because we will never generate a FundingBroadcastSafe event when we're in
 +              // MonitorUpdateFailed, if we assume the user only broadcast the funding transaction when
 +              // they received the FundingBroadcastSafe event, we can only ever hit
 +              // monitor_pending_funding_locked when we're an inbound channel which failed to persist the
 +              // monitor on funding_created, and we even got the funding transaction confirmed before the
 +              // monitor was persisted.
 +              let funding_locked = if self.monitor_pending_funding_locked {
 +                      assert!(!self.channel_outbound, "Funding transaction broadcast without FundingBroadcastSafe!");
 +                      self.monitor_pending_funding_locked = false;
 +                      let next_per_commitment_secret = self.build_local_commitment_secret(self.cur_local_commitment_transaction_number);
 +                      let next_per_commitment_point = PublicKey::from_secret_key(&self.secp_ctx, &next_per_commitment_secret);
 +                      Some(msgs::FundingLocked {
 +                              channel_id: self.channel_id(),
 +                              next_per_commitment_point: next_per_commitment_point,
 +                      })
 +              } else { None };
 +
                let mut forwards = Vec::new();
                mem::swap(&mut forwards, &mut self.monitor_pending_forwards);
                let mut failures = Vec::new();
                mem::swap(&mut failures, &mut self.monitor_pending_failures);
  
                if self.channel_state & (ChannelState::PeerDisconnected as u32) != 0 {
 -                      // Leave monitor_pending_order so we can order our channel_reestablish responses
                        self.monitor_pending_revoke_and_ack = false;
                        self.monitor_pending_commitment_signed = false;
 -                      return (None, None, RAACommitmentOrder::RevokeAndACKFirst, forwards, failures);
 +                      return (None, None, RAACommitmentOrder::RevokeAndACKFirst, forwards, failures, needs_broadcast_safe, funding_locked);
                }
  
                let raa = if self.monitor_pending_revoke_and_ack {
  
                self.monitor_pending_revoke_and_ack = false;
                self.monitor_pending_commitment_signed = false;
 -              (raa, commitment_update, self.monitor_pending_order.clone().unwrap(), forwards, failures)
 +              let order = self.resend_order.clone();
 +              log_trace!(self, "Restored monitor updating resulting in {}{} commitment update and {} RAA, with {} first",
 +                      if needs_broadcast_safe { "a funding broadcast safe, " } else { "" },
 +                      if commitment_update.is_some() { "a" } else { "no" },
 +                      if raa.is_some() { "an" } else { "no" },
 +                      match order { RAACommitmentOrder::CommitmentFirst => "commitment", RAACommitmentOrder::RevokeAndACKFirst => "RAA"});
 +              (raa, commitment_update, order, forwards, failures, needs_broadcast_safe, funding_locked)
        }
  
        pub fn update_fee(&mut self, fee_estimator: &FeeEstimator, msg: &msgs::UpdateFee) -> Result<(), ChannelError> {
                                update_add_htlcs.len(), update_fulfill_htlcs.len(), update_fail_htlcs.len(), update_fail_malformed_htlcs.len());
                msgs::CommitmentUpdate {
                        update_add_htlcs, update_fulfill_htlcs, update_fail_htlcs, update_fail_malformed_htlcs,
 -                      update_fee: None, //TODO: We need to support re-generating any update_fees in the last commitment_signed!
 +                      update_fee: None,
                        commitment_signed: self.send_commitment_no_state_update().expect("It looks like we failed to re-generate a commitment_signed we had previously sent?").0,
                }
        }
                } else { None };
  
                if self.channel_state & (ChannelState::FundingSent as u32) == ChannelState::FundingSent as u32 {
 -                      if self.channel_state & ChannelState::OurFundingLocked as u32 == 0 {
 +                      // If we're waiting on a monitor update, we shouldn't re-send any funding_locked's.
 +                      if self.channel_state & (ChannelState::OurFundingLocked as u32) == 0 ||
 +                                      self.channel_state & (ChannelState::MonitorUpdateFailed as u32) != 0 {
                                if msg.next_remote_commitment_number != 0 {
                                        return Err(ChannelError::Close("Peer claimed they saw a revoke_and_ack but we haven't sent funding_locked yet"));
                                }
                        })
                } else { None };
  
 -              let order = self.monitor_pending_order.clone().unwrap_or(if self.received_commitment_while_awaiting_raa {
 -                              RAACommitmentOrder::CommitmentFirst
 -                      } else {
 -                              RAACommitmentOrder::RevokeAndACKFirst
 -                      });
 -
                if msg.next_local_commitment_number == our_next_remote_commitment_number {
                        if required_revoke.is_some() {
                                log_debug!(self, "Reconnected channel {} with only lost outbound RAA", log_bytes!(self.channel_id()));
                                log_debug!(self, "Reconnected channel {} with no loss", log_bytes!(self.channel_id()));
                        }
  
 -                      if (self.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::MonitorUpdateFailed as u32)) == 0 &&
 -                                      self.monitor_pending_order.is_none() { // monitor_pending_order indicates we're waiting on a response to a unfreeze
 +                      if (self.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::MonitorUpdateFailed as u32)) == 0 {
                                // We're up-to-date and not waiting on a remote revoke (if we are our
                                // channel_reestablish should result in them sending a revoke_and_ack), but we may
                                // have received some updates while we were disconnected. Free the holding cell
                                match self.free_holding_cell_htlcs() {
                                        Err(ChannelError::Close(msg)) => return Err(ChannelError::Close(msg)),
                                        Err(ChannelError::Ignore(_)) => panic!("Got non-channel-failing result from free_holding_cell_htlcs"),
 -                                      Ok(Some((commitment_update, channel_monitor))) => return Ok((resend_funding_locked, required_revoke, Some(commitment_update), Some(channel_monitor), order, shutdown_msg)),
 -                                      Ok(None) => return Ok((resend_funding_locked, required_revoke, None, None, order, shutdown_msg)),
 +                                      Ok(Some((commitment_update, channel_monitor))) => return Ok((resend_funding_locked, required_revoke, Some(commitment_update), Some(channel_monitor), self.resend_order.clone(), shutdown_msg)),
 +                                      Ok(None) => return Ok((resend_funding_locked, required_revoke, None, None, self.resend_order.clone(), shutdown_msg)),
                                }
                        } else {
 -                              return Ok((resend_funding_locked, required_revoke, None, None, order, shutdown_msg));
 +                              return Ok((resend_funding_locked, required_revoke, None, None, self.resend_order.clone(), shutdown_msg));
                        }
                } else if msg.next_local_commitment_number == our_next_remote_commitment_number - 1 {
                        if required_revoke.is_some() {
  
                        if self.channel_state & (ChannelState::MonitorUpdateFailed as u32) != 0 {
                                self.monitor_pending_commitment_signed = true;
 -                              return Ok((resend_funding_locked, None, None, None, order, shutdown_msg));
 +                              return Ok((resend_funding_locked, None, None, None, self.resend_order.clone(), shutdown_msg));
                        }
  
 -                      return Ok((resend_funding_locked, required_revoke, Some(self.get_last_commitment_update()), None, order, shutdown_msg));
 +                      return Ok((resend_funding_locked, required_revoke, Some(self.get_last_commitment_update()), None, self.resend_order.clone(), shutdown_msg));
                } else {
                        return Err(ChannelError::Close("Peer attempted to reestablish channel with a very old remote commitment transaction"));
                }
                self.cur_remote_commitment_transaction_number + 2
        }
  
 -      //TODO: Testing purpose only, should be changed in another way after #81
        #[cfg(test)]
        pub fn get_local_keys(&self) -> &ChannelKeys {
                &self.local_keys
                                        //they can by sending two revoke_and_acks back-to-back, but not really). This appears to be
                                        //a protocol oversight, but I assume I'm just missing something.
                                        if need_commitment_update {
 -                                              let next_per_commitment_secret = self.build_local_commitment_secret(self.cur_local_commitment_transaction_number);
 -                                              let next_per_commitment_point = PublicKey::from_secret_key(&self.secp_ctx, &next_per_commitment_secret);
 -                                              return Ok(Some(msgs::FundingLocked {
 -                                                      channel_id: self.channel_id,
 -                                                      next_per_commitment_point: next_per_commitment_point,
 -                                              }));
 +                                              if self.channel_state & (ChannelState::MonitorUpdateFailed as u32) == 0 {
 +                                                      let next_per_commitment_secret = self.build_local_commitment_secret(self.cur_local_commitment_transaction_number);
 +                                                      let next_per_commitment_point = PublicKey::from_secret_key(&self.secp_ctx, &next_per_commitment_secret);
 +                                                      return Ok(Some(msgs::FundingLocked {
 +                                                              channel_id: self.channel_id,
 +                                                              next_per_commitment_point: next_per_commitment_point,
 +                                                      }));
 +                                              } else {
 +                                                      self.monitor_pending_funding_locked = true;
 +                                                      return Ok(None);
 +                                              }
                                        }
                                }
                        }
                        channel_reserve_satoshis: Channel::get_our_channel_reserve_satoshis(self.channel_value_satoshis),
                        htlc_minimum_msat: self.our_htlc_minimum_msat,
                        feerate_per_kw: fee_estimator.get_est_sat_per_1000_weight(ConfirmationTarget::Background) as u32,
 -                      to_self_delay: BREAKDOWN_TIMEOUT,
 +                      to_self_delay: self.our_to_self_delay,
                        max_accepted_htlcs: OUR_MAX_HTLCS,
                        funding_pubkey: PublicKey::from_secret_key(&self.secp_ctx, &self.local_keys.funding_key),
                        revocation_basepoint: PublicKey::from_secret_key(&self.secp_ctx, &self.local_keys.revocation_base_key),
                        htlc_basepoint: PublicKey::from_secret_key(&self.secp_ctx, &self.local_keys.htlc_base_key),
                        first_per_commitment_point: PublicKey::from_secret_key(&self.secp_ctx, &local_commitment_secret),
                        channel_flags: if self.config.announced_channel {1} else {0},
 -                      shutdown_scriptpubkey: OptionalField::Absent
 +                      shutdown_scriptpubkey: OptionalField::Present(if self.config.commit_upfront_shutdown_pubkey { self.get_closing_scriptpubkey() } else { Builder::new().into_script() })
                }
        }
  
                        channel_reserve_satoshis: Channel::get_our_channel_reserve_satoshis(self.channel_value_satoshis),
                        htlc_minimum_msat: self.our_htlc_minimum_msat,
                        minimum_depth: self.minimum_depth,
 -                      to_self_delay: BREAKDOWN_TIMEOUT,
 +                      to_self_delay: self.our_to_self_delay,
                        max_accepted_htlcs: OUR_MAX_HTLCS,
                        funding_pubkey: PublicKey::from_secret_key(&self.secp_ctx, &self.local_keys.funding_key),
                        revocation_basepoint: PublicKey::from_secret_key(&self.secp_ctx, &self.local_keys.revocation_base_key),
                        delayed_payment_basepoint: PublicKey::from_secret_key(&self.secp_ctx, &self.local_keys.delayed_payment_base_key),
                        htlc_basepoint: PublicKey::from_secret_key(&self.secp_ctx, &self.local_keys.htlc_base_key),
                        first_per_commitment_point: PublicKey::from_secret_key(&self.secp_ctx, &local_commitment_secret),
 -                      shutdown_scriptpubkey: OptionalField::Absent
 +                      shutdown_scriptpubkey: OptionalField::Present(if self.config.commit_upfront_shutdown_pubkey { self.get_closing_scriptpubkey() } else { Builder::new().into_script() })
                }
        }
  
                        excess_data: Vec::new(),
                };
  
 -              let msghash = hash_to_message!(&Sha256dHash::from_data(&msg.encode()[..])[..]);
 +              let msghash = hash_to_message!(&Sha256dHash::hash(&msg.encode()[..])[..]);
                let sig = self.secp_ctx.sign(&msghash, &self.local_keys.funding_key);
  
                Ok((msg, sig))
                if outbound_htlc_count + 1 > self.their_max_accepted_htlcs as u32 {
                        return Err(ChannelError::Ignore("Cannot push more than their max accepted HTLCs"));
                }
-               //TODO: Spec is unclear if this is per-direction or in total (I assume per direction):
                // Check their_max_htlc_value_in_flight_msat
                if htlc_outbound_value_msat + amount_msat > self.their_max_htlc_value_in_flight_msat {
-                       return Err(ChannelError::Ignore("Cannot send value that would put us over the max HTLC value in flight"));
+                       return Err(ChannelError::Ignore("Cannot send value that would put us over the max HTLC value in flight our peer will accept"));
                }
  
                // Check self.their_channel_reserve_satoshis (the amount we must keep as
                // reserve for them to have something to claim if we misbehave)
                if self.value_to_self_msat < self.their_channel_reserve_satoshis * 1000 + amount_msat + htlc_outbound_value_msat {
-                       return Err(ChannelError::Ignore("Cannot send value that would put us over the reserve value"));
+                       return Err(ChannelError::Ignore("Cannot send value that would put us over their reserve value"));
                }
  
                //TODO: Check cltv_expiry? Do this in channel manager?
                                cltv_expiry: cltv_expiry,
                                source,
                                onion_routing_packet: onion_routing_packet,
 -                              time_created: Instant::now(),
                        });
                        return Ok(None);
                }
                        cltv_expiry: cltv_expiry,
                        state: OutboundHTLCState::LocalAnnounced(Box::new(onion_routing_packet.clone())),
                        source,
 -                      fail_reason: None,
                });
  
                let res = msgs::UpdateAddHTLC {
                        }
                }
                for htlc in self.pending_outbound_htlcs.iter_mut() {
 -                      if let OutboundHTLCState::AwaitingRemoteRevokeToRemove = htlc.state {
 -                              htlc.state = OutboundHTLCState::AwaitingRemovedRemoteRevoke;
 +                      if let Some(fail_reason) = if let &mut OutboundHTLCState::AwaitingRemoteRevokeToRemove(ref mut fail_reason) = &mut htlc.state {
 +                              Some(fail_reason.take())
 +                      } else { None } {
 +                              htlc.state = OutboundHTLCState::AwaitingRemovedRemoteRevoke(fail_reason);
                        }
                }
 +              self.resend_order = RAACommitmentOrder::RevokeAndACKFirst;
  
                let (res, remote_commitment_tx, htlcs) = match self.send_commitment_no_state_update() {
                        Ok((res, (remote_commitment_tx, mut htlcs))) => {
@@@ -3630,6 -3525,8 +3628,6 @@@ impl Writeable for Channel 
                self.cur_remote_commitment_transaction_number.write(writer)?;
                self.value_to_self_msat.write(writer)?;
  
 -              self.received_commitment_while_awaiting_raa.write(writer)?;
 -
                let mut dropped_inbound_htlcs = 0;
                for htlc in self.pending_inbound_htlcs.iter() {
                        if let InboundHTLCState::RemoteAnnounced(_) = htlc.state {
                        htlc.cltv_expiry.write(writer)?;
                        htlc.payment_hash.write(writer)?;
                        htlc.source.write(writer)?;
 -                      write_option!(htlc.fail_reason);
                        match &htlc.state {
                                &OutboundHTLCState::LocalAnnounced(ref onion_packet) => {
                                        0u8.write(writer)?;
                                &OutboundHTLCState::Committed => {
                                        1u8.write(writer)?;
                                },
 -                              &OutboundHTLCState::RemoteRemoved => {
 +                              &OutboundHTLCState::RemoteRemoved(ref fail_reason) => {
                                        2u8.write(writer)?;
 +                                      write_option!(*fail_reason);
                                },
 -                              &OutboundHTLCState::AwaitingRemoteRevokeToRemove => {
 +                              &OutboundHTLCState::AwaitingRemoteRevokeToRemove(ref fail_reason) => {
                                        3u8.write(writer)?;
 +                                      write_option!(*fail_reason);
                                },
 -                              &OutboundHTLCState::AwaitingRemovedRemoteRevoke => {
 +                              &OutboundHTLCState::AwaitingRemovedRemoteRevoke(ref fail_reason) => {
                                        4u8.write(writer)?;
 +                                      write_option!(*fail_reason);
                                },
                        }
                }
                (self.holding_cell_htlc_updates.len() as u64).write(writer)?;
                for update in self.holding_cell_htlc_updates.iter() {
                        match update {
 -                              &HTLCUpdateAwaitingACK::AddHTLC { ref amount_msat, ref cltv_expiry, ref payment_hash, ref source, ref onion_routing_packet, time_created: _ } => {
 +                              &HTLCUpdateAwaitingACK::AddHTLC { ref amount_msat, ref cltv_expiry, ref payment_hash, ref source, ref onion_routing_packet } => {
                                        0u8.write(writer)?;
                                        amount_msat.write(writer)?;
                                        cltv_expiry.write(writer)?;
                                        payment_hash.write(writer)?;
                                        source.write(writer)?;
                                        onion_routing_packet.write(writer)?;
 -                                      // time_created is not serialized - we re-init the timeout upon deserialization
                                },
                                &HTLCUpdateAwaitingACK::ClaimHTLC { ref payment_preimage, ref htlc_id } => {
                                        1u8.write(writer)?;
                        }
                }
  
 +              match self.resend_order {
 +                      RAACommitmentOrder::CommitmentFirst => 0u8.write(writer)?,
 +                      RAACommitmentOrder::RevokeAndACKFirst => 1u8.write(writer)?,
 +              }
 +
 +              self.monitor_pending_funding_locked.write(writer)?;
                self.monitor_pending_revoke_and_ack.write(writer)?;
                self.monitor_pending_commitment_signed.write(writer)?;
 -              match self.monitor_pending_order {
 -                      None => 0u8.write(writer)?,
 -                      Some(RAACommitmentOrder::CommitmentFirst) => 1u8.write(writer)?,
 -                      Some(RAACommitmentOrder::RevokeAndACKFirst) => 2u8.write(writer)?,
 -              }
  
                (self.monitor_pending_forwards.len() as u64).write(writer)?;
                for &(ref pending_forward, ref htlc_id) in self.monitor_pending_forwards.iter() {
                self.their_htlc_minimum_msat.write(writer)?;
                self.our_htlc_minimum_msat.write(writer)?;
                self.their_to_self_delay.write(writer)?;
 +              self.our_to_self_delay.write(writer)?;
                self.their_max_accepted_htlcs.write(writer)?;
                self.minimum_depth.write(writer)?;
  
@@@ -3834,6 -3728,8 +3832,6 @@@ impl<R : ::std::io::Read> ReadableArgs<
                let cur_remote_commitment_transaction_number = Readable::read(reader)?;
                let value_to_self_msat = Readable::read(reader)?;
  
 -              let received_commitment_while_awaiting_raa = Readable::read(reader)?;
 -
                let pending_inbound_htlc_count: u64 = Readable::read(reader)?;
                let mut pending_inbound_htlcs = Vec::with_capacity(cmp::min(pending_inbound_htlc_count as usize, OUR_MAX_HTLCS as usize));
                for _ in 0..pending_inbound_htlc_count {
                                cltv_expiry: Readable::read(reader)?,
                                payment_hash: Readable::read(reader)?,
                                source: Readable::read(reader)?,
 -                              fail_reason: Readable::read(reader)?,
                                state: match <u8 as Readable<R>>::read(reader)? {
                                        0 => OutboundHTLCState::LocalAnnounced(Box::new(Readable::read(reader)?)),
                                        1 => OutboundHTLCState::Committed,
 -                                      2 => OutboundHTLCState::RemoteRemoved,
 -                                      3 => OutboundHTLCState::AwaitingRemoteRevokeToRemove,
 -                                      4 => OutboundHTLCState::AwaitingRemovedRemoteRevoke,
 +                                      2 => OutboundHTLCState::RemoteRemoved(Readable::read(reader)?),
 +                                      3 => OutboundHTLCState::AwaitingRemoteRevokeToRemove(Readable::read(reader)?),
 +                                      4 => OutboundHTLCState::AwaitingRemovedRemoteRevoke(Readable::read(reader)?),
                                        _ => return Err(DecodeError::InvalidValue),
                                },
                        });
                                        payment_hash: Readable::read(reader)?,
                                        source: Readable::read(reader)?,
                                        onion_routing_packet: Readable::read(reader)?,
 -                                      time_created: Instant::now(),
                                },
                                1 => HTLCUpdateAwaitingACK::ClaimHTLC {
                                        payment_preimage: Readable::read(reader)?,
                        });
                }
  
 -              let monitor_pending_revoke_and_ack = Readable::read(reader)?;
 -              let monitor_pending_commitment_signed = Readable::read(reader)?;
 -
 -              let monitor_pending_order = match <u8 as Readable<R>>::read(reader)? {
 -                      0 => None,
 -                      1 => Some(RAACommitmentOrder::CommitmentFirst),
 -                      2 => Some(RAACommitmentOrder::RevokeAndACKFirst),
 +              let resend_order = match <u8 as Readable<R>>::read(reader)? {
 +                      0 => RAACommitmentOrder::CommitmentFirst,
 +                      1 => RAACommitmentOrder::RevokeAndACKFirst,
                        _ => return Err(DecodeError::InvalidValue),
                };
  
 +              let monitor_pending_funding_locked = Readable::read(reader)?;
 +              let monitor_pending_revoke_and_ack = Readable::read(reader)?;
 +              let monitor_pending_commitment_signed = Readable::read(reader)?;
 +
                let monitor_pending_forwards_count: u64 = Readable::read(reader)?;
                let mut monitor_pending_forwards = Vec::with_capacity(cmp::min(monitor_pending_forwards_count as usize, OUR_MAX_HTLCS as usize));
                for _ in 0..monitor_pending_forwards_count {
                let their_htlc_minimum_msat = Readable::read(reader)?;
                let our_htlc_minimum_msat = Readable::read(reader)?;
                let their_to_self_delay = Readable::read(reader)?;
 +              let our_to_self_delay = Readable::read(reader)?;
                let their_max_accepted_htlcs = Readable::read(reader)?;
                let minimum_depth = Readable::read(reader)?;
  
                        cur_remote_commitment_transaction_number,
                        value_to_self_msat,
  
 -                      received_commitment_while_awaiting_raa,
                        pending_inbound_htlcs,
                        pending_outbound_htlcs,
                        holding_cell_htlc_updates,
  
 +                      resend_order,
 +
 +                      monitor_pending_funding_locked,
                        monitor_pending_revoke_and_ack,
                        monitor_pending_commitment_signed,
 -                      monitor_pending_order,
                        monitor_pending_forwards,
                        monitor_pending_failures,
  
                        their_htlc_minimum_msat,
                        our_htlc_minimum_msat,
                        their_to_self_delay,
 +                      our_to_self_delay,
                        their_max_accepted_htlcs,
                        minimum_depth,
  
  
  #[cfg(test)]
  mod tests {
 -      use bitcoin::util::hash::{Sha256dHash, Hash160};
        use bitcoin::util::bip143;
        use bitcoin::consensus::encode::serialize;
        use bitcoin::blockdata::script::{Script, Builder};
        use bitcoin::blockdata::transaction::Transaction;
        use bitcoin::blockdata::opcodes;
 +      use bitcoin_hashes::hex::FromHex;
        use hex;
        use ln::channelmanager::{HTLCSource, PaymentPreimage, PaymentHash};
        use ln::channel::{Channel,ChannelKeys,InboundHTLCOutput,OutboundHTLCOutput,InboundHTLCState,OutboundHTLCState,HTLCOutputInCommitment,TxCreationKeys};
        use secp256k1::{Secp256k1,Message,Signature};
        use secp256k1::key::{SecretKey,PublicKey};
        use bitcoin_hashes::sha256::Hash as Sha256;
 +      use bitcoin_hashes::sha256d::Hash as Sha256dHash;
 +      use bitcoin_hashes::hash160::Hash as Hash160;
        use bitcoin_hashes::Hash;
        use std::sync::Arc;
  
                fn get_destination_script(&self) -> Script {
                        let secp_ctx = Secp256k1::signing_only();
                        let channel_monitor_claim_key = SecretKey::from_slice(&hex::decode("0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap()[..]).unwrap();
 -                      let our_channel_monitor_claim_key_hash = Hash160::from_data(&PublicKey::from_secret_key(&secp_ctx, &channel_monitor_claim_key).serialize());
 +                      let our_channel_monitor_claim_key_hash = Hash160::hash(&PublicKey::from_secret_key(&secp_ctx, &channel_monitor_claim_key).serialize());
                        Builder::new().push_opcode(opcodes::all::OP_PUSHBYTES_0).push_slice(&our_channel_monitor_claim_key_hash[..]).into_script()
                }
  
                                payment_hash: PaymentHash([0; 32]),
                                state: OutboundHTLCState::Committed,
                                source: HTLCSource::dummy(),
 -                              fail_reason: None,
                        };
                        out.payment_hash.0 = Sha256::hash(&hex::decode("0202020202020202020202020202020202020202020202020202020202020202").unwrap()).into_inner();
                        out
                                payment_hash: PaymentHash([0; 32]),
                                state: OutboundHTLCState::Committed,
                                source: HTLCSource::dummy(),
 -                              fail_reason: None,
                        };
                        out.payment_hash.0 = Sha256::hash(&hex::decode("0303030303030303030303030303030303030303030303030303030303030303").unwrap()).into_inner();
                        out
index 001da7009b8a67e05ff22ae9719da7ce869434b9,36a9098732f8604889a97b992827fda1834fa86a..fe32a1ef24bf9f5d239044e446eab284f1295bae
@@@ -7,7 -7,7 +7,7 @@@ use chain::keysinterface::KeysInterface
  use ln::channelmanager::{ChannelManager,RAACommitmentOrder, PaymentPreimage, PaymentHash};
  use ln::router::{Route, Router};
  use ln::msgs;
 -use ln::msgs::{ChannelMessageHandler,RoutingMessageHandler};
 +use ln::msgs::{ChannelMessageHandler,RoutingMessageHandler, LocalFeatures};
  use util::test_utils;
  use util::events::{Event, EventsProvider, MessageSendEvent, MessageSendEventsProvider};
  use util::errors::APIError;
@@@ -20,7 -20,6 +20,7 @@@ use bitcoin::blockdata::transaction::{T
  use bitcoin::network::constants::Network;
  
  use bitcoin_hashes::sha256::Hash as Sha256;
 +use bitcoin_hashes::sha256d::Hash as Sha256d;
  use bitcoin_hashes::Hash;
  
  use secp256k1::Secp256k1;
@@@ -33,6 -32,7 +33,6 @@@ use std::collections::HashMap
  use std::default::Default;
  use std::rc::Rc;
  use std::sync::{Arc, Mutex};
 -use std::time::Instant;
  use std::mem;
  
  pub const CHAN_CONFIRM_DEPTH: u32 = 100;
@@@ -46,16 -46,6 +46,16 @@@ pub fn confirm_transaction(chain: &chai
        }
  }
  
 +pub fn connect_blocks(chain: &chaininterface::ChainWatchInterfaceUtil, depth: u32, height: u32, parent: bool, prev_blockhash: Sha256d) -> Sha256d {
 +      let mut header = BlockHeader { version: 0x2000000, prev_blockhash: if parent { prev_blockhash } else { Default::default() }, merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
 +      chain.block_connected_checked(&header, height + 1, &Vec::new(), &Vec::new());
 +      for i in 2..depth + 1 {
 +              header = BlockHeader { version: 0x20000000, prev_blockhash: header.bitcoin_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
 +              chain.block_connected_checked(&header, height + i, &Vec::new(), &Vec::new());
 +      }
 +      header.bitcoin_hash()
 +}
 +
  pub struct Node {
        pub chain_monitor: Arc<chaininterface::ChainWatchInterfaceUtil>,
        pub tx_broadcaster: Arc<test_utils::TestBroadcaster>,
@@@ -78,12 -68,12 +78,12 @@@ impl Drop for Node 
        }
  }
  
 -pub fn create_chan_between_nodes(node_a: &Node, node_b: &Node) -> (msgs::ChannelAnnouncement, msgs::ChannelUpdate, msgs::ChannelUpdate, [u8; 32], Transaction) {
 -      create_chan_between_nodes_with_value(node_a, node_b, 100000, 10001)
 +pub fn create_chan_between_nodes(node_a: &Node, node_b: &Node, a_flags: LocalFeatures, b_flags: LocalFeatures) -> (msgs::ChannelAnnouncement, msgs::ChannelUpdate, msgs::ChannelUpdate, [u8; 32], Transaction) {
 +      create_chan_between_nodes_with_value(node_a, node_b, 100000, 10001, a_flags, b_flags)
  }
  
 -pub fn create_chan_between_nodes_with_value(node_a: &Node, node_b: &Node, channel_value: u64, push_msat: u64) -> (msgs::ChannelAnnouncement, msgs::ChannelUpdate, msgs::ChannelUpdate, [u8; 32], Transaction) {
 -      let (funding_locked, channel_id, tx) = create_chan_between_nodes_with_value_a(node_a, node_b, channel_value, push_msat);
 +pub fn create_chan_between_nodes_with_value(node_a: &Node, node_b: &Node, channel_value: u64, push_msat: u64, a_flags: LocalFeatures, b_flags: LocalFeatures) -> (msgs::ChannelAnnouncement, msgs::ChannelUpdate, msgs::ChannelUpdate, [u8; 32], Transaction) {
 +      let (funding_locked, channel_id, tx) = create_chan_between_nodes_with_value_a(node_a, node_b, channel_value, push_msat, a_flags, b_flags);
        let (announcement, as_update, bs_update) = create_chan_between_nodes_with_value_b(node_a, node_b, &funding_locked);
        (announcement, as_update, bs_update, channel_id, tx)
  }
@@@ -157,40 -147,36 +157,40 @@@ macro_rules! get_feerate 
        }
  }
  
 +pub fn create_funding_transaction(node: &Node, expected_chan_value: u64, expected_user_chan_id: u64) -> ([u8; 32], Transaction, OutPoint) {
 +      let chan_id = *node.network_chan_count.borrow();
  
 -pub fn create_chan_between_nodes_with_value_init(node_a: &Node, node_b: &Node, channel_value: u64, push_msat: u64) -> Transaction {
 -      node_a.node.create_channel(node_b.node.get_our_node_id(), channel_value, push_msat, 42).unwrap();
 -      node_b.node.handle_open_channel(&node_a.node.get_our_node_id(), &get_event_msg!(node_a, MessageSendEvent::SendOpenChannel, node_b.node.get_our_node_id())).unwrap();
 -      node_a.node.handle_accept_channel(&node_b.node.get_our_node_id(), &get_event_msg!(node_b, MessageSendEvent::SendAcceptChannel, node_a.node.get_our_node_id())).unwrap();
 -
 -      let chan_id = *node_a.network_chan_count.borrow();
 -      let tx;
 -      let funding_output;
 -
 -      let events_2 = node_a.node.get_and_clear_pending_events();
 -      assert_eq!(events_2.len(), 1);
 -      match events_2[0] {
 +      let events = node.node.get_and_clear_pending_events();
 +      assert_eq!(events.len(), 1);
 +      match events[0] {
                Event::FundingGenerationReady { ref temporary_channel_id, ref channel_value_satoshis, ref output_script, user_channel_id } => {
 -                      assert_eq!(*channel_value_satoshis, channel_value);
 -                      assert_eq!(user_channel_id, 42);
 +                      assert_eq!(*channel_value_satoshis, expected_chan_value);
 +                      assert_eq!(user_channel_id, expected_user_chan_id);
  
 -                      tx = Transaction { version: chan_id as u32, lock_time: 0, input: Vec::new(), output: vec![TxOut {
 +                      let tx = Transaction { version: chan_id as u32, lock_time: 0, input: Vec::new(), output: vec![TxOut {
                                value: *channel_value_satoshis, script_pubkey: output_script.clone(),
                        }]};
 -                      funding_output = OutPoint::new(tx.txid(), 0);
 -
 -                      node_a.node.funding_transaction_generated(&temporary_channel_id, funding_output);
 -                      let mut added_monitors = node_a.chan_monitor.added_monitors.lock().unwrap();
 -                      assert_eq!(added_monitors.len(), 1);
 -                      assert_eq!(added_monitors[0].0, funding_output);
 -                      added_monitors.clear();
 +                      let funding_outpoint = OutPoint::new(tx.txid(), 0);
 +                      (*temporary_channel_id, tx, funding_outpoint)
                },
                _ => panic!("Unexpected event"),
        }
 +}
 +
 +pub fn create_chan_between_nodes_with_value_init(node_a: &Node, node_b: &Node, channel_value: u64, push_msat: u64, a_flags: LocalFeatures, b_flags: LocalFeatures) -> Transaction {
 +      node_a.node.create_channel(node_b.node.get_our_node_id(), channel_value, push_msat, 42).unwrap();
 +      node_b.node.handle_open_channel(&node_a.node.get_our_node_id(), a_flags, &get_event_msg!(node_a, MessageSendEvent::SendOpenChannel, node_b.node.get_our_node_id())).unwrap();
 +      node_a.node.handle_accept_channel(&node_b.node.get_our_node_id(), b_flags, &get_event_msg!(node_b, MessageSendEvent::SendAcceptChannel, node_a.node.get_our_node_id())).unwrap();
 +
 +      let (temporary_channel_id, tx, funding_output) = create_funding_transaction(node_a, channel_value, 42);
 +
 +      {
 +              node_a.node.funding_transaction_generated(&temporary_channel_id, funding_output);
 +              let mut added_monitors = node_a.chan_monitor.added_monitors.lock().unwrap();
 +              assert_eq!(added_monitors.len(), 1);
 +              assert_eq!(added_monitors[0].0, funding_output);
 +              added_monitors.clear();
 +      }
  
        node_b.node.handle_funding_created(&node_a.node.get_our_node_id(), &get_event_msg!(node_a, MessageSendEvent::SendFundingCreated, node_b.node.get_our_node_id())).unwrap();
        {
        tx
  }
  
 -pub fn create_chan_between_nodes_with_value_confirm(node_a: &Node, node_b: &Node, tx: &Transaction) -> ((msgs::FundingLocked, msgs::AnnouncementSignatures), [u8; 32]) {
 -      confirm_transaction(&node_b.chain_monitor, &tx, tx.version);
 -      node_a.node.handle_funding_locked(&node_b.node.get_our_node_id(), &get_event_msg!(node_b, MessageSendEvent::SendFundingLocked, node_a.node.get_our_node_id())).unwrap();
 +pub fn create_chan_between_nodes_with_value_confirm_first(node_recv: &Node, node_conf: &Node, tx: &Transaction) {
 +      confirm_transaction(&node_conf.chain_monitor, &tx, tx.version);
 +      node_recv.node.handle_funding_locked(&node_conf.node.get_our_node_id(), &get_event_msg!(node_conf, MessageSendEvent::SendFundingLocked, node_recv.node.get_our_node_id())).unwrap();
 +}
  
 +pub fn create_chan_between_nodes_with_value_confirm_second(node_recv: &Node, node_conf: &Node) -> ((msgs::FundingLocked, msgs::AnnouncementSignatures), [u8; 32]) {
        let channel_id;
 -
 -      confirm_transaction(&node_a.chain_monitor, &tx, tx.version);
 -      let events_6 = node_a.node.get_and_clear_pending_msg_events();
 +      let events_6 = node_conf.node.get_and_clear_pending_msg_events();
        assert_eq!(events_6.len(), 2);
        ((match events_6[0] {
                MessageSendEvent::SendFundingLocked { ref node_id, ref msg } => {
                        channel_id = msg.channel_id.clone();
 -                      assert_eq!(*node_id, node_b.node.get_our_node_id());
 +                      assert_eq!(*node_id, node_recv.node.get_our_node_id());
                        msg.clone()
                },
                _ => panic!("Unexpected event"),
        }, match events_6[1] {
                MessageSendEvent::SendAnnouncementSignatures { ref node_id, ref msg } => {
 -                      assert_eq!(*node_id, node_b.node.get_our_node_id());
 +                      assert_eq!(*node_id, node_recv.node.get_our_node_id());
                        msg.clone()
                },
                _ => panic!("Unexpected event"),
        }), channel_id)
  }
  
 -pub fn create_chan_between_nodes_with_value_a(node_a: &Node, node_b: &Node, channel_value: u64, push_msat: u64) -> ((msgs::FundingLocked, msgs::AnnouncementSignatures), [u8; 32], Transaction) {
 -      let tx = create_chan_between_nodes_with_value_init(node_a, node_b, channel_value, push_msat);
 +pub fn create_chan_between_nodes_with_value_confirm(node_a: &Node, node_b: &Node, tx: &Transaction) -> ((msgs::FundingLocked, msgs::AnnouncementSignatures), [u8; 32]) {
 +      create_chan_between_nodes_with_value_confirm_first(node_a, node_b, tx);
 +      confirm_transaction(&node_a.chain_monitor, &tx, tx.version);
 +      create_chan_between_nodes_with_value_confirm_second(node_b, node_a)
 +}
 +
 +pub fn create_chan_between_nodes_with_value_a(node_a: &Node, node_b: &Node, channel_value: u64, push_msat: u64, a_flags: LocalFeatures, b_flags: LocalFeatures) -> ((msgs::FundingLocked, msgs::AnnouncementSignatures), [u8; 32], Transaction) {
 +      let tx = create_chan_between_nodes_with_value_init(node_a, node_b, channel_value, push_msat, a_flags, b_flags);
        let (msgs, chan_id) = create_chan_between_nodes_with_value_confirm(node_a, node_b, &tx);
        (msgs, chan_id, tx)
  }
@@@ -290,12 -270,12 +290,12 @@@ pub fn create_chan_between_nodes_with_v
        ((*announcement).clone(), (*as_update).clone(), (*bs_update).clone())
  }
  
 -pub fn create_announced_chan_between_nodes(nodes: &Vec<Node>, a: usize, b: usize) -> (msgs::ChannelUpdate, msgs::ChannelUpdate, [u8; 32], Transaction) {
 -      create_announced_chan_between_nodes_with_value(nodes, a, b, 100000, 10001)
 +pub fn create_announced_chan_between_nodes(nodes: &Vec<Node>, a: usize, b: usize, a_flags: LocalFeatures, b_flags: LocalFeatures) -> (msgs::ChannelUpdate, msgs::ChannelUpdate, [u8; 32], Transaction) {
 +      create_announced_chan_between_nodes_with_value(nodes, a, b, 100000, 10001, a_flags, b_flags)
  }
  
 -pub fn create_announced_chan_between_nodes_with_value(nodes: &Vec<Node>, a: usize, b: usize, channel_value: u64, push_msat: u64) -> (msgs::ChannelUpdate, msgs::ChannelUpdate, [u8; 32], Transaction) {
 -      let chan_announcement = create_chan_between_nodes_with_value(&nodes[a], &nodes[b], channel_value, push_msat);
 +pub fn create_announced_chan_between_nodes_with_value(nodes: &Vec<Node>, a: usize, b: usize, channel_value: u64, push_msat: u64, a_flags: LocalFeatures, b_flags: LocalFeatures) -> (msgs::ChannelUpdate, msgs::ChannelUpdate, [u8; 32], Transaction) {
 +      let chan_announcement = create_chan_between_nodes_with_value(&nodes[a], &nodes[b], channel_value, push_msat, a_flags, b_flags);
        for node in nodes {
                assert!(node.router.handle_channel_announcement(&chan_announcement.0).unwrap());
                node.router.handle_channel_update(&chan_announcement.1).unwrap();
@@@ -556,37 -536,12 +556,37 @@@ macro_rules! expect_pending_htlcs_forwa
                        Event::PendingHTLCsForwardable { .. } => { },
                        _ => panic!("Unexpected event"),
                };
 -              let node_ref: &Node = &$node;
 -              node_ref.node.channel_state.lock().unwrap().next_forward = Instant::now();
                $node.node.process_pending_htlc_forwards();
        }}
  }
  
 +macro_rules! expect_payment_received {
 +      ($node: expr, $expected_payment_hash: expr, $expected_recv_value: expr) => {
 +              let events = $node.node.get_and_clear_pending_events();
 +              assert_eq!(events.len(), 1);
 +              match events[0] {
 +                      Event::PaymentReceived { ref payment_hash, amt } => {
 +                              assert_eq!($expected_payment_hash, *payment_hash);
 +                              assert_eq!($expected_recv_value, amt);
 +                      },
 +                      _ => panic!("Unexpected event"),
 +              }
 +      }
 +}
 +
 +macro_rules! expect_payment_sent {
 +      ($node: expr, $expected_payment_preimage: expr) => {
 +              let events = $node.node.get_and_clear_pending_events();
 +              assert_eq!(events.len(), 1);
 +              match events[0] {
 +                      Event::PaymentSent { ref payment_preimage } => {
 +                              assert_eq!($expected_payment_preimage, *payment_preimage);
 +                      },
 +                      _ => panic!("Unexpected event"),
 +              }
 +      }
 +}
 +
  pub fn send_along_route_with_hash(origin_node: &Node, route: Route, expected_route: &[&Node], recv_value: u64, our_payment_hash: PaymentHash) {
        let mut payment_event = {
                origin_node.node.send_payment(route, our_payment_hash).unwrap();
@@@ -709,7 -664,14 +709,7 @@@ pub fn claim_payment_along_route(origin
  
        if !skip_last {
                last_update_fulfill_dance!(origin_node, expected_route.first().unwrap());
 -              let events = origin_node.node.get_and_clear_pending_events();
 -              assert_eq!(events.len(), 1);
 -              match events[0] {
 -                      Event::PaymentSent { payment_preimage } => {
 -                              assert_eq!(payment_preimage, our_payment_preimage);
 -                      },
 -                      _ => panic!("Unexpected event"),
 -              }
 +              expect_payment_sent!(origin_node, our_payment_preimage);
        }
  }
  
@@@ -740,7 -702,7 +740,7 @@@ pub fn route_over_limit(origin_node: &N
  
        let err = origin_node.node.send_payment(route, our_payment_hash).err().unwrap();
        match err {
-               APIError::ChannelUnavailable{err} => assert_eq!(err, "Cannot send value that would put us over the max HTLC value in flight"),
+               APIError::ChannelUnavailable{err} => assert_eq!(err, "Cannot send value that would put us over the max HTLC value in flight our peer will accept"),
                _ => panic!("Unknown error variants"),
        };
  }
@@@ -823,7 -785,7 +823,7 @@@ pub fn fail_payment(origin_node: &Node
        fail_payment_along_route(origin_node, expected_route, false, our_payment_hash);
  }
  
 -pub fn create_network(node_count: usize) -> Vec<Node> {
 +pub fn create_network(node_count: usize, node_config: &[Option<UserConfig>]) -> Vec<Node> {
        let mut nodes = Vec::new();
        let mut rng = thread_rng();
        let secp_ctx = Secp256k1::new();
                let mut seed = [0; 32];
                rng.fill_bytes(&mut seed);
                let keys_manager = Arc::new(test_utils::TestKeysInterface::new(&seed, Network::Testnet, Arc::clone(&logger)));
 -              let chan_monitor = Arc::new(test_utils::TestChannelMonitor::new(chain_monitor.clone(), tx_broadcaster.clone(), logger.clone()));
 -              let mut config = UserConfig::new();
 -              config.channel_options.announced_channel = true;
 -              config.channel_limits.force_announced_channel_preference = false;
 -              let node = ChannelManager::new(Network::Testnet, feeest.clone(), chan_monitor.clone(), chain_monitor.clone(), tx_broadcaster.clone(), Arc::clone(&logger), keys_manager.clone(), config).unwrap();
 +              let chan_monitor = Arc::new(test_utils::TestChannelMonitor::new(chain_monitor.clone(), tx_broadcaster.clone(), logger.clone(), feeest.clone()));
 +              let mut default_config = UserConfig::new();
 +              default_config.channel_options.announced_channel = true;
 +              default_config.peer_channel_config_limits.force_announced_channel_preference = false;
 +              let node = ChannelManager::new(Network::Testnet, feeest.clone(), chan_monitor.clone(), chain_monitor.clone(), tx_broadcaster.clone(), Arc::clone(&logger), keys_manager.clone(), if node_config[i].is_some() { node_config[i].clone().unwrap() } else { default_config }).unwrap();
                let router = Router::new(PublicKey::from_secret_key(&secp_ctx, &keys_manager.get_node_secret()), chain_monitor.clone(), Arc::clone(&logger));
                nodes.push(Node { chain_monitor, tx_broadcaster, chan_monitor, node, router, keys_manager, node_seed: seed,
                        network_payment_count: payment_count.clone(),
@@@ -973,6 -935,20 +973,6 @@@ pub fn get_announce_close_broadcast_eve
        }
  }
  
 -macro_rules! expect_payment_received {
 -      ($node: expr, $expected_payment_hash: expr, $expected_recv_value: expr) => {
 -              let events = $node.node.get_and_clear_pending_events();
 -              assert_eq!(events.len(), 1);
 -              match events[0] {
 -                      Event::PaymentReceived { ref payment_hash, amt } => {
 -                              assert_eq!($expected_payment_hash, *payment_hash);
 -                              assert_eq!($expected_recv_value, amt);
 -                      },
 -                      _ => panic!("Unexpected event"),
 -              }
 -      }
 -}
 -
  macro_rules! get_channel_value_stat {
        ($node: expr, $channel_id: expr) => {{
                let chan_lock = $node.node.channel_state.lock().unwrap();
index 3e5a21b0ee79f0b86042e48970d12e3f618a7b69,ff6169144d1464f7d469cf65a9a64cf6f36af71e..cb59aed83351287d2f5bd04fd8125710932cd4d5
@@@ -4,28 -4,29 +4,28 @@@
  
  use chain::transaction::OutPoint;
  use chain::chaininterface::{ChainListener, ChainWatchInterface};
 -use chain::keysinterface::{KeysInterface, SpendableOutputDescriptor};
 -use chain::keysinterface;
 -use ln::channel::{COMMITMENT_TX_BASE_WEIGHT, COMMITMENT_TX_WEIGHT_PER_HTLC, BREAKDOWN_TIMEOUT};
 -use ln::channelmanager::{ChannelManager,ChannelManagerReadArgs,HTLCForwardInfo,RAACommitmentOrder, PaymentPreimage, PaymentHash};
 -use ln::channelmonitor::{ChannelMonitor, CLTV_CLAIM_BUFFER, HTLC_FAIL_TIMEOUT_BLOCKS, ManyChannelMonitor};
 -use ln::channel::{ACCEPTED_HTLC_SCRIPT_WEIGHT, OFFERED_HTLC_SCRIPT_WEIGHT};
 +use chain::keysinterface::{KeysInterface, SpendableOutputDescriptor, KeysManager};
 +use ln::channel::{COMMITMENT_TX_BASE_WEIGHT, COMMITMENT_TX_WEIGHT_PER_HTLC};
 +use ln::channelmanager::{ChannelManager,ChannelManagerReadArgs,HTLCForwardInfo,RAACommitmentOrder, PaymentPreimage, PaymentHash, BREAKDOWN_TIMEOUT};
 +use ln::channelmonitor::{ChannelMonitor, CLTV_CLAIM_BUFFER, LATENCY_GRACE_PERIOD_BLOCKS, ManyChannelMonitor, ANTI_REORG_DELAY};
 +use ln::channel::{ACCEPTED_HTLC_SCRIPT_WEIGHT, OFFERED_HTLC_SCRIPT_WEIGHT, Channel, ChannelError};
  use ln::onion_utils;
  use ln::router::{Route, RouteHop};
  use ln::msgs;
 -use ln::msgs::{ChannelMessageHandler,RoutingMessageHandler,HTLCFailChannelUpdate};
 +use ln::msgs::{ChannelMessageHandler,RoutingMessageHandler,HTLCFailChannelUpdate, LocalFeatures, ErrorAction};
  use util::test_utils;
  use util::events::{Event, EventsProvider, MessageSendEvent, MessageSendEventsProvider};
  use util::errors::APIError;
  use util::ser::{Writeable, ReadableArgs};
  use util::config::UserConfig;
 -use util::rng;
  
 -use bitcoin::util::hash::{BitcoinHash, Sha256dHash};
 +use bitcoin::util::hash::BitcoinHash;
 +use bitcoin_hashes::sha256d::Hash as Sha256dHash;
  use bitcoin::util::bip143;
  use bitcoin::util::address::Address;
  use bitcoin::util::bip32::{ChildNumber, ExtendedPubKey, ExtendedPrivKey};
  use bitcoin::blockdata::block::{Block, BlockHeader};
 -use bitcoin::blockdata::transaction::{Transaction, TxOut, TxIn, SigHashType};
 +use bitcoin::blockdata::transaction::{Transaction, TxOut, TxIn, SigHashType, OutPoint as BitcoinOutPoint};
  use bitcoin::blockdata::script::{Builder, Script};
  use bitcoin::blockdata::opcodes;
  use bitcoin::blockdata::constants::genesis_block;
@@@ -41,16 -42,15 +41,16 @@@ use std::collections::{BTreeSet, HashMa
  use std::default::Default;
  use std::sync::Arc;
  use std::sync::atomic::Ordering;
 -use std::time::Instant;
  use std::mem;
  
 +use rand::{thread_rng, Rng};
 +
  use ln::functional_test_utils::*;
  
  #[test]
  fn test_async_inbound_update_fee() {
 -      let mut nodes = create_network(2);
 -      let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
 +      let mut nodes = create_network(2, &[None, None]);
 +      let chan = create_announced_chan_between_nodes(&nodes, 0, 1, LocalFeatures::new(), LocalFeatures::new());
        let channel_id = chan.2;
  
        // balancing
  fn test_update_fee_unordered_raa() {
        // Just the intro to the previous test followed by an out-of-order RAA (which caused a
        // crash in an earlier version of the update_fee patch)
 -      let mut nodes = create_network(2);
 -      let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
 +      let mut nodes = create_network(2, &[None, None]);
 +      let chan = create_announced_chan_between_nodes(&nodes, 0, 1, LocalFeatures::new(), LocalFeatures::new());
        let channel_id = chan.2;
  
        // balancing
  
  #[test]
  fn test_multi_flight_update_fee() {
 -      let nodes = create_network(2);
 -      let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
 +      let nodes = create_network(2, &[None, None]);
 +      let chan = create_announced_chan_between_nodes(&nodes, 0, 1, LocalFeatures::new(), LocalFeatures::new());
        let channel_id = chan.2;
  
        // A                                        B
  
  #[test]
  fn test_update_fee_vanilla() {
 -      let nodes = create_network(2);
 -      let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
 +      let nodes = create_network(2, &[None, None]);
 +      let chan = create_announced_chan_between_nodes(&nodes, 0, 1, LocalFeatures::new(), LocalFeatures::new());
        let channel_id = chan.2;
  
        let feerate = get_feerate!(nodes[0], channel_id);
  
  #[test]
  fn test_update_fee_that_funder_cannot_afford() {
 -      let nodes = create_network(2);
 +      let nodes = create_network(2, &[None, None]);
        let channel_value = 1888;
 -      let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, channel_value, 700000);
 +      let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, channel_value, 700000, LocalFeatures::new(), LocalFeatures::new());
        let channel_id = chan.2;
  
        let feerate = 260;
  
  #[test]
  fn test_update_fee_with_fundee_update_add_htlc() {
 -      let mut nodes = create_network(2);
 -      let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
 +      let mut nodes = create_network(2, &[None, None]);
 +      let chan = create_announced_chan_between_nodes(&nodes, 0, 1, LocalFeatures::new(), LocalFeatures::new());
        let channel_id = chan.2;
  
        // balancing
  
  #[test]
  fn test_update_fee() {
 -      let nodes = create_network(2);
 -      let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
 +      let nodes = create_network(2, &[None, None]);
 +      let chan = create_announced_chan_between_nodes(&nodes, 0, 1, LocalFeatures::new(), LocalFeatures::new());
        let channel_id = chan.2;
  
        // A                                        B
  #[test]
  fn pre_funding_lock_shutdown_test() {
        // Test sending a shutdown prior to funding_locked after funding generation
 -      let nodes = create_network(2);
 -      let tx = create_chan_between_nodes_with_value_init(&nodes[0], &nodes[1], 8000000, 0);
 +      let nodes = create_network(2, &[None, None]);
 +      let tx = create_chan_between_nodes_with_value_init(&nodes[0], &nodes[1], 8000000, 0, LocalFeatures::new(), LocalFeatures::new());
        let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
        nodes[0].chain_monitor.block_connected_checked(&header, 1, &[&tx; 1], &[1; 1]);
        nodes[1].chain_monitor.block_connected_checked(&header, 1, &[&tx; 1], &[1; 1]);
  #[test]
  fn updates_shutdown_wait() {
        // Test sending a shutdown with outstanding updates pending
 -      let mut nodes = create_network(3);
 -      let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
 -      let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2);
 +      let mut nodes = create_network(3, &[None, None, None]);
 +      let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1, LocalFeatures::new(), LocalFeatures::new());
 +      let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2, LocalFeatures::new(), LocalFeatures::new());
        let route_1 = nodes[0].router.get_route(&nodes[1].node.get_our_node_id(), None, &[], 100000, TEST_FINAL_CLTV).unwrap();
        let route_2 = nodes[1].router.get_route(&nodes[0].node.get_our_node_id(), None, &[], 100000, TEST_FINAL_CLTV).unwrap();
  
  #[test]
  fn htlc_fail_async_shutdown() {
        // Test HTLCs fail if shutdown starts even if messages are delivered out-of-order
 -      let mut nodes = create_network(3);
 -      let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
 -      let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2);
 +      let mut nodes = create_network(3, &[None, None, None]);
 +      let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1, LocalFeatures::new(), LocalFeatures::new());
 +      let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2, LocalFeatures::new(), LocalFeatures::new());
  
        let route = nodes[0].router.get_route(&nodes[2].node.get_our_node_id(), None, &[], 100000, TEST_FINAL_CLTV).unwrap();
        let (_, our_payment_hash) = get_payment_preimage_hash!(nodes[0]);
  fn do_test_shutdown_rebroadcast(recv_count: u8) {
        // Test that shutdown/closing_signed is re-sent on reconnect with a variable number of
        // messages delivered prior to disconnect
 -      let nodes = create_network(3);
 -      let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
 -      let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2);
 +      let nodes = create_network(3, &[None, None, None]);
 +      let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1, LocalFeatures::new(), LocalFeatures::new());
 +      let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2, LocalFeatures::new(), LocalFeatures::new());
  
        let (our_payment_preimage, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 100000);
  
@@@ -931,12 -931,12 +931,12 @@@ fn test_shutdown_rebroadcast() 
  fn fake_network_test() {
        // Simple test which builds a network of ChannelManagers, connects them to each other, and
        // tests that payments get routed and transactions broadcast in semi-reasonable ways.
 -      let nodes = create_network(4);
 +      let nodes = create_network(4, &[None, None, None, None]);
  
        // Create some initial channels
 -      let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
 -      let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2);
 -      let chan_3 = create_announced_chan_between_nodes(&nodes, 2, 3);
 +      let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1, LocalFeatures::new(), LocalFeatures::new());
 +      let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2, LocalFeatures::new(), LocalFeatures::new());
 +      let chan_3 = create_announced_chan_between_nodes(&nodes, 2, 3, LocalFeatures::new(), LocalFeatures::new());
  
        // Rebalance the network a bit by relaying one payment through all the channels...
        send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2], &nodes[3])[..], 8000000);
        fail_payment(&nodes[0], &vec!(&nodes[1], &nodes[2], &nodes[3])[..], payment_hash_1);
  
        // Add a new channel that skips 3
 -      let chan_4 = create_announced_chan_between_nodes(&nodes, 1, 3);
 +      let chan_4 = create_announced_chan_between_nodes(&nodes, 1, 3, LocalFeatures::new(), LocalFeatures::new());
  
        send_payment(&nodes[0], &vec!(&nodes[1], &nodes[3])[..], 1000000);
        send_payment(&nodes[2], &vec!(&nodes[3])[..], 1000000);
        claim_payment(&nodes[1], &vec!(&nodes[2], &nodes[3], &nodes[1])[..], payment_preimage_1);
  
        // Add a duplicate new channel from 2 to 4
 -      let chan_5 = create_announced_chan_between_nodes(&nodes, 1, 3);
 +      let chan_5 = create_announced_chan_between_nodes(&nodes, 1, 3, LocalFeatures::new(), LocalFeatures::new());
  
        // Send some payments across both channels
        let payment_preimage_3 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[3])[..], 3000000).0;
@@@ -1044,9 -1044,9 +1044,9 @@@ fn holding_cell_htlc_counting() 
        // Tests that HTLCs in the holding cell count towards the pending HTLC limits on outbound HTLCs
        // to ensure we don't end up with HTLCs sitting around in our holding cell for several
        // commitment dance rounds.
 -      let mut nodes = create_network(3);
 -      create_announced_chan_between_nodes(&nodes, 0, 1);
 -      let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2);
 +      let mut nodes = create_network(3, &[None, None, None]);
 +      create_announced_chan_between_nodes(&nodes, 0, 1, LocalFeatures::new(), LocalFeatures::new());
 +      let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2, LocalFeatures::new(), LocalFeatures::new());
  
        let mut payments = Vec::new();
        for _ in 0..::ln::channel::OUR_MAX_HTLCS {
  fn duplicate_htlc_test() {
        // Test that we accept duplicate payment_hash HTLCs across the network and that
        // claiming/failing them are all separate and don't affect each other
 -      let mut nodes = create_network(6);
 +      let mut nodes = create_network(6, &[None, None, None, None, None, None]);
  
        // Create some initial channels to route via 3 to 4/5 from 0/1/2
 -      create_announced_chan_between_nodes(&nodes, 0, 3);
 -      create_announced_chan_between_nodes(&nodes, 1, 3);
 -      create_announced_chan_between_nodes(&nodes, 2, 3);
 -      create_announced_chan_between_nodes(&nodes, 3, 4);
 -      create_announced_chan_between_nodes(&nodes, 3, 5);
 +      create_announced_chan_between_nodes(&nodes, 0, 3, LocalFeatures::new(), LocalFeatures::new());
 +      create_announced_chan_between_nodes(&nodes, 1, 3, LocalFeatures::new(), LocalFeatures::new());
 +      create_announced_chan_between_nodes(&nodes, 2, 3, LocalFeatures::new(), LocalFeatures::new());
 +      create_announced_chan_between_nodes(&nodes, 3, 4, LocalFeatures::new(), LocalFeatures::new());
 +      create_announced_chan_between_nodes(&nodes, 3, 5, LocalFeatures::new(), LocalFeatures::new());
  
        let (payment_preimage, payment_hash) = route_payment(&nodes[0], &vec!(&nodes[3], &nodes[4])[..], 1000000);
  
  }
  
  fn do_channel_reserve_test(test_recv: bool) {
 -      use util::rng;
        use std::sync::atomic::Ordering;
        use ln::msgs::HandleError;
  
 -      let mut nodes = create_network(3);
 -      let chan_1 = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1900, 1001);
 -      let chan_2 = create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 1900, 1001);
 +      let mut nodes = create_network(3, &[None, None, None]);
 +      let chan_1 = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1900, 1001, LocalFeatures::new(), LocalFeatures::new());
 +      let chan_2 = create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 1900, 1001, LocalFeatures::new(), LocalFeatures::new());
  
        let mut stat01 = get_channel_value_stat!(nodes[0], chan_1.2);
        let mut stat11 = get_channel_value_stat!(nodes[1], chan_1.2);
                assert!(route.hops.iter().rev().skip(1).all(|h| h.fee_msat == feemsat));
                let err = nodes[0].node.send_payment(route, our_payment_hash).err().unwrap();
                match err {
-                       APIError::ChannelUnavailable{err} => assert_eq!(err, "Cannot send value that would put us over the max HTLC value in flight"),
+                       APIError::ChannelUnavailable{err} => assert_eq!(err, "Cannot send value that would put us over the max HTLC value in flight our peer will accept"),
                        _ => panic!("Unknown error variants"),
                }
        }
                let (route, our_payment_hash, _) = get_route_and_payment_hash!(recv_value + 1);
                let err = nodes[0].node.send_payment(route.clone(), our_payment_hash).err().unwrap();
                match err {
-                       APIError::ChannelUnavailable{err} => assert_eq!(err, "Cannot send value that would put us over the reserve value"),
+                       APIError::ChannelUnavailable{err} => assert_eq!(err, "Cannot send value that would put us over their reserve value"),
                        _ => panic!("Unknown error variants"),
                }
        }
        {
                let (route, our_payment_hash, _) = get_route_and_payment_hash!(recv_value_2 + 1);
                match nodes[0].node.send_payment(route, our_payment_hash).err().unwrap() {
-                       APIError::ChannelUnavailable{err} => assert_eq!(err, "Cannot send value that would put us over the reserve value"),
+                       APIError::ChannelUnavailable{err} => assert_eq!(err, "Cannot send value that would put us over their reserve value"),
                        _ => panic!("Unknown error variants"),
                }
        }
                let secp_ctx = Secp256k1::new();
                let session_priv = SecretKey::from_slice(&{
                        let mut session_key = [0; 32];
 -                      rng::fill_bytes(&mut session_key);
 +                      let mut rng = thread_rng();
 +                      rng.fill_bytes(&mut session_key);
                        session_key
                }).expect("RNG is bad!");
  
        {
                let (route, our_payment_hash, _) = get_route_and_payment_hash!(recv_value_22+1);
                match nodes[0].node.send_payment(route, our_payment_hash).err().unwrap() {
-                       APIError::ChannelUnavailable{err} => assert_eq!(err, "Cannot send value that would put us over the reserve value"),
+                       APIError::ChannelUnavailable{err} => assert_eq!(err, "Cannot send value that would put us over their reserve value"),
                        _ => panic!("Unknown error variants"),
                }
        }
@@@ -1449,164 -1449,17 +1449,164 @@@ fn channel_reserve_test() 
        do_channel_reserve_test(true);
  }
  
 +#[test]
 +fn channel_reserve_in_flight_removes() {
 +      // In cases where one side claims an HTLC, it thinks it has additional available funds that it
 +      // can send to its counterparty, but due to update ordering, the other side may not yet have
 +      // considered those HTLCs fully removed.
 +      // This tests that we don't count HTLCs which will not be included in the next remote
 +      // commitment transaction towards the reserve value (as it implies no commitment transaction
 +      // will be generated which violates the remote reserve value).
 +      // This was broken previously, and discovered by the chanmon_fail_consistency fuzz test.
 +      // To test this we:
 +      //  * route two HTLCs from A to B (note that, at a high level, this test is checking that, when
 +      //    you consider the values of both of these HTLCs, B may not send an HTLC back to A, but if
 +      //    you only consider the value of the first HTLC, it may not),
 +      //  * start routing a third HTLC from A to B,
 +      //  * claim the first two HTLCs (though B will generate an update_fulfill for one, and put
 +      //    the other claim in its holding cell, as it immediately goes into AwaitingRAA),
 +      //  * deliver the first fulfill from B
 +      //  * deliver the update_add and an RAA from A, resulting in B freeing the second holding cell
 +      //    claim,
 +      //  * deliver A's response CS and RAA.
 +      //    This results in A having the second HTLC in AwaitingRemovedRemoteRevoke, but B having
 +      //    removed it fully. B now has the push_msat plus the first two HTLCs in value.
 +      //  * Now B happily sends another HTLC, potentially violating its reserve value from A's point
 +      //    of view (if A counts the AwaitingRemovedRemoteRevoke HTLC).
 +      let mut nodes = create_network(2, &[None, None]);
 +      let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1, LocalFeatures::new(), LocalFeatures::new());
 +
 +      let b_chan_values = get_channel_value_stat!(nodes[1], chan_1.2);
 +      // Route the first two HTLCs.
 +      let (payment_preimage_1, _) = route_payment(&nodes[0], &[&nodes[1]], b_chan_values.channel_reserve_msat - b_chan_values.value_to_self_msat - 10000);
 +      let (payment_preimage_2, _) = route_payment(&nodes[0], &[&nodes[1]], 20000);
 +
 +      // Start routing the third HTLC (this is just used to get everyone in the right state).
 +      let (payment_preimage_3, payment_hash_3) = get_payment_preimage_hash!(nodes[0]);
 +      let send_1 = {
 +              let route = nodes[0].router.get_route(&nodes[1].node.get_our_node_id(), None, &[], 100000, TEST_FINAL_CLTV).unwrap();
 +              nodes[0].node.send_payment(route, payment_hash_3).unwrap();
 +              check_added_monitors!(nodes[0], 1);
 +              let mut events = nodes[0].node.get_and_clear_pending_msg_events();
 +              assert_eq!(events.len(), 1);
 +              SendEvent::from_event(events.remove(0))
 +      };
 +
 +      // Now claim both of the first two HTLCs on B's end, putting B in AwaitingRAA and generating an
 +      // initial fulfill/CS.
 +      assert!(nodes[1].node.claim_funds(payment_preimage_1));
 +      check_added_monitors!(nodes[1], 1);
 +      let bs_removes = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
 +
 +      // This claim goes in B's holding cell, allowing us to have a pending B->A RAA which does not
 +      // remove the second HTLC when we send the HTLC back from B to A.
 +      assert!(nodes[1].node.claim_funds(payment_preimage_2));
 +      check_added_monitors!(nodes[1], 1);
 +      assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
 +
 +      nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_removes.update_fulfill_htlcs[0]).unwrap();
 +      nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_removes.commitment_signed).unwrap();
 +      check_added_monitors!(nodes[0], 1);
 +      let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
 +      expect_payment_sent!(nodes[0], payment_preimage_1);
 +
 +      nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &send_1.msgs[0]).unwrap();
 +      nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &send_1.commitment_msg).unwrap();
 +      check_added_monitors!(nodes[1], 1);
 +      // B is already AwaitingRAA, so can't generate a CS here
 +      let bs_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
 +
 +      nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa).unwrap();
 +      check_added_monitors!(nodes[1], 1);
 +      let bs_cs = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
 +
 +      nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_raa).unwrap();
 +      check_added_monitors!(nodes[0], 1);
 +      let as_cs = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
 +
 +      nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_cs.commitment_signed).unwrap();
 +      check_added_monitors!(nodes[1], 1);
 +      let bs_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
 +
 +      // The second HTLC is removed, but as A is in AwaitingRAA it can't generate a CS here, so the
 +      // RAA that B generated above doesn't fully resolve the second HTLC from A's point of view.
 +      // However, the RAA A generates here *does* fully resolve the HTLC from B's point of view (as A
 +      // can no longer broadcast a commitment transaction with it and B has the preimage so can go
 +      // on-chain as necessary).
 +      nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_cs.update_fulfill_htlcs[0]).unwrap();
 +      nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_cs.commitment_signed).unwrap();
 +      check_added_monitors!(nodes[0], 1);
 +      let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
 +      expect_payment_sent!(nodes[0], payment_preimage_2);
 +
 +      nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa).unwrap();
 +      check_added_monitors!(nodes[1], 1);
 +      assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
 +
 +      expect_pending_htlcs_forwardable!(nodes[1]);
 +      expect_payment_received!(nodes[1], payment_hash_3, 100000);
 +
 +      // Note that as this RAA was generated before the delivery of the update_fulfill it shouldn't
 +      // resolve the second HTLC from A's point of view.
 +      nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_raa).unwrap();
 +      check_added_monitors!(nodes[0], 1);
 +      let as_cs = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
 +
 +      // Now that B doesn't have the second RAA anymore, but A still does, send a payment from B back
 +      // to A to ensure that A doesn't count the almost-removed HTLC in update_add processing.
 +      let (payment_preimage_4, payment_hash_4) = get_payment_preimage_hash!(nodes[1]);
 +      let send_2 = {
 +              let route = nodes[1].router.get_route(&nodes[0].node.get_our_node_id(), None, &[], 10000, TEST_FINAL_CLTV).unwrap();
 +              nodes[1].node.send_payment(route, payment_hash_4).unwrap();
 +              check_added_monitors!(nodes[1], 1);
 +              let mut events = nodes[1].node.get_and_clear_pending_msg_events();
 +              assert_eq!(events.len(), 1);
 +              SendEvent::from_event(events.remove(0))
 +      };
 +
 +      nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &send_2.msgs[0]).unwrap();
 +      nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &send_2.commitment_msg).unwrap();
 +      check_added_monitors!(nodes[0], 1);
 +      let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
 +
 +      // Now just resolve all the outstanding messages/HTLCs for completeness...
 +
 +      nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_cs.commitment_signed).unwrap();
 +      check_added_monitors!(nodes[1], 1);
 +      let bs_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
 +
 +      nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa).unwrap();
 +      check_added_monitors!(nodes[1], 1);
 +
 +      nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_raa).unwrap();
 +      check_added_monitors!(nodes[0], 1);
 +      let as_cs = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
 +
 +      nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_cs.commitment_signed).unwrap();
 +      check_added_monitors!(nodes[1], 1);
 +      let bs_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
 +
 +      nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_raa).unwrap();
 +      check_added_monitors!(nodes[0], 1);
 +
 +      expect_pending_htlcs_forwardable!(nodes[0]);
 +      expect_payment_received!(nodes[0], payment_hash_4, 10000);
 +
 +      claim_payment(&nodes[1], &[&nodes[0]], payment_preimage_4);
 +      claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_3);
 +}
 +
  #[test]
  fn channel_monitor_network_test() {
        // Simple test which builds a network of ChannelManagers, connects them to each other, and
        // tests that ChannelMonitor is able to recover from various states.
 -      let nodes = create_network(5);
 +      let nodes = create_network(5, &[None, None, None, None, None]);
  
        // Create some initial channels
 -      let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
 -      let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2);
 -      let chan_3 = create_announced_chan_between_nodes(&nodes, 2, 3);
 -      let chan_4 = create_announced_chan_between_nodes(&nodes, 3, 4);
 +      let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1, LocalFeatures::new(), LocalFeatures::new());
 +      let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2, LocalFeatures::new(), LocalFeatures::new());
 +      let chan_3 = create_announced_chan_between_nodes(&nodes, 2, 3, LocalFeatures::new(), LocalFeatures::new());
 +      let chan_4 = create_announced_chan_between_nodes(&nodes, 3, 4, LocalFeatures::new(), LocalFeatures::new());
  
        // Rebalance the network a bit by relaying one payment through all the channels...
        send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2], &nodes[3], &nodes[4])[..], 8000000);
        {
                let mut header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
                nodes[3].chain_monitor.block_connected_checked(&header, 2, &Vec::new()[..], &[0; 0]);
 -              for i in 3..TEST_FINAL_CLTV + 2 + HTLC_FAIL_TIMEOUT_BLOCKS + 1 {
 +              for i in 3..TEST_FINAL_CLTV + 2 + LATENCY_GRACE_PERIOD_BLOCKS + 1 {
                        header = BlockHeader { version: 0x20000000, prev_blockhash: header.bitcoin_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
                        nodes[3].chain_monitor.block_connected_checked(&header, i, &Vec::new()[..], &[0; 0]);
                }
  fn test_justice_tx() {
        // Test justice txn built on revoked HTLC-Success tx, against both sides
  
 -      let nodes = create_network(2);
 +      let mut alice_config = UserConfig::new();
 +      alice_config.channel_options.announced_channel = true;
 +      alice_config.peer_channel_config_limits.force_announced_channel_preference = false;
 +      alice_config.own_channel_config.our_to_self_delay = 6 * 24 * 5;
 +      let mut bob_config = UserConfig::new();
 +      bob_config.channel_options.announced_channel = true;
 +      bob_config.peer_channel_config_limits.force_announced_channel_preference = false;
 +      bob_config.own_channel_config.our_to_self_delay = 6 * 24 * 3;
 +      let nodes = create_network(2, &[Some(alice_config), Some(bob_config)]);
        // Create some new channels:
 -      let chan_5 = create_announced_chan_between_nodes(&nodes, 0, 1);
 +      let chan_5 = create_announced_chan_between_nodes(&nodes, 0, 1, LocalFeatures::new(), LocalFeatures::new());
  
        // A pending HTLC which will be revoked:
        let payment_preimage_3 = route_payment(&nodes[0], &vec!(&nodes[1])[..], 3000000).0;
  
        // We test justice_tx build by A on B's revoked HTLC-Success tx
        // Create some new channels:
 -      let chan_6 = create_announced_chan_between_nodes(&nodes, 0, 1);
 +      let chan_6 = create_announced_chan_between_nodes(&nodes, 0, 1, LocalFeatures::new(), LocalFeatures::new());
  
        // A pending HTLC which will be revoked:
        let payment_preimage_4 = route_payment(&nodes[0], &vec!(&nodes[1])[..], 3000000).0;
  fn revoked_output_claim() {
        // Simple test to ensure a node will claim a revoked output when a stale remote commitment
        // transaction is broadcast by its counterparty
 -      let nodes = create_network(2);
 -      let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
 +      let nodes = create_network(2, &[None, None]);
 +      let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1, LocalFeatures::new(), LocalFeatures::new());
        // node[0] is going to revoke an old state, thus node[1] should be able to claim the revoked output
        let revoked_local_txn = nodes[0].node.channel_state.lock().unwrap().by_id.get(&chan_1.2).unwrap().last_local_commitment_txn.clone();
        assert_eq!(revoked_local_txn.len(), 1);
  #[test]
  fn claim_htlc_outputs_shared_tx() {
        // Node revoked old state, htlcs haven't timed out yet, claim them in shared justice tx
 -      let nodes = create_network(2);
 +      let nodes = create_network(2, &[None, None]);
  
        // Create some new channel:
 -      let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
 +      let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1, LocalFeatures::new(), LocalFeatures::new());
  
        // Rebalance the network to generate htlc in the two directions
        send_payment(&nodes[0], &vec!(&nodes[1])[..], 8000000);
                let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
                nodes[0].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![revoked_local_txn[0].clone()] }, 1);
                nodes[1].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![revoked_local_txn[0].clone()] }, 1);
 +              connect_blocks(&nodes[1].chain_monitor, ANTI_REORG_DELAY - 1, 1, true, header.bitcoin_hash());
  
                let events = nodes[1].node.get_and_clear_pending_events();
                assert_eq!(events.len(), 1);
  #[test]
  fn claim_htlc_outputs_single_tx() {
        // Node revoked old state, htlcs have timed out, claim each of them in separated justice tx
 -      let nodes = create_network(2);
 +      let nodes = create_network(2, &[None, None]);
  
 -      let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
 +      let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1, LocalFeatures::new(), LocalFeatures::new());
  
        // Rebalance the network to generate htlc in the two directions
        send_payment(&nodes[0], &vec!(&nodes[1])[..], 8000000);
                let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
                nodes[0].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![revoked_local_txn[0].clone()] }, 200);
                nodes[1].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![revoked_local_txn[0].clone()] }, 200);
 +              connect_blocks(&nodes[1].chain_monitor, ANTI_REORG_DELAY - 1, 200, true, header.bitcoin_hash());
  
                let events = nodes[1].node.get_and_clear_pending_events();
                assert_eq!(events.len(), 1);
                }
  
                let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
 -              assert_eq!(node_txn.len(), 12); // ChannelManager : 2, ChannelMontitor: 8 (1 standard revoked output, 2 revocation htlc tx, 1 local commitment tx + 1 htlc timeout tx) * 2 (block-rescan)
 +              assert_eq!(node_txn.len(), 22); // ChannelManager : 2, ChannelMonitor: 8 (1 standard revoked output, 2 revocation htlc tx, 1 local commitment tx + 1 htlc timeout tx) * 2 (block-rescan) + 5 * (1 local commitment tx + 1 htlc timeout tx)
  
                assert_eq!(node_txn[0], node_txn[7]);
                assert_eq!(node_txn[1], node_txn[8]);
                assert_eq!(node_txn[3], node_txn[5]); //local commitment tx + htlc timeout tx broadcast by ChannelManager
                assert_eq!(node_txn[4], node_txn[6]);
  
 +              for i in 12..22 {
 +                      if i % 2 == 0 { assert_eq!(node_txn[3], node_txn[i]); } else { assert_eq!(node_txn[4], node_txn[i]); }
 +              }
 +
                assert_eq!(node_txn[0].input.len(), 1);
                assert_eq!(node_txn[1].input.len(), 1);
                assert_eq!(node_txn[2].input.len(), 1);
@@@ -2022,11 -1861,11 +2022,11 @@@ fn test_htlc_on_chain_success() 
        // the HTLC outputs via the preimage it learned (which, once confirmed should generate a
        // PaymentSent event).
  
 -      let nodes = create_network(3);
 +      let nodes = create_network(3, &[None, None, None]);
  
        // Create some initial channels
 -      let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
 -      let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2);
 +      let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1, LocalFeatures::new(), LocalFeatures::new());
 +      let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2, LocalFeatures::new(), LocalFeatures::new());
  
        // Rebalance the network a bit by relaying one payment through all the channels...
        send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 8000000);
@@@ -2183,11 -2022,11 +2183,11 @@@ fn test_htlc_on_chain_timeout() 
        //            \                                  \
        //         B's HTLC timeout tx               B's timeout tx
  
 -      let nodes = create_network(3);
 +      let nodes = create_network(3, &[None, None, None]);
  
        // Create some initial channels
 -      let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
 -      let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2);
 +      let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1, LocalFeatures::new(), LocalFeatures::new());
 +      let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2, LocalFeatures::new(), LocalFeatures::new());
  
        // Rebalance the network a bit by relaying one payment through all the channels...
        send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 8000000);
        }
  
        nodes[1].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![timeout_tx]}, 1);
 +      connect_blocks(&nodes[1].chain_monitor, ANTI_REORG_DELAY - 1, 1, true, header.bitcoin_hash());
        check_added_monitors!(nodes[1], 0);
        check_closed_broadcast!(nodes[1]);
  
@@@ -2291,11 -2129,11 +2291,11 @@@ fn test_simple_commitment_revoked_fail_
        // Test that in case of a revoked commitment tx, we detect the resolution of output by justice tx
        // and fail backward accordingly.
  
 -      let nodes = create_network(3);
 +      let nodes = create_network(3, &[None, None, None]);
  
        // Create some initial channels
 -      create_announced_chan_between_nodes(&nodes, 0, 1);
 -      let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2);
 +      create_announced_chan_between_nodes(&nodes, 0, 1, LocalFeatures::new(), LocalFeatures::new());
 +      let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2, LocalFeatures::new(), LocalFeatures::new());
  
        let (payment_preimage, _payment_hash) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 3000000);
        // Get the will-be-revoked local txn from nodes[2]
  
        let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42};
        nodes[1].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![revoked_local_txn[0].clone()] }, 1);
 +      connect_blocks(&nodes[1].chain_monitor, ANTI_REORG_DELAY - 1, 1, true, header.bitcoin_hash());
        check_added_monitors!(nodes[1], 0);
        check_closed_broadcast!(nodes[1]);
  
@@@ -2359,11 -2196,11 +2359,11 @@@ fn do_test_commitment_revoked_fail_back
        // * Once they remove it, we will send a (the first) commitment_signed without the HTLC,
        //   and once they revoke the previous commitment transaction (allowing us to send a new
        //   commitment_signed) we will be free to fail/fulfill the HTLC backwards.
 -      let mut nodes = create_network(3);
 +      let mut nodes = create_network(3, &[None, None, None]);
  
        // Create some initial channels
 -      create_announced_chan_between_nodes(&nodes, 0, 1);
 -      let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2);
 +      create_announced_chan_between_nodes(&nodes, 0, 1, LocalFeatures::new(), LocalFeatures::new());
 +      let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2, LocalFeatures::new(), LocalFeatures::new());
  
        let (payment_preimage, _payment_hash) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], if no_to_remote { 10_000 } else { 3_000_000 });
        // Get the will-be-revoked local txn from nodes[2]
  
        let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42};
        nodes[1].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![revoked_local_txn[0].clone()] }, 1);
 +      connect_blocks(&nodes[1].chain_monitor, ANTI_REORG_DELAY - 1, 1, true, header.bitcoin_hash());
  
        let events = nodes[1].node.get_and_clear_pending_events();
        assert_eq!(events.len(), if deliver_bs_raa { 1 } else { 2 });
                        _ => panic!("Unexpected event"),
                };
        }
 -      nodes[1].node.channel_state.lock().unwrap().next_forward = Instant::now();
        nodes[1].node.process_pending_htlc_forwards();
        check_added_monitors!(nodes[1], 1);
  
@@@ -2570,8 -2407,8 +2570,8 @@@ fn test_commitment_revoked_fail_backwar
  fn test_htlc_ignore_latest_remote_commitment() {
        // Test that HTLC transactions spending the latest remote commitment transaction are simply
        // ignored if we cannot claim them. This originally tickled an invalid unwrap().
 -      let nodes = create_network(2);
 -      create_announced_chan_between_nodes(&nodes, 0, 1);
 +      let nodes = create_network(2, &[None, None]);
 +      create_announced_chan_between_nodes(&nodes, 0, 1, LocalFeatures::new(), LocalFeatures::new());
  
        route_payment(&nodes[0], &[&nodes[1]], 10000000);
        nodes[0].node.force_close_channel(&nodes[0].node.list_channels()[0].channel_id);
  #[test]
  fn test_force_close_fail_back() {
        // Check which HTLCs are failed-backwards on channel force-closure
 -      let mut nodes = create_network(3);
 -      create_announced_chan_between_nodes(&nodes, 0, 1);
 -      create_announced_chan_between_nodes(&nodes, 1, 2);
 +      let mut nodes = create_network(3, &[None, None, None]);
 +      create_announced_chan_between_nodes(&nodes, 0, 1, LocalFeatures::new(), LocalFeatures::new());
 +      create_announced_chan_between_nodes(&nodes, 1, 2, LocalFeatures::new(), LocalFeatures::new());
  
        let route = nodes[0].router.get_route(&nodes[2].node.get_our_node_id(), None, &Vec::new(), 1000000, 42).unwrap();
  
        // Now check that if we add the preimage to ChannelMonitor it broadcasts our HTLC-Success..
        {
                let mut monitors = nodes[2].chan_monitor.simple_monitor.monitors.lock().unwrap();
 -              monitors.get_mut(&OutPoint::new(Sha256dHash::from(&payment_event.commitment_msg.channel_id[..]), 0)).unwrap()
 +              monitors.get_mut(&OutPoint::new(Sha256dHash::from_slice(&payment_event.commitment_msg.channel_id[..]).unwrap(), 0)).unwrap()
                        .provide_payment_preimage(&our_payment_hash, &our_payment_preimage);
        }
        nodes[2].chain_monitor.block_connected_checked(&header, 1, &[&tx], &[1]);
  #[test]
  fn test_unconf_chan() {
        // After creating a chan between nodes, we disconnect all blocks previously seen to force a channel close on nodes[0] side
 -      let nodes = create_network(2);
 -      create_announced_chan_between_nodes(&nodes, 0, 1);
 +      let nodes = create_network(2, &[None, None]);
 +      create_announced_chan_between_nodes(&nodes, 0, 1, LocalFeatures::new(), LocalFeatures::new());
  
        let channel_state = nodes[0].node.channel_state.lock().unwrap();
        assert_eq!(channel_state.by_id.len(), 1);
                header = BlockHeader { version: 0x20000000, prev_blockhash: header.bitcoin_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
                headers.push(header.clone());
        }
 +      let mut height = 99;
        while !headers.is_empty() {
 -              nodes[0].node.block_disconnected(&headers.pop().unwrap());
 +              nodes[0].node.block_disconnected(&headers.pop().unwrap(), height);
 +              height -= 1;
        }
        check_closed_broadcast!(nodes[0]);
        let channel_state = nodes[0].node.channel_state.lock().unwrap();
  #[test]
  fn test_simple_peer_disconnect() {
        // Test that we can reconnect when there are no lost messages
 -      let nodes = create_network(3);
 -      create_announced_chan_between_nodes(&nodes, 0, 1);
 -      create_announced_chan_between_nodes(&nodes, 1, 2);
 +      let nodes = create_network(3, &[None, None, None]);
 +      create_announced_chan_between_nodes(&nodes, 0, 1, LocalFeatures::new(), LocalFeatures::new());
 +      create_announced_chan_between_nodes(&nodes, 1, 2, LocalFeatures::new(), LocalFeatures::new());
  
        nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
        nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
  
  fn do_test_drop_messages_peer_disconnect(messages_delivered: u8) {
        // Test that we can reconnect when in-flight HTLC updates get dropped
 -      let mut nodes = create_network(2);
 +      let mut nodes = create_network(2, &[None, None]);
        if messages_delivered == 0 {
 -              create_chan_between_nodes_with_value_a(&nodes[0], &nodes[1], 100000, 10001);
 +              create_chan_between_nodes_with_value_a(&nodes[0], &nodes[1], 100000, 10001, LocalFeatures::new(), LocalFeatures::new());
                // nodes[1] doesn't receive the funding_locked message (it'll be re-sent on reconnect)
        } else {
 -              create_announced_chan_between_nodes(&nodes, 0, 1);
 +              create_announced_chan_between_nodes(&nodes, 0, 1, LocalFeatures::new(), LocalFeatures::new());
        }
  
        let route = nodes[0].router.get_route(&nodes[1].node.get_our_node_id(), Some(&nodes[0].node.list_usable_channels()), &Vec::new(), 1000000, TEST_FINAL_CLTV).unwrap();
        nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
        reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
  
 -      nodes[1].node.channel_state.lock().unwrap().next_forward = Instant::now();
        nodes[1].node.process_pending_htlc_forwards();
  
        let events_2 = nodes[1].node.get_and_clear_pending_events();
@@@ -2954,8 -2790,8 +2954,8 @@@ fn test_drop_messages_peer_disconnect_b
  #[test]
  fn test_funding_peer_disconnect() {
        // Test that we can lock in our funding tx while disconnected
 -      let nodes = create_network(2);
 -      let tx = create_chan_between_nodes_with_value_init(&nodes[0], &nodes[1], 100000, 10001);
 +      let nodes = create_network(2, &[None, None]);
 +      let tx = create_chan_between_nodes_with_value_init(&nodes[0], &nodes[1], 100000, 10001, LocalFeatures::new(), LocalFeatures::new());
  
        nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
        nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
  fn test_drop_messages_peer_disconnect_dual_htlc() {
        // Test that we can handle reconnecting when both sides of a channel have pending
        // commitment_updates when we disconnect.
 -      let mut nodes = create_network(2);
 -      create_announced_chan_between_nodes(&nodes, 0, 1);
 +      let mut nodes = create_network(2, &[None, None]);
 +      create_announced_chan_between_nodes(&nodes, 0, 1, LocalFeatures::new(), LocalFeatures::new());
  
        let (payment_preimage_1, _) = route_payment(&nodes[0], &[&nodes[1]], 1000000);
  
  fn test_invalid_channel_announcement() {
        // Test BOLT 7 channel_announcement msg requirement for final node, gather data to build custom channel_announcement msgs
        let secp_ctx = Secp256k1::new();
 -      let nodes = create_network(2);
 +      let nodes = create_network(2, &[None, None]);
  
 -      let chan_announcement = create_chan_between_nodes(&nodes[0], &nodes[1]);
 +      let chan_announcement = create_chan_between_nodes(&nodes[0], &nodes[1], LocalFeatures::new(), LocalFeatures::new());
  
        let a_channel_lock = nodes[0].node.channel_state.lock().unwrap();
        let b_channel_lock = nodes[1].node.channel_state.lock().unwrap();
  
        macro_rules! sign_msg {
                ($unsigned_msg: expr) => {
 -                      let msghash = Message::from_slice(&Sha256dHash::from_data(&$unsigned_msg.encode()[..])[..]).unwrap();
 +                      let msghash = Message::from_slice(&Sha256dHash::hash(&$unsigned_msg.encode()[..])[..]).unwrap();
                        let as_bitcoin_sig = secp_ctx.sign(&msghash, &as_chan.get_local_keys().funding_key);
                        let bs_bitcoin_sig = secp_ctx.sign(&msghash, &bs_chan.get_local_keys().funding_key);
                        let as_node_sig = secp_ctx.sign(&msghash, &nodes[0].keys_manager.get_node_secret());
        assert!(nodes[0].router.handle_channel_announcement(&chan_announcement).is_err());
  
        let mut unsigned_msg = dummy_unsigned_msg!();
 -      unsigned_msg.chain_hash = Sha256dHash::from_data(&[1,2,3,4,5,6,7,8,9]);
 +      unsigned_msg.chain_hash = Sha256dHash::hash(&[1,2,3,4,5,6,7,8,9]);
        sign_msg!(unsigned_msg);
        assert!(nodes[0].router.handle_channel_announcement(&chan_announcement).is_err());
  }
  
  #[test]
  fn test_no_txn_manager_serialize_deserialize() {
 -      let mut nodes = create_network(2);
 +      let mut nodes = create_network(2, &[None, None]);
  
 -      let tx = create_chan_between_nodes_with_value_init(&nodes[0], &nodes[1], 100000, 10001);
 +      let tx = create_chan_between_nodes_with_value_init(&nodes[0], &nodes[1], 100000, 10001, LocalFeatures::new(), LocalFeatures::new());
  
        nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
  
        let mut chan_0_monitor_serialized = test_utils::TestVecWriter(Vec::new());
        nodes[0].chan_monitor.simple_monitor.monitors.lock().unwrap().iter().next().unwrap().1.write_for_disk(&mut chan_0_monitor_serialized).unwrap();
  
 -      nodes[0].chan_monitor = Arc::new(test_utils::TestChannelMonitor::new(nodes[0].chain_monitor.clone(), nodes[0].tx_broadcaster.clone(), Arc::new(test_utils::TestLogger::new())));
 +      nodes[0].chan_monitor = Arc::new(test_utils::TestChannelMonitor::new(nodes[0].chain_monitor.clone(), nodes[0].tx_broadcaster.clone(), Arc::new(test_utils::TestLogger::new()), Arc::new(test_utils::TestFeeEstimator { sat_per_kw: 253 })));
        let mut chan_0_monitor_read = &chan_0_monitor_serialized.0[..];
        let (_, chan_0_monitor) = <(Sha256dHash, ChannelMonitor)>::read(&mut chan_0_monitor_read, Arc::new(test_utils::TestLogger::new())).unwrap();
        assert!(chan_0_monitor_read.is_empty());
  
        let mut nodes_0_read = &nodes_0_serialized[..];
        let config = UserConfig::new();
 -      let keys_manager = Arc::new(keysinterface::KeysManager::new(&nodes[0].node_seed, Network::Testnet, Arc::new(test_utils::TestLogger::new())));
 +      let keys_manager = Arc::new(test_utils::TestKeysInterface::new(&nodes[0].node_seed, Network::Testnet, Arc::new(test_utils::TestLogger::new())));
        let (_, nodes_0_deserialized) = {
                let mut channel_monitors = HashMap::new();
                channel_monitors.insert(chan_0_monitor.get_funding_txo().unwrap(), &chan_0_monitor);
  
  #[test]
  fn test_simple_manager_serialize_deserialize() {
 -      let mut nodes = create_network(2);
 -      create_announced_chan_between_nodes(&nodes, 0, 1);
 +      let mut nodes = create_network(2, &[None, None]);
 +      create_announced_chan_between_nodes(&nodes, 0, 1, LocalFeatures::new(), LocalFeatures::new());
  
        let (our_payment_preimage, _) = route_payment(&nodes[0], &[&nodes[1]], 1000000);
        let (_, our_payment_hash) = route_payment(&nodes[0], &[&nodes[1]], 1000000);
        let mut chan_0_monitor_serialized = test_utils::TestVecWriter(Vec::new());
        nodes[0].chan_monitor.simple_monitor.monitors.lock().unwrap().iter().next().unwrap().1.write_for_disk(&mut chan_0_monitor_serialized).unwrap();
  
 -      nodes[0].chan_monitor = Arc::new(test_utils::TestChannelMonitor::new(nodes[0].chain_monitor.clone(), nodes[0].tx_broadcaster.clone(), Arc::new(test_utils::TestLogger::new())));
 +      nodes[0].chan_monitor = Arc::new(test_utils::TestChannelMonitor::new(nodes[0].chain_monitor.clone(), nodes[0].tx_broadcaster.clone(), Arc::new(test_utils::TestLogger::new()), Arc::new(test_utils::TestFeeEstimator { sat_per_kw: 253 })));
        let mut chan_0_monitor_read = &chan_0_monitor_serialized.0[..];
        let (_, chan_0_monitor) = <(Sha256dHash, ChannelMonitor)>::read(&mut chan_0_monitor_read, Arc::new(test_utils::TestLogger::new())).unwrap();
        assert!(chan_0_monitor_read.is_empty());
  
        let mut nodes_0_read = &nodes_0_serialized[..];
 -      let keys_manager = Arc::new(keysinterface::KeysManager::new(&nodes[0].node_seed, Network::Testnet, Arc::new(test_utils::TestLogger::new())));
 +      let keys_manager = Arc::new(test_utils::TestKeysInterface::new(&nodes[0].node_seed, Network::Testnet, Arc::new(test_utils::TestLogger::new())));
        let (_, nodes_0_deserialized) = {
                let mut channel_monitors = HashMap::new();
                channel_monitors.insert(chan_0_monitor.get_funding_txo().unwrap(), &chan_0_monitor);
  #[test]
  fn test_manager_serialize_deserialize_inconsistent_monitor() {
        // Test deserializing a ChannelManager with an out-of-date ChannelMonitor
 -      let mut nodes = create_network(4);
 -      create_announced_chan_between_nodes(&nodes, 0, 1);
 -      create_announced_chan_between_nodes(&nodes, 2, 0);
 -      let (_, _, channel_id, funding_tx) = create_announced_chan_between_nodes(&nodes, 0, 3);
 +      let mut nodes = create_network(4, &[None, None, None, None]);
 +      create_announced_chan_between_nodes(&nodes, 0, 1, LocalFeatures::new(), LocalFeatures::new());
 +      create_announced_chan_between_nodes(&nodes, 2, 0, LocalFeatures::new(), LocalFeatures::new());
 +      let (_, _, channel_id, funding_tx) = create_announced_chan_between_nodes(&nodes, 0, 3, LocalFeatures::new(), LocalFeatures::new());
  
        let (our_payment_preimage, _) = route_payment(&nodes[2], &[&nodes[0], &nodes[1]], 1000000);
  
                node_0_monitors_serialized.push(writer.0);
        }
  
 -      nodes[0].chan_monitor = Arc::new(test_utils::TestChannelMonitor::new(nodes[0].chain_monitor.clone(), nodes[0].tx_broadcaster.clone(), Arc::new(test_utils::TestLogger::new())));
 +      nodes[0].chan_monitor = Arc::new(test_utils::TestChannelMonitor::new(nodes[0].chain_monitor.clone(), nodes[0].tx_broadcaster.clone(), Arc::new(test_utils::TestLogger::new()), Arc::new(test_utils::TestFeeEstimator { sat_per_kw: 253 })));
        let mut node_0_monitors = Vec::new();
        for serialized in node_0_monitors_serialized.iter() {
                let mut read = &serialized[..];
        }
  
        let mut nodes_0_read = &nodes_0_serialized[..];
 -      let keys_manager = Arc::new(keysinterface::KeysManager::new(&nodes[0].node_seed, Network::Testnet, Arc::new(test_utils::TestLogger::new())));
 +      let keys_manager = Arc::new(test_utils::TestKeysInterface::new(&nodes[0].node_seed, Network::Testnet, Arc::new(test_utils::TestLogger::new())));
        let (_, nodes_0_deserialized) = <(Sha256dHash, ChannelManager)>::read(&mut nodes_0_read, ChannelManagerReadArgs {
                default_config: UserConfig::new(),
                keys_manager,
@@@ -3431,7 -3267,7 +3431,7 @@@ macro_rules! check_spendable_outputs 
                                                                        };
                                                                        let secp_ctx = Secp256k1::new();
                                                                        let remotepubkey = PublicKey::from_secret_key(&secp_ctx, &key);
 -                                                                      let witness_script = Address::p2pkh(&remotepubkey, Network::Testnet).script_pubkey();
 +                                                                      let witness_script = Address::p2pkh(&::bitcoin::PublicKey{compressed: true, key: remotepubkey}, Network::Testnet).script_pubkey();
                                                                        let sighash = Message::from_slice(&bip143::SighashComponents::new(&spend_tx).sighash_all(&spend_tx.input[0], &witness_script, output.value)[..]).unwrap();
                                                                        let remotesig = secp_ctx.sign(&sighash, key);
                                                                        spend_tx.input[0].witness.push(remotesig.serialize_der().to_vec());
                                                                        let secret = {
                                                                                match ExtendedPrivKey::new_master(Network::Testnet, &$node.node_seed) {
                                                                                        Ok(master_key) => {
 -                                                                                              match master_key.ckd_priv(&secp_ctx, ChildNumber::from_hardened_idx($der_idx)) {
 +                                                                                              match master_key.ckd_priv(&secp_ctx, ChildNumber::from_hardened_idx($der_idx).expect("key space exhausted")) {
                                                                                                        Ok(key) => key,
                                                                                                        Err(_) => panic!("Your RNG is busted"),
                                                                                                }
                                                                        let pubkey = ExtendedPubKey::from_private(&secp_ctx, &secret).public_key;
                                                                        let witness_script = Address::p2pkh(&pubkey, Network::Testnet).script_pubkey();
                                                                        let sighash = Message::from_slice(&bip143::SighashComponents::new(&spend_tx).sighash_all(&spend_tx.input[0], &witness_script, output.value)[..]).unwrap();
 -                                                                      let sig = secp_ctx.sign(&sighash, &secret.secret_key);
 +                                                                      let sig = secp_ctx.sign(&sighash, &secret.private_key.key);
                                                                        spend_tx.input[0].witness.push(sig.serialize_der().to_vec());
                                                                        spend_tx.input[0].witness[0].push(SigHashType::All as u8);
 -                                                                      spend_tx.input[0].witness.push(pubkey.serialize().to_vec());
 +                                                                      spend_tx.input[0].witness.push(pubkey.key.serialize().to_vec());
                                                                        txn.push(spend_tx);
                                                                },
                                                        }
  #[test]
  fn test_claim_sizeable_push_msat() {
        // Incidentally test SpendableOutput event generation due to detection of to_local output on commitment tx
 -      let nodes = create_network(2);
 +      let nodes = create_network(2, &[None, None]);
  
 -      let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 99000000);
 +      let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 99000000, LocalFeatures::new(), LocalFeatures::new());
        nodes[1].node.force_close_channel(&chan.2);
        check_closed_broadcast!(nodes[1]);
        let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
@@@ -3539,9 -3375,9 +3539,9 @@@ fn test_claim_on_remote_sizeable_push_m
        // Same test as previous, just test on remote commitment tx, as per_commitment_point registration changes depending on whether you're funder/fundee and
        // to_remote output is encumbered by a P2WPKH
  
 -      let nodes = create_network(2);
 +      let nodes = create_network(2, &[None, None]);
  
 -      let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 99000000);
 +      let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 99000000, LocalFeatures::new(), LocalFeatures::new());
        nodes[0].node.force_close_channel(&chan.2);
        check_closed_broadcast!(nodes[0]);
  
@@@ -3564,9 -3400,9 +3564,9 @@@ fn test_claim_on_remote_revoked_sizeabl
        // Same test as previous, just test on remote revoked commitment tx, as per_commitment_point registration changes depending on whether you're funder/fundee and
        // to_remote output is encumbered by a P2WPKH
  
 -      let nodes = create_network(2);
 +      let nodes = create_network(2, &[None, None]);
  
 -      let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 59000000);
 +      let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 59000000, LocalFeatures::new(), LocalFeatures::new());
        let payment_preimage = route_payment(&nodes[0], &vec!(&nodes[1])[..], 3000000).0;
        let revoked_local_txn = nodes[0].node.channel_state.lock().unwrap().by_id.get(&chan.2).unwrap().last_local_commitment_txn.clone();
        assert_eq!(revoked_local_txn[0].input.len(), 1);
  
  #[test]
  fn test_static_spendable_outputs_preimage_tx() {
 -      let nodes = create_network(2);
 +      let nodes = create_network(2, &[None, None]);
  
        // Create some initial channels
 -      let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
 +      let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1, LocalFeatures::new(), LocalFeatures::new());
  
        let payment_preimage = route_payment(&nodes[0], &vec!(&nodes[1])[..], 3000000).0;
  
  
  #[test]
  fn test_static_spendable_outputs_justice_tx_revoked_commitment_tx() {
 -      let nodes = create_network(2);
 +      let nodes = create_network(2, &[None, None]);
  
        // Create some initial channels
 -      let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
 +      let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1, LocalFeatures::new(), LocalFeatures::new());
  
        let payment_preimage = route_payment(&nodes[0], &vec!(&nodes[1])[..], 3000000).0;
        let revoked_local_txn = nodes[0].node.channel_state.lock().unwrap().by_id.iter().next().unwrap().1.last_local_commitment_txn.clone();
  
  #[test]
  fn test_static_spendable_outputs_justice_tx_revoked_htlc_timeout_tx() {
 -      let nodes = create_network(2);
 +      let nodes = create_network(2, &[None, None]);
  
        // Create some initial channels
 -      let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
 +      let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1, LocalFeatures::new(), LocalFeatures::new());
  
        let payment_preimage = route_payment(&nodes[0], &vec!(&nodes[1])[..], 3000000).0;
        let revoked_local_txn = nodes[0].node.channel_state.lock().unwrap().by_id.get(&chan_1.2).unwrap().last_local_commitment_txn.clone();
  
  #[test]
  fn test_static_spendable_outputs_justice_tx_revoked_htlc_success_tx() {
 -      let nodes = create_network(2);
 +      let nodes = create_network(2, &[None, None]);
  
        // Create some initial channels
 -      let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
 +      let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1, LocalFeatures::new(), LocalFeatures::new());
  
        let payment_preimage = route_payment(&nodes[0], &vec!(&nodes[1])[..], 3000000).0;
        let revoked_local_txn = nodes[1].node.channel_state.lock().unwrap().by_id.get(&chan_1.2).unwrap().last_local_commitment_txn.clone();
@@@ -3756,11 -3592,11 +3756,11 @@@ fn test_onchain_to_onchain_claim() 
        // Finally, check that B will claim the HTLC output if A's latest commitment transaction
        // gets broadcast.
  
 -      let nodes = create_network(3);
 +      let nodes = create_network(3, &[None, None, None]);
  
        // Create some initial channels
 -      let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
 -      let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2);
 +      let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1, LocalFeatures::new(), LocalFeatures::new());
 +      let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2, LocalFeatures::new(), LocalFeatures::new());
  
        // Rebalance the network a bit by relaying one payment through all the channels ...
        send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 8000000);
  fn test_duplicate_payment_hash_one_failure_one_success() {
        // Topology : A --> B --> C
        // We route 2 payments with the same hash between B and C; one will be timed out, the other successfully claimed
 -      let mut nodes = create_network(3);
 +      let mut nodes = create_network(3, &[None, None, None]);
  
 -      create_announced_chan_between_nodes(&nodes, 0, 1);
 -      let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2);
 +      create_announced_chan_between_nodes(&nodes, 0, 1, LocalFeatures::new(), LocalFeatures::new());
 +      let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2, LocalFeatures::new(), LocalFeatures::new());
  
        let (our_payment_preimage, duplicate_payment_hash) = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 900000);
        *nodes[0].network_payment_count.borrow_mut() -= 1;
        check_spends!(htlc_success_txn[1], commitment_txn[0].clone());
  
        nodes[1].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![htlc_timeout_tx] }, 200);
 +      connect_blocks(&nodes[1].chain_monitor, ANTI_REORG_DELAY - 1, 200, true, header.bitcoin_hash());
        expect_pending_htlcs_forwardable!(nodes[1]);
        let htlc_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
        assert!(htlc_updates.update_add_htlcs.is_empty());
  
  #[test]
  fn test_dynamic_spendable_outputs_local_htlc_success_tx() {
 -      let nodes = create_network(2);
 +      let nodes = create_network(2, &[None, None]);
  
        // Create some initial channels
 -      let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
 +      let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1, LocalFeatures::new(), LocalFeatures::new());
  
        let payment_preimage = route_payment(&nodes[0], &vec!(&nodes[1])[..], 9000000).0;
        let local_txn = nodes[1].node.channel_state.lock().unwrap().by_id.get(&chan_1.2).unwrap().last_local_commitment_txn.clone();
@@@ -4007,13 -3842,13 +4007,13 @@@ fn do_test_fail_backwards_unrevoked_rem
        //    - C - D -
        // B /         \ F
        // And test where C fails back to A/B when D announces its latest commitment transaction
 -      let nodes = create_network(6);
 +      let nodes = create_network(6, &[None, None, None, None, None, None]);
  
 -      create_announced_chan_between_nodes(&nodes, 0, 2);
 -      create_announced_chan_between_nodes(&nodes, 1, 2);
 -      let chan = create_announced_chan_between_nodes(&nodes, 2, 3);
 -      create_announced_chan_between_nodes(&nodes, 3, 4);
 -      create_announced_chan_between_nodes(&nodes, 3, 5);
 +      create_announced_chan_between_nodes(&nodes, 0, 2, LocalFeatures::new(), LocalFeatures::new());
 +      create_announced_chan_between_nodes(&nodes, 1, 2, LocalFeatures::new(), LocalFeatures::new());
 +      let chan = create_announced_chan_between_nodes(&nodes, 2, 3, LocalFeatures::new(), LocalFeatures::new());
 +      create_announced_chan_between_nodes(&nodes, 3, 4, LocalFeatures::new(), LocalFeatures::new());
 +      create_announced_chan_between_nodes(&nodes, 3, 5, LocalFeatures::new(), LocalFeatures::new());
  
        // Rebalance and check output sanity...
        send_payment(&nodes[0], &[&nodes[2], &nodes[3], &nodes[4]], 500000);
        } else {
                nodes[2].chain_monitor.block_connected_checked(&header, 1, &[&ds_prev_commitment_tx[0]], &[1; 1]);
        }
 +      connect_blocks(&nodes[2].chain_monitor, ANTI_REORG_DELAY - 1, 1, true,  header.bitcoin_hash());
        check_closed_broadcast!(nodes[2]);
        expect_pending_htlcs_forwardable!(nodes[2]);
        check_added_monitors!(nodes[2], 2);
@@@ -4246,10 -4080,10 +4246,10 @@@ fn test_fail_backwards_previous_remote_
  
  #[test]
  fn test_dynamic_spendable_outputs_local_htlc_timeout_tx() {
 -      let nodes = create_network(2);
 +      let nodes = create_network(2, &[None, None]);
  
        // Create some initial channels
 -      let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
 +      let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1, LocalFeatures::new(), LocalFeatures::new());
  
        route_payment(&nodes[0], &vec!(&nodes[1])[..], 9000000).0;
        let local_txn = nodes[0].node.channel_state.lock().unwrap().by_id.get(&chan_1.2).unwrap().last_local_commitment_txn.clone();
  
  #[test]
  fn test_static_output_closing_tx() {
 -      let nodes = create_network(2);
 +      let nodes = create_network(2, &[None, None]);
  
 -      let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
 +      let chan = create_announced_chan_between_nodes(&nodes, 0, 1, LocalFeatures::new(), LocalFeatures::new());
  
        send_payment(&nodes[0], &vec!(&nodes[1])[..], 8000000);
        let closing_tx = close_channel(&nodes[0], &nodes[1], &chan.2, chan.3, true).2;
  }
  
  fn do_htlc_claim_local_commitment_only(use_dust: bool) {
 -      let nodes = create_network(2);
 -      let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
 +      let nodes = create_network(2, &[None, None]);
 +      let chan = create_announced_chan_between_nodes(&nodes, 0, 1, LocalFeatures::new(), LocalFeatures::new());
  
        let (our_payment_preimage, _) = route_payment(&nodes[0], &[&nodes[1]], if use_dust { 50000 } else { 3000000 });
  
  }
  
  fn do_htlc_claim_current_remote_commitment_only(use_dust: bool) {
 -      let mut nodes = create_network(2);
 -      let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
 +      let mut nodes = create_network(2, &[None, None]);
 +      let chan = create_announced_chan_between_nodes(&nodes, 0, 1, LocalFeatures::new(), LocalFeatures::new());
  
        let route = nodes[0].router.get_route(&nodes[1].node.get_our_node_id(), None, &Vec::new(), if use_dust { 50000 } else { 3000000 }, TEST_FINAL_CLTV).unwrap();
        let (_, payment_hash) = get_payment_preimage_hash!(nodes[0]);
        // to "time out" the HTLC.
  
        let mut header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
 -      for i in 1..TEST_FINAL_CLTV + HTLC_FAIL_TIMEOUT_BLOCKS + CHAN_CONFIRM_DEPTH + 1 {
 +      for i in 1..TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS + CHAN_CONFIRM_DEPTH + 1 {
                nodes[0].chain_monitor.block_connected_checked(&header, i, &Vec::new(), &Vec::new());
                header.prev_blockhash = header.bitcoin_hash();
        }
  }
  
  fn do_htlc_claim_previous_remote_commitment_only(use_dust: bool, check_revoke_no_close: bool) {
 -      let nodes = create_network(3);
 -      let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
 +      let nodes = create_network(3, &[None, None, None]);
 +      let chan = create_announced_chan_between_nodes(&nodes, 0, 1, LocalFeatures::new(), LocalFeatures::new());
  
        // Fail the payment, but don't deliver A's final RAA, resulting in the HTLC only being present
        // in B's previous (unrevoked) commitment transaction, but none of A's commitment transactions.
        }
  
        let mut header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
 -      for i in 1..TEST_FINAL_CLTV + HTLC_FAIL_TIMEOUT_BLOCKS + CHAN_CONFIRM_DEPTH + 1 {
 +      for i in 1..TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS + CHAN_CONFIRM_DEPTH + 1 {
                nodes[0].chain_monitor.block_connected_checked(&header, i, &Vec::new(), &Vec::new());
                header.prev_blockhash = header.bitcoin_hash();
        }
@@@ -4481,6 -4315,7 +4481,6 @@@ fn run_onion_failure_test_with_fail_int
        macro_rules! expect_htlc_forward {
                ($node: expr) => {{
                        expect_event!($node, Event::PendingHTLCsForwardable);
 -                      $node.node.channel_state.lock().unwrap().next_forward = Instant::now();
                        $node.node.process_pending_htlc_forwards();
                }}
        }
@@@ -4621,7 -4456,7 +4621,7 @@@ impl msgs::ChannelUpdate 
                msgs::ChannelUpdate {
                        signature: Signature::from(FFISignature::new()),
                        contents: msgs::UnsignedChannelUpdate {
 -                              chain_hash: Sha256dHash::from_data(&vec![0u8][..]),
 +                              chain_hash: Sha256dHash::hash(&vec![0u8][..]),
                                short_channel_id: 0,
                                timestamp: 0,
                                flags: 0,
@@@ -4646,11 -4481,11 +4646,11 @@@ fn test_onion_failure() 
        const NODE: u16 = 0x2000;
        const UPDATE: u16 = 0x1000;
  
 -      let mut nodes = create_network(3);
 +      let mut nodes = create_network(3, &[None, None, None]);
        for node in nodes.iter() {
                *node.keys_manager.override_session_priv.lock().unwrap() = Some(SecretKey::from_slice(&[3; 32]).unwrap());
        }
 -      let channels = [create_announced_chan_between_nodes(&nodes, 0, 1), create_announced_chan_between_nodes(&nodes, 1, 2)];
 +      let channels = [create_announced_chan_between_nodes(&nodes, 0, 1, LocalFeatures::new(), LocalFeatures::new()), create_announced_chan_between_nodes(&nodes, 1, 2, LocalFeatures::new(), LocalFeatures::new())];
        let (_, payment_hash) = get_payment_preimage_hash!(nodes[0]);
        let route = nodes[0].router.get_route(&nodes[2].node.get_our_node_id(), None, &Vec::new(), 40000, TEST_FINAL_CLTV).unwrap();
        // positive case
        }, || {}, true, Some(UPDATE|13), Some(msgs::HTLCFailChannelUpdate::ChannelClosed { short_channel_id: channels[0].0.contents.short_channel_id, is_permanent: true}));
  
        run_onion_failure_test("expiry_too_soon", 0, &nodes, &route, &payment_hash, |msg| {
 -              let height = msg.cltv_expiry - CLTV_CLAIM_BUFFER - HTLC_FAIL_TIMEOUT_BLOCKS + 1;
 +              let height = msg.cltv_expiry - CLTV_CLAIM_BUFFER - LATENCY_GRACE_PERIOD_BLOCKS + 1;
                let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
                nodes[1].chain_monitor.block_connected_checked(&header, height, &Vec::new()[..], &[0; 0]);
        }, ||{}, true, Some(UPDATE|14), Some(msgs::HTLCFailChannelUpdate::ChannelUpdateMessage{msg: ChannelUpdate::dummy()}));
        }, false, Some(PERM|15), None);
  
        run_onion_failure_test("final_expiry_too_soon", 1, &nodes, &route, &payment_hash, |msg| {
 -              let height = msg.cltv_expiry - CLTV_CLAIM_BUFFER - HTLC_FAIL_TIMEOUT_BLOCKS + 1;
 +              let height = msg.cltv_expiry - CLTV_CLAIM_BUFFER - LATENCY_GRACE_PERIOD_BLOCKS + 1;
                let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
                nodes[2].chain_monitor.block_connected_checked(&header, height, &Vec::new()[..], &[0; 0]);
        }, || {}, true, Some(17), None);
  #[test]
  #[should_panic]
  fn bolt2_open_channel_sending_node_checks_part1() { //This test needs to be on its own as we are catching a panic
 -      let nodes = create_network(2);
 +      let nodes = create_network(2, &[None, None]);
        //Force duplicate channel ids
        for node in nodes.iter() {
                *node.keys_manager.override_channel_id_priv.lock().unwrap() = Some([0; 32]);
        let push_msat=10001;
        nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), channel_value_satoshis, push_msat, 42).unwrap();
        let node0_to_1_send_open_channel = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
 -      nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &node0_to_1_send_open_channel).unwrap();
 +      nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), LocalFeatures::new(), &node0_to_1_send_open_channel).unwrap();
  
        //Create a second channel with a channel_id collision
        assert!(nodes[0].node.create_channel(nodes[0].node.get_our_node_id(), channel_value_satoshis, push_msat, 42).is_err());
  
  #[test]
  fn bolt2_open_channel_sending_node_checks_part2() {
 -      let nodes = create_network(2);
 +      let nodes = create_network(2, &[None, None]);
  
        // BOLT #2 spec: Sending node must set funding_satoshis to less than 2^24 satoshis
        let channel_value_satoshis=2^24;
  fn test_update_add_htlc_bolt2_sender_value_below_minimum_msat() {
        //BOLT2 Requirement: MUST offer amount_msat greater than 0.
        //BOLT2 Requirement: MUST NOT offer amount_msat below the receiving node's htlc_minimum_msat (same validation check catches both of these)
 -      let mut nodes = create_network(2);
 -      let _chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000);
 +      let mut nodes = create_network(2, &[None, None]);
 +      let _chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000, LocalFeatures::new(), LocalFeatures::new());
        let mut route = nodes[0].router.get_route(&nodes[1].node.get_our_node_id(), None, &[], 100000, TEST_FINAL_CLTV).unwrap();
        let (_, our_payment_hash) = get_payment_preimage_hash!(nodes[0]);
  
  fn test_update_add_htlc_bolt2_sender_cltv_expiry_too_high() {
        //BOLT 2 Requirement: MUST set cltv_expiry less than 500000000.
        //It is enforced when constructing a route.
 -      let mut nodes = create_network(2);
 -      let _chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 0);
 +      let mut nodes = create_network(2, &[None, None]);
 +      let _chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 0, LocalFeatures::new(), LocalFeatures::new());
        let route = nodes[0].router.get_route(&nodes[1].node.get_our_node_id(), None, &[], 100000000, 500000001).unwrap();
        let (_, our_payment_hash) = get_payment_preimage_hash!(nodes[0]);
  
@@@ -4964,8 -4799,8 +4964,8 @@@ fn test_update_add_htlc_bolt2_sender_ex
        //BOLT 2 Requirement: if result would be offering more than the remote's max_accepted_htlcs HTLCs, in the remote commitment transaction: MUST NOT add an HTLC.
        //BOLT 2 Requirement: for the first HTLC it offers MUST set id to 0.
        //BOLT 2 Requirement: MUST increase the value of id by 1 for each successive offer.
 -      let mut nodes = create_network(2);
 -      let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 0);
 +      let mut nodes = create_network(2, &[None, None]);
 +      let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 0, LocalFeatures::new(), LocalFeatures::new());
        let max_accepted_htlcs = nodes[1].node.channel_state.lock().unwrap().by_id.get(&chan.2).unwrap().their_max_accepted_htlcs as u64;
  
        for i in 0..max_accepted_htlcs {
  #[test]
  fn test_update_add_htlc_bolt2_sender_exceed_max_htlc_value_in_flight() {
        //BOLT 2 Requirement: if the sum of total offered HTLCs would exceed the remote's max_htlc_value_in_flight_msat: MUST NOT add an HTLC.
 -      let mut nodes = create_network(2);
 +      let mut nodes = create_network(2, &[None, None]);
        let channel_value = 100000;
 -      let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, channel_value, 0);
 +      let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, channel_value, 0, LocalFeatures::new(), LocalFeatures::new());
        let max_in_flight = get_channel_value_stat!(nodes[0], chan.2).their_max_htlc_value_in_flight_msat;
  
        send_payment(&nodes[0], &vec!(&nodes[1])[..], max_in_flight);
        let err = nodes[0].node.send_payment(route, our_payment_hash);
  
        if let Err(APIError::ChannelUnavailable{err}) = err {
-               assert_eq!(err, "Cannot send value that would put us over the max HTLC value in flight");
+               assert_eq!(err, "Cannot send value that would put us over the max HTLC value in flight our peer will accept");
        } else {
                assert!(false);
        }
  #[test]
  fn test_update_add_htlc_bolt2_receiver_check_amount_received_more_than_min() {
        //BOLT2 Requirement: receiving an amount_msat equal to 0, OR less than its own htlc_minimum_msat -> SHOULD fail the channel.
 -      let mut nodes = create_network(2);
 -      let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000);
 +      let mut nodes = create_network(2, &[None, None]);
 +      let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000, LocalFeatures::new(), LocalFeatures::new());
        let htlc_minimum_msat: u64;
        {
                let chan_lock = nodes[0].node.channel_state.lock().unwrap();
  #[test]
  fn test_update_add_htlc_bolt2_receiver_sender_can_afford_amount_sent() {
        //BOLT2 Requirement: receiving an amount_msat that the sending node cannot afford at the current feerate_per_kw (while maintaining its channel reserve): SHOULD fail the channel
 -      let mut nodes = create_network(2);
 -      let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000);
 +      let mut nodes = create_network(2, &[None, None]);
 +      let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000, LocalFeatures::new(), LocalFeatures::new());
  
        let their_channel_reserve = get_channel_value_stat!(nodes[0], chan.2).channel_reserve_msat;
  
  fn test_update_add_htlc_bolt2_receiver_check_max_htlc_limit() {
        //BOLT 2 Requirement: if a sending node adds more than its max_accepted_htlcs HTLCs to its local commitment transaction: SHOULD fail the channel
        //BOLT 2 Requirement: MUST allow multiple HTLCs with the same payment_hash.
 -      let mut nodes = create_network(2);
 -      let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000);
 +      let mut nodes = create_network(2, &[None, None]);
 +      let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000, LocalFeatures::new(), LocalFeatures::new());
        let route = nodes[0].router.get_route(&nodes[1].node.get_our_node_id(), None, &[], 3999999, TEST_FINAL_CLTV).unwrap();
        let (_, our_payment_hash) = get_payment_preimage_hash!(nodes[0]);
  
        let session_priv = SecretKey::from_slice(&{
                let mut session_key = [0; 32];
 -              rng::fill_bytes(&mut session_key);
 +              let mut rng = thread_rng();
 +              rng.fill_bytes(&mut session_key);
                session_key
        }).expect("RNG is bad!");
  
  #[test]
  fn test_update_add_htlc_bolt2_receiver_check_max_in_flight_msat() {
        //OR adds more than its max_htlc_value_in_flight_msat worth of offered HTLCs to its local commitment transaction: SHOULD fail the channel
 -      let mut nodes = create_network(2);
 -      let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 1000000);
 +      let mut nodes = create_network(2, &[None, None]);
 +      let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 1000000, LocalFeatures::new(), LocalFeatures::new());
        let route = nodes[0].router.get_route(&nodes[1].node.get_our_node_id(), None, &[], 1000000, TEST_FINAL_CLTV).unwrap();
        let (_, our_payment_hash) = get_payment_preimage_hash!(nodes[0]);
        nodes[0].node.send_payment(route, our_payment_hash).unwrap();
        let err = nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);
  
        if let Err(msgs::HandleError{err, action: Some(msgs::ErrorAction::SendErrorMessage {..})}) = err {
-               assert_eq!(err,"Remote HTLC add would put them over their max HTLC value in flight");
+               assert_eq!(err,"Remote HTLC add would put them over our max HTLC value");
        } else {
                assert!(false);
        }
  #[test]
  fn test_update_add_htlc_bolt2_receiver_check_cltv_expiry() {
        //BOLT2 Requirement: if sending node sets cltv_expiry to greater or equal to 500000000: SHOULD fail the channel.
 -      let mut nodes = create_network(2);
 -      create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000);
 +      let mut nodes = create_network(2, &[None, None]);
 +      create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000, LocalFeatures::new(), LocalFeatures::new());
        let route = nodes[0].router.get_route(&nodes[1].node.get_our_node_id(), None, &[], 3999999, TEST_FINAL_CLTV).unwrap();
        let (_, our_payment_hash) = get_payment_preimage_hash!(nodes[0]);
        nodes[0].node.send_payment(route, our_payment_hash).unwrap();
@@@ -5178,8 -5012,8 +5178,8 @@@ fn test_update_add_htlc_bolt2_receiver_
        //BOLT 2 requirement: if the sender did not previously acknowledge the commitment of that HTLC: MUST ignore a repeated id value after a reconnection.
        // We test this by first testing that that repeated HTLCs pass commitment signature checks
        // after disconnect and that non-sequential htlc_ids result in a channel failure.
 -      let mut nodes = create_network(2);
 -      create_announced_chan_between_nodes(&nodes, 0, 1);
 +      let mut nodes = create_network(2, &[None, None]);
 +      create_announced_chan_between_nodes(&nodes, 0, 1, LocalFeatures::new(), LocalFeatures::new());
        let route = nodes[0].router.get_route(&nodes[1].node.get_our_node_id(), None, &[], 1000000, TEST_FINAL_CLTV).unwrap();
        let (_, our_payment_hash) = get_payment_preimage_hash!(nodes[0]);
        nodes[0].node.send_payment(route, our_payment_hash).unwrap();
  fn test_update_fulfill_htlc_bolt2_update_fulfill_htlc_before_commitment() {
        //BOLT 2 Requirement: until the corresponding HTLC is irrevocably committed in both sides' commitment transactions:     MUST NOT send an update_fulfill_htlc, update_fail_htlc, or update_fail_malformed_htlc.
  
 -      let mut nodes = create_network(2);
 -      let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
 +      let mut nodes = create_network(2, &[None, None]);
 +      let chan = create_announced_chan_between_nodes(&nodes, 0, 1, LocalFeatures::new(), LocalFeatures::new());
  
        let route = nodes[0].router.get_route(&nodes[1].node.get_our_node_id(), None, &[], 1000000, TEST_FINAL_CLTV).unwrap();
        let (our_payment_preimage, our_payment_hash) = get_payment_preimage_hash!(nodes[0]);
  fn test_update_fulfill_htlc_bolt2_update_fail_htlc_before_commitment() {
        //BOLT 2 Requirement: until the corresponding HTLC is irrevocably committed in both sides' commitment transactions:     MUST NOT send an update_fulfill_htlc, update_fail_htlc, or update_fail_malformed_htlc.
  
 -      let mut nodes = create_network(2);
 -      let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
 +      let mut nodes = create_network(2, &[None, None]);
 +      let chan = create_announced_chan_between_nodes(&nodes, 0, 1, LocalFeatures::new(), LocalFeatures::new());
  
        let route = nodes[0].router.get_route(&nodes[1].node.get_our_node_id(), None, &[], 1000000, TEST_FINAL_CLTV).unwrap();
        let (_, our_payment_hash) = get_payment_preimage_hash!(nodes[0]);
  fn test_update_fulfill_htlc_bolt2_update_fail_malformed_htlc_before_commitment() {
        //BOLT 2 Requirement: until the corresponding HTLC is irrevocably committed in both sides' commitment transactions:     MUST NOT send an update_fulfill_htlc, update_fail_htlc, or update_fail_malformed_htlc.
  
 -      let mut nodes = create_network(2);
 -      let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
 +      let mut nodes = create_network(2, &[None, None]);
 +      let chan = create_announced_chan_between_nodes(&nodes, 0, 1, LocalFeatures::new(), LocalFeatures::new());
  
        let route = nodes[0].router.get_route(&nodes[1].node.get_our_node_id(), None, &[], 1000000, TEST_FINAL_CLTV).unwrap();
        let (_, our_payment_hash) = get_payment_preimage_hash!(nodes[0]);
  fn test_update_fulfill_htlc_bolt2_incorrect_htlc_id() {
        //BOLT 2 Requirement: A receiving node: if the id does not correspond to an HTLC in its current commitment transaction MUST fail the channel.
  
 -      let nodes = create_network(2);
 -      create_announced_chan_between_nodes(&nodes, 0, 1);
 +      let nodes = create_network(2, &[None, None]);
 +      create_announced_chan_between_nodes(&nodes, 0, 1, LocalFeatures::new(), LocalFeatures::new());
  
        let our_payment_preimage = route_payment(&nodes[0], &[&nodes[1]], 100000).0;
  
  fn test_update_fulfill_htlc_bolt2_wrong_preimage() {
        //BOLT 2 Requirement: A receiving node: if the payment_preimage value in update_fulfill_htlc doesn't SHA256 hash to the corresponding HTLC payment_hash MUST fail the channel.
  
 -      let nodes = create_network(2);
 -      create_announced_chan_between_nodes(&nodes, 0, 1);
 +      let nodes = create_network(2, &[None, None]);
 +      create_announced_chan_between_nodes(&nodes, 0, 1, LocalFeatures::new(), LocalFeatures::new());
  
        let our_payment_preimage = route_payment(&nodes[0], &[&nodes[1]], 100000).0;
  
  fn test_update_fulfill_htlc_bolt2_missing_badonion_bit_for_malformed_htlc_message() {
        //BOLT 2 Requirement: A receiving node: if the BADONION bit in failure_code is not set for update_fail_malformed_htlc MUST fail the channel.
  
 -      let mut nodes = create_network(2);
 -      create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 1000000);
 +      let mut nodes = create_network(2, &[None, None]);
 +      create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 1000000, LocalFeatures::new(), LocalFeatures::new());
        let route = nodes[0].router.get_route(&nodes[1].node.get_our_node_id(), None, &[], 1000000, TEST_FINAL_CLTV).unwrap();
        let (_, our_payment_hash) = get_payment_preimage_hash!(nodes[0]);
        nodes[0].node.send_payment(route, our_payment_hash).unwrap();
@@@ -5449,9 -5283,9 +5449,9 @@@ fn test_update_fulfill_htlc_bolt2_after
        //BOLT 2 Requirement: a receiving node which has an outgoing HTLC canceled by update_fail_malformed_htlc:
        //    * MUST return an error in the update_fail_htlc sent to the link which originally sent the HTLC, using the failure_code given and setting the data to sha256_of_onion.
  
 -      let mut nodes = create_network(3);
 -      create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 1000000);
 -      create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 1000000, 1000000);
 +      let mut nodes = create_network(3, &[None, None, None]);
 +      create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 1000000, LocalFeatures::new(), LocalFeatures::new());
 +      create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 1000000, 1000000, LocalFeatures::new(), LocalFeatures::new());
  
        let route = nodes[0].router.get_route(&nodes[2].node.get_our_node_id(), None, &Vec::new(), 100000, TEST_FINAL_CLTV).unwrap();
        let (_, our_payment_hash) = get_payment_preimage_hash!(nodes[0]);
  
        check_added_monitors!(nodes[1], 1);
  }
 +
 +fn do_test_failure_delay_dust_htlc_local_commitment(announce_latest: bool) {
 +      // Dust-HTLC failure updates must be delayed until failure-trigger tx (in this case local commitment) reach ANTI_REORG_DELAY
 +      // We can have at most two valid local commitment tx, so both cases must be covered, and both txs must be checked to get them all as
 +      // HTLC could have been removed from latest local commitment tx but still valid until we get remote RAA
 +
 +      let nodes = create_network(2, &[None, None]);
 +      let chan =create_announced_chan_between_nodes(&nodes, 0, 1, LocalFeatures::new(), LocalFeatures::new());
 +
 +      let bs_dust_limit = nodes[1].node.channel_state.lock().unwrap().by_id.get(&chan.2).unwrap().our_dust_limit_satoshis;
 +
 +      // We route 2 dust-HTLCs between A and B
 +      let (_, payment_hash_1) = route_payment(&nodes[0], &[&nodes[1]], bs_dust_limit*1000);
 +      let (_, payment_hash_2) = route_payment(&nodes[0], &[&nodes[1]], bs_dust_limit*1000);
 +      route_payment(&nodes[0], &[&nodes[1]], 1000000);
 +
 +      // Cache one local commitment tx as previous
 +      let as_prev_commitment_tx = nodes[0].node.channel_state.lock().unwrap().by_id.get(&chan.2).unwrap().last_local_commitment_txn.clone();
 +
 +      // Fail one HTLC to prune it in the will-be-latest-local commitment tx
 +      assert!(nodes[1].node.fail_htlc_backwards(&payment_hash_2));
 +      check_added_monitors!(nodes[1], 0);
 +      expect_pending_htlcs_forwardable!(nodes[1]);
 +      check_added_monitors!(nodes[1], 1);
 +
 +      let remove = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
 +      nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &remove.update_fail_htlcs[0]).unwrap();
 +      nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &remove.commitment_signed).unwrap();
 +      check_added_monitors!(nodes[0], 1);
 +
 +      // Cache one local commitment tx as latest
 +      let as_last_commitment_tx = nodes[0].node.channel_state.lock().unwrap().by_id.get(&chan.2).unwrap().last_local_commitment_txn.clone();
 +
 +      let events = nodes[0].node.get_and_clear_pending_msg_events();
 +      match events[0] {
 +              MessageSendEvent::SendRevokeAndACK { node_id, .. } => {
 +                      assert_eq!(node_id, nodes[1].node.get_our_node_id());
 +              },
 +              _ => panic!("Unexpected event"),
 +      }
 +      match events[1] {
 +              MessageSendEvent::UpdateHTLCs { node_id, .. } => {
 +                      assert_eq!(node_id, nodes[1].node.get_our_node_id());
 +              },
 +              _ => panic!("Unexpected event"),
 +      }
 +
 +      assert_ne!(as_prev_commitment_tx, as_last_commitment_tx);
 +      // Fail the 2 dust-HTLCs, move their failure in maturation buffer (htlc_updated_waiting_threshold_conf)
 +      let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
 +      if announce_latest {
 +              nodes[0].chain_monitor.block_connected_checked(&header, 1, &[&as_last_commitment_tx[0]], &[1; 1]);
 +      } else {
 +              nodes[0].chain_monitor.block_connected_checked(&header, 1, &[&as_prev_commitment_tx[0]], &[1; 1]);
 +      }
 +
 +      let events = nodes[0].node.get_and_clear_pending_msg_events();
 +      assert_eq!(events.len(), 1);
 +      match events[0] {
 +              MessageSendEvent::BroadcastChannelUpdate { .. } => {},
 +              _ => panic!("Unexpected event"),
 +      }
 +
 +      assert_eq!(nodes[0].node.get_and_clear_pending_events().len(), 0);
 +      connect_blocks(&nodes[0].chain_monitor, ANTI_REORG_DELAY - 1, 1, true,  header.bitcoin_hash());
 +      let events = nodes[0].node.get_and_clear_pending_events();
 +      // Only 2 PaymentFailed events should show up, over-dust HTLC has to be failed by timeout tx
 +      assert_eq!(events.len(), 2);
 +      let mut first_failed = false;
 +      for event in events {
 +              match event {
 +                      Event::PaymentFailed { payment_hash, .. } => {
 +                              if payment_hash == payment_hash_1 {
 +                                      assert!(!first_failed);
 +                                      first_failed = true;
 +                              } else {
 +                                      assert_eq!(payment_hash, payment_hash_2);
 +                              }
 +                      }
 +                      _ => panic!("Unexpected event"),
 +              }
 +      }
 +}
 +
 +#[test]
 +fn test_failure_delay_dust_htlc_local_commitment() {
 +      do_test_failure_delay_dust_htlc_local_commitment(true);
 +      do_test_failure_delay_dust_htlc_local_commitment(false);
 +}
 +
 +#[test]
 +fn test_no_failure_dust_htlc_local_commitment() {
 +      // Transaction filters for failing back dust htlcs based on local commitment txn infos have been
 +      // prone to error; we test here that a dummy transaction doesn't fail them.
 +
 +      let nodes = create_network(2, &[None, None]);
 +      let chan = create_announced_chan_between_nodes(&nodes, 0, 1, LocalFeatures::new(), LocalFeatures::new());
 +
 +      // Rebalance a bit
 +      send_payment(&nodes[0], &vec!(&nodes[1])[..], 8000000);
 +
 +      let as_dust_limit = nodes[0].node.channel_state.lock().unwrap().by_id.get(&chan.2).unwrap().our_dust_limit_satoshis;
 +      let bs_dust_limit = nodes[1].node.channel_state.lock().unwrap().by_id.get(&chan.2).unwrap().our_dust_limit_satoshis;
 +
 +      // We route 2 dust-HTLCs between A and B
 +      let (preimage_1, _) = route_payment(&nodes[0], &[&nodes[1]], bs_dust_limit*1000);
 +      let (preimage_2, _) = route_payment(&nodes[1], &[&nodes[0]], as_dust_limit*1000);
 +
 +      // Build a dummy invalid transaction trying to spend a commitment tx
 +      let input = TxIn {
 +              previous_output: BitcoinOutPoint { txid: chan.3.txid(), vout: 0 },
 +              script_sig: Script::new(),
 +              sequence: 0,
 +              witness: Vec::new(),
 +      };
 +
 +      let outp = TxOut {
 +              script_pubkey: Builder::new().push_opcode(opcodes::all::OP_RETURN).into_script(),
 +              value: 10000,
 +      };
 +
 +      let dummy_tx = Transaction {
 +              version: 2,
 +              lock_time: 0,
 +              input: vec![input],
 +              output: vec![outp]
 +      };
 +
 +      let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
 +      nodes[0].chan_monitor.simple_monitor.block_connected(&header, 1, &[&dummy_tx], &[1;1]);
 +      assert_eq!(nodes[0].node.get_and_clear_pending_events().len(), 0);
 +      assert_eq!(nodes[0].node.get_and_clear_pending_msg_events().len(), 0);
 +      // We broadcast a few more blocks to check everything is all right
 +      connect_blocks(&nodes[0].chain_monitor, 20, 1, true,  header.bitcoin_hash());
 +      assert_eq!(nodes[0].node.get_and_clear_pending_events().len(), 0);
 +      assert_eq!(nodes[0].node.get_and_clear_pending_msg_events().len(), 0);
 +
 +      claim_payment(&nodes[0], &vec!(&nodes[1])[..], preimage_1);
 +      claim_payment(&nodes[1], &vec!(&nodes[0])[..], preimage_2);
 +}
 +
 +fn do_test_sweep_outbound_htlc_failure_update(revoked: bool, local: bool) {
 +      // Outbound HTLC-failure updates must be cancelled if we get a reorg before we reach ANTI_REORG_DELAY.
 +      // Broadcast of revoked remote commitment tx, trigger failure-update of dust/non-dust HTLCs
 +      // Broadcast of remote commitment tx, trigger failure-update of dust-HTLCs
 +      // Broadcast of timeout tx on remote commitment tx, trigger failure-update of non-dust HTLCs
 +      // Broadcast of local commitment tx, trigger failure-update of dust-HTLCs
 +      // Broadcast of HTLC-timeout tx on local commitment tx, trigger failure-update of non-dust HTLCs
 +
 +      let nodes = create_network(3, &[None, None, None]);
 +      let chan = create_announced_chan_between_nodes(&nodes, 0, 1, LocalFeatures::new(), LocalFeatures::new());
 +
 +      let bs_dust_limit = nodes[1].node.channel_state.lock().unwrap().by_id.get(&chan.2).unwrap().our_dust_limit_satoshis;
 +
 +      let (_payment_preimage_1, dust_hash) = route_payment(&nodes[0], &[&nodes[1]], bs_dust_limit*1000);
 +      let (_payment_preimage_2, non_dust_hash) = route_payment(&nodes[0], &[&nodes[1]], 1000000);
 +
 +      let as_commitment_tx = nodes[0].node.channel_state.lock().unwrap().by_id.get(&chan.2).unwrap().last_local_commitment_txn.clone();
 +      let bs_commitment_tx = nodes[1].node.channel_state.lock().unwrap().by_id.get(&chan.2).unwrap().last_local_commitment_txn.clone();
 +
 +      // We revoked bs_commitment_tx
 +      if revoked {
 +              let (payment_preimage_3, _) = route_payment(&nodes[0], &[&nodes[1]], 1000000);
 +              claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage_3);
 +      }
 +
 +      let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
 +      let mut timeout_tx = Vec::new();
 +      if local {
 +              // We fail dust-HTLC 1 by broadcast of local commitment tx
 +              nodes[0].chain_monitor.block_connected_checked(&header, 1, &[&as_commitment_tx[0]], &[1; 1]);
 +              let events = nodes[0].node.get_and_clear_pending_msg_events();
 +              assert_eq!(events.len(), 1);
 +              match events[0] {
 +                      MessageSendEvent::BroadcastChannelUpdate { .. } => {},
 +                      _ => panic!("Unexpected event"),
 +              }
 +              assert_eq!(nodes[0].node.get_and_clear_pending_events().len(), 0);
 +              timeout_tx.push(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap()[0].clone());
 +              let parent_hash  = connect_blocks(&nodes[0].chain_monitor, ANTI_REORG_DELAY - 1, 2, true, header.bitcoin_hash());
 +              let events = nodes[0].node.get_and_clear_pending_events();
 +              assert_eq!(events.len(), 1);
 +              match events[0] {
 +                      Event::PaymentFailed { payment_hash, .. } => {
 +                              assert_eq!(payment_hash, dust_hash);
 +                      },
 +                      _ => panic!("Unexpected event"),
 +              }
 +              assert_eq!(timeout_tx[0].input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
 +              // We fail non-dust-HTLC 2 by broadcast of local HTLC-timeout tx on local commitment tx
 +              let header_2 = BlockHeader { version: 0x20000000, prev_blockhash: parent_hash, merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
 +              assert_eq!(nodes[0].node.get_and_clear_pending_events().len(), 0);
 +              nodes[0].chain_monitor.block_connected_checked(&header_2, 7, &[&timeout_tx[0]], &[1; 1]);
 +              let header_3 = BlockHeader { version: 0x20000000, prev_blockhash: header_2.bitcoin_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
 +              connect_blocks(&nodes[0].chain_monitor, ANTI_REORG_DELAY - 1, 8, true, header_3.bitcoin_hash());
 +              let events = nodes[0].node.get_and_clear_pending_events();
 +              assert_eq!(events.len(), 1);
 +              match events[0] {
 +                      Event::PaymentFailed { payment_hash, .. } => {
 +                              assert_eq!(payment_hash, non_dust_hash);
 +                      },
 +                      _ => panic!("Unexpected event"),
 +              }
 +      } else {
 +              // We fail dust-HTLC 1 by broadcast of remote commitment tx. If revoked, fail also non-dust HTLC
 +              nodes[0].chain_monitor.block_connected_checked(&header, 1, &[&bs_commitment_tx[0]], &[1; 1]);
 +              assert_eq!(nodes[0].node.get_and_clear_pending_events().len(), 0);
 +              let events = nodes[0].node.get_and_clear_pending_msg_events();
 +              assert_eq!(events.len(), 1);
 +              match events[0] {
 +                      MessageSendEvent::BroadcastChannelUpdate { .. } => {},
 +                      _ => panic!("Unexpected event"),
 +              }
 +              timeout_tx.push(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap()[0].clone());
 +              let parent_hash  = connect_blocks(&nodes[0].chain_monitor, ANTI_REORG_DELAY - 1, 2, true, header.bitcoin_hash());
 +              let header_2 = BlockHeader { version: 0x20000000, prev_blockhash: parent_hash, merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
 +              if !revoked {
 +                      let events = nodes[0].node.get_and_clear_pending_events();
 +                      assert_eq!(events.len(), 1);
 +                      match events[0] {
 +                              Event::PaymentFailed { payment_hash, .. } => {
 +                                      assert_eq!(payment_hash, dust_hash);
 +                              },
 +                              _ => panic!("Unexpected event"),
 +                      }
 +                      assert_eq!(timeout_tx[0].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
 +                      // We fail non-dust-HTLC 2 by broadcast of local timeout tx on remote commitment tx
 +                      nodes[0].chain_monitor.block_connected_checked(&header_2, 7, &[&timeout_tx[0]], &[1; 1]);
 +                      assert_eq!(nodes[0].node.get_and_clear_pending_events().len(), 0);
 +                      let header_3 = BlockHeader { version: 0x20000000, prev_blockhash: header_2.bitcoin_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
 +                      connect_blocks(&nodes[0].chain_monitor, ANTI_REORG_DELAY - 1, 8, true, header_3.bitcoin_hash());
 +                      let events = nodes[0].node.get_and_clear_pending_events();
 +                      assert_eq!(events.len(), 1);
 +                      match events[0] {
 +                              Event::PaymentFailed { payment_hash, .. } => {
 +                                      assert_eq!(payment_hash, non_dust_hash);
 +                              },
 +                              _ => panic!("Unexpected event"),
 +                      }
 +              } else {
 +                      // If revoked, both dust & non-dust HTLCs should have been failed after ANTI_REORG_DELAY confs of revoked
 +                      // commitment tx
 +                      let events = nodes[0].node.get_and_clear_pending_events();
 +                      assert_eq!(events.len(), 2);
 +                      let first;
 +                      match events[0] {
 +                              Event::PaymentFailed { payment_hash, .. } => {
 +                                      if payment_hash == dust_hash { first = true; }
 +                                      else { first = false; }
 +                              },
 +                              _ => panic!("Unexpected event"),
 +                      }
 +                      match events[1] {
 +                              Event::PaymentFailed { payment_hash, .. } => {
 +                                      if first { assert_eq!(payment_hash, non_dust_hash); }
 +                                      else { assert_eq!(payment_hash, dust_hash); }
 +                              },
 +                              _ => panic!("Unexpected event"),
 +                      }
 +              }
 +      }
 +}
 +
 +#[test]
 +fn test_sweep_outbound_htlc_failure_update() {
 +      do_test_sweep_outbound_htlc_failure_update(false, true);
 +      do_test_sweep_outbound_htlc_failure_update(false, false);
 +      do_test_sweep_outbound_htlc_failure_update(true, false);
 +}
 +
 +#[test]
 +fn test_upfront_shutdown_script() {
 +      // BOLT 2 : Option upfront shutdown script, if a peer commits its closing_script at channel opening
 +      // enforce it at shutdown message
 +
 +      let mut config = UserConfig::new();
 +      config.channel_options.announced_channel = true;
 +      config.peer_channel_config_limits.force_announced_channel_preference = false;
 +      config.channel_options.commit_upfront_shutdown_pubkey = false;
 +      let nodes = create_network(3, &[None, Some(config), None]);
 +
 +      // We test that in case of peer committing upfront to a script, if it changes at closing, we refuse to sign
 +      let flags = LocalFeatures::new();
 +      let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 2, 1000000, 1000000, flags.clone(), flags.clone());
 +      nodes[0].node.close_channel(&OutPoint::new(chan.3.txid(), 0).to_channel_id()).unwrap();
 +      let mut node_0_shutdown = get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[2].node.get_our_node_id());
 +      node_0_shutdown.scriptpubkey = Builder::new().push_opcode(opcodes::all::OP_RETURN).into_script().to_p2sh();
 +      // Test that we enforce the upfront_scriptpubkey: by providing a different one at closing, we disconnect the peer
 +      if let Err(error) = nodes[2].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &node_0_shutdown) {
 +              if let Some(error) = error.action {
 +                      match error {
 +                              ErrorAction::SendErrorMessage { msg } => {
 +                                      assert_eq!(msg.data,"Got shutdown request with a scriptpubkey which did not match their previous scriptpubkey");
 +                              },
 +                              _ => { assert!(false); }
 +                      }
 +              } else { assert!(false); }
 +      } else { assert!(false); }
 +      let events = nodes[2].node.get_and_clear_pending_msg_events();
 +      assert_eq!(events.len(), 1);
 +      match events[0] {
 +              MessageSendEvent::BroadcastChannelUpdate { .. } => {},
 +              _ => panic!("Unexpected event"),
 +      }
 +
 +      // We test that in case of peer committing upfront to a script, if it doesn't change at closing, we sign
 +      let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 2, 1000000, 1000000, flags.clone(), flags.clone());
 +      nodes[0].node.close_channel(&OutPoint::new(chan.3.txid(), 0).to_channel_id()).unwrap();
 +      let node_0_shutdown = get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[2].node.get_our_node_id());
 +      // We test that in case of peer committing upfront to a script, if it doesn't change at closing, we sign
 +      if let Ok(_) = nodes[2].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &node_0_shutdown) {}
 +      else { assert!(false) }
 +      let events = nodes[2].node.get_and_clear_pending_msg_events();
 +      assert_eq!(events.len(), 1);
 +      match events[0] {
 +              MessageSendEvent::SendShutdown { node_id, .. } => { assert_eq!(node_id, nodes[0].node.get_our_node_id()) }
 +              _ => panic!("Unexpected event"),
 +      }
 +
 +      // We test that in case of peer non-signaling we don't enforce the committed script at channel opening
 +      let mut flags_no = LocalFeatures::new();
 +      flags_no.unset_upfront_shutdown_script();
 +      let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 1000000, flags_no, flags.clone());
 +      nodes[0].node.close_channel(&OutPoint::new(chan.3.txid(), 0).to_channel_id()).unwrap();
 +      let mut node_1_shutdown = get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id());
 +      node_1_shutdown.scriptpubkey = Builder::new().push_opcode(opcodes::all::OP_RETURN).into_script().to_p2sh();
 +      if let Ok(_) = nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &node_1_shutdown) {}
 +      else { assert!(false) }
 +      let events = nodes[1].node.get_and_clear_pending_msg_events();
 +      assert_eq!(events.len(), 1);
 +      match events[0] {
 +              MessageSendEvent::SendShutdown { node_id, .. } => { assert_eq!(node_id, nodes[0].node.get_our_node_id()) }
 +              _ => panic!("Unexpected event"),
 +      }
 +
 +      // We test that if user opt-out, we provide a zero-length script at channel opening and we are able to close
 +      // channel smoothly, opt-out is from channel initiator here
 +      let chan = create_announced_chan_between_nodes_with_value(&nodes, 1, 0, 1000000, 1000000, flags.clone(), flags.clone());
 +      nodes[1].node.close_channel(&OutPoint::new(chan.3.txid(), 0).to_channel_id()).unwrap();
 +      let mut node_0_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id());
 +      node_0_shutdown.scriptpubkey = Builder::new().push_opcode(opcodes::all::OP_RETURN).into_script().to_p2sh();
 +      if let Ok(_) = nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &node_0_shutdown) {}
 +      else { assert!(false) }
 +      let events = nodes[0].node.get_and_clear_pending_msg_events();
 +      assert_eq!(events.len(), 1);
 +      match events[0] {
 +              MessageSendEvent::SendShutdown { node_id, .. } => { assert_eq!(node_id, nodes[1].node.get_our_node_id()) }
 +              _ => panic!("Unexpected event"),
 +      }
 +
 +      //// We test that if user opt-out, we provide a zero-length script at channel opening and we are able to close
 +      //// channel smoothly
 +      let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 1000000, flags.clone(), flags.clone());
 +      nodes[1].node.close_channel(&OutPoint::new(chan.3.txid(), 0).to_channel_id()).unwrap();
 +      let mut node_0_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id());
 +      node_0_shutdown.scriptpubkey = Builder::new().push_opcode(opcodes::all::OP_RETURN).into_script().to_p2sh();
 +      if let Ok(_) = nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &node_0_shutdown) {}
 +      else { assert!(false) }
 +      let events = nodes[0].node.get_and_clear_pending_msg_events();
 +      assert_eq!(events.len(), 2);
 +      match events[0] {
 +              MessageSendEvent::SendShutdown { node_id, .. } => { assert_eq!(node_id, nodes[1].node.get_our_node_id()) }
 +              _ => panic!("Unexpected event"),
 +      }
 +      match events[1] {
 +              MessageSendEvent::SendClosingSigned { node_id, .. } => { assert_eq!(node_id, nodes[1].node.get_our_node_id()) }
 +              _ => panic!("Unexpected event"),
 +      }
 +}
 +
 +#[test]
 +fn test_user_configurable_csv_delay() {
 +      // We test our channel constructors yield errors when we pass them absurd csv delay
 +
 +      let mut low_our_to_self_config = UserConfig::new();
 +      low_our_to_self_config.own_channel_config.our_to_self_delay = 6;
 +      let mut high_their_to_self_config = UserConfig::new();
 +      high_their_to_self_config.peer_channel_config_limits.their_to_self_delay = 100;
 +      let nodes = create_network(2, &[Some(high_their_to_self_config.clone()), None]);
 +
 +      // We test config.our_to_self > BREAKDOWN_TIMEOUT is enforced in Channel::new_outbound()
 +      let keys_manager: Arc<KeysInterface> = Arc::new(KeysManager::new(&nodes[0].node_seed, Network::Testnet, Arc::new(test_utils::TestLogger::new()), 10, 20));
 +      if let Err(error) = Channel::new_outbound(&test_utils::TestFeeEstimator { sat_per_kw: 253 }, &keys_manager, nodes[1].node.get_our_node_id(), 1000000, 1000000, 0, Arc::new(test_utils::TestLogger::new()), &low_our_to_self_config) {
 +              match error {
 +                      APIError::APIMisuseError { err } => { assert_eq!(err, "Configured with an unreasonable our_to_self_delay putting user funds at risks"); },
 +                      _ => panic!("Unexpected event"),
 +              }
 +      } else { assert!(false) }
 +
 +      // We test config.our_to_self > BREAKDOWN_TIMEOUT is enforced in Channel::new_from_req()
 +      nodes[1].node.create_channel(nodes[0].node.get_our_node_id(), 1000000, 1000000, 42).unwrap();
 +      let mut open_channel = get_event_msg!(nodes[1], MessageSendEvent::SendOpenChannel, nodes[0].node.get_our_node_id());
 +      open_channel.to_self_delay = 200;
 +      if let Err(error) = Channel::new_from_req(&test_utils::TestFeeEstimator { sat_per_kw: 253 }, &keys_manager, nodes[1].node.get_our_node_id(), LocalFeatures::new(), &open_channel, 0, Arc::new(test_utils::TestLogger::new()), &low_our_to_self_config) {
 +              match error {
 +                      ChannelError::Close(err) => { assert_eq!(err, "Configured with an unreasonable our_to_self_delay putting user funds at risks"); },
 +                      _ => panic!("Unexpected event"),
 +              }
 +      } else { assert!(false); }
 +
 +      // We test msg.to_self_delay <= config.their_to_self_delay is enforced in Channel::accept_channel()
 +      nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 1000000, 1000000, 42).unwrap();
 +      nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), LocalFeatures::new(), &get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id())).unwrap();
 +      let mut accept_channel = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
 +      accept_channel.to_self_delay = 200;
 +      if let Err(error) = nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), LocalFeatures::new(), &accept_channel) {
 +              if let Some(error) = error.action {
 +                      match error {
 +                              ErrorAction::SendErrorMessage { msg } => {
 +                                      assert_eq!(msg.data,"They wanted our payments to be delayed by a needlessly long period");
 +                              },
 +                              _ => { assert!(false); }
 +                      }
 +              } else { assert!(false); }
 +      } else { assert!(false); }
 +
 +      // We test msg.to_self_delay <= config.their_to_self_delay is enforced in Channel::new_from_req()
 +      nodes[1].node.create_channel(nodes[0].node.get_our_node_id(), 1000000, 1000000, 42).unwrap();
 +      let mut open_channel = get_event_msg!(nodes[1], MessageSendEvent::SendOpenChannel, nodes[0].node.get_our_node_id());
 +      open_channel.to_self_delay = 200;
 +      if let Err(error) = Channel::new_from_req(&test_utils::TestFeeEstimator { sat_per_kw: 253 }, &keys_manager, nodes[1].node.get_our_node_id(), LocalFeatures::new(), &open_channel, 0, Arc::new(test_utils::TestLogger::new()), &high_their_to_self_config) {
 +              match error {
 +                      ChannelError::Close(err) => { assert_eq!(err, "They wanted our payments to be delayed by a needlessly long period"); },
 +                      _ => panic!("Unexpected event"),
 +              }
 +      } else { assert!(false); }
 +}