Merge pull request #1475 from atalw/2022-04-paymentforwarded-event
author valentinewallace <valentinewallace@users.noreply.github.com>
Mon, 16 May 2022 18:21:39 +0000 (14:21 -0400)
committer GitHub <noreply@github.com>
Mon, 16 May 2022 18:21:39 +0000 (14:21 -0400)
Expose `next_channel_id` in `PaymentForwarded` event

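This merge adds a `next_channel_id` field to the `PaymentForwarded` event (renaming the old `source_channel_id` to `prev_channel_id` in the process), so a forwarding node can see both the channel an HTLC arrived on and the channel it left on. A minimal sketch of consuming the extended event; the handler function and hex helper are hypothetical, and only the match arm reflects the new API:

    use lightning::util::events::Event;

    // Hypothetical event handler; both ids are Option<[u8; 32]> because they
    // may be unknown when events are replayed from data written by older
    // versions.
    fn handle_event(event: Event) {
        if let Event::PaymentForwarded {
            prev_channel_id, next_channel_id, fee_earned_msat, claim_from_onchain_tx, ..
        } = event {
            println!("forwarded {} -> {}, fee {:?} msat (claimed on chain: {})",
                prev_channel_id.map(hex_str).unwrap_or_default(),
                next_channel_id.map(hex_str).unwrap_or_default(),
                fee_earned_msat, claim_from_onchain_tx);
        }
    }

    fn hex_str(id: [u8; 32]) -> String {
        id.iter().map(|b| format!("{:02x}", b)).collect()
    }
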
fuzz/src/chanmon_consistency.rs
lightning/src/chain/chainmonitor.rs
lightning/src/chain/mod.rs
lightning/src/ln/channelmanager.rs
lightning/src/ln/functional_test_utils.rs
lightning/src/ln/functional_tests.rs
lightning/src/ln/reorg_tests.rs
lightning/src/ln/shutdown_tests.rs
lightning/src/util/events.rs
lightning/src/util/test_utils.rs

index 8472f8fa6278a4e158e810a600895545cbbd27ae,7327f37db4891ea03ecab0b5a1657f4fb3325ef2..5db658c5818195866843b1a2dc1d386507a3a633
@@@ -53,8 -53,8 +53,8 @@@ use lightning::routing::router::{Route
  use utils::test_logger::{self, Output};
  use utils::test_persister::TestPersister;
  
 -use bitcoin::secp256k1::key::{PublicKey,SecretKey};
 -use bitcoin::secp256k1::recovery::RecoverableSignature;
 +use bitcoin::secp256k1::{PublicKey,SecretKey};
 +use bitcoin::secp256k1::ecdsa::RecoverableSignature;
  use bitcoin::secp256k1::Secp256k1;
  
  use std::mem;
@@@ -148,7 -148,7 +148,7 @@@ impl chain::Watch<EnforcingSigner> for 
                self.chain_monitor.update_channel(funding_txo, update)
        }
  
-       fn release_pending_monitor_events(&self) -> Vec<MonitorEvent> {
+       fn release_pending_monitor_events(&self) -> Vec<(OutPoint, Vec<MonitorEvent>)> {
                return self.chain_monitor.release_pending_monitor_events();
        }
  }
index aae260e735bdbae5c0a538af8024e7e2ef2a78cb,e05c7e031214537b3f56ae3e56052c6800ed117d..503e6bdee0669551d1853932447a5db08fc92c17
@@@ -23,7 -23,7 +23,7 @@@
  //! events. The remote server would make use of [`ChainMonitor`] for block processing and for
  //! servicing [`ChannelMonitor`] updates from the client.
  
 -use bitcoin::blockdata::block::{Block, BlockHeader};
 +use bitcoin::blockdata::block::BlockHeader;
  use bitcoin::hash_types::Txid;
  
  use chain;
@@@ -235,7 -235,7 +235,7 @@@ pub struct ChainMonitor<ChannelSigner: 
        persister: P,
        /// "User-provided" (ie persistence-completion/-failed) [`MonitorEvent`]s. These came directly
        /// from the user and not from a [`ChannelMonitor`].
-       pending_monitor_events: Mutex<Vec<MonitorEvent>>,
+       pending_monitor_events: Mutex<Vec<(OutPoint, Vec<MonitorEvent>)>>,
        /// The best block height seen, used as a proxy for the passage of time.
        highest_chain_height: AtomicUsize,
  }
@@@ -299,7 -299,7 +299,7 @@@ where C::Target: chain::Filter
                                                        log_trace!(self.logger, "Finished syncing Channel Monitor for channel {}", log_funding_info!(monitor)),
                                                Err(ChannelMonitorUpdateErr::PermanentFailure) => {
                                                        monitor_state.channel_perm_failed.store(true, Ordering::Release);
-                                                       self.pending_monitor_events.lock().unwrap().push(MonitorEvent::UpdateFailed(*funding_outpoint));
+                                                       self.pending_monitor_events.lock().unwrap().push((*funding_outpoint, vec![MonitorEvent::UpdateFailed(*funding_outpoint)]));
                                                },
                                                Err(ChannelMonitorUpdateErr::TemporaryFailure) => {
                                                        log_debug!(self.logger, "Channel Monitor sync for channel {} in progress, holding events until completion!", log_funding_info!(monitor));
                                        // UpdateCompleted event.
                                        return Ok(());
                                }
-                               self.pending_monitor_events.lock().unwrap().push(MonitorEvent::UpdateCompleted {
+                               self.pending_monitor_events.lock().unwrap().push((funding_txo, vec![MonitorEvent::UpdateCompleted {
                                        funding_txo,
                                        monitor_update_id: monitor_data.monitor.get_latest_update_id(),
-                               });
+                               }]));
                        },
                        MonitorUpdateId { contents: UpdateOrigin::ChainSync(_) } => {
                                if !monitor_data.has_pending_chainsync_updates(&pending_monitor_updates) {
        /// channel_monitor_updated once with the highest ID.
        #[cfg(any(test, fuzzing))]
        pub fn force_channel_monitor_updated(&self, funding_txo: OutPoint, monitor_update_id: u64) {
-               self.pending_monitor_events.lock().unwrap().push(MonitorEvent::UpdateCompleted {
+               self.pending_monitor_events.lock().unwrap().push((funding_txo, vec![MonitorEvent::UpdateCompleted {
                        funding_txo,
                        monitor_update_id,
-               });
+               }]));
        }
  
        #[cfg(any(test, fuzzing, feature = "_test_utils"))]
@@@ -501,7 -501,9 +501,7 @@@ wher
        L::Target: Logger,
        P::Target: Persist<ChannelSigner>,
  {
 -      fn block_connected(&self, block: &Block, height: u32) {
 -              let header = &block.header;
 -              let txdata: Vec<_> = block.txdata.iter().enumerate().collect();
 +      fn filtered_block_connected(&self, header: &BlockHeader, txdata: &TransactionData, height: u32) {
                log_debug!(self.logger, "New best block {} at height {} provided via block_connected", header.block_hash(), height);
                self.process_chain_data(header, Some(height), &txdata, |monitor, txdata| {
                        monitor.block_connected(
@@@ -666,7 -668,7 +666,7 @@@ where C::Target: chain::Filter
                }
        }
  
-       fn release_pending_monitor_events(&self) -> Vec<MonitorEvent> {
+       fn release_pending_monitor_events(&self) -> Vec<(OutPoint, Vec<MonitorEvent>)> {
                let mut pending_monitor_events = self.pending_monitor_events.lock().unwrap().split_off(0);
                for monitor_state in self.monitors.read().unwrap().values() {
                        let is_pending_monitor_update = monitor_state.has_pending_chainsync_updates(&monitor_state.pending_monitor_updates.lock().unwrap());
                                        log_error!(self.logger, "   To avoid funds-loss, we are allowing monitor updates to be released.");
                                        log_error!(self.logger, "   This may cause duplicate payment events to be generated.");
                                }
-                               pending_monitor_events.append(&mut monitor_state.monitor.get_and_clear_pending_monitor_events());
+                               let monitor_events = monitor_state.monitor.get_and_clear_pending_monitor_events();
+                               if monitor_events.len() > 0 {
+                                       let monitor_outpoint = monitor_state.monitor.get_funding_txo().0;
+                                       pending_monitor_events.push((monitor_outpoint, monitor_events));
+                               }
                        }
                }
                pending_monitor_events
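`release_pending_monitor_events` now returns events grouped by the funding outpoint of the monitor that produced them, which is what lets `ChannelManager` attribute each `MonitorEvent` to a channel (and ultimately fill in `next_channel_id` for `PaymentForwarded`). A sketch of consuming the new shape, assuming `watch` implements `chain::Watch`:

    // Each entry pairs a channel's funding OutPoint with that monitor's events.
    for (funding_outpoint, events) in watch.release_pending_monitor_events() {
        // OutPoint::to_channel_id() maps the outpoint to the channel id.
        let channel_id: [u8; 32] = funding_outpoint.to_channel_id();
        for event in events {
            // ... dispatch `event`, tagged with `channel_id` ...
        }
    }
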
index 24eb09e7090b212ea9867aba9844ed6993c83735,0bc205fa16afacfcc3196e4caef3fa3fcb043a56..48aa712f39c1d55263f7cd5129e0b3cce3690d41
@@@ -87,20 -87,9 +87,20 @@@ pub trait Access 
  /// sourcing chain data using a block-oriented API should prefer this interface over [`Confirm`].
  /// Such clients fetch the entire header chain whereas clients using [`Confirm`] only fetch headers
  /// when needed.
 +///
 +/// By using [`Listen::filtered_block_connected`] this interface supports clients fetching the
 +/// entire header chain and only blocks with matching transaction data using BIP 157 filters or
 +/// other similar filtering.
  pub trait Listen {
 +      /// Notifies the listener that a block was added at the given height, with the transaction data
 +      /// possibly filtered.
 +      fn filtered_block_connected(&self, header: &BlockHeader, txdata: &TransactionData, height: u32);
 +
        /// Notifies the listener that a block was added at the given height.
 -      fn block_connected(&self, block: &Block, height: u32);
 +      fn block_connected(&self, block: &Block, height: u32) {
 +              let txdata: Vec<_> = block.txdata.iter().enumerate().collect();
 +              self.filtered_block_connected(&block.header, &txdata, height);
 +      }
  
        /// Notifies the listener that a block was removed at the given height.
        fn block_disconnected(&self, header: &BlockHeader, height: u32);
@@@ -302,7 -291,7 +302,7 @@@ pub trait Watch<ChannelSigner: Sign> 
        ///
        /// For details on asynchronous [`ChannelMonitor`] updating and returning
        /// [`MonitorEvent::UpdateCompleted`] here, see [`ChannelMonitorUpdateErr::TemporaryFailure`].
-       fn release_pending_monitor_events(&self) -> Vec<MonitorEvent>;
+       fn release_pending_monitor_events(&self) -> Vec<(OutPoint, Vec<MonitorEvent>)>;
  }
  
  /// The `Filter` trait defines behavior for indicating chain activity of interest pertaining to
@@@ -366,8 -355,8 +366,8 @@@ pub struct WatchedOutput 
  }
  
  impl<T: Listen> Listen for core::ops::Deref<Target = T> {
 -      fn block_connected(&self, block: &Block, height: u32) {
 -              (**self).block_connected(block, height);
 +      fn filtered_block_connected(&self, header: &BlockHeader, txdata: &TransactionData, height: u32) {
 +              (**self).filtered_block_connected(header, txdata, height);
        }
  
        fn block_disconnected(&self, header: &BlockHeader, height: u32) {
@@@ -380,9 -369,9 +380,9 @@@ wher
        T::Target: Listen,
        U::Target: Listen,
  {
 -      fn block_connected(&self, block: &Block, height: u32) {
 -              self.0.block_connected(block, height);
 -              self.1.block_connected(block, height);
 +      fn filtered_block_connected(&self, header: &BlockHeader, txdata: &TransactionData, height: u32) {
 +              self.0.filtered_block_connected(header, txdata, height);
 +              self.1.filtered_block_connected(header, txdata, height);
        }
  
        fn block_disconnected(&self, header: &BlockHeader, height: u32) {
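With `filtered_block_connected` now the required method and `block_connected` given a default body, implementors only have to handle the (possibly filtered) transaction data; full-block clients keep working unchanged. A minimal sketch, where `Tracker` and its behavior are hypothetical:

    use bitcoin::blockdata::block::BlockHeader;
    use lightning::chain::{Listen, transaction::TransactionData};

    struct Tracker; // hypothetical

    impl Listen for Tracker {
        fn filtered_block_connected(&self, header: &BlockHeader, txdata: &TransactionData, height: u32) {
            // Only transactions that survived BIP 157/158-style filtering need
            // to be present in txdata; block_connected() forwards full blocks
            // here via the new default implementation.
            let _ = (header, txdata, height);
        }

        fn block_disconnected(&self, header: &BlockHeader, height: u32) {
            let _ = (header, height);
        }
    }
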
index 066cddd42ccdb0245fbadbfba08eea89d6c498b4,fb3f71ee60a2bc493a821709e804ba9ff4c593b2..ac9e26b3f0d5498cda4e370212d85c25c79f6a6b
@@@ -18,7 -18,7 +18,7 @@@
  //! imply it needs to fail HTLCs/payments/channels it manages).
  //!
  
 -use bitcoin::blockdata::block::{Block, BlockHeader};
 +use bitcoin::blockdata::block::BlockHeader;
  use bitcoin::blockdata::transaction::Transaction;
  use bitcoin::blockdata::constants::genesis_block;
  use bitcoin::network::constants::Network;
@@@ -28,7 -28,7 +28,7 @@@ use bitcoin::hashes::sha256::Hash as Sh
  use bitcoin::hashes::sha256d::Hash as Sha256dHash;
  use bitcoin::hash_types::{BlockHash, Txid};
  
 -use bitcoin::secp256k1::key::{SecretKey,PublicKey};
 +use bitcoin::secp256k1::{SecretKey,PublicKey};
  use bitcoin::secp256k1::Secp256k1;
  use bitcoin::secp256k1::ecdh::SharedSecret;
  use bitcoin::secp256k1;
@@@ -48,7 -48,6 +48,7 @@@ use ln::msgs
  use ln::msgs::NetAddress;
  use ln::onion_utils;
  use ln::msgs::{ChannelMessageHandler, DecodeError, LightningError, MAX_VALUE_MSAT, OptionalField};
 +use ln::wire::Encode;
  use chain::keysinterface::{Sign, KeysInterface, KeysManager, InMemorySigner, Recipient};
  use util::config::UserConfig;
  use util::events::{EventHandler, EventsProvider, MessageSendEvent, MessageSendEventsProvider, ClosureReason};
@@@ -160,26 -159,20 +160,26 @@@ pub(crate) struct HTLCPreviousHopData 
  }
  
  enum OnionPayload {
 -      /// Contains a total_msat (which may differ from value if this is a Multi-Path Payment) and a
 -      /// payment_secret which prevents path-probing attacks and can associate different HTLCs which
 -      /// are part of the same payment.
 -      Invoice(msgs::FinalOnionHopData),
 +      /// Indicates this incoming onion payload is for the purpose of paying an invoice.
 +      Invoice {
 +              /// This is only here for backwards-compatibility in serialization, in the future it can be
 +              /// removed, breaking clients running 0.0.106 and earlier.
 +              _legacy_hop_data: msgs::FinalOnionHopData,
 +      },
        /// Contains the payer-provided preimage.
        Spontaneous(PaymentPreimage),
  }
  
 +/// HTLCs that are to us and can be failed/claimed by the user
  struct ClaimableHTLC {
        prev_hop: HTLCPreviousHopData,
        cltv_expiry: u32,
 +      /// The amount (in msats) of this MPP part
        value: u64,
        onion_payload: OnionPayload,
        timer_ticks: u8,
 +      /// The sum total of all MPP parts
 +      total_msat: u64,
  }
  
  /// A payment identifier used to uniquely identify a payment to LDK.
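Two related changes land here: `OnionPayload::Invoice` keeps its `FinalOnionHopData` only as `_legacy_hop_data` for serialization compatibility, and `ClaimableHTLC` gains an explicit `total_msat` so the MPP total no longer has to be dug out of the onion payload. That makes the multi-part completeness check uniform across invoice and keysend HTLCs; roughly, as a sketch over the (private) `ClaimableHTLC` type:

    // A payment becomes claimable once the values of all received parts sum
    // to the total each part declared (all parts must agree on total_msat).
    fn mpp_complete(htlcs: &[ClaimableHTLC]) -> bool {
        let received: u64 = htlcs.iter().map(|h| h.value).sum();
        htlcs.first().map_or(false, |h| received == h.total_msat)
    }
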
@@@ -1012,13 -1005,6 +1012,13 @@@ pub struct ChannelDetails 
        /// conflict-avoidance policy, exactly this amount is not likely to be spendable. However, we
        /// should be able to spend nearly this amount.
        pub outbound_capacity_msat: u64,
 +      /// The available outbound capacity for sending a single HTLC to the remote peer. This is
 +      /// similar to [`ChannelDetails::outbound_capacity_msat`] but it may be further restricted by
 +      /// the current state and per-HTLC limit(s). This is intended for use when routing, allowing us
 +      /// to use a limit as close as possible to the HTLC limit we can currently send.
 +      ///
 +      /// See also [`ChannelDetails::balance_msat`] and [`ChannelDetails::outbound_capacity_msat`].
 +      pub next_outbound_htlc_limit_msat: u64,
        /// The available inbound capacity for the remote peer to send HTLCs to us. This does not
        /// include any pending HTLCs which are not yet fully resolved (and, thus, whose balance is not
        /// available for inclusion in new inbound HTLCs).
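`next_outbound_htlc_limit_msat` gives callers a per-HTLC bound that can be tighter than `outbound_capacity_msat`, since a single new HTLC is further restricted by the channel's current state and per-HTLC limits. A hedged usage sketch for first-hop selection (`usable_first_hops` is hypothetical):

    use lightning::ln::channelmanager::ChannelDetails;

    // Filter by what one new HTLC can carry, not by total outbound capacity,
    // which might only be spendable across several HTLCs.
    fn usable_first_hops(channels: &[ChannelDetails], amt_msat: u64) -> Vec<&ChannelDetails> {
        channels.iter()
            .filter(|c| c.is_usable && c.next_outbound_htlc_limit_msat >= amt_msat)
            .collect()
    }
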
@@@ -1523,6 -1509,8 +1523,6 @@@ impl<Signer: Sign, M: Deref, T: Deref, 
        ///
        /// Non-proportional fees are fixed according to our risk using the provided fee estimator.
        ///
 -      /// panics if channel_value_satoshis is >= `MAX_FUNDING_SATOSHIS`!
 -      ///
        /// Users need to notify the new ChannelManager when a new block is connected or
        /// disconnected using its `block_connected` and `block_disconnected` methods, starting
        /// from after `params.latest_hash`.
                        let channel_state = self.channel_state.lock().unwrap();
                        res.reserve(channel_state.by_id.len());
                        for (channel_id, channel) in channel_state.by_id.iter().filter(f) {
 -                              let (inbound_capacity_msat, outbound_capacity_msat) = channel.get_inbound_outbound_available_balance_msat();
 -                              let balance_msat = channel.get_balance_msat();
 +                              let balance = channel.get_available_balances();
                                let (to_remote_reserve_satoshis, to_self_reserve_satoshis) =
                                        channel.get_holder_counterparty_selected_channel_reserve_satoshis();
                                res.push(ChannelDetails {
                                        inbound_scid_alias: channel.latest_inbound_scid_alias(),
                                        channel_value_satoshis: channel.get_value_satoshis(),
                                        unspendable_punishment_reserve: to_self_reserve_satoshis,
 -                                      balance_msat,
 -                                      inbound_capacity_msat,
 -                                      outbound_capacity_msat,
 +                                      balance_msat: balance.balance_msat,
 +                                      inbound_capacity_msat: balance.inbound_capacity_msat,
 +                                      outbound_capacity_msat: balance.outbound_capacity_msat,
 +                                      next_outbound_htlc_limit_msat: balance.next_outbound_htlc_limit_msat,
                                        user_channel_id: channel.get_user_id(),
                                        confirmations_required: channel.minimum_depth(),
                                        force_close_spend_delay: channel.get_counterparty_selected_contest_delay(),
                        return_malformed_err!("invalid ephemeral pubkey", 0x8000 | 0x4000 | 6);
                }
  
 -              let shared_secret = {
 -                      let mut arr = [0; 32];
 -                      arr.copy_from_slice(&SharedSecret::new(&msg.onion_routing_packet.public_key.unwrap(), &self.our_network_key)[..]);
 -                      arr
 -              };
 +              let shared_secret = SharedSecret::new(&msg.onion_routing_packet.public_key.unwrap(), &self.our_network_key).secret_bytes();
  
                if msg.onion_routing_packet.version != 0 {
                        //TODO: Spec doesn't indicate if we should only hash hop_data here (and in other
                                        break None;
                                }
                                {
 -                                      let mut res = VecWriter(Vec::with_capacity(chan_update.serialized_length() + 8 + 2));
 +                                      let mut res = VecWriter(Vec::with_capacity(chan_update.serialized_length() + 2 + 8 + 2));
                                        if let Some(chan_update) = chan_update {
                                                if code == 0x1000 | 11 || code == 0x1000 | 12 {
                                                        msg.amount_msat.write(&mut res).expect("Writes cannot fail");
                                                        // TODO: underspecified, follow https://github.com/lightningnetwork/lightning-rfc/issues/791
                                                        0u16.write(&mut res).expect("Writes cannot fail");
                                                }
 -                                              (chan_update.serialized_length() as u16).write(&mut res).expect("Writes cannot fail");
 +                                              (chan_update.serialized_length() as u16 + 2).write(&mut res).expect("Writes cannot fail");
 +                                              msgs::ChannelUpdate::TYPE.write(&mut res).expect("Writes cannot fail");
                                                chan_update.write(&mut res).expect("Writes cannot fail");
                                        }
                                        return_err!(err, code, &res.0[..]);
                };
  
                let msg_hash = Sha256dHash::hash(&unsigned.encode()[..]);
 -              let sig = self.secp_ctx.sign(&hash_to_message!(&msg_hash[..]), &self.our_network_key);
 +              let sig = self.secp_ctx.sign_ecdsa(&hash_to_message!(&msg_hash[..]), &self.our_network_key);
  
                Ok(msgs::ChannelUpdate {
                        signature: sig,
                                                                                        if let PendingHTLCRouting::Forward { onion_packet, .. } = routing {
                                                                                                let phantom_secret_res = self.keys_manager.get_node_secret(Recipient::PhantomNode);
                                                                                                if phantom_secret_res.is_ok() && fake_scid::is_valid_phantom(&self.fake_scid_rand_bytes, short_chan_id) {
 -                                                                                                      let phantom_shared_secret = {
 -                                                                                                              let mut arr = [0; 32];
 -                                                                                                              arr.copy_from_slice(&SharedSecret::new(&onion_packet.public_key.unwrap(), &phantom_secret_res.unwrap())[..]);
 -                                                                                                              arr
 -                                                                                                      };
 +                                                                                                      let phantom_shared_secret = SharedSecret::new(&onion_packet.public_key.unwrap(), &phantom_secret_res.unwrap()).secret_bytes();
                                                                                                        let next_hop = match onion_utils::decode_next_hop(phantom_shared_secret, &onion_packet.hop_data, onion_packet.hmac, payment_hash) {
                                                                                                                Ok(res) => res,
                                                                                                                Err(onion_utils::OnionDecodeErr::Malformed { err_msg, err_code }) => {
                                                        HTLCForwardInfo::AddHTLC { prev_short_channel_id, prev_htlc_id, forward_info: PendingHTLCInfo {
                                                                        routing, incoming_shared_secret, payment_hash, amt_to_forward, .. },
                                                                        prev_funding_outpoint } => {
 -                                                              let (cltv_expiry, onion_payload, phantom_shared_secret) = match routing {
 -                                                                      PendingHTLCRouting::Receive { payment_data, incoming_cltv_expiry, phantom_shared_secret } =>
 -                                                                              (incoming_cltv_expiry, OnionPayload::Invoice(payment_data), phantom_shared_secret),
 +                                                              let (cltv_expiry, onion_payload, payment_data, phantom_shared_secret) = match routing {
 +                                                                      PendingHTLCRouting::Receive { payment_data, incoming_cltv_expiry, phantom_shared_secret } => {
 +                                                                              let _legacy_hop_data = payment_data.clone();
 +                                                                              (incoming_cltv_expiry, OnionPayload::Invoice { _legacy_hop_data }, Some(payment_data), phantom_shared_secret)
 +                                                                      },
                                                                        PendingHTLCRouting::ReceiveKeysend { payment_preimage, incoming_cltv_expiry } =>
 -                                                                              (incoming_cltv_expiry, OnionPayload::Spontaneous(payment_preimage), None),
 +                                                                              (incoming_cltv_expiry, OnionPayload::Spontaneous(payment_preimage), None, None),
                                                                        _ => {
                                                                                panic!("short_channel_id == 0 should imply any pending_forward entries are of type Receive");
                                                                        }
                                                                        },
                                                                        value: amt_to_forward,
                                                                        timer_ticks: 0,
 +                                                                      total_msat: if let Some(data) = &payment_data { data.total_msat } else { amt_to_forward },
                                                                        cltv_expiry,
                                                                        onion_payload,
                                                                };
                                                                }
  
                                                                macro_rules! check_total_value {
 -                                                                      ($payment_data_total_msat: expr, $payment_secret: expr, $payment_preimage: expr) => {{
 +                                                                      ($payment_data: expr, $payment_preimage: expr) => {{
                                                                                let mut payment_received_generated = false;
                                                                                let htlcs = channel_state.claimable_htlcs.entry(payment_hash)
                                                                                        .or_insert(Vec::new());
                                                                                for htlc in htlcs.iter() {
                                                                                        total_value += htlc.value;
                                                                                        match &htlc.onion_payload {
 -                                                                                              OnionPayload::Invoice(htlc_payment_data) => {
 -                                                                                                      if htlc_payment_data.total_msat != $payment_data_total_msat {
 +                                                                                              OnionPayload::Invoice { .. } => {
 +                                                                                                      if htlc.total_msat != $payment_data.total_msat {
                                                                                                                log_trace!(self.logger, "Failing HTLCs with payment_hash {} as the HTLCs had inconsistent total values (eg {} and {})",
 -                                                                                                                      log_bytes!(payment_hash.0), $payment_data_total_msat, htlc_payment_data.total_msat);
 +                                                                                                                      log_bytes!(payment_hash.0), $payment_data.total_msat, htlc.total_msat);
                                                                                                                total_value = msgs::MAX_VALUE_MSAT;
                                                                                                        }
                                                                                                        if total_value >= msgs::MAX_VALUE_MSAT { break; }
                                                                                                _ => unreachable!(),
                                                                                        }
                                                                                }
 -                                                                              if total_value >= msgs::MAX_VALUE_MSAT || total_value > $payment_data_total_msat {
 +                                                                              if total_value >= msgs::MAX_VALUE_MSAT || total_value > $payment_data.total_msat {
                                                                                        log_trace!(self.logger, "Failing HTLCs with payment_hash {} as the total value {} ran over expected value {} (or HTLCs were inconsistent)",
 -                                                                                              log_bytes!(payment_hash.0), total_value, $payment_data_total_msat);
 +                                                                                              log_bytes!(payment_hash.0), total_value, $payment_data.total_msat);
                                                                                        fail_htlc!(claimable_htlc);
 -                                                                              } else if total_value == $payment_data_total_msat {
 +                                                                              } else if total_value == $payment_data.total_msat {
                                                                                        htlcs.push(claimable_htlc);
                                                                                        new_events.push(events::Event::PaymentReceived {
                                                                                                payment_hash,
                                                                                                purpose: events::PaymentPurpose::InvoicePayment {
                                                                                                        payment_preimage: $payment_preimage,
 -                                                                                                      payment_secret: $payment_secret,
 +                                                                                                      payment_secret: $payment_data.payment_secret,
                                                                                                },
                                                                                                amt: total_value,
                                                                                        });
                                                                match payment_secrets.entry(payment_hash) {
                                                                        hash_map::Entry::Vacant(_) => {
                                                                                match claimable_htlc.onion_payload {
 -                                                                                      OnionPayload::Invoice(ref payment_data) => {
 -                                                                                              let payment_preimage = match inbound_payment::verify(payment_hash, payment_data.clone(), self.highest_seen_timestamp.load(Ordering::Acquire) as u64, &self.inbound_payment_key, &self.logger) {
 +                                                                                      OnionPayload::Invoice { .. } => {
 +                                                                                              let payment_data = payment_data.unwrap();
 +                                                                                              let payment_preimage = match inbound_payment::verify(payment_hash, &payment_data, self.highest_seen_timestamp.load(Ordering::Acquire) as u64, &self.inbound_payment_key, &self.logger) {
                                                                                                        Ok(payment_preimage) => payment_preimage,
                                                                                                        Err(()) => {
                                                                                                                fail_htlc!(claimable_htlc);
                                                                                                                continue
                                                                                                        }
                                                                                                };
 -                                                                                              let payment_data_total_msat = payment_data.total_msat;
 -                                                                                              let payment_secret = payment_data.payment_secret.clone();
 -                                                                                              check_total_value!(payment_data_total_msat, payment_secret, payment_preimage);
 +                                                                                              check_total_value!(payment_data, payment_preimage);
                                                                                        },
                                                                                        OnionPayload::Spontaneous(preimage) => {
                                                                                                match channel_state.claimable_htlcs.entry(payment_hash) {
                                                                                }
                                                                        },
                                                                        hash_map::Entry::Occupied(inbound_payment) => {
 -                                                                              let payment_data =
 -                                                                                      if let OnionPayload::Invoice(ref data) = claimable_htlc.onion_payload {
 -                                                                                              data.clone()
 -                                                                                      } else {
 -                                                                                              log_trace!(self.logger, "Failing new keysend HTLC with payment_hash {} because we already have an inbound payment with the same payment hash", log_bytes!(payment_hash.0));
 -                                                                                              fail_htlc!(claimable_htlc);
 -                                                                                              continue
 -                                                                                      };
 +                                                                              if payment_data.is_none() {
 +                                                                                      log_trace!(self.logger, "Failing new keysend HTLC with payment_hash {} because we already have an inbound payment with the same payment hash", log_bytes!(payment_hash.0));
 +                                                                                      fail_htlc!(claimable_htlc);
 +                                                                                      continue
 +                                                                              };
 +                                                                              let payment_data = payment_data.unwrap();
                                                                                if inbound_payment.get().payment_secret != payment_data.payment_secret {
                                                                                        log_trace!(self.logger, "Failing new HTLC with payment_hash {} as it didn't match our expected payment secret.", log_bytes!(payment_hash.0));
                                                                                        fail_htlc!(claimable_htlc);
                                                                                                log_bytes!(payment_hash.0), payment_data.total_msat, inbound_payment.get().min_value_msat.unwrap());
                                                                                        fail_htlc!(claimable_htlc);
                                                                                } else {
 -                                                                                      let payment_received_generated = check_total_value!(payment_data.total_msat, payment_data.payment_secret, inbound_payment.get().payment_preimage);
 +                                                                                      let payment_received_generated = check_total_value!(payment_data, inbound_payment.get().payment_preimage);
                                                                                        if payment_received_generated {
                                                                                                inbound_payment.remove_entry();
                                                                                        }
                                                debug_assert!(false);
                                                return false;
                                        }
 -                                      if let OnionPayload::Invoice(ref final_hop_data) = htlcs[0].onion_payload {
 +                                      if let OnionPayload::Invoice { .. } = htlcs[0].onion_payload {
                                                // Check if we've received all the parts we need for an MPP (the value of the parts adds to total_msat).
                                                // In this case we're not going to handle any timeouts of the parts here.
 -                                              if final_hop_data.total_msat == htlcs.iter().fold(0, |total, htlc| total + htlc.value) {
 +                                              if htlcs[0].total_msat == htlcs.iter().fold(0, |total, htlc| total + htlc.value) {
                                                        return true;
                                                } else if htlcs.into_iter().any(|htlc| {
                                                        htlc.timer_ticks += 1;
        fn get_htlc_temp_fail_err_and_data(&self, desired_err_code: u16, scid: u64, chan: &Channel<Signer>) -> (u16, Vec<u8>) {
                debug_assert_eq!(desired_err_code & 0x1000, 0x1000);
                if let Ok(upd) = self.get_channel_update_for_onion(scid, chan) {
 -                      let mut enc = VecWriter(Vec::with_capacity(upd.serialized_length() + 4));
 +                      let mut enc = VecWriter(Vec::with_capacity(upd.serialized_length() + 6));
                        if desired_err_code == 0x1000 | 20 {
                                // TODO: underspecified, follow https://github.com/lightning/bolts/issues/791
                                0u16.write(&mut enc).expect("Writes cannot fail");
                        }
 -                      (upd.serialized_length() as u16).write(&mut enc).expect("Writes cannot fail");
 +                      (upd.serialized_length() as u16 + 2).write(&mut enc).expect("Writes cannot fail");
 +                      msgs::ChannelUpdate::TYPE.write(&mut enc).expect("Writes cannot fail");
                        upd.write(&mut enc).expect("Writes cannot fail");
                        (desired_err_code, enc.0)
                } else {
                }
        }
  
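The two `VecWriter` changes above fix how a `channel_update` is embedded in BOLT 4 failure messages: the encoded length now counts, and the payload now includes, the update's 2-byte message type (`msgs::ChannelUpdate::TYPE`, i.e. 258) in front of the serialized update, which is what other implementations expect. For a 0x1000|11 (`amount_below_minimum`) failure the resulting layout is:

    [u64: htlc_msat]          code-specific field
    [u16: len]                = channel_update serialized length + 2
    [u16: 258]                ChannelUpdate message type (previously omitted)
    [channel_update bytes]
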
-       fn claim_funds_internal(&self, mut channel_state_lock: MutexGuard<ChannelHolder<Signer>>, source: HTLCSource, payment_preimage: PaymentPreimage, forwarded_htlc_value_msat: Option<u64>, from_onchain: bool) {
+       fn claim_funds_internal(&self, mut channel_state_lock: MutexGuard<ChannelHolder<Signer>>, source: HTLCSource, payment_preimage: PaymentPreimage, forwarded_htlc_value_msat: Option<u64>, from_onchain: bool, next_channel_id: [u8; 32]) {
                match source {
                        HTLCSource::OutboundRoute { session_priv, payment_id, path, .. } => {
                                mem::drop(channel_state_lock);
                                                } else { None };
  
                                                let mut pending_events = self.pending_events.lock().unwrap();
+                                               let prev_channel_id = Some(prev_outpoint.to_channel_id());
+                                               let next_channel_id = Some(next_channel_id);
  
-                                               let source_channel_id = Some(prev_outpoint.to_channel_id());
                                                pending_events.push(events::Event::PaymentForwarded {
-                                                       source_channel_id,
                                                        fee_earned_msat,
                                                        claim_from_onchain_tx: from_onchain,
+                                                       prev_channel_id,
+                                                       next_channel_id,
                                                });
                                        }
                                }
                                hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id))
                        }
                };
-               self.claim_funds_internal(channel_lock, htlc_source, msg.payment_preimage.clone(), Some(forwarded_htlc_value), false);
+               self.claim_funds_internal(channel_lock, htlc_source, msg.payment_preimage.clone(), Some(forwarded_htlc_value), false, msg.channel_id);
                Ok(())
        }
  
                let mut failed_channels = Vec::new();
                let mut pending_monitor_events = self.chain_monitor.release_pending_monitor_events();
                let has_pending_monitor_events = !pending_monitor_events.is_empty();
-               for monitor_event in pending_monitor_events.drain(..) {
-                       match monitor_event {
-                               MonitorEvent::HTLCEvent(htlc_update) => {
-                                       if let Some(preimage) = htlc_update.payment_preimage {
-                                               log_trace!(self.logger, "Claiming HTLC with preimage {} from our monitor", log_bytes!(preimage.0));
-                                               self.claim_funds_internal(self.channel_state.lock().unwrap(), htlc_update.source, preimage, htlc_update.onchain_value_satoshis.map(|v| v * 1000), true);
-                                       } else {
-                                               log_trace!(self.logger, "Failing HTLC with hash {} from our monitor", log_bytes!(htlc_update.payment_hash.0));
-                                               self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_update.source, &htlc_update.payment_hash, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() });
-                                       }
-                               },
-                               MonitorEvent::CommitmentTxConfirmed(funding_outpoint) |
-                               MonitorEvent::UpdateFailed(funding_outpoint) => {
-                                       let mut channel_lock = self.channel_state.lock().unwrap();
-                                       let channel_state = &mut *channel_lock;
-                                       let by_id = &mut channel_state.by_id;
-                                       let pending_msg_events = &mut channel_state.pending_msg_events;
-                                       if let hash_map::Entry::Occupied(chan_entry) = by_id.entry(funding_outpoint.to_channel_id()) {
-                                               let mut chan = remove_channel!(self, channel_state, chan_entry);
-                                               failed_channels.push(chan.force_shutdown(false));
-                                               if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
-                                                       pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
-                                                               msg: update
+               for (funding_outpoint, mut monitor_events) in pending_monitor_events.drain(..) {
+                       for monitor_event in monitor_events.drain(..) {
+                               match monitor_event {
+                                       MonitorEvent::HTLCEvent(htlc_update) => {
+                                               if let Some(preimage) = htlc_update.payment_preimage {
+                                                       log_trace!(self.logger, "Claiming HTLC with preimage {} from our monitor", log_bytes!(preimage.0));
+                                                       self.claim_funds_internal(self.channel_state.lock().unwrap(), htlc_update.source, preimage, htlc_update.onchain_value_satoshis.map(|v| v * 1000), true, funding_outpoint.to_channel_id());
+                                               } else {
+                                                       log_trace!(self.logger, "Failing HTLC with hash {} from our monitor", log_bytes!(htlc_update.payment_hash.0));
+                                                       self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_update.source, &htlc_update.payment_hash, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() });
+                                               }
+                                       },
+                                       MonitorEvent::CommitmentTxConfirmed(funding_outpoint) |
+                                       MonitorEvent::UpdateFailed(funding_outpoint) => {
+                                               let mut channel_lock = self.channel_state.lock().unwrap();
+                                               let channel_state = &mut *channel_lock;
+                                               let by_id = &mut channel_state.by_id;
+                                               let pending_msg_events = &mut channel_state.pending_msg_events;
+                                               if let hash_map::Entry::Occupied(chan_entry) = by_id.entry(funding_outpoint.to_channel_id()) {
+                                                       let mut chan = remove_channel!(self, channel_state, chan_entry);
+                                                       failed_channels.push(chan.force_shutdown(false));
+                                                       if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
+                                                               pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
+                                                                       msg: update
+                                                               });
+                                                       }
+                                                       let reason = if let MonitorEvent::UpdateFailed(_) = monitor_event {
+                                                               ClosureReason::ProcessingError { err: "Failed to persist ChannelMonitor update during chain sync".to_string() }
+                                                       } else {
+                                                               ClosureReason::CommitmentTxConfirmed
+                                                       };
+                                                       self.issue_channel_close_events(&chan, reason);
+                                                       pending_msg_events.push(events::MessageSendEvent::HandleError {
+                                                               node_id: chan.get_counterparty_node_id(),
+                                                               action: msgs::ErrorAction::SendErrorMessage {
+                                                                       msg: msgs::ErrorMessage { channel_id: chan.channel_id(), data: "Channel force-closed".to_owned() }
+                                                               },
                                                        });
                                                }
-                                               let reason = if let MonitorEvent::UpdateFailed(_) = monitor_event {
-                                                       ClosureReason::ProcessingError { err: "Failed to persist ChannelMonitor update during chain sync".to_string() }
-                                               } else {
-                                                       ClosureReason::CommitmentTxConfirmed
-                                               };
-                                               self.issue_channel_close_events(&chan, reason);
-                                               pending_msg_events.push(events::MessageSendEvent::HandleError {
-                                                       node_id: chan.get_counterparty_node_id(),
-                                                       action: msgs::ErrorAction::SendErrorMessage {
-                                                               msg: msgs::ErrorMessage { channel_id: chan.channel_id(), data: "Channel force-closed".to_owned() }
-                                                       },
-                                               });
-                                       }
-                               },
-                               MonitorEvent::UpdateCompleted { funding_txo, monitor_update_id } => {
-                                       self.channel_monitor_updated(&funding_txo, monitor_update_id);
-                               },
+                                       },
+                                       MonitorEvent::UpdateCompleted { funding_txo, monitor_update_id } => {
+                                               self.channel_monitor_updated(&funding_txo, monitor_update_id);
+                                       },
+                               }
                        }
                }
  
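`claim_funds_internal` now receives the `next_channel_id` explicitly: the `update_fulfill_htlc` path passes `msg.channel_id`, while the monitor-event path derives it from the monitor's funding outpoint. The derivation follows the BOLT 2 rule; roughly what `OutPoint::to_channel_id` does:

    // The channel id is the funding txid with its last two bytes XORed with
    // the funding output index (sketch; see OutPoint::to_channel_id).
    fn to_channel_id(txid: [u8; 32], index: u16) -> [u8; 32] {
        let mut id = txid;
        id[30] ^= (index >> 8) as u8;
        id[31] ^= (index & 0xff) as u8;
        id
    }
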
@@@ -5309,17 -5307,18 +5313,17 @@@ wher
        F::Target: FeeEstimator,
        L::Target: Logger,
  {
 -      fn block_connected(&self, block: &Block, height: u32) {
 +      fn filtered_block_connected(&self, header: &BlockHeader, txdata: &TransactionData, height: u32) {
                {
                        let best_block = self.best_block.read().unwrap();
 -                      assert_eq!(best_block.block_hash(), block.header.prev_blockhash,
 +                      assert_eq!(best_block.block_hash(), header.prev_blockhash,
                                "Blocks must be connected in chain-order - the connected header must build on the last connected header");
                        assert_eq!(best_block.height(), height - 1,
                                "Blocks must be connected in chain-order - the connected block height must be one greater than the previous height");
                }
  
 -              let txdata: Vec<_> = block.txdata.iter().enumerate().collect();
 -              self.transactions_confirmed(&block.header, &txdata, height);
 -              self.best_block_updated(&block.header, height);
 +              self.transactions_confirmed(header, txdata, height);
 +              self.best_block_updated(header, height);
        }
  
        fn block_disconnected(&self, header: &BlockHeader, height: u32) {
@@@ -5698,21 -5697,39 +5702,21 @@@ impl<Signer: Sign, M: Deref , T: Deref 
                        let channel_state = &mut *channel_state_lock;
                        let pending_msg_events = &mut channel_state.pending_msg_events;
                        let short_to_id = &mut channel_state.short_to_id;
 -                      if no_connection_possible {
 -                              log_debug!(self.logger, "Failing all channels with {} due to no_connection_possible", log_pubkey!(counterparty_node_id));
 -                              channel_state.by_id.retain(|_, chan| {
 -                                      if chan.get_counterparty_node_id() == *counterparty_node_id {
 +                      log_debug!(self.logger, "Marking channels with {} disconnected and generating channel_updates. We believe we {} make future connections to this peer.",
 +                              log_pubkey!(counterparty_node_id), if no_connection_possible { "cannot" } else { "can" });
 +                      channel_state.by_id.retain(|_, chan| {
 +                              if chan.get_counterparty_node_id() == *counterparty_node_id {
 +                                      chan.remove_uncommitted_htlcs_and_mark_paused(&self.logger);
 +                                      if chan.is_shutdown() {
                                                update_maps_on_chan_removal!(self, short_to_id, chan);
 -                                              failed_channels.push(chan.force_shutdown(true));
 -                                              if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
 -                                                      pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
 -                                                              msg: update
 -                                                      });
 -                                              }
                                                self.issue_channel_close_events(chan, ClosureReason::DisconnectedPeer);
 -                                              false
 +                                              return false;
                                        } else {
 -                                              true
 +                                              no_channels_remain = false;
                                        }
 -                              });
 -                      } else {
 -                              log_debug!(self.logger, "Marking channels with {} disconnected and generating channel_updates", log_pubkey!(counterparty_node_id));
 -                              channel_state.by_id.retain(|_, chan| {
 -                                      if chan.get_counterparty_node_id() == *counterparty_node_id {
 -                                              chan.remove_uncommitted_htlcs_and_mark_paused(&self.logger);
 -                                              if chan.is_shutdown() {
 -                                                      update_maps_on_chan_removal!(self, short_to_id, chan);
 -                                                      self.issue_channel_close_events(chan, ClosureReason::DisconnectedPeer);
 -                                                      return false;
 -                                              } else {
 -                                                      no_channels_remain = false;
 -                                              }
 -                                      }
 -                                      true
 -                              })
 -                      }
 +                              }
 +                              true
 +                      });
                        pending_msg_events.retain(|msg| {
                                match msg {
                                        &events::MessageSendEvent::SendAcceptChannel { ref node_id, .. } => node_id != counterparty_node_id,
@@@ -5924,9 -5941,6 +5928,9 @@@ impl_writeable_tlv_based!(ChannelDetail
        (14, user_channel_id, required),
        (16, balance_msat, required),
        (18, outbound_capacity_msat, required),
 +      // Note that by the time we get past the required read above, outbound_capacity_msat will be
 +      // filled in, so we can safely unwrap it here.
 +      (19, next_outbound_htlc_limit_msat, (default_value, outbound_capacity_msat.0.unwrap())),
        (20, inbound_capacity_msat, required),
        (22, confirmations_required, option),
        (24, force_close_spend_delay, option),
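The new field is written at odd TLV type 19 with a `default_value`: odd types are skippable, so 0.0.106 readers ignore it, while new readers deserializing old data fall back to `outbound_capacity_msat` as the closest available bound. The same pattern on a hypothetical struct:

    struct ExampleDetails {
        capacity_msat: u64,
        per_htlc_limit_msat: u64,
    }

    impl_writeable_tlv_based!(ExampleDetails, {
        (0, capacity_msat, required),
        // Odd type 1: older readers skip it; when absent on read it defaults
        // to the already-read capacity_msat.
        (1, per_htlc_limit_msat, (default_value, capacity_msat.0.unwrap())),
    });
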
@@@ -6052,21 -6066,20 +6056,21 @@@ impl_writeable_tlv_based!(HTLCPreviousH
  impl Writeable for ClaimableHTLC {
        fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
                let payment_data = match &self.onion_payload {
 -                      OnionPayload::Invoice(data) => Some(data.clone()),
 +                      OnionPayload::Invoice { _legacy_hop_data } => Some(_legacy_hop_data),
                        _ => None,
                };
                let keysend_preimage = match self.onion_payload {
 -                      OnionPayload::Invoice(_) => None,
 +                      OnionPayload::Invoice { .. } => None,
                        OnionPayload::Spontaneous(preimage) => Some(preimage.clone()),
                };
 -              write_tlv_fields!
 -              (writer,
 -               {
 -                 (0, self.prev_hop, required), (2, self.value, required),
 -                 (4, payment_data, option), (6, self.cltv_expiry, required),
 -                       (8, keysend_preimage, option),
 -               });
 +              write_tlv_fields!(writer, {
 +                      (0, self.prev_hop, required),
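 +                      // `total_msat` is written with the odd TLV type 1 so that older readers,
 +                      // which do not know the field, can skip it rather than failing to read.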
 +                      (1, self.total_msat, required),
 +                      (2, self.value, required),
 +                      (4, payment_data, option),
 +                      (6, self.cltv_expiry, required),
 +                      (8, keysend_preimage, option),
 +              });
                Ok(())
        }
  }
@@@ -6077,41 -6090,32 +6081,41 @@@ impl Readable for ClaimableHTLC 
                let mut value = 0;
                let mut payment_data: Option<msgs::FinalOnionHopData> = None;
                let mut cltv_expiry = 0;
 +              let mut total_msat = None;
                let mut keysend_preimage: Option<PaymentPreimage> = None;
 -              read_tlv_fields!
 -              (reader,
 -               {
 -                 (0, prev_hop, required), (2, value, required),
 -                 (4, payment_data, option), (6, cltv_expiry, required),
 -                       (8, keysend_preimage, option)
 -               });
 +              read_tlv_fields!(reader, {
 +                      (0, prev_hop, required),
 +                      (1, total_msat, option),
 +                      (2, value, required),
 +                      (4, payment_data, option),
 +                      (6, cltv_expiry, required),
 +                      (8, keysend_preimage, option)
 +              });
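 +              // If type 1 was absent (an older serialization), `total_msat` is reconstructed
 +              // below from the legacy `value` or `payment_data` fields.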
                let onion_payload = match keysend_preimage {
                        Some(p) => {
                                if payment_data.is_some() {
                                        return Err(DecodeError::InvalidValue)
                                }
 +                              if total_msat.is_none() {
 +                                      total_msat = Some(value);
 +                              }
                                OnionPayload::Spontaneous(p)
                        },
                        None => {
                                if payment_data.is_none() {
                                        return Err(DecodeError::InvalidValue)
                                }
 -                              OnionPayload::Invoice(payment_data.unwrap())
 +                              if total_msat.is_none() {
 +                                      total_msat = Some(payment_data.as_ref().unwrap().total_msat);
 +                              }
 +                              OnionPayload::Invoice { _legacy_hop_data: payment_data.unwrap() }
                        },
                };
                Ok(Self {
                        prev_hop: prev_hop.0.unwrap(),
                        timer_ticks: 0,
                        value,
 +                      total_msat: total_msat.unwrap(),
                        onion_payload,
                        cltv_expiry,
                })
@@@ -7312,7 -7316,7 +7316,7 @@@ mod tests 
                // payment verification fails as expected.
                let mut bad_payment_hash = payment_hash.clone();
                bad_payment_hash.0[0] += 1;
 -              match inbound_payment::verify(bad_payment_hash, payment_data.clone(), nodes[0].node.highest_seen_timestamp.load(Ordering::Acquire) as u64, &nodes[0].node.inbound_payment_key, &nodes[0].logger) {
 +              match inbound_payment::verify(bad_payment_hash, &payment_data, nodes[0].node.highest_seen_timestamp.load(Ordering::Acquire) as u64, &nodes[0].node.inbound_payment_key, &nodes[0].logger) {
                        Ok(_) => panic!("Unexpected ok"),
                        Err(()) => {
                                nodes[0].logger.assert_log_contains("lightning::ln::inbound_payment".to_string(), "Failing HTLC with user-generated payment_hash".to_string(), 1);
                }
  
                // Check that using the original payment hash succeeds.
 -              assert!(inbound_payment::verify(payment_hash, payment_data, nodes[0].node.highest_seen_timestamp.load(Ordering::Acquire) as u64, &nodes[0].node.inbound_payment_key, &nodes[0].logger).is_ok());
 +              assert!(inbound_payment::verify(payment_hash, &payment_data, nodes[0].node.highest_seen_timestamp.load(Ordering::Acquire) as u64, &nodes[0].node.inbound_payment_key, &nodes[0].logger).is_ok());
        }
  }
  
index b4807ea688629a5a1940a1bb37e9b9ba56aa9760,dbeb9cc44a33f1840593f77cc59844fe1d153b3d..39950de9afd8aa7359cf8584c9d804675e161411
@@@ -35,7 -35,7 +35,7 @@@ use bitcoin::network::constants::Networ
  
  use bitcoin::hash_types::BlockHash;
  
 -use bitcoin::secp256k1::key::PublicKey;
 +use bitcoin::secp256k1::PublicKey;
  
  use io;
  use prelude::*;
@@@ -859,7 -859,7 +859,7 @@@ macro_rules! check_spends 
                        for output in $tx.output.iter() {
                                total_value_out += output.value;
                        }
 -                      let min_fee = ($tx.get_weight() as u64 + 3) / 4; // One sat per vbyte (ie per weight/4, rounded up)
 +                      let min_fee = ($tx.weight() as u64 + 3) / 4; // One sat per vbyte (ie per weight/4, rounded up)
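 +                      // e.g. a 600-weight (150-vbyte) tx requires at least (600 + 3) / 4 = 150 sats in fees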
                        // Input amount - output amount = fee, so check that out + min_fee is smaller than input
                        assert!(total_value_out + min_fee <= total_value_in);
                        $tx.verify(get_output).unwrap();
@@@ -1167,21 -1167,6 +1167,21 @@@ macro_rules! get_payment_preimage_hash 
        }
  }
  
 +#[macro_export]
 +macro_rules! get_route {
 +      ($send_node: expr, $payment_params: expr, $recv_value: expr, $cltv: expr) => {{
 +              use $crate::chain::keysinterface::KeysInterface;
 +              let scorer = $crate::util::test_utils::TestScorer::with_penalty(0);
 +              let keys_manager = $crate::util::test_utils::TestKeysInterface::new(&[0u8; 32], bitcoin::network::constants::Network::Testnet);
 +              let random_seed_bytes = keys_manager.get_secure_random_bytes();
 +              $crate::routing::router::get_route(
 +                      &$send_node.node.get_our_node_id(), &$payment_params, &$send_node.network_graph.read_only(),
 +                      Some(&$send_node.node.list_usable_channels().iter().collect::<Vec<_>>()),
 +                      $recv_value, $cltv, $send_node.logger, &scorer, &random_seed_bytes
 +              )
 +      }}
 +}
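 +// Illustrative usage, mirroring the call sites below:
 +// let route = get_route!(nodes[0], payment_params, recv_value, TEST_FINAL_CLTV).unwrap();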
 +
  #[cfg(test)]
  #[macro_export]
  macro_rules! get_route_and_payment_hash {
                $crate::get_route_and_payment_hash!($send_node, $recv_node, payment_params, $recv_value, TEST_FINAL_CLTV)
        }};
        ($send_node: expr, $recv_node: expr, $payment_params: expr, $recv_value: expr, $cltv: expr) => {{
 -              use $crate::chain::keysinterface::KeysInterface;
                let (payment_preimage, payment_hash, payment_secret) = $crate::get_payment_preimage_hash!($recv_node, Some($recv_value));
 -              let scorer = $crate::util::test_utils::TestScorer::with_penalty(0);
 -              let keys_manager = $crate::util::test_utils::TestKeysInterface::new(&[0u8; 32], bitcoin::network::constants::Network::Testnet);
 -              let random_seed_bytes = keys_manager.get_secure_random_bytes();
 -              let route = $crate::routing::router::get_route(
 -                      &$send_node.node.get_our_node_id(), &$payment_params, &$send_node.network_graph.read_only(),
 -                      Some(&$send_node.node.list_usable_channels().iter().collect::<Vec<_>>()),
 -                      $recv_value, $cltv, $send_node.logger, &scorer, &random_seed_bytes
 -              ).unwrap();
 -              (route, payment_hash, payment_preimage, payment_secret)
 +              let route = $crate::get_route!($send_node, $payment_params, $recv_value, $cltv);
 +              (route.unwrap(), payment_hash, payment_preimage, payment_secret)
        }}
  }
  
@@@ -1334,15 -1327,20 +1334,20 @@@ macro_rules! expect_payment_path_succes
  }
  
  macro_rules! expect_payment_forwarded {
-       ($node: expr, $source_node: expr, $expected_fee: expr, $upstream_force_closed: expr) => {
+       ($node: expr, $prev_node: expr, $next_node: expr, $expected_fee: expr, $upstream_force_closed: expr, $downstream_force_closed: expr) => {
                let events = $node.node.get_and_clear_pending_events();
                assert_eq!(events.len(), 1);
                match events[0] {
-                       Event::PaymentForwarded { fee_earned_msat, source_channel_id, claim_from_onchain_tx } => {
+                       Event::PaymentForwarded { fee_earned_msat, prev_channel_id, claim_from_onchain_tx, next_channel_id } => {
                                assert_eq!(fee_earned_msat, $expected_fee);
                                if fee_earned_msat.is_some() {
-                                       // Is the event channel_id in one of the channels between the two nodes?
-                                       assert!($node.node.list_channels().iter().any(|x| x.counterparty.node_id == $source_node.node.get_our_node_id() && x.channel_id == source_channel_id.unwrap()));
+                                       // Is the event prev_channel_id in one of the channels between the two nodes?
+                                       assert!($node.node.list_channels().iter().any(|x| x.counterparty.node_id == $prev_node.node.get_our_node_id() && x.channel_id == prev_channel_id.unwrap()));
+                               }
+                               // We check for force closures since a force-closed channel is removed from the
+                               // node's channel list
+                               if !$downstream_force_closed {
+                                       assert!($node.node.list_channels().iter().any(|x| x.counterparty.node_id == $next_node.node.get_our_node_id() && x.channel_id == next_channel_id.unwrap()));
                                }
                                assert_eq!(claim_from_onchain_tx, $upstream_force_closed);
                        },
@@@ -1586,7 -1584,7 +1591,7 @@@ pub fn do_claim_payment_along_route<'a
                                {
                                        $node.node.handle_update_fulfill_htlc(&$prev_node.node.get_our_node_id(), &next_msgs.as_ref().unwrap().0);
                                        let fee = $node.node.channel_state.lock().unwrap().by_id.get(&next_msgs.as_ref().unwrap().0.channel_id).unwrap().config.forwarding_fee_base_msat;
-                                       expect_payment_forwarded!($node, $next_node, Some(fee as u64), false);
+                                       expect_payment_forwarded!($node, $next_node, $prev_node, Some(fee as u64), false, false);
                                        expected_total_fee_msat += fee as u64;
                                        check_added_monitors!($node, 1);
                                        let new_next_msgs = if $new_msgs {
@@@ -1657,7 -1655,15 +1662,7 @@@ pub const TEST_FINAL_CLTV: u32 = 70
  pub fn route_payment<'a, 'b, 'c>(origin_node: &Node<'a, 'b, 'c>, expected_route: &[&Node<'a, 'b, 'c>], recv_value: u64) -> (PaymentPreimage, PaymentHash, PaymentSecret) {
        let payment_params = PaymentParameters::from_node_id(expected_route.last().unwrap().node.get_our_node_id())
                .with_features(InvoiceFeatures::known());
 -      let network_graph = origin_node.network_graph.read_only();
 -      let scorer = test_utils::TestScorer::with_penalty(0);
 -      let seed = [0u8; 32];
 -      let keys_manager = test_utils::TestKeysInterface::new(&seed, Network::Testnet);
 -      let random_seed_bytes = keys_manager.get_secure_random_bytes();
 -      let route = get_route(
 -              &origin_node.node.get_our_node_id(), &payment_params, &network_graph,
 -              Some(&origin_node.node.list_usable_channels().iter().collect::<Vec<_>>()),
 -              recv_value, TEST_FINAL_CLTV, origin_node.logger, &scorer, &random_seed_bytes).unwrap();
 +      let route = get_route!(origin_node, payment_params, recv_value, TEST_FINAL_CLTV).unwrap();
        assert_eq!(route.paths.len(), 1);
        assert_eq!(route.paths[0].len(), expected_route.len());
        for (node, hop) in expected_route.iter().zip(route.paths[0].iter()) {
@@@ -2004,10 -2010,7 +2009,10 @@@ pub fn check_preimage_claim<'a, 'b, 'c>
        for tx in prev_txn {
                if node_txn[0].input[0].previous_output.txid == tx.txid() {
                        check_spends!(node_txn[0], tx);
 -                      assert!(node_txn[0].input[0].witness[2].len() > 106); // must spend an htlc output
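 +                      // The witness is now walked via its iterator rather than indexed directly;
 +                      // the third item is the one whose length identifies an HTLC spend.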
 +                      let mut iter = node_txn[0].input[0].witness.iter();
 +                      iter.next().expect("expected 3 witness items");
 +                      iter.next().expect("expected 3 witness items");
 +                      assert!(iter.next().expect("expected 3 witness items").len() > 106); // must spend an htlc output
                        assert_eq!(tx.input.len(), 1); // must spend a commitment tx
  
                        found_prev = true;
index 3961b70773bf3c15c8d51c5335c16fbd18489250,c09ff85f46a9bfe8d8991a9b3aa5ed901fe0f203..8b06f9fe4f41b9eb209589c55b15a69069ee0826
@@@ -26,7 -26,7 +26,7 @@@ use ln::chan_utils::{htlc_success_tx_we
  use routing::router::{PaymentParameters, Route, RouteHop, RouteParameters, find_route, get_route};
  use ln::features::{ChannelFeatures, InitFeatures, InvoiceFeatures, NodeFeatures};
  use ln::msgs;
 -use ln::msgs::{ChannelMessageHandler, RoutingMessageHandler, ErrorAction};
 +use ln::msgs::{ChannelMessageHandler, RoutingMessageHandler, OptionalField, ErrorAction};
  use util::enforcing_trait_impls::EnforcingSigner;
  use util::{byte_utils, test_utils};
  use util::events::{Event, MessageSendEvent, MessageSendEventsProvider, PaymentPurpose, ClosureReason};
@@@ -42,7 -42,7 +42,7 @@@ use bitcoin::blockdata::constants::gene
  use bitcoin::network::constants::Network;
  
  use bitcoin::secp256k1::Secp256k1;
 -use bitcoin::secp256k1::key::{PublicKey,SecretKey};
 +use bitcoin::secp256k1::{PublicKey,SecretKey};
  
  use regex;
  
@@@ -58,12 -58,9 +58,12 @@@ use ln::chan_utils::CommitmentTransacti
  #[test]
  fn test_insane_channel_opens() {
        // Stand up a network of 2 nodes
 +      use ln::channel::TOTAL_BITCOIN_SUPPLY_SATOSHIS;
 +      let mut cfg = UserConfig::default();
 +      cfg.peer_channel_config_limits.max_funding_satoshis = TOTAL_BITCOIN_SUPPLY_SATOSHIS + 1;
        let chanmon_cfgs = create_chanmon_cfgs(2);
        let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
 -      let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
 +      let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, Some(cfg)]);
        let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
  
        // Instantiate channel parameters where we push the maximum msats given our
                } else { assert!(false); }
        };
  
 -      use ln::channel::MAX_FUNDING_SATOSHIS;
        use ln::channelmanager::MAX_LOCAL_BREAKDOWN_TIMEOUT;
  
        // Test all mutations that would make the channel open message insane
 -      insane_open_helper(format!("Funding must be smaller than {}. It was {}", MAX_FUNDING_SATOSHIS, MAX_FUNDING_SATOSHIS).as_str(), |mut msg| { msg.funding_satoshis = MAX_FUNDING_SATOSHIS; msg });
 +      insane_open_helper(format!("Per our config, funding must be at most {}. It was {}", TOTAL_BITCOIN_SUPPLY_SATOSHIS + 1, TOTAL_BITCOIN_SUPPLY_SATOSHIS + 2).as_str(), |mut msg| { msg.funding_satoshis = TOTAL_BITCOIN_SUPPLY_SATOSHIS + 2; msg });
 +      insane_open_helper(format!("Funding must be smaller than the total bitcoin supply. It was {}", TOTAL_BITCOIN_SUPPLY_SATOSHIS).as_str(), |mut msg| { msg.funding_satoshis = TOTAL_BITCOIN_SUPPLY_SATOSHIS; msg });
  
        insane_open_helper("Bogus channel_reserve_satoshis", |mut msg| { msg.channel_reserve_satoshis = msg.funding_satoshis + 1; msg });
  
 -      insane_open_helper(r"push_msat \d+ was larger than funding value \d+", |mut msg| { msg.push_msat = (msg.funding_satoshis - msg.channel_reserve_satoshis) * 1000 + 1; msg });
 +      insane_open_helper(r"push_msat \d+ was larger than channel amount minus reserve \(\d+\)", |mut msg| { msg.push_msat = (msg.funding_satoshis - msg.channel_reserve_satoshis) * 1000 + 1; msg });
  
        insane_open_helper("Peer never wants payout outputs?", |mut msg| { msg.dust_limit_satoshis = msg.funding_satoshis + 1 ; msg });
  
        insane_open_helper("max_accepted_htlcs was 484. It must not be larger than 483", |mut msg| { msg.max_accepted_htlcs = 484; msg });
  }
  
 +#[test]
 +fn test_funding_exceeds_no_wumbo_limit() {
 +      // Test that if a peer does not support wumbo channels, we'll refuse to open a wumbo channel to
 +      // them.
 +      use ln::channel::MAX_FUNDING_SATOSHIS_NO_WUMBO;
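 +      // Per BOLT 2, funding for non-wumbo channels must stay below 2^24 sats;
 +      // MAX_FUNDING_SATOSHIS_NO_WUMBO encodes that cap.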
 +      let chanmon_cfgs = create_chanmon_cfgs(2);
 +      let mut node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
 +      node_cfgs[1].features = InitFeatures::known().clear_wumbo();
 +      let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
 +      let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
 +
 +      match nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), MAX_FUNDING_SATOSHIS_NO_WUMBO + 1, 0, 42, None) {
 +              Err(APIError::APIMisuseError { err }) => {
 +                      assert_eq!(format!("funding_value must not exceed {}, it was {}", MAX_FUNDING_SATOSHIS_NO_WUMBO, MAX_FUNDING_SATOSHIS_NO_WUMBO + 1), err);
 +              },
 +              _ => panic!()
 +      }
 +}
 +
  fn do_test_counterparty_no_reserve(send_from_initiator: bool) {
        // A peer providing a channel_reserve_satoshis of 0 (or less than our dust limit) is insecure,
        // but only for them. Because some LSPs do it with some level of trust of the clients (for a
@@@ -2178,9 -2156,9 +2178,9 @@@ fn channel_monitor_network_test() 
        send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2], &nodes[3], &nodes[4])[..], 8000000);
  
        // Simple case with no pending HTLCs:
 -      nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), true);
 +      nodes[1].node.force_close_channel(&chan_1.2).unwrap();
        check_added_monitors!(nodes[1], 1);
 -      check_closed_broadcast!(nodes[1], false);
 +      check_closed_broadcast!(nodes[1], true);
        {
                let mut node_txn = test_txn_broadcast(&nodes[1], &chan_1, None, HTLCType::NONE);
                assert_eq!(node_txn.len(), 1);
        assert_eq!(nodes[0].node.list_channels().len(), 0);
        assert_eq!(nodes[1].node.list_channels().len(), 1);
        check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
 -      check_closed_event!(nodes[1], 1, ClosureReason::DisconnectedPeer);
 +      check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed);
  
        // One pending HTLC is discarded by the force-close:
        let payment_preimage_1 = route_payment(&nodes[1], &vec!(&nodes[2], &nodes[3])[..], 3000000).0;
  
        // Simple case of one pending HTLC to HTLC-Timeout (note that the HTLC-Timeout is not
        // broadcasted until we reach the timelock time).
 -      nodes[1].node.peer_disconnected(&nodes[2].node.get_our_node_id(), true);
 -      check_closed_broadcast!(nodes[1], false);
 +      nodes[1].node.force_close_channel(&chan_2.2).unwrap();
 +      check_closed_broadcast!(nodes[1], true);
        check_added_monitors!(nodes[1], 1);
        {
                let mut node_txn = test_txn_broadcast(&nodes[1], &chan_2, None, HTLCType::NONE);
        check_closed_broadcast!(nodes[2], true);
        assert_eq!(nodes[1].node.list_channels().len(), 0);
        assert_eq!(nodes[2].node.list_channels().len(), 1);
 -      check_closed_event!(nodes[1], 1, ClosureReason::DisconnectedPeer);
 +      check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed);
        check_closed_event!(nodes[2], 1, ClosureReason::CommitmentTxConfirmed);
  
        macro_rules! claim_funds {
  
        // nodes[3] gets the preimage, but nodes[2] already disconnected, resulting in a nodes[2]
        // HTLC-Timeout and a nodes[3] claim against it (+ its own announces)
 -      nodes[2].node.peer_disconnected(&nodes[3].node.get_our_node_id(), true);
 +      nodes[2].node.force_close_channel(&chan_3.2).unwrap();
        check_added_monitors!(nodes[2], 1);
 -      check_closed_broadcast!(nodes[2], false);
 +      check_closed_broadcast!(nodes[2], true);
        let node2_commitment_txid;
        {
                let node_txn = test_txn_broadcast(&nodes[2], &chan_3, None, HTLCType::NONE);
        check_closed_broadcast!(nodes[3], true);
        assert_eq!(nodes[2].node.list_channels().len(), 0);
        assert_eq!(nodes[3].node.list_channels().len(), 1);
 -      check_closed_event!(nodes[2], 1, ClosureReason::DisconnectedPeer);
 +      check_closed_event!(nodes[2], 1, ClosureReason::HolderForceClosed);
        check_closed_event!(nodes[3], 1, ClosureReason::CommitmentTxConfirmed);
  
        // Drop the ChannelMonitor for the previous channel to avoid it broadcasting transactions and
@@@ -2708,18 -2686,20 +2708,20 @@@ fn test_htlc_on_chain_success() 
        }
        let chan_id = Some(chan_1.2);
        match forwarded_events[1] {
-               Event::PaymentForwarded { fee_earned_msat, source_channel_id, claim_from_onchain_tx } => {
+               Event::PaymentForwarded { fee_earned_msat, prev_channel_id, claim_from_onchain_tx, next_channel_id } => {
                        assert_eq!(fee_earned_msat, Some(1000));
-                       assert_eq!(source_channel_id, chan_id);
+                       assert_eq!(prev_channel_id, chan_id);
                        assert_eq!(claim_from_onchain_tx, true);
+                       assert_eq!(next_channel_id, Some(chan_2.2));
                },
                _ => panic!()
        }
        match forwarded_events[2] {
-               Event::PaymentForwarded { fee_earned_msat, source_channel_id, claim_from_onchain_tx } => {
+               Event::PaymentForwarded { fee_earned_msat, prev_channel_id, claim_from_onchain_tx, next_channel_id } => {
                        assert_eq!(fee_earned_msat, Some(1000));
-                       assert_eq!(source_channel_id, chan_id);
+                       assert_eq!(prev_channel_id, chan_id);
                        assert_eq!(claim_from_onchain_tx, true);
+                       assert_eq!(next_channel_id, Some(chan_2.2));
                },
                _ => panic!()
        }
@@@ -3489,47 -3469,6 +3491,47 @@@ fn test_dup_events_on_peer_disconnect(
        expect_payment_path_successful!(nodes[0]);
  }
  
 +#[test]
 +fn test_peer_disconnected_before_funding_broadcasted() {
 +      // Test that channels are closed with `ClosureReason::DisconnectedPeer` if the peer disconnects
 +      // before the funding transaction has been broadcasted.
 +      let chanmon_cfgs = create_chanmon_cfgs(2);
 +      let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
 +      let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
 +      let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
 +
 +      // Open a channel between `nodes[0]` and `nodes[1]`, for which the funding transaction is never
 +      // broadcasted, even though it's created by `nodes[0]`.
 +      let expected_temporary_channel_id = nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 1_000_000, 500_000_000, 42, None).unwrap();
 +      let open_channel = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
 +      nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), InitFeatures::known(), &open_channel);
 +      let accept_channel = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
 +      nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), InitFeatures::known(), &accept_channel);
 +
 +      let (temporary_channel_id, tx, _funding_output) = create_funding_transaction(&nodes[0], 1_000_000, 42);
 +      assert_eq!(temporary_channel_id, expected_temporary_channel_id);
 +
 +      assert!(nodes[0].node.funding_transaction_generated(&temporary_channel_id, tx.clone()).is_ok());
 +
 +      let funding_created_msg = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id());
 +      assert_eq!(funding_created_msg.temporary_channel_id, expected_temporary_channel_id);
 +
 +      // Even though the funding transaction is created by `nodes[0]`, the `FundingCreated` msg is
 +      // never sent to `nodes[1]`, and therefore the tx is never signed by either party nor
 +      // broadcasted.
 +      {
 +              assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().len(), 0);
 +      }
 +
 +      // Ensure that the channel is closed with `ClosureReason::DisconnectedPeer` when the peers are
 +      // disconnected before the funding transaction was broadcasted.
 +      nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
 +      nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
 +
 +      check_closed_event!(nodes[0], 1, ClosureReason::DisconnectedPeer);
 +      check_closed_event!(nodes[1], 1, ClosureReason::DisconnectedPeer);
 +}
 +
  #[test]
  fn test_simple_peer_disconnect() {
        // Test that we can reconnect when there are no lost messages
@@@ -5180,10 -5119,11 +5182,11 @@@ fn test_onchain_to_onchain_claim() 
                _ => panic!("Unexpected event"),
        }
        match events[1] {
-               Event::PaymentForwarded { fee_earned_msat, source_channel_id, claim_from_onchain_tx } => {
+               Event::PaymentForwarded { fee_earned_msat, prev_channel_id, claim_from_onchain_tx, next_channel_id } => {
                        assert_eq!(fee_earned_msat, Some(1000));
-                       assert_eq!(source_channel_id, Some(chan_1.2));
+                       assert_eq!(prev_channel_id, Some(chan_1.2));
                        assert_eq!(claim_from_onchain_tx, true);
+                       assert_eq!(next_channel_id, Some(chan_2.2));
                },
                _ => panic!("Unexpected event"),
        }
@@@ -5350,7 -5290,7 +5353,7 @@@ fn test_duplicate_payment_hash_one_fail
        // Note that the fee paid is effectively double, as the HTLC value (including the nodes[1]
        // and nodes[2] fees) is rounded down and then claimed in full.
        mine_transaction(&nodes[1], &htlc_success_txn[0]);
-       expect_payment_forwarded!(nodes[1], nodes[0], Some(196*2), true);
+       expect_payment_forwarded!(nodes[1], nodes[0], nodes[2], Some(196*2), true, true);
        let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
        assert!(updates.update_add_htlcs.is_empty());
        assert!(updates.update_fail_htlcs.is_empty());
@@@ -5787,8 -5727,8 +5790,8 @@@ fn test_key_derivation_params() 
        check_spends!(local_txn_1[0], chan_1.3);
  
        // We check funding pubkey are unique
 -      let (from_0_funding_key_0, from_0_funding_key_1) = (PublicKey::from_slice(&local_txn_0[0].input[0].witness[3][2..35]), PublicKey::from_slice(&local_txn_0[0].input[0].witness[3][36..69]));
 -      let (from_1_funding_key_0, from_1_funding_key_1) = (PublicKey::from_slice(&local_txn_1[0].input[0].witness[3][2..35]), PublicKey::from_slice(&local_txn_1[0].input[0].witness[3][36..69]));
 +      let (from_0_funding_key_0, from_0_funding_key_1) = (PublicKey::from_slice(&local_txn_0[0].input[0].witness.to_vec()[3][2..35]), PublicKey::from_slice(&local_txn_0[0].input[0].witness.to_vec()[3][36..69]));
 +      let (from_1_funding_key_0, from_1_funding_key_1) = (PublicKey::from_slice(&local_txn_1[0].input[0].witness.to_vec()[3][2..35]), PublicKey::from_slice(&local_txn_1[0].input[0].witness.to_vec()[3][36..69]));
        if from_0_funding_key_0 == from_1_funding_key_0
            || from_0_funding_key_0 == from_1_funding_key_1
            || from_0_funding_key_1 == from_1_funding_key_0
@@@ -7386,7 -7326,7 +7389,7 @@@ fn test_data_loss_protect() 
        logger = test_utils::TestLogger::with_id(format!("node {}", 0));
        let mut chain_monitor = <(BlockHash, ChannelMonitor<EnforcingSigner>)>::read(&mut io::Cursor::new(previous_chain_monitor_state.0), keys_manager).unwrap().1;
        chain_source = test_utils::TestChainSource::new(Network::Testnet);
 -      tx_broadcaster = test_utils::TestBroadcaster{txn_broadcasted: Mutex::new(Vec::new()), blocks: Arc::new(Mutex::new(Vec::new()))};
 +      tx_broadcaster = test_utils::TestBroadcaster { txn_broadcasted: Mutex::new(Vec::new()), blocks: Arc::new(Mutex::new(Vec::new())) };
        fee_estimator = test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) };
        persister = test_utils::TestPersister::new();
        monitor = test_utils::TestChainMonitor::new(Some(&chain_source), &tx_broadcaster, &logger, &fee_estimator, &persister, keys_manager);
        }
  
        // Check that we close the channel upon detecting that A has fallen behind
 +      // Check that we send the warning message when we detect that A has fallen behind,
 +      // and give A the possibility to recover from the warning.
        nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &reestablish_1[0]);
 -      check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: "Peer attempted to reestablish channel with a very old local commitment transaction".to_string() });
 -      assert_eq!(check_closed_broadcast!(nodes[1], true).unwrap().data, "Peer attempted to reestablish channel with a very old local commitment transaction");
 -      check_added_monitors!(nodes[1], 1);
 +      let warn_msg = "Peer attempted to reestablish channel with a very old local commitment transaction".to_owned();
 +      assert!(check_warn_msg!(nodes[1], nodes[0].node.get_our_node_id(), chan.2).contains(&warn_msg));
  
        // Check A is able to claim to_remote output
 -      let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
 -      assert_eq!(node_txn.len(), 1);
 -      check_spends!(node_txn[0], chan.3);
 -      assert_eq!(node_txn[0].output.len(), 2);
 -      mine_transaction(&nodes[0], &node_txn[0]);
 -      connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1);
 -      check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: "We have fallen behind - we have received proof that if we broadcast remote is going to claim our funds - we can\'t do any automated broadcasting".to_string() });
 -      let spend_txn = check_spendable_outputs!(nodes[0], node_cfgs[0].keys_manager);
 -      assert_eq!(spend_txn.len(), 1);
 -      check_spends!(spend_txn[0], node_txn[0]);
 +      let mut node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
 +      // Node B should not broadcast a transaction to force-close the channel!
 +      assert!(node_txn.is_empty());
 +      // A should now detect that it has fallen behind and close the channel, without any automated broadcasting.
 +      let exp_err = "We have fallen behind - we have received proof that if we broadcast remote is going to claim our funds - we can\'t do any automated broadcasting";
 +      check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: exp_err.to_string() });
 +
 +      // After the warning message sent by B, we should not be able to
 +      // use the channel, nor successfully reconnect to it.
 +      assert!(nodes[0].node.list_usable_channels().is_empty());
 +      nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty(), remote_network_address: None });
 +      nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty(), remote_network_address: None });
 +      let retry_reestablish = get_chan_reestablish_msgs!(nodes[1], nodes[0]);
 +
 +      nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &retry_reestablish[0]);
 +      let mut err_msgs_0 = Vec::with_capacity(1);
 +      for msg in nodes[0].node.get_and_clear_pending_msg_events() {
 +              if let MessageSendEvent::HandleError { ref action, .. } = msg {
 +                      match action {
 +                              &ErrorAction::SendErrorMessage { ref msg } => {
 +                                      assert_eq!(msg.data, "Failed to find corresponding channel");
 +                                      err_msgs_0.push(msg.clone());
 +                              },
 +                              _ => panic!("Unexpected event!"),
 +                      }
 +              } else {
 +                      panic!("Unexpected event!");
 +              }
 +      }
 +      assert_eq!(err_msgs_0.len(), 1);
 +      nodes[1].node.handle_error(&nodes[0].node.get_our_node_id(), &err_msgs_0[0]);
 +      assert!(nodes[1].node.list_usable_channels().is_empty());
 +      check_added_monitors!(nodes[1], 1);
 +      check_closed_event!(nodes[1], 1, ClosureReason::CounterpartyForceClosed { peer_msg: "Failed to find corresponding channel".to_owned() });
 +      check_closed_broadcast!(nodes[1], false);
  }
  
  #[test]
@@@ -7682,7 -7596,7 +7685,7 @@@ fn test_bump_penalty_txn_on_revoked_com
                assert_eq!(node_txn[0].output.len(), 1);
                check_spends!(node_txn[0], revoked_txn[0]);
                let fee_1 = penalty_sum - node_txn[0].output[0].value;
 -              feerate_1 = fee_1 * 1000 / node_txn[0].get_weight() as u64;
 +              feerate_1 = fee_1 * 1000 / node_txn[0].weight() as u64;
                penalty_1 = node_txn[0].txid();
                node_txn.clear();
        };
                        // Verify the new bumped tx is different from the last claiming transaction; we don't want a spurious rebroadcast
                        assert_ne!(penalty_2, penalty_1);
                        let fee_2 = penalty_sum - node_txn[0].output[0].value;
 -                      feerate_2 = fee_2 * 1000 / node_txn[0].get_weight() as u64;
 +                      feerate_2 = fee_2 * 1000 / node_txn[0].weight() as u64;
                        // Verify 25% bump heuristic
                        assert!(feerate_2 * 100 >= feerate_1 * 125);
                        node_txn.clear();
                        // Verify the new bumped tx is different from the last claiming transaction; we don't want a spurious rebroadcast
                        assert_ne!(penalty_3, penalty_2);
                        let fee_3 = penalty_sum - node_txn[0].output[0].value;
 -                      feerate_3 = fee_3 * 1000 / node_txn[0].get_weight() as u64;
 +                      feerate_3 = fee_3 * 1000 / node_txn[0].weight() as u64;
                        // Verify 25% bump heuristic
                        assert!(feerate_3 * 100 >= feerate_2 * 125);
                        node_txn.clear();
@@@ -7844,7 -7758,7 +7847,7 @@@ fn test_bump_penalty_txn_on_revoked_htl
                first = node_txn[4].txid();
                // Store both feerates for later comparison
                let fee_1 = revoked_htlc_txn[0].output[0].value + revoked_htlc_txn[2].output[0].value - node_txn[4].output[0].value;
 -              feerate_1 = fee_1 * 1000 / node_txn[4].get_weight() as u64;
 +              feerate_1 = fee_1 * 1000 / node_txn[4].weight() as u64;
                penalty_txn = vec![node_txn[2].clone()];
                node_txn.clear();
        }
                // Verify bumped tx is different and 25% bump heuristic
                assert_ne!(first, node_txn[0].txid());
                let fee_2 = revoked_htlc_txn[0].output[0].value + revoked_htlc_txn[2].output[0].value - node_txn[0].output[0].value;
 -              let feerate_2 = fee_2 * 1000 / node_txn[0].get_weight() as u64;
 +              let feerate_2 = fee_2 * 1000 / node_txn[0].weight() as u64;
                assert!(feerate_2 * 100 > feerate_1 * 125);
                let txn = vec![node_txn[0].clone()];
                node_txn.clear();
@@@ -7968,12 -7882,12 +7971,12 @@@ fn test_bump_penalty_txn_on_remote_comm
                timeout = node_txn[6].txid();
                let index = node_txn[6].input[0].previous_output.vout;
                let fee = remote_txn[0].output[index as usize].value - node_txn[6].output[0].value;
 -              feerate_timeout = fee * 1000 / node_txn[6].get_weight() as u64;
 +              feerate_timeout = fee * 1000 / node_txn[6].weight() as u64;
  
                preimage = node_txn[0].txid();
                let index = node_txn[0].input[0].previous_output.vout;
                let fee = remote_txn[0].output[index as usize].value - node_txn[0].output[0].value;
 -              feerate_preimage = fee * 1000 / node_txn[0].get_weight() as u64;
 +              feerate_preimage = fee * 1000 / node_txn[0].weight() as u64;
  
                node_txn.clear();
        };
  
                let index = preimage_bump.input[0].previous_output.vout;
                let fee = remote_txn[0].output[index as usize].value - preimage_bump.output[0].value;
 -              let new_feerate = fee * 1000 / preimage_bump.get_weight() as u64;
 +              let new_feerate = fee * 1000 / preimage_bump.weight() as u64;
                assert!(new_feerate * 100 > feerate_timeout * 125);
                assert_ne!(timeout, preimage_bump.txid());
  
                let index = node_txn[0].input[0].previous_output.vout;
                let fee = remote_txn[0].output[index as usize].value - node_txn[0].output[0].value;
 -              let new_feerate = fee * 1000 / node_txn[0].get_weight() as u64;
 +              let new_feerate = fee * 1000 / node_txn[0].weight() as u64;
                assert!(new_feerate * 100 > feerate_preimage * 125);
                assert_ne!(preimage, node_txn[0].txid());
  
@@@ -8210,58 -8124,6 +8213,58 @@@ fn test_override_0msat_htlc_minimum() 
        assert_eq!(res.htlc_minimum_msat, 1);
  }
  
 +#[test]
 +fn test_channel_update_has_correct_htlc_maximum_msat() {
 +      // Tests that the `ChannelUpdate` message has the correct values for `htlc_maximum_msat` set.
 +      // Tests that the `ChannelUpdate` message has the correct value set for `htlc_maximum_msat`.
 +      // BOLT 7 specifies that, if present, `htlc_maximum_msat`:
 +      // 90% of the `channel_value`.
 +      // 2. MUST be set to less than or equal to the `max_htlc_value_in_flight_msat` received from the peer.
 +
 +      let mut config_30_percent = UserConfig::default();
 +      config_30_percent.channel_options.announced_channel = true;
 +      config_30_percent.own_channel_config.max_inbound_htlc_value_in_flight_percent_of_channel = 30;
 +      let mut config_50_percent = UserConfig::default();
 +      config_50_percent.channel_options.announced_channel = true;
 +      config_50_percent.own_channel_config.max_inbound_htlc_value_in_flight_percent_of_channel = 50;
 +      let mut config_95_percent = UserConfig::default();
 +      config_95_percent.channel_options.announced_channel = true;
 +      config_95_percent.own_channel_config.max_inbound_htlc_value_in_flight_percent_of_channel = 95;
 +      let mut config_100_percent = UserConfig::default();
 +      config_100_percent.channel_options.announced_channel = true;
 +      config_100_percent.own_channel_config.max_inbound_htlc_value_in_flight_percent_of_channel = 100;
 +
 +      let chanmon_cfgs = create_chanmon_cfgs(4);
 +      let node_cfgs = create_node_cfgs(4, &chanmon_cfgs);
 +      let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[Some(config_30_percent), Some(config_50_percent), Some(config_95_percent), Some(config_100_percent)]);
 +      let nodes = create_network(4, &node_cfgs, &node_chanmgrs);
 +
 +      let channel_value_satoshis = 100000;
 +      let channel_value_msat = channel_value_satoshis * 1000;
 +      let channel_value_30_percent_msat = (channel_value_msat as f64 * 0.3) as u64;
 +      let channel_value_50_percent_msat = (channel_value_msat as f64 * 0.5) as u64;
 +      let channel_value_90_percent_msat = (channel_value_msat as f64 * 0.9) as u64;
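 +      // With a 100_000 sat channel: 100_000_000 msat total, so 30% = 30_000_000 msat,
 +      // 50% = 50_000_000 msat and 90% = 90_000_000 msat.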
 +
 +      let (node_0_chan_update, node_1_chan_update, _, _)  = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, channel_value_satoshis, 10001, InitFeatures::known(), InitFeatures::known());
 +      let (node_2_chan_update, node_3_chan_update, _, _)  = create_announced_chan_between_nodes_with_value(&nodes, 2, 3, channel_value_satoshis, 10001, InitFeatures::known(), InitFeatures::known());
 +
 +      // Assert that `node[0]`'s `ChannelUpdate` is capped at 50 percent of the `channel_value`, as
 +      // that's the value of `node[1]`'s `holder_max_htlc_value_in_flight_msat`.
 +      assert_eq!(node_0_chan_update.contents.htlc_maximum_msat, OptionalField::Present(channel_value_50_percent_msat));
 +      // Assert that `node[1]`'s `ChannelUpdate` is capped at 30 percent of the `channel_value`, as
 +      // that's the value of `node[0]`'s `holder_max_htlc_value_in_flight_msat`.
 +      assert_eq!(node_1_chan_update.contents.htlc_maximum_msat, OptionalField::Present(channel_value_30_percent_msat));
 +
 +      // Assert that `node[2]`'s `ChannelUpdate` is capped at 90 percent of the `channel_value`, as
 +      // the value of `node[3]`'s `holder_max_htlc_value_in_flight_msat` (100%), exceeds 90% of the
 +      // `channel_value`.
 +      assert_eq!(node_2_chan_update.contents.htlc_maximum_msat, OptionalField::Present(channel_value_90_percent_msat));
 +      // Assert that `node[3]`'s `ChannelUpdate` is capped at 90 percent of the `channel_value`, as
 +      // the value of `node[2]`'s `holder_max_htlc_value_in_flight_msat` (95%), exceeds 90% of the
 +      // `channel_value`.
 +      assert_eq!(node_3_chan_update.contents.htlc_maximum_msat, OptionalField::Present(channel_value_90_percent_msat));
 +}
 +
  #[test]
  fn test_manually_accept_inbound_channel_request() {
        let mut manually_accept_conf = UserConfig::default();
@@@ -9010,7 -8872,7 +9013,7 @@@ fn do_test_onchain_htlc_settlement_afte
        assert_eq!(carol_updates.update_fulfill_htlcs.len(), 1);
  
        nodes[1].node.handle_update_fulfill_htlc(&nodes[2].node.get_our_node_id(), &carol_updates.update_fulfill_htlcs[0]);
-       expect_payment_forwarded!(nodes[1], nodes[0], if go_onchain_before_fulfill || force_closing_node == 1 { None } else { Some(1000) }, false);
+       expect_payment_forwarded!(nodes[1], nodes[0], nodes[2], if go_onchain_before_fulfill || force_closing_node == 1 { None } else { Some(1000) }, false, false);
        // If Alice broadcasted but Bob doesn't know yet, here he prepares to tell her about the preimage.
        if !go_onchain_before_fulfill && broadcast_alice {
                let events = nodes[1].node.get_and_clear_pending_msg_events();
@@@ -9576,7 -9438,12 +9579,7 @@@ fn test_forwardable_regen() 
        claim_payment(&nodes[0], &[&nodes[1], &nodes[2]], payment_preimage_2);
  }
  
 -#[test]
 -fn test_dup_htlc_second_fail_panic() {
 -      // Previously, if we received two HTLCs back-to-back, where the second overran the expected
 -      // value for the payment, we'd fail back both HTLCs after generating a `PaymentReceived` event.
 -      // Then, if the user failed the second payment, they'd hit a "tried to fail an already failed
 -      // HTLC" debug panic. This tests for this behavior, checking that only one HTLC is auto-failed.
 +fn do_test_dup_htlc_second_rejected(test_for_second_fail_panic: bool) {
        let chanmon_cfgs = create_chanmon_cfgs(2);
        let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
        let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
  
        let payment_params = PaymentParameters::from_node_id(nodes[1].node.get_our_node_id())
                .with_features(InvoiceFeatures::known());
 -      let scorer = test_utils::TestScorer::with_penalty(0);
 -      let random_seed_bytes = chanmon_cfgs[1].keys_manager.get_secure_random_bytes();
 -      let route = get_route(
 -              &nodes[0].node.get_our_node_id(), &payment_params, &nodes[0].network_graph.read_only(),
 -              Some(&nodes[0].node.list_usable_channels().iter().collect::<Vec<_>>()),
 -              10_000, TEST_FINAL_CLTV, nodes[0].logger, &scorer, &random_seed_bytes).unwrap();
 +      let route = get_route!(nodes[0], payment_params, 10_000, TEST_FINAL_CLTV).unwrap();
  
 -      let (_, our_payment_hash, our_payment_secret) = get_payment_preimage_hash!(&nodes[1]);
 +      let (our_payment_preimage, our_payment_hash, our_payment_secret) = get_payment_preimage_hash!(&nodes[1]);
  
        {
                nodes[0].node.send_payment(&route, our_payment_hash, &Some(our_payment_secret)).unwrap();
                // the first HTLC delivered above.
        }
  
 -      // Now we go fail back the first HTLC from the user end.
        expect_pending_htlcs_forwardable_ignore!(nodes[1]);
        nodes[1].node.process_pending_htlc_forwards();
 -      nodes[1].node.fail_htlc_backwards(&our_payment_hash);
  
 -      expect_pending_htlcs_forwardable_ignore!(nodes[1]);
 -      nodes[1].node.process_pending_htlc_forwards();
 +      if test_for_second_fail_panic {
 +              // Now we go fail back the first HTLC from the user end.
 +              nodes[1].node.fail_htlc_backwards(&our_payment_hash);
  
 -      check_added_monitors!(nodes[1], 1);
 -      let fail_updates_1 = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
 -      assert_eq!(fail_updates_1.update_fail_htlcs.len(), 2);
 +              expect_pending_htlcs_forwardable_ignore!(nodes[1]);
 +              nodes[1].node.process_pending_htlc_forwards();
 +
 +              check_added_monitors!(nodes[1], 1);
 +              let fail_updates_1 = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
 +              assert_eq!(fail_updates_1.update_fail_htlcs.len(), 2);
 +
 +              nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &fail_updates_1.update_fail_htlcs[0]);
 +              nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &fail_updates_1.update_fail_htlcs[1]);
 +              commitment_signed_dance!(nodes[0], nodes[1], fail_updates_1.commitment_signed, false);
 +
 +              let failure_events = nodes[0].node.get_and_clear_pending_events();
 +              assert_eq!(failure_events.len(), 2);
 +              if let Event::PaymentPathFailed { .. } = failure_events[0] {} else { panic!(); }
 +              if let Event::PaymentPathFailed { .. } = failure_events[1] {} else { panic!(); }
 +      } else {
 +              // Let the second HTLC fail and claim the first
 +              expect_pending_htlcs_forwardable_ignore!(nodes[1]);
 +              nodes[1].node.process_pending_htlc_forwards();
 +
 +              check_added_monitors!(nodes[1], 1);
 +              let fail_updates_1 = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
 +              nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &fail_updates_1.update_fail_htlcs[0]);
 +              commitment_signed_dance!(nodes[0], nodes[1], fail_updates_1.commitment_signed, false);
 +
 +              expect_payment_failed_conditions!(nodes[0], our_payment_hash, true, PaymentFailedConditions::new().mpp_parts_remain());
 +
 +              claim_payment(&nodes[0], &[&nodes[1]], our_payment_preimage);
 +      }
 +}
 +
 +#[test]
 +fn test_dup_htlc_second_fail_panic() {
 +      // Previously, if we received two HTLCs back-to-back, where the second overran the expected
 +      // value for the payment, we'd fail back both HTLCs after generating a `PaymentReceived` event.
 +      // Then, if the user failed the second payment, they'd hit a "tried to fail an already failed
 +      // HTLC" debug panic. This tests for this behavior, checking that only one HTLC is auto-failed.
 +      do_test_dup_htlc_second_rejected(true);
 +}
 +
 +#[test]
 +fn test_dup_htlc_second_rejected() {
 +      // Test that if we receive a second HTLC for an MPP payment that overruns the payment amount we
 +      // simply reject the second HTLC but are still able to claim the first HTLC.
 +      do_test_dup_htlc_second_rejected(false);
 +}
 +
 +#[test]
 +fn test_inconsistent_mpp_params() {
 +      // Test that if we receive two HTLCs with different payment parameters we fail back the first
 +      // such HTLC and allow the second to stay.
 +      let chanmon_cfgs = create_chanmon_cfgs(4);
 +      let node_cfgs = create_node_cfgs(4, &chanmon_cfgs);
 +      let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[None, None, None, None]);
 +      let nodes = create_network(4, &node_cfgs, &node_chanmgrs);
 +
 +      create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100_000, 0, InitFeatures::known(), InitFeatures::known());
 +      create_announced_chan_between_nodes_with_value(&nodes, 0, 2, 100_000, 0, InitFeatures::known(), InitFeatures::known());
 +      create_announced_chan_between_nodes_with_value(&nodes, 1, 3, 100_000, 0, InitFeatures::known(), InitFeatures::known());
 +      create_announced_chan_between_nodes_with_value(&nodes, 2, 3, 100_000, 0, InitFeatures::known(), InitFeatures::known());
  
 -      nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &fail_updates_1.update_fail_htlcs[0]);
 -      nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &fail_updates_1.update_fail_htlcs[1]);
 -      commitment_signed_dance!(nodes[0], nodes[1], fail_updates_1.commitment_signed, false);
 +      let payment_params = PaymentParameters::from_node_id(nodes[3].node.get_our_node_id())
 +              .with_features(InvoiceFeatures::known());
 +      let mut route = get_route!(nodes[0], payment_params, 15_000_000, TEST_FINAL_CLTV).unwrap();
 +      assert_eq!(route.paths.len(), 2);
 +      route.paths.sort_by(|path_a, _| {
 +              // Sort the paths so that the path through nodes[1] comes first
 +              if path_a[0].pubkey == nodes[1].node.get_our_node_id() {
 +                      core::cmp::Ordering::Less } else { core::cmp::Ordering::Greater }
 +      });
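 +      // Note the comparator ignores its second argument: any path not through nodes[1]
 +      // simply sorts after the one that is.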
 +      let payment_params_opt = Some(payment_params);
 +
 +      let (our_payment_preimage, our_payment_hash, our_payment_secret) = get_payment_preimage_hash!(&nodes[3]);
 +
 +      let cur_height = nodes[0].best_block_info().1;
 +      let payment_id = PaymentId([42; 32]);
 +      {
 +              nodes[0].node.send_payment_along_path(&route.paths[0], &payment_params_opt, &our_payment_hash, &Some(our_payment_secret), 15_000_000, cur_height, payment_id, &None).unwrap();
 +              check_added_monitors!(nodes[0], 1);
 +
 +              let mut events = nodes[0].node.get_and_clear_pending_msg_events();
 +              assert_eq!(events.len(), 1);
 +              pass_along_path(&nodes[0], &[&nodes[1], &nodes[3]], 15_000_000, our_payment_hash, Some(our_payment_secret), events.pop().unwrap(), false, None);
 +      }
 +      assert!(nodes[3].node.get_and_clear_pending_events().is_empty());
 +
 +      {
 +              nodes[0].node.send_payment_along_path(&route.paths[1], &payment_params_opt, &our_payment_hash, &Some(our_payment_secret), 14_000_000, cur_height, payment_id, &None).unwrap();
 +              check_added_monitors!(nodes[0], 1);
 +
 +              let mut events = nodes[0].node.get_and_clear_pending_msg_events();
 +              assert_eq!(events.len(), 1);
 +              let payment_event = SendEvent::from_event(events.pop().unwrap());
 +
 +              nodes[2].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
 +              commitment_signed_dance!(nodes[2], nodes[0], payment_event.commitment_msg, false);
 +
 +              expect_pending_htlcs_forwardable!(nodes[2]);
 +              check_added_monitors!(nodes[2], 1);
 +
 +              let mut events = nodes[2].node.get_and_clear_pending_msg_events();
 +              assert_eq!(events.len(), 1);
 +              let payment_event = SendEvent::from_event(events.pop().unwrap());
 +
 +              nodes[3].node.handle_update_add_htlc(&nodes[2].node.get_our_node_id(), &payment_event.msgs[0]);
 +              check_added_monitors!(nodes[3], 0);
 +              commitment_signed_dance!(nodes[3], nodes[2], payment_event.commitment_msg, true, true);
 +
 +              // At this point, nodes[3] should notice the two HTLCs don't contain the same total payment
 +              // amount. It will assume the second is a privacy attack (no longer particularly relevant
 +              // post-payment_secrets) and fail back the new HTLC.
 +      }
 +      expect_pending_htlcs_forwardable_ignore!(nodes[3]);
 +      nodes[3].node.process_pending_htlc_forwards();
 +      expect_pending_htlcs_forwardable_ignore!(nodes[3]);
 +      nodes[3].node.process_pending_htlc_forwards();
 +
 +      check_added_monitors!(nodes[3], 1);
 +
 +      let fail_updates_1 = get_htlc_update_msgs!(nodes[3], nodes[2].node.get_our_node_id());
 +      nodes[2].node.handle_update_fail_htlc(&nodes[3].node.get_our_node_id(), &fail_updates_1.update_fail_htlcs[0]);
 +      commitment_signed_dance!(nodes[2], nodes[3], fail_updates_1.commitment_signed, false);
 +
 +      expect_pending_htlcs_forwardable!(nodes[2]);
 +      check_added_monitors!(nodes[2], 1);
 +
 +      let fail_updates_2 = get_htlc_update_msgs!(nodes[2], nodes[0].node.get_our_node_id());
 +      nodes[0].node.handle_update_fail_htlc(&nodes[2].node.get_our_node_id(), &fail_updates_2.update_fail_htlcs[0]);
 +      commitment_signed_dance!(nodes[0], nodes[2], fail_updates_2.commitment_signed, false);
 +
 +      expect_payment_failed_conditions!(nodes[0], our_payment_hash, true, PaymentFailedConditions::new().mpp_parts_remain());
 +
 +      nodes[0].node.send_payment_along_path(&route.paths[1], &payment_params_opt, &our_payment_hash, &Some(our_payment_secret), 15_000_000, cur_height, payment_id, &None).unwrap();
 +      check_added_monitors!(nodes[0], 1);
 +
 +      let mut events = nodes[0].node.get_and_clear_pending_msg_events();
 +      assert_eq!(events.len(), 1);
 +      pass_along_path(&nodes[0], &[&nodes[2], &nodes[3]], 15_000_000, our_payment_hash, Some(our_payment_secret), events.pop().unwrap(), true, None);
  
 -      let failure_events = nodes[0].node.get_and_clear_pending_events();
 -      assert_eq!(failure_events.len(), 2);
 -      if let Event::PaymentPathFailed { .. } = failure_events[0] {} else { panic!(); }
 -      if let Event::PaymentPathFailed { .. } = failure_events[1] {} else { panic!(); }
 +      claim_payment_along_route(&nodes[0], &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]], false, our_payment_preimage);
  }
  
  #[test]
index 96fda526d68d2d46369b9c9ec831d96551e86112,de4026c1771c9dca855e739376fadcb7e57b60ac..8a4ec2dc3518a4d9ca67ae7040c500563f7d9e3c
@@@ -138,7 -138,7 +138,7 @@@ fn do_test_onchain_htlc_reorg(local_com
                // ChannelManager only polls chain::Watch::release_pending_monitor_events when we
                // probe it for events, so we probe non-message events here (which should just be the
                // PaymentForwarded event).
-               expect_payment_forwarded!(nodes[1], nodes[0], Some(1000), true);
+               expect_payment_forwarded!(nodes[1], nodes[0], nodes[2], Some(1000), true, true);
        } else {
                // Confirm the timeout tx and check that we fail the HTLC backwards
                let block = Block {
@@@ -209,14 -209,14 +209,14 @@@ fn do_test_unconf_chan(reload_node: boo
                        let relevant_txids = nodes[0].node.get_relevant_txids();
                        assert_eq!(&relevant_txids[..], &[chan.3.txid()]);
                        nodes[0].node.transaction_unconfirmed(&relevant_txids[0]);
 +              } else if connect_style == ConnectStyle::FullBlockViaListen {
 +                      disconnect_blocks(&nodes[0], CHAN_CONFIRM_DEPTH - 1);
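 +                      // Disconnect in two steps: the channel should remain usable down to a
 +                      // single confirmation; only the final disconnection fully un-confirms it.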
 +                      assert_eq!(nodes[0].node.list_usable_channels().len(), 1);
 +                      disconnect_blocks(&nodes[0], 1);
                } else {
                        disconnect_all_blocks(&nodes[0]);
                }
 -              if connect_style == ConnectStyle::FullBlockViaListen && !use_funding_unconfirmed {
 -                      handle_announce_close_broadcast_events(&nodes, 0, 1, true, "Channel closed because of an exception: Funding transaction was un-confirmed. Locked at 6 confs, now have 2 confs.");
 -              } else {
 -                      handle_announce_close_broadcast_events(&nodes, 0, 1, true, "Channel closed because of an exception: Funding transaction was un-confirmed. Locked at 6 confs, now have 0 confs.");
 -              }
 +              handle_announce_close_broadcast_events(&nodes, 0, 1, true, "Channel closed because of an exception: Funding transaction was un-confirmed. Locked at 6 confs, now have 0 confs.");
                check_added_monitors!(nodes[1], 1);
                {
                        let channel_state = nodes[0].node.channel_state.lock().unwrap();
                        let relevant_txids = nodes[0].node.get_relevant_txids();
                        assert_eq!(&relevant_txids[..], &[chan.3.txid()]);
                        nodes[0].node.transaction_unconfirmed(&relevant_txids[0]);
 +              } else if connect_style == ConnectStyle::FullBlockViaListen {
 +                      disconnect_blocks(&nodes[0], CHAN_CONFIRM_DEPTH - 1);
 +                      assert_eq!(nodes[0].node.list_channels().len(), 1);
 +                      disconnect_blocks(&nodes[0], 1);
                } else {
                        disconnect_all_blocks(&nodes[0]);
                }
 -              if connect_style == ConnectStyle::FullBlockViaListen && !use_funding_unconfirmed {
 -                      handle_announce_close_broadcast_events(&nodes, 0, 1, true, "Channel closed because of an exception: Funding transaction was un-confirmed. Locked at 6 confs, now have 2 confs.");
 -              } else {
 -                      handle_announce_close_broadcast_events(&nodes, 0, 1, true, "Channel closed because of an exception: Funding transaction was un-confirmed. Locked at 6 confs, now have 0 confs.");
 -              }
 +              handle_announce_close_broadcast_events(&nodes, 0, 1, true, "Channel closed because of an exception: Funding transaction was un-confirmed. Locked at 6 confs, now have 0 confs.");
                check_added_monitors!(nodes[1], 1);
                {
                        let channel_state = nodes[0].node.channel_state.lock().unwrap();
        *nodes[0].chain_monitor.expect_channel_force_closed.lock().unwrap() = Some((chan.2, true));
        nodes[0].node.test_process_background_events(); // Required to free the pending background monitor update
        check_added_monitors!(nodes[0], 1);
 -      let expected_err = if connect_style == ConnectStyle::FullBlockViaListen && !use_funding_unconfirmed {
 -              "Funding transaction was un-confirmed. Locked at 6 confs, now have 2 confs."
 -      } else {
 -              "Funding transaction was un-confirmed. Locked at 6 confs, now have 0 confs."
 -      };
 +      let expected_err = "Funding transaction was un-confirmed. Locked at 6 confs, now have 0 confs.";
        check_closed_event!(nodes[1], 1, ClosureReason::CounterpartyForceClosed { peer_msg: "Channel closed because of an exception: ".to_owned() + expected_err });
        check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: expected_err.to_owned() });
        assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().len(), 1);
index 59004547b1489afe21f0a88ae5b71788b1f90fe1,5e50ea77cc2cfeb87abc286b46c1f9158cac1026..6543f6b6e90e32fd5b92d418853b2576ca241f28
@@@ -26,11 -26,11 +26,11 @@@ use util::config::UserConfig
  use bitcoin::blockdata::script::Builder;
  use bitcoin::blockdata::opcodes;
  use bitcoin::network::constants::Network;
 +use bitcoin::util::address::WitnessVersion;
  
  use regex;
  
  use core::default::Default;
 -use core::num::NonZeroU8;
  
  use ln::functional_test_utils::*;
  use ln::msgs::OptionalField::Present;
@@@ -110,7 -110,7 +110,7 @@@ fn updates_shutdown_wait() 
        assert!(updates.update_fee.is_none());
        assert_eq!(updates.update_fulfill_htlcs.len(), 1);
        nodes[1].node.handle_update_fulfill_htlc(&nodes[2].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]);
-       expect_payment_forwarded!(nodes[1], nodes[0], Some(1000), false);
+       expect_payment_forwarded!(nodes[1], nodes[0], nodes[2], Some(1000), false, false);
        check_added_monitors!(nodes[1], 1);
        let updates_2 = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
        commitment_signed_dance!(nodes[1], nodes[2], updates.commitment_signed, false);
@@@ -279,7 -279,7 +279,7 @@@ fn do_test_shutdown_rebroadcast(recv_co
        assert!(updates.update_fee.is_none());
        assert_eq!(updates.update_fulfill_htlcs.len(), 1);
        nodes[1].node.handle_update_fulfill_htlc(&nodes[2].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]);
-       expect_payment_forwarded!(nodes[1], nodes[0], Some(1000), false);
+       expect_payment_forwarded!(nodes[1], nodes[0], nodes[2], Some(1000), false, false);
        check_added_monitors!(nodes[1], 1);
        let updates_2 = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
        commitment_signed_dance!(nodes[1], nodes[2], updates.commitment_signed, false);
@@@ -654,7 -654,7 +654,7 @@@ fn test_unsupported_anysegwit_shutdown_
        // Check that using an unsupported shutdown script fails and a supported one succeeds.
        let supported_shutdown_script = chanmon_cfgs[1].keys_manager.get_shutdown_scriptpubkey();
        let unsupported_shutdown_script =
 -              ShutdownScript::new_witness_program(NonZeroU8::new(16).unwrap(), &[0, 40]).unwrap();
 +              ShutdownScript::new_witness_program(WitnessVersion::V16, &[0, 40]).unwrap();
        chanmon_cfgs[1].keys_manager
                .expect(OnGetShutdownScriptpubkey { returns: unsupported_shutdown_script.clone() })
                .expect(OnGetShutdownScriptpubkey { returns: supported_shutdown_script });
index 6fff0cf055c165f05d5df2b2cfeef814ac5ef1a2,b97afd4ccd3e3ccb4150ec20425a24dde88dcf81..5953ee68940e61472f4e2acc2665dd89a0f71c98
@@@ -29,7 -29,7 +29,7 @@@ use bitcoin::Transaction
  use bitcoin::blockdata::script::Script;
  use bitcoin::hashes::Hash;
  use bitcoin::hashes::sha256::Hash as Sha256;
 -use bitcoin::secp256k1::key::PublicKey;
 +use bitcoin::secp256k1::PublicKey;
  use io;
  use prelude::*;
  use core::time::Duration;
@@@ -100,11 -100,12 +100,11 @@@ pub enum ClosureReason 
                /// A developer-readable error message which we generated.
                err: String,
        },
 -      /// The `PeerManager` informed us that we've disconnected from the peer. We close channels
 -      /// if the `PeerManager` informed us that it is unlikely we'll be able to connect to the
 -      /// peer again in the future or if the peer disconnected before we finished negotiating
 -      /// the channel open. The first case may be caused by incompatible features which our
 -      /// counterparty, or we, require.
 -      //TODO: split between PeerUnconnectable/PeerDisconnected ?
 +      /// The peer disconnected prior to funding completing. In this case the spec mandates that we
 +      /// forget the channel entirely - we can attempt again if the peer reconnects.
 +      ///
 +      /// In LDK versions prior to 0.0.107 this could also occur if we were unable to connect to the
 +      /// peer because of mutual incompatibility between us and our channel counterparty.
        DisconnectedPeer,
        /// Closure generated from `ChannelManager::read` if the ChannelMonitor is newer than
        /// the ChannelManager deserialized.
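
Given the narrowed meaning documented above, `DisconnectedPeer` can be treated as retryable by consumers. A minimal hedged sketch (the `reason` binding and the `schedule_reopen_on_reconnect` helper are illustrative, not part of this diff):

    match reason {
        // Post-0.0.107: only emitted when the peer disconnected before funding completed,
        // so the channel was forgotten and can simply be re-opened on reconnect.
        ClosureReason::DisconnectedPeer => schedule_reopen_on_reconnect(counterparty_node_id),
        ClosureReason::ProcessingError { err } => eprintln!("channel closed with error: {}", err),
        _ => {},
    }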
@@@ -229,47 -230,6 +229,47 @@@ pub enum Event 
                /// [`Route::get_total_fees`]: crate::routing::router::Route::get_total_fees
                fee_paid_msat: Option<u64>,
        },
 +      /// Indicates an outbound payment failed. Individual [`Event::PaymentPathFailed`] events
 +      /// provide failure information for each MPP part in the payment.
 +      ///
 +      /// This event is provided once there are no further pending HTLCs for the payment and the
 +      /// payment is no longer retryable, either due to a several-block timeout or because
 +      /// [`ChannelManager::abandon_payment`] was previously called for the corresponding payment.
 +      ///
 +      /// [`ChannelManager::abandon_payment`]: crate::ln::channelmanager::ChannelManager::abandon_payment
 +      PaymentFailed {
 +              /// The id returned by [`ChannelManager::send_payment`] and used with
 +              /// [`ChannelManager::retry_payment`] and [`ChannelManager::abandon_payment`].
 +              ///
 +              /// [`ChannelManager::send_payment`]: crate::ln::channelmanager::ChannelManager::send_payment
 +              /// [`ChannelManager::retry_payment`]: crate::ln::channelmanager::ChannelManager::retry_payment
 +              /// [`ChannelManager::abandon_payment`]: crate::ln::channelmanager::ChannelManager::abandon_payment
 +              payment_id: PaymentId,
 +              /// The hash that was given to [`ChannelManager::send_payment`].
 +              ///
 +              /// [`ChannelManager::send_payment`]: crate::ln::channelmanager::ChannelManager::send_payment
 +              payment_hash: PaymentHash,
 +      },
 +      /// Indicates that a path for an outbound payment was successful.
 +      ///
 +      /// Always generated after [`Event::PaymentSent`] and thus useful for scoring channels. See
 +      /// [`Event::PaymentSent`] for obtaining the payment preimage.
 +      PaymentPathSuccessful {
 +              /// The id returned by [`ChannelManager::send_payment`] and used with
 +              /// [`ChannelManager::retry_payment`].
 +              ///
 +              /// [`ChannelManager::send_payment`]: crate::ln::channelmanager::ChannelManager::send_payment
 +              /// [`ChannelManager::retry_payment`]: crate::ln::channelmanager::ChannelManager::retry_payment
 +              payment_id: PaymentId,
 +              /// The hash that was given to [`ChannelManager::send_payment`].
 +              ///
 +              /// [`ChannelManager::send_payment`]: crate::ln::channelmanager::ChannelManager::send_payment
 +              payment_hash: Option<PaymentHash>,
 +              /// The payment path that was successful.
 +              ///
 +              /// May contain a closed channel if the HTLC sent along the path was fulfilled on chain.
 +              path: Vec<RouteHop>,
 +      },
        /// Indicates an outbound HTLC we sent failed. Probably some intermediary node dropped
        /// something. You may wish to retry with a different route.
        ///
  #[cfg(test)]
                error_data: Option<Vec<u8>>,
        },
 -      /// Indicates an outbound payment failed. Individual [`Event::PaymentPathFailed`] events
 -      /// provide failure information for each MPP part in the payment.
 -      ///
 -      /// This event is provided once there are no further pending HTLCs for the payment and the
 -      /// payment is no longer retryable, either due to a several-block timeout or because
 -      /// [`ChannelManager::abandon_payment`] was previously called for the corresponding payment.
 -      ///
 -      /// [`ChannelManager::abandon_payment`]: crate::ln::channelmanager::ChannelManager::abandon_payment
 -      PaymentFailed {
 -              /// The id returned by [`ChannelManager::send_payment`] and used with
 -              /// [`ChannelManager::retry_payment`] and [`ChannelManager::abandon_payment`].
 -              ///
 -              /// [`ChannelManager::send_payment`]: crate::ln::channelmanager::ChannelManager::send_payment
 -              /// [`ChannelManager::retry_payment`]: crate::ln::channelmanager::ChannelManager::retry_payment
 -              /// [`ChannelManager::abandon_payment`]: crate::ln::channelmanager::ChannelManager::abandon_payment
 -              payment_id: PaymentId,
 -              /// The hash that was given to [`ChannelManager::send_payment`].
 -              ///
 -              /// [`ChannelManager::send_payment`]: crate::ln::channelmanager::ChannelManager::send_payment
 -              payment_hash: PaymentHash,
 -      },
        /// Used to indicate that [`ChannelManager::process_pending_htlc_forwards`] should be called at
        /// a time in the future.
        ///
        /// This event is generated when a payment has been successfully forwarded through us and a
        /// forwarding fee earned.
        PaymentForwarded {
-               /// The channel between the source node and us. Optional because versions prior to 0.0.107
-               /// do not serialize this field.
-               source_channel_id: Option<[u8; 32]>,
+               /// The incoming channel between the previous node and us. This is only `None` for events
+               /// generated or serialized by versions prior to 0.0.107.
+               prev_channel_id: Option<[u8; 32]>,
+               /// The outgoing channel between the next node and us. This is only `None` for events
+               /// generated or serialized by versions prior to 0.0.107.
+               next_channel_id: Option<[u8; 32]>,
                /// The fee, in milli-satoshis, which was earned as a result of the payment.
                ///
                /// Note that if we force-closed the channel over which we forwarded an HTLC while the HTLC
                /// The full transaction received from the user
                transaction: Transaction
        },
 -      /// Indicates that a path for an outbound payment was successful.
 -      ///
 -      /// Always generated after [`Event::PaymentSent`] and thus useful for scoring channels. See
 -      /// [`Event::PaymentSent`] for obtaining the payment preimage.
 -      PaymentPathSuccessful {
 -              /// The id returned by [`ChannelManager::send_payment`] and used with
 -              /// [`ChannelManager::retry_payment`].
 -              ///
 -              /// [`ChannelManager::send_payment`]: crate::ln::channelmanager::ChannelManager::send_payment
 -              /// [`ChannelManager::retry_payment`]: crate::ln::channelmanager::ChannelManager::retry_payment
 -              payment_id: PaymentId,
 -              /// The hash that was given to [`ChannelManager::send_payment`].
 -              ///
 -              /// [`ChannelManager::send_payment`]: crate::ln::channelmanager::ChannelManager::send_payment
 -              payment_hash: Option<PaymentHash>,
 -              /// The payment path that was successful.
 -              ///
 -              /// May contain a closed channel if the HTLC sent along the path was fulfilled on chain.
 -              path: Vec<RouteHop>,
 -      },
        /// Indicates a request to open a new channel by a peer.
        ///
        /// To accept the request, call [`ChannelManager::accept_inbound_channel`]. To reject the
@@@ -522,12 -526,13 +525,13 @@@ impl Writeable for Event 
                                        (0, VecWriteWrapper(outputs), required),
                                });
                        },
-                       &Event::PaymentForwarded { fee_earned_msat, source_channel_id, claim_from_onchain_tx } => {
+                       &Event::PaymentForwarded { fee_earned_msat, prev_channel_id, claim_from_onchain_tx, next_channel_id } => {
                                7u8.write(writer)?;
                                write_tlv_fields!(writer, {
                                        (0, fee_earned_msat, option),
-                                       (1, source_channel_id, option),
+                                       (1, prev_channel_id, option),
                                        (2, claim_from_onchain_tx, required),
+                                       (3, next_channel_id, option),
                                });
                        },
                        &Event::ChannelClosed { ref channel_id, ref user_channel_id, ref reason } => {
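
A note on the TLV types in the hunk above: LDK's serialization convention makes even types required and odd types optional-and-ignorable, so writing `next_channel_id` under the new odd type 3 lets pre-0.0.107 readers skip it, and the read side below defaults it to `None`. A hedged consumer sketch of handling that default (the `event` binding is assumed):

    if let Event::PaymentForwarded { next_channel_id, .. } = event {
        match next_channel_id {
            Some(channel_id) => println!("forwarded out over channel {:?}", channel_id),
            None => println!("pre-0.0.107 event: outgoing channel id unavailable"),
        }
    }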
@@@ -687,14 -692,16 +691,16 @@@ impl MaybeReadable for Event 
                        7u8 => {
                                let f = || {
                                        let mut fee_earned_msat = None;
-                                       let mut source_channel_id = None;
+                                       let mut prev_channel_id = None;
                                        let mut claim_from_onchain_tx = false;
+                                       let mut next_channel_id = None;
                                        read_tlv_fields!(reader, {
                                                (0, fee_earned_msat, option),
-                                               (1, source_channel_id, option),
+                                               (1, prev_channel_id, option),
                                                (2, claim_from_onchain_tx, required),
+                                               (3, next_channel_id, option),
                                        });
-                                       Ok(Some(Event::PaymentForwarded { fee_earned_msat, source_channel_id, claim_from_onchain_tx }))
+                                       Ok(Some(Event::PaymentForwarded { fee_earned_msat, prev_channel_id, claim_from_onchain_tx, next_channel_id }))
                                };
                                f()
                        },
index 5e9591d8eba76652b4b2820959c0fd5e8cb5d67c,0ff0fc31aff93369c7836097ba3eb2a9844ef4dd..3682a0e8f0af02d1ac654853bff16e3096f4025a
@@@ -18,7 -18,7 +18,7 @@@ use chain::channelmonitor::MonitorEvent
  use chain::transaction::OutPoint;
  use chain::keysinterface;
  use ln::features::{ChannelFeatures, InitFeatures};
 -use ln::msgs;
 +use ln::{msgs, wire};
  use ln::msgs::OptionalField;
  use ln::script::ShutdownScript;
  use routing::scoring::FixedPenaltyScorer;
@@@ -35,8 -35,8 +35,8 @@@ use bitcoin::blockdata::block::BlockHea
  use bitcoin::network::constants::Network;
  use bitcoin::hash_types::{BlockHash, Txid};
  
 -use bitcoin::secp256k1::{SecretKey, PublicKey, Secp256k1, Signature};
 -use bitcoin::secp256k1::recovery::RecoverableSignature;
 +use bitcoin::secp256k1::{SecretKey, PublicKey, Secp256k1, ecdsa::Signature};
 +use bitcoin::secp256k1::ecdsa::RecoverableSignature;
  
  use regex;
  
@@@ -49,9 -49,6 +49,9 @@@ use core::{cmp, mem}
  use bitcoin::bech32::u5;
  use chain::keysinterface::{InMemorySigner, Recipient, KeyMaterial};
  
 +#[cfg(feature = "std")]
 +use std::time::{SystemTime, UNIX_EPOCH};
 +
  pub struct TestVecWriter(pub Vec<u8>);
  impl Writer for TestVecWriter {
        fn write_all(&mut self, buf: &[u8]) -> Result<(), io::Error> {
@@@ -164,7 -161,7 +164,7 @@@ impl<'a> chain::Watch<EnforcingSigner> 
                update_res
        }
  
-       fn release_pending_monitor_events(&self) -> Vec<MonitorEvent> {
+       fn release_pending_monitor_events(&self) -> Vec<(OutPoint, Vec<MonitorEvent>)> {
                return self.chain_monitor.release_pending_monitor_events();
        }
  }
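
Grouping by funding `OutPoint` in the signature above lets callers attribute each batch of `MonitorEvent`s to its channel. A hedged consumer sketch (`watch` is any `chain::Watch` implementation in scope; `handle_monitor_event` is a hypothetical dispatcher):

    for (funding_outpoint, monitor_events) in watch.release_pending_monitor_events() {
        for event in monitor_events {
            // Dispatch each event against the channel identified by its funding outpoint.
            handle_monitor_event(funding_outpoint, event);
        }
    }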
@@@ -249,106 -246,37 +249,106 @@@ impl chaininterface::BroadcasterInterfa
  
  pub struct TestChannelMessageHandler {
        pub pending_events: Mutex<Vec<events::MessageSendEvent>>,
 +      expected_recv_msgs: Mutex<Option<Vec<wire::Message<()>>>>,
  }
  
  impl TestChannelMessageHandler {
        pub fn new() -> Self {
                TestChannelMessageHandler {
                        pending_events: Mutex::new(Vec::new()),
 +                      expected_recv_msgs: Mutex::new(None),
 +              }
 +      }
 +
 +      #[cfg(test)]
 +      pub(crate) fn expect_receive_msg(&self, ev: wire::Message<()>) {
 +              let mut expected_msgs = self.expected_recv_msgs.lock().unwrap();
 +              if expected_msgs.is_none() { *expected_msgs = Some(Vec::new()); }
 +              expected_msgs.as_mut().unwrap().push(ev);
 +      }
 +
 +      fn received_msg(&self, _ev: wire::Message<()>) {
 +              let mut msgs = self.expected_recv_msgs.lock().unwrap();
 +              if msgs.is_none() { return; }
 +              assert!(!msgs.as_ref().unwrap().is_empty(), "Received message when we weren't expecting one");
 +              #[cfg(test)]
 +              assert_eq!(msgs.as_ref().unwrap()[0], _ev);
 +              msgs.as_mut().unwrap().remove(0);
 +      }
 +}
 +
 +impl Drop for TestChannelMessageHandler {
 +      fn drop(&mut self) {
 +              let l = self.expected_recv_msgs.lock().unwrap();
 +              #[cfg(feature = "std")]
 +              {
 +                      if !std::thread::panicking() {
 +                              assert!(l.is_none() || l.as_ref().unwrap().is_empty());
 +                      }
                }
        }
  }
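
A hedged usage sketch for the expectation helper above (`error_msg` and `their_node_id` are assumed to be in scope in a test):

    // Queue the expectation, deliver the message, and rely on Drop to catch leftovers.
    let handler = TestChannelMessageHandler::new();
    handler.expect_receive_msg(wire::Message::Error(error_msg.clone()));
    handler.handle_error(&their_node_id, &error_msg);
    // When `handler` is dropped, it asserts every expected message was received
    // (unless the thread is already panicking).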
  
  impl msgs::ChannelMessageHandler for TestChannelMessageHandler {
 -      fn handle_open_channel(&self, _their_node_id: &PublicKey, _their_features: InitFeatures, _msg: &msgs::OpenChannel) {}
 -      fn handle_accept_channel(&self, _their_node_id: &PublicKey, _their_features: InitFeatures, _msg: &msgs::AcceptChannel) {}
 -      fn handle_funding_created(&self, _their_node_id: &PublicKey, _msg: &msgs::FundingCreated) {}
 -      fn handle_funding_signed(&self, _their_node_id: &PublicKey, _msg: &msgs::FundingSigned) {}
 -      fn handle_funding_locked(&self, _their_node_id: &PublicKey, _msg: &msgs::FundingLocked) {}
 -      fn handle_shutdown(&self, _their_node_id: &PublicKey, _their_features: &InitFeatures, _msg: &msgs::Shutdown) {}
 -      fn handle_closing_signed(&self, _their_node_id: &PublicKey, _msg: &msgs::ClosingSigned) {}
 -      fn handle_update_add_htlc(&self, _their_node_id: &PublicKey, _msg: &msgs::UpdateAddHTLC) {}
 -      fn handle_update_fulfill_htlc(&self, _their_node_id: &PublicKey, _msg: &msgs::UpdateFulfillHTLC) {}
 -      fn handle_update_fail_htlc(&self, _their_node_id: &PublicKey, _msg: &msgs::UpdateFailHTLC) {}
 -      fn handle_update_fail_malformed_htlc(&self, _their_node_id: &PublicKey, _msg: &msgs::UpdateFailMalformedHTLC) {}
 -      fn handle_commitment_signed(&self, _their_node_id: &PublicKey, _msg: &msgs::CommitmentSigned) {}
 -      fn handle_revoke_and_ack(&self, _their_node_id: &PublicKey, _msg: &msgs::RevokeAndACK) {}
 -      fn handle_update_fee(&self, _their_node_id: &PublicKey, _msg: &msgs::UpdateFee) {}
 -      fn handle_channel_update(&self, _their_node_id: &PublicKey, _msg: &msgs::ChannelUpdate) {}
 -      fn handle_announcement_signatures(&self, _their_node_id: &PublicKey, _msg: &msgs::AnnouncementSignatures) {}
 -      fn handle_channel_reestablish(&self, _their_node_id: &PublicKey, _msg: &msgs::ChannelReestablish) {}
 +      fn handle_open_channel(&self, _their_node_id: &PublicKey, _their_features: InitFeatures, msg: &msgs::OpenChannel) {
 +              self.received_msg(wire::Message::OpenChannel(msg.clone()));
 +      }
 +      fn handle_accept_channel(&self, _their_node_id: &PublicKey, _their_features: InitFeatures, msg: &msgs::AcceptChannel) {
 +              self.received_msg(wire::Message::AcceptChannel(msg.clone()));
 +      }
 +      fn handle_funding_created(&self, _their_node_id: &PublicKey, msg: &msgs::FundingCreated) {
 +              self.received_msg(wire::Message::FundingCreated(msg.clone()));
 +      }
 +      fn handle_funding_signed(&self, _their_node_id: &PublicKey, msg: &msgs::FundingSigned) {
 +              self.received_msg(wire::Message::FundingSigned(msg.clone()));
 +      }
 +      fn handle_funding_locked(&self, _their_node_id: &PublicKey, msg: &msgs::FundingLocked) {
 +              self.received_msg(wire::Message::FundingLocked(msg.clone()));
 +      }
 +      fn handle_shutdown(&self, _their_node_id: &PublicKey, _their_features: &InitFeatures, msg: &msgs::Shutdown) {
 +              self.received_msg(wire::Message::Shutdown(msg.clone()));
 +      }
 +      fn handle_closing_signed(&self, _their_node_id: &PublicKey, msg: &msgs::ClosingSigned) {
 +              self.received_msg(wire::Message::ClosingSigned(msg.clone()));
 +      }
 +      fn handle_update_add_htlc(&self, _their_node_id: &PublicKey, msg: &msgs::UpdateAddHTLC) {
 +              self.received_msg(wire::Message::UpdateAddHTLC(msg.clone()));
 +      }
 +      fn handle_update_fulfill_htlc(&self, _their_node_id: &PublicKey, msg: &msgs::UpdateFulfillHTLC) {
 +              self.received_msg(wire::Message::UpdateFulfillHTLC(msg.clone()));
 +      }
 +      fn handle_update_fail_htlc(&self, _their_node_id: &PublicKey, msg: &msgs::UpdateFailHTLC) {
 +              self.received_msg(wire::Message::UpdateFailHTLC(msg.clone()));
 +      }
 +      fn handle_update_fail_malformed_htlc(&self, _their_node_id: &PublicKey, msg: &msgs::UpdateFailMalformedHTLC) {
 +              self.received_msg(wire::Message::UpdateFailMalformedHTLC(msg.clone()));
 +      }
 +      fn handle_commitment_signed(&self, _their_node_id: &PublicKey, msg: &msgs::CommitmentSigned) {
 +              self.received_msg(wire::Message::CommitmentSigned(msg.clone()));
 +      }
 +      fn handle_revoke_and_ack(&self, _their_node_id: &PublicKey, msg: &msgs::RevokeAndACK) {
 +              self.received_msg(wire::Message::RevokeAndACK(msg.clone()));
 +      }
 +      fn handle_update_fee(&self, _their_node_id: &PublicKey, msg: &msgs::UpdateFee) {
 +              self.received_msg(wire::Message::UpdateFee(msg.clone()));
 +      }
 +      fn handle_channel_update(&self, _their_node_id: &PublicKey, _msg: &msgs::ChannelUpdate) {
 +              // Don't call `received_msg` here as `TestRoutingMessageHandler` sometimes generates these
 +      }
 +      fn handle_announcement_signatures(&self, _their_node_id: &PublicKey, msg: &msgs::AnnouncementSignatures) {
 +              self.received_msg(wire::Message::AnnouncementSignatures(msg.clone()));
 +      }
 +      fn handle_channel_reestablish(&self, _their_node_id: &PublicKey, msg: &msgs::ChannelReestablish) {
 +              self.received_msg(wire::Message::ChannelReestablish(msg.clone()));
 +      }
        fn peer_disconnected(&self, _their_node_id: &PublicKey, _no_connection_possible: bool) {}
 -      fn peer_connected(&self, _their_node_id: &PublicKey, _msg: &msgs::Init) {}
 -      fn handle_error(&self, _their_node_id: &PublicKey, _msg: &msgs::ErrorMessage) {}
 +      fn peer_connected(&self, _their_node_id: &PublicKey, _msg: &msgs::Init) {
 +              // Don't call `received_msg` for Init as it's auto-generated and we don't want to
 +              // re-generate the expected Init message in all tests.
 +      }
 +      fn handle_error(&self, _their_node_id: &PublicKey, msg: &msgs::ErrorMessage) {
 +              self.received_msg(wire::Message::Error(msg.clone()));
 +      }
  }
  
  impl events::MessageSendEventsProvider for TestChannelMessageHandler {
@@@ -413,7 -341,6 +413,7 @@@ fn get_dummy_channel_update(short_chan_
  pub struct TestRoutingMessageHandler {
        pub chan_upds_recvd: AtomicUsize,
        pub chan_anns_recvd: AtomicUsize,
 +      pub pending_events: Mutex<Vec<events::MessageSendEvent>>,
        pub request_full_sync: AtomicBool,
  }
  
@@@ -422,7 -349,6 +422,7 @@@ impl TestRoutingMessageHandler 
                TestRoutingMessageHandler {
                        chan_upds_recvd: AtomicUsize::new(0),
                        chan_anns_recvd: AtomicUsize::new(0),
 +                      pending_events: Mutex::new(vec![]),
                        request_full_sync: AtomicBool::new(false),
                }
        }
@@@ -458,35 -384,7 +458,35 @@@ impl msgs::RoutingMessageHandler for Te
                Vec::new()
        }
  
 -      fn peer_connected(&self, _their_node_id: &PublicKey, _init_msg: &msgs::Init) {}
 +      fn peer_connected(&self, their_node_id: &PublicKey, init_msg: &msgs::Init) {
 +              if !init_msg.features.supports_gossip_queries() {
 +                      return;
 +              }
 +
 +              let should_request_full_sync = self.request_full_sync.load(Ordering::Acquire);
 +
 +              #[allow(unused_mut, unused_assignments)]
 +              let mut gossip_start_time = 0;
 +              #[cfg(feature = "std")]
 +              {
 +                      gossip_start_time = SystemTime::now().duration_since(UNIX_EPOCH).expect("Time must be > 1970").as_secs();
 +                      if should_request_full_sync {
 +                              gossip_start_time -= 60 * 60 * 24 * 7 * 2; // 2 weeks ago
 +                      } else {
 +                              gossip_start_time -= 60 * 60; // an hour ago
 +                      }
 +              }
 +
 +              let mut pending_events = self.pending_events.lock().unwrap();
 +              pending_events.push(events::MessageSendEvent::SendGossipTimestampFilter {
 +                      node_id: their_node_id.clone(),
 +                      msg: msgs::GossipTimestampFilter {
 +                              chain_hash: genesis_block(Network::Testnet).header.block_hash(),
 +                              first_timestamp: gossip_start_time as u32,
 +                              timestamp_range: u32::max_value(),
 +                      },
 +              });
 +      }
  
        fn handle_reply_channel_range(&self, _their_node_id: &PublicKey, _msg: msgs::ReplyChannelRange) -> Result<(), msgs::LightningError> {
                Ok(())
  
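
As an aside on the lookback constants in `peer_connected` above, a hedged arithmetic check:

    // Hedged check of the backdating arithmetic used for the gossip timestamp filter.
    const TWO_WEEKS_SECS: u64 = 60 * 60 * 24 * 7 * 2; // full-sync lookback
    const ONE_HOUR_SECS: u64 = 60 * 60;               // incremental lookback
    assert_eq!(TWO_WEEKS_SECS, 1_209_600);
    assert_eq!(ONE_HOUR_SECS, 3_600);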
  impl events::MessageSendEventsProvider for TestRoutingMessageHandler {
        fn get_and_clear_pending_msg_events(&self) -> Vec<events::MessageSendEvent> {
 -              vec![]
 +              let mut ret = Vec::new();
 +              let mut pending_events = self.pending_events.lock().unwrap();
 +              core::mem::swap(&mut ret, &mut pending_events);
 +              ret
        }
  }
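
The swap-based drain above is correct as written; an equivalent body, shown only as an alternative sketch, uses `core::mem::take` (available since Rust 1.40 and relying on `Vec: Default`):

    fn get_and_clear_pending_msg_events(&self) -> Vec<events::MessageSendEvent> {
        // mem::take swaps in Vec::default() and returns the previous contents.
        core::mem::take(&mut *self.pending_events.lock().unwrap())
    }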