Merge pull request #1351 from TheBlueMatt/2022-03-scid-privacy
author Matt Corallo <649246+TheBlueMatt@users.noreply.github.com>
Mon, 28 Mar 2022 20:33:55 +0000 (20:33 +0000)
committer GitHub <noreply@github.com>
Mon, 28 Mar 2022 20:33:55 +0000 (20:33 +0000)
Implement the SCIDAlias Channel Type and provide SCID Privacy

1  2 
.github/workflows/build.yml
lightning/src/ln/channel.rs
lightning/src/ln/channelmanager.rs
lightning/src/ln/functional_test_utils.rs
lightning/src/ln/priv_short_conf_tests.rs
lightning/src/routing/router.rs
lightning/src/util/events.rs

index a72f4d4ce0d9ff119a5415abada7e4ca05f5b641,9b6abc71151650bcd3a0964cdf5284c3fd0a1503..90e08b398bccd3dd4ee6b8593bc74d1492f40030
@@@ -115,9 -115,6 +115,9 @@@ jobs
            cargo test --verbose --color always --no-default-features --features no-std
            # check if there is a conflict between no-std and the default std feature
            cargo test --verbose --color always --features no-std
 +          # check that things still pass without grind_signatures
 +          # note that outbound_commitment_test only runs in this mode, because of hardcoded signature values
 +          cargo test --verbose --color always --no-default-features --features std
            # check if there is a conflict between no-std and the c_bindings cfg
            RUSTFLAGS="--cfg=c_bindings" cargo test --verbose --color always --no-default-features --features=no-std
            cd ..
            profile: minimal
        - name: Cache routing graph snapshot
          id: cache-graph
 -        uses: actions/cache@v2
 +        uses: actions/cache@v3
          with:
            path: lightning/net_graph-2021-05-31.bin
            key: ldk-net_graph-v0.0.15-2021-05-31.bin
    check_commits:
      runs-on: ubuntu-latest
      env:
-       # rustc 1.53 regressed and panics when building our (perfectly fine) docs.
-       # See https://github.com/rust-lang/rust/issues/84738
-       TOOLCHAIN: 1.52.1
+       TOOLCHAIN: 1.57.0
      steps:
        - name: Checkout source code
          uses: actions/checkout@v3
index 171a41dfa26f02478aaa3c94f6225863cc4491de,0792164e20d588a043de6700e0c23ca3956eff9d..679ff82e0181d75e8331ccd66bad1ff588cd8f1c
@@@ -810,6 -810,31 +810,31 @@@ impl<Signer: Sign> Channel<Signer> 
                self.channel_transaction_parameters.opt_anchors.is_some()
        }
  
+       fn get_initial_channel_type(config: &UserConfig) -> ChannelTypeFeatures {
+               // The default channel type (ie the first one we try) depends on whether the channel is
+               // public - if it is, we just go with `only_static_remotekey` as it's the only option
+               // available. If it's private, we first try `scid_privacy` as it provides better privacy
+               // with no other changes, and fall back to `only_static_remotekey`
+               let mut ret = ChannelTypeFeatures::only_static_remote_key();
+               if !config.channel_options.announced_channel && config.own_channel_config.negotiate_scid_privacy {
+                       ret.set_scid_privacy_required();
+               }
+               ret
+       }
+       /// If we receive an error message, it may only be a rejection of the channel type we tried,
+       /// not of our ability to open any channel at all. Thus, on error, we should first call this
+       /// and see if we get a new `OpenChannel` message, otherwise the channel is failed.
+       pub(crate) fn maybe_handle_error_without_close(&mut self, chain_hash: BlockHash) -> Result<msgs::OpenChannel, ()> {
+               if !self.is_outbound() || self.channel_state != ChannelState::OurInitSent as u32 { return Err(()); }
+               if self.channel_type == ChannelTypeFeatures::only_static_remote_key() {
+                       // We've exhausted our options
+                       return Err(());
+               }
+               self.channel_type = ChannelTypeFeatures::only_static_remote_key(); // We only currently support two types
+               Ok(self.get_open_channel(chain_hash))
+       }
        // Constructors:
        pub fn new_outbound<K: Deref, F: Deref>(
                fee_estimator: &F, keys_provider: &K, counterparty_node_id: PublicKey, their_features: &InitFeatures,
                        #[cfg(any(test, fuzzing))]
                        historical_inbound_htlc_fulfills: HashSet::new(),
  
-                       // We currently only actually support one channel type, so don't retry with new types
-                       // on error messages. When we support more we'll need fallback support (assuming we
-                       // want to support old types).
-                       channel_type: ChannelTypeFeatures::only_static_remote_key(),
+                       channel_type: Self::get_initial_channel_type(&config),
                })
        }
  
                      L::Target: Logger,
        {
                let opt_anchors = false; // TODO - should be based on features
+               let announced_channel = if (msg.channel_flags & 1) == 1 { true } else { false };
  
                // First check the channel type is known, failing before we do anything else if we don't
                // support this channel type.
                        if channel_type.supports_any_optional_bits() {
                                return Err(ChannelError::Close("Channel Type field contained optional bits - this is not allowed".to_owned()));
                        }
-                       if *channel_type != ChannelTypeFeatures::only_static_remote_key() {
-                               return Err(ChannelError::Close("Channel Type was not understood".to_owned()));
+                       // We currently only allow two channel types, so write it all out here - we allow
+                       // `only_static_remote_key` in all contexts, and further allow
+                       // `static_remote_key|scid_privacy` if the channel is not publicly announced.
+                       let mut allowed_type = ChannelTypeFeatures::only_static_remote_key();
+                       if *channel_type != allowed_type {
+                               allowed_type.set_scid_privacy_required();
+                               if *channel_type != allowed_type {
+                                       return Err(ChannelError::Close("Channel Type was not understood".to_owned()));
+                               }
+                               if announced_channel {
+                                       return Err(ChannelError::Close("SCID Alias/Privacy Channel Type cannot be set on a public channel".to_owned()));
+                               }
                        }
                        channel_type.clone()
                } else {
  
                // Convert things into internal flags and prep our state:
  
-               let announce = if (msg.channel_flags & 1) == 1 { true } else { false };
                if config.peer_channel_config_limits.force_announced_channel_preference {
-                       if local_config.announced_channel != announce {
+                       if local_config.announced_channel != announced_channel {
                                return Err(ChannelError::Close("Peer tried to open channel but their announcement preference is different from ours".to_owned()));
                        }
                }
                // we either accept their preference or the preferences match
-               local_config.announced_channel = announce;
+               local_config.announced_channel = announced_channel;
  
                let holder_selected_channel_reserve_satoshis = Channel::<Signer>::get_holder_selected_channel_reserve_satoshis(msg.funding_satoshis);
                if holder_selected_channel_reserve_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
                self.user_id
        }
  
+       /// Gets the channel's type
+       pub fn get_channel_type(&self) -> &ChannelTypeFeatures {
+               &self.channel_type
+       }
        /// Guaranteed to be Some after both FundingLocked messages have been exchanged (and, thus,
        /// is_usable() returns true).
        /// Allowed in any state (including after shutdown)
        /// should be sent back to the counterparty node.
        ///
        /// [`msgs::AcceptChannel`]: crate::ln::msgs::AcceptChannel
 -      pub fn accept_inbound_channel(&mut self) -> msgs::AcceptChannel {
 +      pub fn accept_inbound_channel(&mut self, user_id: u64) -> msgs::AcceptChannel {
                if self.is_outbound() {
                        panic!("Tried to send accept_channel for an outbound channel?");
                }
                        panic!("The inbound channel has already been accepted");
                }
  
 +              self.user_id = user_id;
                self.inbound_awaiting_accept = false;
  
                self.generate_accept_channel_message()
@@@ -6421,7 -6457,7 +6458,7 @@@ mod tests 
                let mut node_b_chan = Channel::<EnforcingSigner>::new_from_req(&&feeest, &&keys_provider, node_b_node_id, &InitFeatures::known(), &open_channel_msg, 7, &config, 0, &&logger, 42).unwrap();
  
                // Node B --> Node A: accept channel, explicitly setting B's dust limit.
 -              let mut accept_channel_msg = node_b_chan.accept_inbound_channel();
 +              let mut accept_channel_msg = node_b_chan.accept_inbound_channel(0);
                accept_channel_msg.dust_limit_satoshis = 546;
                node_a_chan.accept_channel(&accept_channel_msg, &config.peer_channel_config_limits, &InitFeatures::known()).unwrap();
                node_a_chan.holder_dust_limit_satoshis = 1560;
                let mut node_b_chan = Channel::<EnforcingSigner>::new_from_req(&&feeest, &&keys_provider, node_b_node_id, &InitFeatures::known(), &open_channel_msg, 7, &config, 0, &&logger, 42).unwrap();
  
                // Node B --> Node A: accept channel
 -              let accept_channel_msg = node_b_chan.accept_inbound_channel();
 +              let accept_channel_msg = node_b_chan.accept_inbound_channel(0);
                node_a_chan.accept_channel(&accept_channel_msg, &config.peer_channel_config_limits, &InitFeatures::known()).unwrap();
  
                // Node A --> Node B: funding created
                }
        }
  
 +      #[cfg(not(feature = "grind_signatures"))]
        #[test]
        fn outbound_commitment_test() {
                // Test vectors from BOLT 3 Appendices C and F (anchors):
index e68ef27a72343078b24fb5b733d85aecd9d83e93,81789313d290780ca254d910cd26d985a57a06ae..cfa2da06fd17bddb9167f14ec9b2efed6a51d7ff
@@@ -42,7 -42,7 +42,7 @@@ use chain::transaction::{OutPoint, Tran
  // construct one themselves.
  use ln::{PaymentHash, PaymentPreimage, PaymentSecret};
  use ln::channel::{Channel, ChannelError, ChannelUpdateStatus, UpdateFulfillCommitFetch};
- use ln::features::{InitFeatures, NodeFeatures};
+ use ln::features::{ChannelTypeFeatures, InitFeatures, NodeFeatures};
  use routing::router::{PaymentParameters, Route, RouteHop, RoutePath, RouteParameters};
  use ln::msgs;
  use ln::msgs::NetAddress;
@@@ -53,7 -53,7 +53,7 @@@ use util::config::UserConfig
  use util::events::{EventHandler, EventsProvider, MessageSendEvent, MessageSendEventsProvider, ClosureReason};
  use util::{byte_utils, events};
  use util::scid_utils::fake_scid;
- use util::ser::{BigSize, FixedLengthReader, Readable, ReadableArgs, MaybeReadable, Writeable, Writer};
+ use util::ser::{BigSize, FixedLengthReader, Readable, ReadableArgs, MaybeReadable, Writeable, Writer, VecWriter};
  use util::logger::{Level, Logger};
  use util::errors::APIError;
  
@@@ -69,7 -69,6 +69,7 @@@ use core::ops::Deref
  
  #[cfg(any(test, feature = "std"))]
  use std::time::Instant;
 +use util::crypto::sign;
  
  mod inbound_payment {
        use alloc::string::ToString;
@@@ -442,7 -441,6 +442,7 @@@ struct ClaimableHTLC 
        cltv_expiry: u32,
        value: u64,
        onion_payload: OnionPayload,
 +      timer_ticks: u8,
  }
  
  /// A payment identifier used to uniquely identify a payment to LDK.
@@@ -888,8 -886,6 +888,8 @@@ impl PendingOutboundPayment 
  /// issues such as overly long function definitions. Note that the ChannelManager can take any
  /// type that implements KeysInterface for its keys manager, but this type alias chooses the
  /// concrete type of the KeysManager.
 +///
 +/// (C-not exported) as Arcs don't make sense in bindings
  pub type SimpleArcChannelManager<M, T, F, L> = ChannelManager<InMemorySigner, Arc<M>, Arc<T>, Arc<KeysManager>, Arc<F>, Arc<L>>;
  
  /// SimpleRefChannelManager is a type alias for a ChannelManager reference, and is the reference
  /// helps with issues such as long function definitions. Note that the ChannelManager can take any
  /// type that implements KeysInterface for its keys manager, but this type alias chooses the
  /// concrete type of the KeysManager.
 +///
 +/// (C-not exported) as Arcs don't make sense in bindings
  pub type SimpleRefChannelManager<'a, 'b, 'c, 'd, 'e, M, T, F, L> = ChannelManager<InMemorySigner, &'a M, &'b T, &'c KeysManager, &'d F, &'e L>;
  
  /// Manager which keeps track of a number of channels and sends messages to the appropriate
@@@ -1154,9 -1148,6 +1154,9 @@@ const CHECK_CLTV_EXPIRY_SANITY_2: u32 
  /// pending HTLCs in flight.
  pub(crate) const PAYMENT_EXPIRY_BLOCKS: u32 = 3;
  
 +/// The number of ticks of [`ChannelManager::timer_tick_occurred`] until expiry of incomplete MPPs
 +pub(crate) const MPP_TIMEOUT_TICKS: u8 = 3;
 +
  /// Information needed for constructing an invoice route hint for this channel.
  #[derive(Clone, Debug, PartialEq)]
  pub struct CounterpartyForwardingInfo {
@@@ -1209,6 -1200,10 +1209,10 @@@ pub struct ChannelDetails 
        /// Note that, if this has been set, `channel_id` will be equivalent to
        /// `funding_txo.unwrap().to_channel_id()`.
        pub funding_txo: Option<OutPoint>,
+       /// The features which this channel operates with. See individual features for more info.
+       ///
+       /// `None` until negotiation completes and the channel type is finalized.
+       pub channel_type: Option<ChannelTypeFeatures>,
        /// The position of the funding transaction in the chain. None if the funding transaction has
        /// not yet been confirmed and the channel fully opened.
        ///
        /// counterparty will recognize the alias provided here in place of the [`short_channel_id`]
        /// when they see a payment to be routed to us.
        ///
+       /// Our counterparty may choose to rotate this value at any time, though will always recognize
+       /// previous values for inbound payment forwarding.
+       ///
        /// [`short_channel_id`]: Self::short_channel_id
        pub inbound_scid_alias: Option<u64>,
        /// The value, in satoshis, of this channel as appears in the funding output
  }
  
  impl ChannelDetails {
-       /// Gets the SCID which should be used to identify this channel for inbound payments. This
-       /// should be used for providing invoice hints or in any other context where our counterparty
-       /// will forward a payment to us.
+       /// Gets the current SCID which should be used to identify this channel for inbound payments.
+       /// This should be used for providing invoice hints or in any other context where our
+       /// counterparty will forward a payment to us.
+       ///
+       /// This is either the [`ChannelDetails::inbound_scid_alias`], if set, or the
+       /// [`ChannelDetails::short_channel_id`]. See those for more information.
        pub fn get_inbound_payment_scid(&self) -> Option<u64> {
                self.inbound_scid_alias.or(self.short_channel_id)
        }
@@@ -1931,6 -1932,9 +1941,9 @@@ impl<Signer: Sign, M: Deref, T: Deref, 
                                                forwarding_info: channel.counterparty_forwarding_info(),
                                        },
                                        funding_txo: channel.get_funding_txo(),
+                                       // Note that accept_channel (or open_channel) is always the first message, so
+                                       // `have_received_message` indicates that type negotiation has completed.
+                                       channel_type: if channel.have_received_message() { Some(channel.get_channel_type().clone()) } else { None },
                                        short_channel_id: channel.get_short_channel_id(),
                                        inbound_scid_alias: channel.latest_inbound_scid_alias(),
                                        channel_value_satoshis: channel.get_value_satoshis(),
                                        };
                                        let (chan_update_opt, forwardee_cltv_expiry_delta) = if let Some(forwarding_id) = forwarding_id_opt {
                                                let chan = channel_state.as_mut().unwrap().by_id.get_mut(&forwarding_id).unwrap();
-                                               // Leave channel updates as None for private channels.
-                                               let chan_update_opt = if chan.should_announce() {
-                                                       Some(self.get_channel_update_for_unicast(chan).unwrap()) } else { None };
                                                if !chan.should_announce() && !self.default_configuration.accept_forwards_to_priv_channels {
                                                        // Note that the behavior here should be identical to the above block - we
                                                        // should NOT reveal the existence or non-existence of a private channel if
                                                        // we don't allow forwards outbound over them.
-                                                       break Some(("Don't have available channel for forwarding as requested.", 0x4000 | 10, None));
+                                                       break Some(("Refusing to forward to a private channel based on our config.", 0x4000 | 10, None));
+                                               }
+                                               if chan.get_channel_type().supports_scid_privacy() && *short_channel_id != chan.outbound_scid_alias() {
+                                                       // `option_scid_alias` (referred to in LDK as `scid_privacy`) means
+                                                       // "refuse to forward unless the SCID alias was used", so we pretend
+                                                       // we don't have the channel here.
+                                                       break Some(("Refusing to forward over real channel SCID as our counterparty requested.", 0x4000 | 10, None));
                                                }
+                                               let chan_update_opt = self.get_channel_update_for_onion(*short_channel_id, chan).ok();
  
                                                // Note that we could technically not return an error yet here and just hope
                                                // that the connection is reestablished or monitor updated by the time we get
                                        break None;
                                }
                                {
-                                       let mut res = Vec::with_capacity(8 + 128);
+                                       let mut res = VecWriter(Vec::with_capacity(chan_update.serialized_length() + 8 + 2));
                                        if let Some(chan_update) = chan_update {
                                                if code == 0x1000 | 11 || code == 0x1000 | 12 {
-                                                       res.extend_from_slice(&byte_utils::be64_to_array(msg.amount_msat));
+                                                       msg.amount_msat.write(&mut res).expect("Writes cannot fail");
                                                }
                                                else if code == 0x1000 | 13 {
-                                                       res.extend_from_slice(&byte_utils::be32_to_array(msg.cltv_expiry));
+                                                       msg.cltv_expiry.write(&mut res).expect("Writes cannot fail");
                                                }
                                                else if code == 0x1000 | 20 {
                                                        // TODO: underspecified, follow https://github.com/lightningnetwork/lightning-rfc/issues/791
-                                                       res.extend_from_slice(&byte_utils::be16_to_array(0));
+                                                       0u16.write(&mut res).expect("Writes cannot fail");
                                                }
-                                               res.extend_from_slice(&chan_update.encode_with_len()[..]);
+                                               (chan_update.serialized_length() as u16).write(&mut res).expect("Writes cannot fail");
+                                               chan_update.write(&mut res).expect("Writes cannot fail");
                                        }
-                                       return_err!(err, code, &res[..]);
+                                       return_err!(err, code, &res.0[..]);
                                }
                        }
                }
                        Some(id) => id,
                };
  
+               self.get_channel_update_for_onion(short_channel_id, chan)
+       }
+       fn get_channel_update_for_onion(&self, short_channel_id: u64, chan: &Channel<Signer>) -> Result<msgs::ChannelUpdate, LightningError> {
+               log_trace!(self.logger, "Generating channel update for channel {}", log_bytes!(chan.channel_id()));
                let were_node_one = PublicKey::from_secret_key(&self.secp_ctx, &self.our_network_key).serialize()[..] < chan.get_counterparty_node_id().serialize()[..];
  
                let unsigned = msgs::UnsignedChannelUpdate {
                        excess_data: Vec::new(),
                };
                let msghash = hash_to_message!(&Sha256dHash::hash(&announcement.encode()[..])[..]);
 -              let node_announce_sig = self.secp_ctx.sign(&msghash, &self.our_network_key);
 +              let node_announce_sig = sign(&self.secp_ctx, &msghash, &self.our_network_key);
  
                let mut channel_state_lock = self.channel_state.lock().unwrap();
                let channel_state = &mut *channel_state_lock;
                                                                                        } else {
                                                                                                panic!("Stated return value requirements in send_htlc() were not met");
                                                                                        }
-                                                                                       let chan_update = self.get_channel_update_for_unicast(chan.get()).unwrap();
+                                                                                       let (failure_code, data) = self.get_htlc_temp_fail_err_and_data(0x1000|7, short_chan_id, chan.get());
                                                                                        failed_forwards.push((htlc_source, payment_hash,
-                                                                                               HTLCFailReason::Reason { failure_code: 0x1000 | 7, data: chan_update.encode_with_len() }
+                                                                                               HTLCFailReason::Reason { failure_code, data }
                                                                                        ));
                                                                                        continue;
                                                                                },
                                                                                phantom_shared_secret,
                                                                        },
                                                                        value: amt_to_forward,
 +                                                                      timer_ticks: 0,
                                                                        cltv_expiry,
                                                                        onion_payload,
                                                                };
                        let new_feerate = self.fee_estimator.get_est_sat_per_1000_weight(ConfirmationTarget::Normal);
  
                        let mut handle_errors = Vec::new();
 +                      let mut timed_out_mpp_htlcs = Vec::new();
                        {
                                let mut channel_state_lock = self.channel_state.lock().unwrap();
                                let channel_state = &mut *channel_state_lock;
  
                                        true
                                });
 +
 +                              channel_state.claimable_htlcs.retain(|payment_hash, htlcs| {
 +                                      if htlcs.is_empty() {
 +                                              // This should be unreachable
 +                                              debug_assert!(false);
 +                                              return false;
 +                                      }
 +                                      if let OnionPayload::Invoice(ref final_hop_data) = htlcs[0].onion_payload {
 +                                              // Check if we've received all the parts we need for an MPP (the value of the parts adds to total_msat).
 +                                              // In this case we're not going to handle any timeouts of the parts here.
 +                                              if final_hop_data.total_msat == htlcs.iter().fold(0, |total, htlc| total + htlc.value) {
 +                                                      return true;
 +                                              } else if htlcs.into_iter().any(|htlc| {
 +                                                      htlc.timer_ticks += 1;
 +                                                      return htlc.timer_ticks >= MPP_TIMEOUT_TICKS
 +                                              }) {
 +                                                      timed_out_mpp_htlcs.extend(htlcs.into_iter().map(|htlc| (htlc.prev_hop.clone(), payment_hash.clone())));
 +                                                      return false;
 +                                              }
 +                                      }
 +                                      true
 +                              });
 +                      }
 +
 +                      for htlc_source in timed_out_mpp_htlcs.drain(..) {
 +                              self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), HTLCSource::PreviousHopData(htlc_source.0), &htlc_source.1, HTLCFailReason::Reason { failure_code: 23, data: Vec::new() });
                        }
  
                        for (err, counterparty_node_id) in handle_errors.drain(..) {
                } else { false }
        }
  
+       /// Gets an HTLC onion failure code and error data for an `UPDATE` error, given the error code
+       /// that we want to return and a channel.
+       ///
+       /// This is for failures on the channel on which the HTLC was *received*, not failures
+       /// forwarding
+       fn get_htlc_inbound_temp_fail_err_and_data(&self, desired_err_code: u16, chan: &Channel<Signer>) -> (u16, Vec<u8>) {
+               // We can't be sure what SCID was used when relaying inbound towards us, so we have to
+               // guess somewhat. If its a public channel, we figure best to just use the real SCID (as
+               // we're not leaking that we have a channel with the counterparty), otherwise we try to use
+               // an inbound SCID alias before the real SCID.
+               let scid_pref = if chan.should_announce() {
+                       chan.get_short_channel_id().or(chan.latest_inbound_scid_alias())
+               } else {
+                       chan.latest_inbound_scid_alias().or(chan.get_short_channel_id())
+               };
+               if let Some(scid) = scid_pref {
+                       self.get_htlc_temp_fail_err_and_data(desired_err_code, scid, chan)
+               } else {
+                       (0x4000|10, Vec::new())
+               }
+       }
+       /// Gets an HTLC onion failure code and error data for an `UPDATE` error, given the error code
+       /// that we want to return and a channel.
+       fn get_htlc_temp_fail_err_and_data(&self, desired_err_code: u16, scid: u64, chan: &Channel<Signer>) -> (u16, Vec<u8>) {
+               debug_assert_eq!(desired_err_code & 0x1000, 0x1000);
+               if let Ok(upd) = self.get_channel_update_for_onion(scid, chan) {
+                       let mut enc = VecWriter(Vec::with_capacity(upd.serialized_length() + 4));
+                       if desired_err_code == 0x1000 | 20 {
+                               // TODO: underspecified, follow https://github.com/lightning/bolts/issues/791
+                               0u16.write(&mut enc).expect("Writes cannot fail");
+                       }
+                       (upd.serialized_length() as u16).write(&mut enc).expect("Writes cannot fail");
+                       upd.write(&mut enc).expect("Writes cannot fail");
+                       (desired_err_code, enc.0)
+               } else {
+                       // If we fail to get a unicast channel_update, it implies we don't yet have an SCID,
+                       // which means we really shouldn't have gotten a payment to be forwarded over this
+                       // channel yet, or if we did it's from a route hint. Either way, returning an error of
+                       // PERM|no_such_channel should be fine.
+                       (0x4000|10, Vec::new())
+               }
+       }
        // Fail a list of HTLCs that were just freed from the holding cell. The HTLCs need to be
        // failed backwards or, if they were one of our outgoing HTLCs, then their failure needs to
        // be surfaced to the user.
                                        let (failure_code, onion_failure_data) =
                                                match self.channel_state.lock().unwrap().by_id.entry(channel_id) {
                                                        hash_map::Entry::Occupied(chan_entry) => {
-                                                               if let Ok(upd) = self.get_channel_update_for_unicast(&chan_entry.get()) {
-                                                                       (0x1000|7, upd.encode_with_len())
-                                                               } else {
-                                                                       (0x4000|10, Vec::new())
-                                                               }
+                                                               self.get_htlc_inbound_temp_fail_err_and_data(0x1000|7, &chan_entry.get())
                                                        },
                                                        hash_map::Entry::Vacant(_) => (0x4000|10, Vec::new())
                                                };
                                // channel_update later through the announcement_signatures process for public
                                // channels, but there's no reason not to just inform our counterparty of our fees
                                // now.
-                               Some(events::MessageSendEvent::SendChannelUpdate {
-                                       node_id: channel.get().get_counterparty_node_id(),
-                                       msg: self.get_channel_update_for_unicast(channel.get()).unwrap(),
-                               })
+                               if let Ok(msg) = self.get_channel_update_for_unicast(channel.get()) {
+                                       Some(events::MessageSendEvent::SendChannelUpdate {
+                                               node_id: channel.get().get_counterparty_node_id(),
+                                               msg,
+                                       })
+                               } else { None }
                        } else { None };
                        chan_restoration_res = handle_chan_restoration_locked!(self, channel_lock, channel_state, channel, updates.raa, updates.commitment_update, updates.order, None, updates.accepted_htlcs, updates.funding_broadcastable, updates.funding_locked, updates.announcement_sigs);
                        if let Some(upd) = channel_update {
        ///
        /// The `temporary_channel_id` parameter indicates which inbound channel should be accepted.
        ///
 -      /// [`Event::OpenChannelRequest`]: crate::util::events::Event::OpenChannelRequest
 -      pub fn accept_inbound_channel(&self, temporary_channel_id: &[u8; 32]) -> Result<(), APIError> {
 +      /// For inbound channels, the `user_channel_id` parameter will be provided back in
 +      /// [`Event::ChannelClosed::user_channel_id`] to allow tracking of which events correspond
 +      /// with which `accept_inbound_channel` call.
 +      ///
 +      /// [`Event::OpenChannelRequest`]: events::Event::OpenChannelRequest
 +      /// [`Event::ChannelClosed::user_channel_id`]: events::Event::ChannelClosed::user_channel_id
 +      pub fn accept_inbound_channel(&self, temporary_channel_id: &[u8; 32], user_channel_id: u64) -> Result<(), APIError> {
                let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
  
                let mut channel_state_lock = self.channel_state.lock().unwrap();
                                }
                                channel_state.pending_msg_events.push(events::MessageSendEvent::SendAcceptChannel {
                                        node_id: channel.get().get_counterparty_node_id(),
 -                                      msg: channel.get_mut().accept_inbound_channel(),
 +                                      msg: channel.get_mut().accept_inbound_channel(user_channel_id),
                                });
                        }
                        hash_map::Entry::Vacant(_) => {
                                if !self.default_configuration.manually_accept_inbound_channels {
                                        channel_state.pending_msg_events.push(events::MessageSendEvent::SendAcceptChannel {
                                                node_id: counterparty_node_id.clone(),
 -                                              msg: channel.accept_inbound_channel(),
 +                                              msg: channel.accept_inbound_channel(0),
                                        });
                                } else {
                                        let mut pending_events = self.pending_events.lock().unwrap();
                                                        counterparty_node_id: counterparty_node_id.clone(),
                                                        funding_satoshis: msg.funding_satoshis,
                                                        push_msat: msg.push_msat,
+                                                       channel_type: channel.get_channel_type().clone(),
                                                }
                                        );
                                }
                                        // channel_update here if the channel is not public, i.e. we're not sending an
                                        // announcement_signatures.
                                        log_trace!(self.logger, "Sending private initial channel_update for our counterparty on channel {}", log_bytes!(chan.get().channel_id()));
-                                       channel_state.pending_msg_events.push(events::MessageSendEvent::SendChannelUpdate {
-                                               node_id: counterparty_node_id.clone(),
-                                               msg: self.get_channel_update_for_unicast(chan.get()).unwrap(),
-                                       });
+                                       if let Ok(msg) = self.get_channel_update_for_unicast(chan.get()) {
+                                               channel_state.pending_msg_events.push(events::MessageSendEvent::SendChannelUpdate {
+                                                       node_id: counterparty_node_id.clone(),
+                                                       msg,
+                                               });
+                                       }
                                }
                                Ok(())
                        },
                                        match pending_forward_info {
                                                PendingHTLCStatus::Forward(PendingHTLCInfo { ref incoming_shared_secret, .. }) => {
                                                        let reason = if (error_code & 0x1000) != 0 {
-                                                               if let Ok(upd) = self.get_channel_update_for_unicast(chan) {
-                                                                       onion_utils::build_first_hop_failure_packet(incoming_shared_secret, error_code, &{
-                                                                               let mut res = Vec::with_capacity(8 + 128);
-                                                                               // TODO: underspecified, follow https://github.com/lightningnetwork/lightning-rfc/issues/791
-                                                                               if error_code == 0x1000 | 20 {
-                                                                                       res.extend_from_slice(&byte_utils::be16_to_array(0));
-                                                                               }
-                                                                               res.extend_from_slice(&upd.encode_with_len()[..]);
-                                                                               res
-                                                                       }[..])
-                                                               } else {
-                                                                       // The only case where we'd be unable to
-                                                                       // successfully get a channel update is if the
-                                                                       // channel isn't in the fully-funded state yet,
-                                                                       // implying our counterparty is trying to route
-                                                                       // payments over the channel back to themselves
-                                                                       // (because no one else should know the short_id
-                                                                       // is a lightning channel yet). We should have
-                                                                       // no problem just calling this
-                                                                       // unknown_next_peer (0x4000|10).
-                                                                       onion_utils::build_first_hop_failure_packet(incoming_shared_secret, 0x4000|10, &[])
-                                                               }
+                                                               let (real_code, error_data) = self.get_htlc_inbound_temp_fail_err_and_data(error_code, chan);
+                                                               onion_utils::build_first_hop_failure_packet(incoming_shared_secret, real_code, &error_data)
                                                        } else {
                                                                onion_utils::build_first_hop_failure_packet(incoming_shared_secret, error_code, &[])
                                                        };
                                                // If the channel is in a usable state (ie the channel is not being shut
                                                // down), send a unicast channel_update to our counterparty to make sure
                                                // they have the latest channel parameters.
-                                               channel_update = Some(events::MessageSendEvent::SendChannelUpdate {
-                                                       node_id: chan.get().get_counterparty_node_id(),
-                                                       msg: self.get_channel_update_for_unicast(chan.get()).unwrap(),
-                                               });
+                                               if let Ok(msg) = self.get_channel_update_for_unicast(chan.get()) {
+                                                       channel_update = Some(events::MessageSendEvent::SendChannelUpdate {
+                                                               node_id: chan.get().get_counterparty_node_id(),
+                                                               msg,
+                                                       });
+                                               }
                                        }
                                        let need_lnd_workaround = chan.get_mut().workaround_lnd_bug_4006.take();
                                        chan_restoration_res = handle_chan_restoration_locked!(
@@@ -5547,12 -5555,6 +5597,12 @@@ wher
                let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
                self.do_chain_event(Some(height), |channel| channel.transactions_confirmed(&block_hash, height, txdata, self.genesis_hash.clone(), self.get_our_node_id(), &self.logger)
                        .map(|(a, b)| (a, Vec::new(), b)));
 +
 +              let last_best_block_height = self.best_block.read().unwrap().height();
 +              if height < last_best_block_height {
 +                      let timestamp = self.highest_seen_timestamp.load(Ordering::Acquire);
 +                      self.do_chain_event(Some(last_best_block_height), |channel| channel.best_block_updated(last_best_block_height, timestamp as u32, self.genesis_hash.clone(), self.get_our_node_id(), &self.logger));
 +              }
        }
  
        fn best_block_updated(&self, header: &BlockHeader, height: u32) {
@@@ -5659,20 -5661,21 +5709,21 @@@ wher
                                let res = f(channel);
                                if let Ok((funding_locked_opt, mut timed_out_pending_htlcs, announcement_sigs)) = res {
                                        for (source, payment_hash) in timed_out_pending_htlcs.drain(..) {
-                                               let chan_update = self.get_channel_update_for_unicast(&channel).map(|u| u.encode_with_len()).unwrap(); // Cannot add/recv HTLCs before we have a short_id so unwrap is safe
-                                               timed_out_htlcs.push((source, payment_hash,  HTLCFailReason::Reason {
-                                                       failure_code: 0x1000 | 14, // expiry_too_soon, or at least it is now
-                                                       data: chan_update,
+                                               let (failure_code, data) = self.get_htlc_inbound_temp_fail_err_and_data(0x1000|14 /* expiry_too_soon */, &channel);
+                                               timed_out_htlcs.push((source, payment_hash, HTLCFailReason::Reason {
+                                                       failure_code, data,
                                                }));
                                        }
                                        if let Some(funding_locked) = funding_locked_opt {
                                                send_funding_locked!(short_to_id, pending_msg_events, channel, funding_locked);
                                                if channel.is_usable() {
                                                        log_trace!(self.logger, "Sending funding_locked with private initial channel_update for our counterparty on channel {}", log_bytes!(channel.channel_id()));
-                                                       pending_msg_events.push(events::MessageSendEvent::SendChannelUpdate {
-                                                               node_id: channel.get_counterparty_node_id(),
-                                                               msg: self.get_channel_update_for_unicast(channel).unwrap(),
-                                                       });
+                                                       if let Ok(msg) = self.get_channel_update_for_unicast(channel) {
+                                                               pending_msg_events.push(events::MessageSendEvent::SendChannelUpdate {
+                                                                       node_id: channel.get_counterparty_node_id(),
+                                                                       msg,
+                                                               });
+                                                       }
                                                } else {
                                                        log_trace!(self.logger, "Sending funding_locked WITHOUT channel_update for {}", log_bytes!(channel.channel_id()));
                                                }
@@@ -5941,7 -5944,6 +5992,7 @@@ impl<Signer: Sign, M: Deref , T: Deref 
                                        &events::MessageSendEvent::SendChannelRangeQuery { .. } => false,
                                        &events::MessageSendEvent::SendShortIdsQuery { .. } => false,
                                        &events::MessageSendEvent::SendReplyChannelRange { .. } => false,
 +                                      &events::MessageSendEvent::SendGossipTimestampFilter { .. } => false,
                                }
                        });
                }
                                }
                        }
                } else {
+                       {
+                               // First check if we can advance the channel type and try again.
+                               let mut channel_state = self.channel_state.lock().unwrap();
+                               if let Some(chan) = channel_state.by_id.get_mut(&msg.channel_id) {
+                                       if chan.get_counterparty_node_id() != *counterparty_node_id {
+                                               return;
+                                       }
+                                       if let Ok(msg) = chan.maybe_handle_error_without_close(self.genesis_hash) {
+                                               channel_state.pending_msg_events.push(events::MessageSendEvent::SendOpenChannel {
+                                                       node_id: *counterparty_node_id,
+                                                       msg,
+                                               });
+                                               return;
+                                       }
+                               }
+                       }
                        // Untrusted messages from peer, we throw away the error if id points to a non-existent channel
                        let _ = self.force_close_channel_with_peer(&msg.channel_id, Some(counterparty_node_id), Some(&msg.data));
                }
@@@ -6103,6 -6122,7 +6171,7 @@@ impl_writeable_tlv_based!(ChannelCounte
  impl_writeable_tlv_based!(ChannelDetails, {
        (1, inbound_scid_alias, option),
        (2, channel_id, required),
+       (3, channel_type, option),
        (4, counterparty, required),
        (6, funding_txo, option),
        (8, short_channel_id, option),
@@@ -6282,7 -6302,6 +6351,7 @@@ impl Readable for ClaimableHTLC 
                };
                Ok(Self {
                        prev_hop: prev_hop.0.unwrap(),
 +                      timer_ticks: 0,
                        value,
                        onion_payload,
                        cltv_expiry,
@@@ -7357,8 -7376,8 +7426,8 @@@ mod tests 
  
                let payer_pubkey = nodes[0].node.get_our_node_id();
                let payee_pubkey = nodes[1].node.get_our_node_id();
 -              nodes[0].node.peer_connected(&payee_pubkey, &msgs::Init { features: InitFeatures::known() });
 -              nodes[1].node.peer_connected(&payer_pubkey, &msgs::Init { features: InitFeatures::known() });
 +              nodes[0].node.peer_connected(&payee_pubkey, &msgs::Init { features: InitFeatures::known(), remote_network_address: None });
 +              nodes[1].node.peer_connected(&payer_pubkey, &msgs::Init { features: InitFeatures::known(), remote_network_address: None });
  
                let _chan = create_chan_between_nodes(&nodes[0], &nodes[1], InitFeatures::known(), InitFeatures::known());
                let route_params = RouteParameters {
  
                let payer_pubkey = nodes[0].node.get_our_node_id();
                let payee_pubkey = nodes[1].node.get_our_node_id();
 -              nodes[0].node.peer_connected(&payee_pubkey, &msgs::Init { features: InitFeatures::known() });
 -              nodes[1].node.peer_connected(&payer_pubkey, &msgs::Init { features: InitFeatures::known() });
 +              nodes[0].node.peer_connected(&payee_pubkey, &msgs::Init { features: InitFeatures::known(), remote_network_address: None });
 +              nodes[1].node.peer_connected(&payer_pubkey, &msgs::Init { features: InitFeatures::known(), remote_network_address: None });
  
                let _chan = create_chan_between_nodes(&nodes[0], &nodes[1], InitFeatures::known(), InitFeatures::known());
                let route_params = RouteParameters {
@@@ -7567,8 -7586,8 +7636,8 @@@ pub mod bench 
                });
                let node_b_holder = NodeHolder { node: &node_b };
  
 -              node_a.peer_connected(&node_b.get_our_node_id(), &Init { features: InitFeatures::known() });
 -              node_b.peer_connected(&node_a.get_our_node_id(), &Init { features: InitFeatures::known() });
 +              node_a.peer_connected(&node_b.get_our_node_id(), &Init { features: InitFeatures::known(), remote_network_address: None });
 +              node_b.peer_connected(&node_a.get_our_node_id(), &Init { features: InitFeatures::known(), remote_network_address: None });
                node_a.create_channel(node_b.get_our_node_id(), 8_000_000, 100_000_000, 42, None).unwrap();
                node_b.handle_open_channel(&node_a.get_our_node_id(), InitFeatures::known(), &get_event_msg!(node_a_holder, MessageSendEvent::SendOpenChannel, node_b.get_our_node_id()));
                node_a.handle_accept_channel(&node_b.get_our_node_id(), InitFeatures::known(), &get_event_msg!(node_b_holder, MessageSendEvent::SendAcceptChannel, node_a.get_our_node_id()));
index 7088149515d529877659716882f56da06bb80d30,d65bf2ee3445303d60e52c7f9bcfa7e23f7c92fd..aa6fdc97b4a897e1034fc3524880c9aa46b1ed44
@@@ -14,7 -14,7 +14,7 @@@ use chain::{BestBlock, Confirm, Listen
  use chain::channelmonitor::ChannelMonitor;
  use chain::transaction::OutPoint;
  use ln::{PaymentPreimage, PaymentHash, PaymentSecret};
- use ln::channelmanager::{ChainParameters, ChannelManager, ChannelManagerReadArgs, RAACommitmentOrder, PaymentSendFailure, PaymentId};
+ use ln::channelmanager::{ChainParameters, ChannelManager, ChannelManagerReadArgs, RAACommitmentOrder, PaymentSendFailure, PaymentId, MIN_CLTV_EXPIRY_DELTA};
  use routing::network_graph::{NetGraphMsgHandler, NetworkGraph};
  use routing::router::{PaymentParameters, Route, get_route};
  use ln::features::{InitFeatures, InvoiceFeatures};
@@@ -396,6 -396,26 +396,26 @@@ macro_rules! get_event_msg 
        }
  }
  
+ /// Get an error message from the pending events queue.
+ #[macro_export]
+ macro_rules! get_err_msg {
+       ($node: expr, $node_id: expr) => {
+               {
+                       let events = $node.node.get_and_clear_pending_msg_events();
+                       assert_eq!(events.len(), 1);
+                       match events[0] {
+                               $crate::util::events::MessageSendEvent::HandleError {
+                                       action: $crate::ln::msgs::ErrorAction::SendErrorMessage { ref msg }, ref node_id
+                               } => {
+                                       assert_eq!(*node_id, $node_id);
+                                       (*msg).clone()
+                               },
+                               _ => panic!("Unexpected event"),
+                       }
+               }
+       }
+ }
  /// Get a specific event from the pending events queue.
  #[macro_export]
  macro_rules! get_event {
@@@ -1828,7 -1848,7 +1848,7 @@@ pub fn test_default_channel_config() -
        let mut default_config = UserConfig::default();
        // Set cltv_expiry_delta slightly lower to keep the final CLTV values inside one byte in our
        // tests so that our script-length checks don't fail (see ACCEPTED_HTLC_SCRIPT_WEIGHT).
-       default_config.channel_options.cltv_expiry_delta = 6*6;
+       default_config.channel_options.cltv_expiry_delta = MIN_CLTV_EXPIRY_DELTA;
        default_config.channel_options.announced_channel = true;
        default_config.peer_channel_config_limits.force_announced_channel_preference = false;
        // When most of our tests were written, the default HTLC minimum was fixed at 1000.
@@@ -1877,8 -1897,8 +1897,8 @@@ pub fn create_network<'a, 'b: 'a, 'c: '
  
        for i in 0..node_count {
                for j in (i+1)..node_count {
 -                      nodes[i].node.peer_connected(&nodes[j].node.get_our_node_id(), &msgs::Init { features: cfgs[j].features.clone() });
 -                      nodes[j].node.peer_connected(&nodes[i].node.get_our_node_id(), &msgs::Init { features: cfgs[i].features.clone() });
 +                      nodes[i].node.peer_connected(&nodes[j].node.get_our_node_id(), &msgs::Init { features: cfgs[j].features.clone(), remote_network_address: None });
 +                      nodes[j].node.peer_connected(&nodes[i].node.get_our_node_id(), &msgs::Init { features: cfgs[i].features.clone(), remote_network_address: None });
                }
        }
  
@@@ -2137,9 -2157,9 +2157,9 @@@ macro_rules! handle_chan_reestablish_ms
  /// pending_htlc_adds includes both the holding cell and in-flight update_add_htlcs, whereas
  /// for claims/fails they are separated out.
  pub fn reconnect_nodes<'a, 'b, 'c>(node_a: &Node<'a, 'b, 'c>, node_b: &Node<'a, 'b, 'c>, send_funding_locked: (bool, bool), pending_htlc_adds: (i64, i64), pending_htlc_claims: (usize, usize), pending_htlc_fails: (usize, usize), pending_cell_htlc_claims: (usize, usize), pending_cell_htlc_fails: (usize, usize), pending_raa: (bool, bool))  {
 -      node_a.node.peer_connected(&node_b.node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() });
 +      node_a.node.peer_connected(&node_b.node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty(), remote_network_address: None });
        let reestablish_1 = get_chan_reestablish_msgs!(node_a, node_b);
 -      node_b.node.peer_connected(&node_a.node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() });
 +      node_b.node.peer_connected(&node_a.node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty(), remote_network_address: None });
        let reestablish_2 = get_chan_reestablish_msgs!(node_b, node_a);
  
        if send_funding_locked.0 {
index 66c1b20d2de0179a83a414069a678921f01b8aa3,996d64f377cc8e4a60bef0c152cfe0e13ab0f8bd..d3d54467a2f421551d381be64e56d76c00f639cd
  
  use chain::Watch;
  use chain::channelmonitor::ChannelMonitor;
+ use chain::keysinterface::{Recipient, KeysInterface};
  use ln::channelmanager::{ChannelManager, ChannelManagerReadArgs, MIN_CLTV_EXPIRY_DELTA};
  use routing::network_graph::RoutingFees;
  use routing::router::{RouteHint, RouteHintHop};
  use ln::features::InitFeatures;
  use ln::msgs;
- use ln::msgs::{ChannelMessageHandler, RoutingMessageHandler};
+ use ln::msgs::{ChannelMessageHandler, RoutingMessageHandler, OptionalField};
  use util::enforcing_trait_impls::EnforcingSigner;
  use util::events::{Event, MessageSendEvent, MessageSendEventsProvider};
  use util::config::UserConfig;
@@@ -30,7 -31,12 +31,12 @@@ use core::default::Default
  
  use ln::functional_test_utils::*;
  
+ use bitcoin::blockdata::constants::genesis_block;
  use bitcoin::hash_types::BlockHash;
+ use bitcoin::hashes::Hash;
+ use bitcoin::hashes::sha256d::Hash as Sha256dHash;
+ use bitcoin::network::constants::Network;
+ use bitcoin::secp256k1::Secp256k1;
  
  #[test]
  fn test_priv_forwarding_rejection() {
        check_added_monitors!(nodes[1], 2);
        nodes[1].node = &nodes_1_deserialized;
  
 -      nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: InitFeatures::known() });
 -      nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() });
 +      nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: InitFeatures::known(), remote_network_address: None });
 +      nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty(), remote_network_address: None });
        let as_reestablish = get_event_msg!(nodes[0], MessageSendEvent::SendChannelReestablish, nodes[1].node.get_our_node_id());
        let bs_reestablish = get_event_msg!(nodes[1], MessageSendEvent::SendChannelReestablish, nodes[0].node.get_our_node_id());
        nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &as_reestablish);
        get_event_msg!(nodes[0], MessageSendEvent::SendChannelUpdate, nodes[1].node.get_our_node_id());
        get_event_msg!(nodes[1], MessageSendEvent::SendChannelUpdate, nodes[0].node.get_our_node_id());
  
 -      nodes[1].node.peer_connected(&nodes[2].node.get_our_node_id(), &msgs::Init { features: InitFeatures::known() });
 -      nodes[2].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() });
 +      nodes[1].node.peer_connected(&nodes[2].node.get_our_node_id(), &msgs::Init { features: InitFeatures::known(), remote_network_address: None });
 +      nodes[2].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty(), remote_network_address: None });
        let bs_reestablish = get_event_msg!(nodes[1], MessageSendEvent::SendChannelReestablish, nodes[2].node.get_our_node_id());
        let cs_reestablish = get_event_msg!(nodes[2], MessageSendEvent::SendChannelReestablish, nodes[1].node.get_our_node_id());
        nodes[2].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &bs_reestablish);
@@@ -284,3 -290,254 +290,254 @@@ fn test_routed_scid_alias() 
        // Note that because we never send a duplicate funding_locked we can't send a payment through
        // the 0xdeadbeef SCID alias.
  }
+ #[test]
+ fn test_scid_privacy_on_pub_channel() {
 +       // Tests that we reject the scid_privacy channel type on public channels, and that we
 +       // never try to negotiate it ourselves when opening a public channel.
+       let chanmon_cfgs = create_chanmon_cfgs(2);
+       let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+       let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+       let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+       let mut scid_privacy_cfg = test_default_channel_config();
+       scid_privacy_cfg.channel_options.announced_channel = true;
+       scid_privacy_cfg.own_channel_config.negotiate_scid_privacy = true;
+       nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 42, Some(scid_privacy_cfg)).unwrap();
+       let mut open_channel = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
+       assert!(!open_channel.channel_type.as_ref().unwrap().supports_scid_privacy()); // we ignore `negotiate_scid_privacy` on pub channels
+       open_channel.channel_type.as_mut().unwrap().set_scid_privacy_required();
+       assert_eq!(open_channel.channel_flags & 1, 1); // The `announce_channel` bit is set.
+       nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), InitFeatures::known(), &open_channel);
+       let err = get_err_msg!(nodes[1], nodes[0].node.get_our_node_id());
+       assert_eq!(err.data, "SCID Alias/Privacy Channel Type cannot be set on a public channel");
+ }
+ #[test]
+ fn test_scid_privacy_negotiation() {
 +       // Tests negotiation of the SCID alias feature, including falling back to non-SCID-alias
 +       // if our counterparty doesn't support it.
+       let chanmon_cfgs = create_chanmon_cfgs(2);
+       let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+       let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+       let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+       let mut scid_privacy_cfg = test_default_channel_config();
+       scid_privacy_cfg.channel_options.announced_channel = false;
+       scid_privacy_cfg.own_channel_config.negotiate_scid_privacy = true;
+       nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 42, Some(scid_privacy_cfg)).unwrap();
+       let init_open_channel = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
+       assert!(init_open_channel.channel_type.as_ref().unwrap().supports_scid_privacy());
+       assert!(nodes[0].node.list_channels()[0].channel_type.is_none()); // channel_type is none until counterparty accepts
+       // now simulate nodes[1] responding with an Error message, indicating it doesn't understand
+       // SCID alias.
+       nodes[0].node.handle_error(&nodes[1].node.get_our_node_id(), &msgs::ErrorMessage {
+               channel_id: init_open_channel.temporary_channel_id,
+               data: "Yo, no SCID aliases, no privacy here!".to_string()
+       });
+       assert!(nodes[0].node.list_channels()[0].channel_type.is_none()); // channel_type is none until counterparty accepts
+       let second_open_channel = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
+       assert!(!second_open_channel.channel_type.as_ref().unwrap().supports_scid_privacy());
+       nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), InitFeatures::known(), &second_open_channel);
+       nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), InitFeatures::known(), &get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id()));
+       let events = nodes[0].node.get_and_clear_pending_events();
+       assert_eq!(events.len(), 1);
+       match events[0] {
+               Event::FundingGenerationReady { .. } => {},
+               _ => panic!("Unexpected event"),
+       }
+       assert!(!nodes[0].node.list_channels()[0].channel_type.as_ref().unwrap().supports_scid_privacy());
+       assert!(!nodes[1].node.list_channels()[0].channel_type.as_ref().unwrap().supports_scid_privacy());
+ }
+ #[test]
+ fn test_inbound_scid_privacy() {
+       // Tests accepting channels with the scid_privacy feature and rejecting forwards using the
+       // channel's real SCID as required by the channel feature.
+       let chanmon_cfgs = create_chanmon_cfgs(3);
+       let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
+       let mut accept_forward_cfg = test_default_channel_config();
+       accept_forward_cfg.accept_forwards_to_priv_channels = true;
+       let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, Some(accept_forward_cfg), None]);
+       let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
+       create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 0, InitFeatures::known(), InitFeatures::known());
+       let mut no_announce_cfg = test_default_channel_config();
+       no_announce_cfg.channel_options.announced_channel = false;
+       no_announce_cfg.own_channel_config.negotiate_scid_privacy = true;
+       nodes[1].node.create_channel(nodes[2].node.get_our_node_id(), 100_000, 10_000, 42, Some(no_announce_cfg)).unwrap();
+       let mut open_channel = get_event_msg!(nodes[1], MessageSendEvent::SendOpenChannel, nodes[2].node.get_our_node_id());
+       assert!(open_channel.channel_type.as_ref().unwrap().requires_scid_privacy());
+       nodes[2].node.handle_open_channel(&nodes[1].node.get_our_node_id(), InitFeatures::known(), &open_channel);
+       let accept_channel = get_event_msg!(nodes[2], MessageSendEvent::SendAcceptChannel, nodes[1].node.get_our_node_id());
+       nodes[1].node.handle_accept_channel(&nodes[2].node.get_our_node_id(), InitFeatures::known(), &accept_channel);
+       let (temporary_channel_id, tx, _) = create_funding_transaction(&nodes[1], 100_000, 42);
+       nodes[1].node.funding_transaction_generated(&temporary_channel_id, tx.clone()).unwrap();
+       nodes[2].node.handle_funding_created(&nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendFundingCreated, nodes[2].node.get_our_node_id()));
+       check_added_monitors!(nodes[2], 1);
+       let cs_funding_signed = get_event_msg!(nodes[2], MessageSendEvent::SendFundingSigned, nodes[1].node.get_our_node_id());
+       nodes[1].node.handle_funding_signed(&nodes[2].node.get_our_node_id(), &cs_funding_signed);
+       check_added_monitors!(nodes[1], 1);
+       let conf_height = core::cmp::max(nodes[1].best_block_info().1 + 1, nodes[2].best_block_info().1 + 1);
+       confirm_transaction_at(&nodes[1], &tx, conf_height);
+       connect_blocks(&nodes[1], CHAN_CONFIRM_DEPTH - 1);
+       confirm_transaction_at(&nodes[2], &tx, conf_height);
+       connect_blocks(&nodes[2], CHAN_CONFIRM_DEPTH - 1);
+       let bs_funding_locked = get_event_msg!(nodes[1], MessageSendEvent::SendFundingLocked, nodes[2].node.get_our_node_id());
+       nodes[1].node.handle_funding_locked(&nodes[2].node.get_our_node_id(), &get_event_msg!(nodes[2], MessageSendEvent::SendFundingLocked, nodes[1].node.get_our_node_id()));
+       let bs_update = get_event_msg!(nodes[1], MessageSendEvent::SendChannelUpdate, nodes[2].node.get_our_node_id());
+       nodes[2].node.handle_funding_locked(&nodes[1].node.get_our_node_id(), &bs_funding_locked);
+       let cs_update = get_event_msg!(nodes[2], MessageSendEvent::SendChannelUpdate, nodes[1].node.get_our_node_id());
+       nodes[1].node.handle_channel_update(&nodes[2].node.get_our_node_id(), &cs_update);
+       nodes[2].node.handle_channel_update(&nodes[1].node.get_our_node_id(), &bs_update);
+       // Now we can pay just fine using the SCID alias nodes[2] gave to nodes[1]...
+       let last_hop = nodes[2].node.list_usable_channels();
+       let mut hop_hints = vec![RouteHint(vec![RouteHintHop {
+               src_node_id: nodes[1].node.get_our_node_id(),
+               short_channel_id: last_hop[0].inbound_scid_alias.unwrap(),
+               fees: RoutingFees {
+                       base_msat: last_hop[0].counterparty.forwarding_info.as_ref().unwrap().fee_base_msat,
+                       proportional_millionths: last_hop[0].counterparty.forwarding_info.as_ref().unwrap().fee_proportional_millionths,
+               },
+               cltv_expiry_delta: last_hop[0].counterparty.forwarding_info.as_ref().unwrap().cltv_expiry_delta,
+               htlc_maximum_msat: None,
+               htlc_minimum_msat: None,
+       }])];
+       let (route, payment_hash, payment_preimage, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[2], hop_hints.clone(), 100_000, 42);
+       assert_eq!(route.paths[0][1].short_channel_id, last_hop[0].inbound_scid_alias.unwrap());
+       nodes[0].node.send_payment(&route, payment_hash, &Some(payment_secret)).unwrap();
+       check_added_monitors!(nodes[0], 1);
+       pass_along_route(&nodes[0], &[&[&nodes[1], &nodes[2]]], 100_000, payment_hash, payment_secret);
+       claim_payment(&nodes[0], &[&nodes[1], &nodes[2]], payment_preimage);
+       // ... but if we try to pay using the real SCID, nodes[1] will just tell us they don't know
+       // what channel we're talking about.
+       hop_hints[0].0[0].short_channel_id = last_hop[0].short_channel_id.unwrap();
+       let (route_2, payment_hash_2, _, payment_secret_2) = get_route_and_payment_hash!(nodes[0], nodes[2], hop_hints, 100_000, 42);
+       assert_eq!(route_2.paths[0][1].short_channel_id, last_hop[0].short_channel_id.unwrap());
+       nodes[0].node.send_payment(&route_2, payment_hash_2, &Some(payment_secret_2)).unwrap();
+       check_added_monitors!(nodes[0], 1);
+       let payment_event = SendEvent::from_node(&nodes[0]);
+       assert_eq!(nodes[1].node.get_our_node_id(), payment_event.node_id);
+       nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
+       commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, true, true);
+       nodes[1].logger.assert_log_regex("lightning::ln::channelmanager".to_string(), regex::Regex::new(r"Refusing to forward over real channel SCID as our counterparty requested").unwrap(), 1);
+       let mut updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
+       nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fail_htlcs[0]);
+       commitment_signed_dance!(nodes[0], nodes[1], updates.commitment_signed, false);
+       expect_payment_failed_conditions!(nodes[0], payment_hash_2, false,
+               PaymentFailedConditions::new().blamed_scid(last_hop[0].short_channel_id.unwrap())
+                       .blamed_chan_closed(true).expected_htlc_error_data(0x4000|10, &[0; 0]));
+ }
+ #[test]
+ fn test_scid_alias_returned() {
+       // Tests that when we fail an HTLC (in this case due to attempting to forward more than the
+       // channel's available balance) we use the correct (in this case the aliased) SCID in the
+       // channel_update which is returned in the onion to the sender.
+       let chanmon_cfgs = create_chanmon_cfgs(3);
+       let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
+       let mut accept_forward_cfg = test_default_channel_config();
+       accept_forward_cfg.accept_forwards_to_priv_channels = true;
+       let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, Some(accept_forward_cfg), None]);
+       let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
+       create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 10_000_000, 0, InitFeatures::known(), InitFeatures::known());
+       create_unannounced_chan_between_nodes_with_value(&nodes, 1, 2, 10_000, 0, InitFeatures::known(), InitFeatures::known());
+       let last_hop = nodes[2].node.list_usable_channels();
+       let mut hop_hints = vec![RouteHint(vec![RouteHintHop {
+               src_node_id: nodes[1].node.get_our_node_id(),
+               short_channel_id: last_hop[0].inbound_scid_alias.unwrap(),
+               fees: RoutingFees {
+                       base_msat: last_hop[0].counterparty.forwarding_info.as_ref().unwrap().fee_base_msat,
+                       proportional_millionths: last_hop[0].counterparty.forwarding_info.as_ref().unwrap().fee_proportional_millionths,
+               },
+               cltv_expiry_delta: last_hop[0].counterparty.forwarding_info.as_ref().unwrap().cltv_expiry_delta,
+               htlc_maximum_msat: None,
+               htlc_minimum_msat: None,
+       }])];
+       let (mut route, payment_hash, _, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[2], hop_hints, 10_000, 42);
+       assert_eq!(route.paths[0][1].short_channel_id, nodes[2].node.list_usable_channels()[0].inbound_scid_alias.unwrap());
+       route.paths[0][1].fee_msat = 10_000_000; // Overshoot the last channel's value
+       // Route the HTLC through to the destination.
+       nodes[0].node.send_payment(&route, payment_hash, &Some(payment_secret)).unwrap();
+       check_added_monitors!(nodes[0], 1);
+       let as_updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
+       nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &as_updates.update_add_htlcs[0]);
+       commitment_signed_dance!(nodes[1], nodes[0], &as_updates.commitment_signed, false, true);
+       expect_pending_htlcs_forwardable!(nodes[1]);
+       expect_pending_htlcs_forwardable!(nodes[1]);
+       check_added_monitors!(nodes[1], 1);
+       let bs_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
+       nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &bs_updates.update_fail_htlcs[0]);
+       commitment_signed_dance!(nodes[0], nodes[1], bs_updates.commitment_signed, false, true);
+       // Build the expected channel update
+       let contents = msgs::UnsignedChannelUpdate {
+               chain_hash: genesis_block(Network::Testnet).header.block_hash(),
+               short_channel_id: last_hop[0].inbound_scid_alias.unwrap(),
+               timestamp: 21,
+               flags: 1,
+               cltv_expiry_delta: accept_forward_cfg.channel_options.cltv_expiry_delta,
+               htlc_minimum_msat: 1_000,
+               htlc_maximum_msat: OptionalField::Present(1_000_000), // Defaults to 10% of the channel value
+               fee_base_msat: last_hop[0].counterparty.forwarding_info.as_ref().unwrap().fee_base_msat,
+               fee_proportional_millionths: last_hop[0].counterparty.forwarding_info.as_ref().unwrap().fee_proportional_millionths,
+               excess_data: Vec::new(),
+       };
+       let msg_hash = Sha256dHash::hash(&contents.encode()[..]);
+       let signature = Secp256k1::new().sign(&hash_to_message!(&msg_hash[..]), &nodes[1].keys_manager.get_node_secret(Recipient::Node).unwrap());
+       let msg = msgs::ChannelUpdate { signature, contents };
+       expect_payment_failed_conditions!(nodes[0], payment_hash, false,
+               PaymentFailedConditions::new().blamed_scid(last_hop[0].inbound_scid_alias.unwrap())
+                       .blamed_chan_closed(false).expected_htlc_error_data(0x1000|7, &msg.encode_with_len()));
+       route.paths[0][1].fee_msat = 10_000; // Reset to the correct payment amount
+       route.paths[0][0].fee_msat = 0; // But set fee paid to the middle hop to 0
+       // Route the HTLC through to the destination.
+       nodes[0].node.send_payment(&route, payment_hash, &Some(payment_secret)).unwrap();
+       check_added_monitors!(nodes[0], 1);
+       let as_updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
+       nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &as_updates.update_add_htlcs[0]);
+       commitment_signed_dance!(nodes[1], nodes[0], &as_updates.commitment_signed, false, true);
+       let bs_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
+       nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &bs_updates.update_fail_htlcs[0]);
+       commitment_signed_dance!(nodes[0], nodes[1], bs_updates.commitment_signed, false, true);
+       let mut err_data = Vec::new();
+       err_data.extend_from_slice(&10_000u64.to_be_bytes());
+       err_data.extend_from_slice(&msg.encode_with_len());
+       expect_payment_failed_conditions!(nodes[0], payment_hash, false,
+               PaymentFailedConditions::new().blamed_scid(last_hop[0].inbound_scid_alias.unwrap())
+                       .blamed_chan_closed(false).expected_htlc_error_data(0x1000|12, &err_data));
+ }
index ed3c07595b0ebc39cb5d0d36506deb4c53e93ac2,34d4cd32fa18672903295ed54addaa1d173fbfa6..9f202557ff7066d485e59c0ce0a9710ae687e1be
@@@ -332,9 -332,11 +332,9 @@@ struct RouteGraphNode 
  impl cmp::Ord for RouteGraphNode {
        fn cmp(&self, other: &RouteGraphNode) -> cmp::Ordering {
                let other_score = cmp::max(other.lowest_fee_to_peer_through_node, other.path_htlc_minimum_msat)
 -                      .checked_add(other.path_penalty_msat)
 -                      .unwrap_or_else(|| u64::max_value());
 +                      .saturating_add(other.path_penalty_msat);
                let self_score = cmp::max(self.lowest_fee_to_peer_through_node, self.path_htlc_minimum_msat)
 -                      .checked_add(self.path_penalty_msat)
 -                      .unwrap_or_else(|| u64::max_value());
 +                      .saturating_add(self.path_penalty_msat);
                other_score.cmp(&self_score).then_with(|| other.node_id.cmp(&self.node_id))
        }
  }
@@@ -493,10 -495,6 +493,10 @@@ impl<'a> PaymentPath<'a> 
                self.hops.last().unwrap().0.fee_msat
        }
  
 +      fn get_path_penalty_msat(&self) -> u64 {
 +              self.hops.first().map(|h| h.0.path_penalty_msat).unwrap_or(u64::max_value())
 +      }
 +
        fn get_total_fee_paid_msat(&self) -> u64 {
                if self.hops.len() < 1 {
                        return 0;
@@@ -647,7 -645,7 +647,7 @@@ where L::Target: Logger 
  pub(crate) fn get_route<L: Deref, S: Score>(
        our_node_pubkey: &PublicKey, payment_params: &PaymentParameters, network_graph: &ReadOnlyNetworkGraph,
        first_hops: Option<&[&ChannelDetails]>, final_value_msat: u64, final_cltv_expiry_delta: u32,
 -      logger: L, scorer: &S, _random_seed_bytes: &[u8; 32]
 +      logger: L, scorer: &S, random_seed_bytes: &[u8; 32]
  ) -> Result<Route, LightningError>
  where L::Target: Logger {
        let payee_node_id = NodeId::from_pubkey(&payment_params.payee_pubkey);
                        }
                }
        }
 +      if payment_params.max_total_cltv_expiry_delta <= final_cltv_expiry_delta {
 +              return Err(LightningError{err: "Can't find a route where the maximum total CLTV expiry delta is below the final CLTV expiry.".to_owned(), action: ErrorAction::IgnoreError});
 +      }
  
        // The general routing idea is the following:
        // 1. Fill first/last hops communicated by the caller.
        // - when we want to stop looking for new paths.
        let mut already_collected_value_msat = 0;
  
 +      for (_, channels) in first_hop_targets.iter_mut() {
 +              // Sort the first_hops channels to the same node(s) in priority order of which channel we'd
 +              // most like to use.
 +              //
 +              // First, if channels are below `recommended_value_msat`, sort them in descending order,
 +              // preferring larger channels to avoid splitting the payment into more MPP parts than is
 +              // required.
 +              //
 +              // Second, because simply always sorting in descending order would always use our largest
 +              // available outbound capacity, needlessly fragmenting our available channel capacities,
 +              // sort channels above `recommended_value_msat` in ascending order, preferring channels
 +              // which have enough, but not too much, capacity for the payment.
 +              channels.sort_unstable_by(|chan_a, chan_b| {
 +                      if chan_b.outbound_capacity_msat < recommended_value_msat || chan_a.outbound_capacity_msat < recommended_value_msat {
 +                              // Sort in descending order
 +                              chan_b.outbound_capacity_msat.cmp(&chan_a.outbound_capacity_msat)
 +                      } else {
 +                              // Sort in ascending order
 +                              chan_a.outbound_capacity_msat.cmp(&chan_b.outbound_capacity_msat)
 +                      }
 +              });
 +      }
 +
        log_trace!(logger, "Building path from {} (payee) to {} (us/payer) for value {} msat.", payment_params.payee_pubkey, our_node_pubkey, final_value_msat);
  
        macro_rules! add_entry {
                                        .entry(short_channel_id)
                                        .or_insert_with(|| $candidate.effective_capacity().as_msat());
  
 -                              // It is tricky to substract $next_hops_fee_msat from available liquidity here.
 +                              // It is tricky to subtract $next_hops_fee_msat from available liquidity here.
                                // It may be misleading because we might later choose to reduce the value transferred
                                // over these channels, and the channel which was insufficient might become sufficient.
                                // Worst case: we drop a good channel here because it can't cover the high following
                                        // In order to already account for some of the privacy enhancing random CLTV
                                        // expiry delta offset we add on top later, we subtract a rough estimate
                                        // (2*MEDIAN_HOP_CLTV_EXPIRY_DELTA) here.
 -                                      let max_total_cltv_expiry_delta = payment_params.max_total_cltv_expiry_delta
 +                                      let max_total_cltv_expiry_delta = (payment_params.max_total_cltv_expiry_delta - final_cltv_expiry_delta)
                                                .checked_sub(2*MEDIAN_HOP_CLTV_EXPIRY_DELTA)
 -                                              .unwrap_or(payment_params.max_total_cltv_expiry_delta);
 +                                              .unwrap_or(payment_params.max_total_cltv_expiry_delta - final_cltv_expiry_delta);
                                        let hop_total_cltv_delta = ($next_hops_cltv_delta as u32)
 -                                              .checked_add($candidate.cltv_expiry_delta())
 -                                              .unwrap_or(u32::max_value());
 +                                              .saturating_add($candidate.cltv_expiry_delta());
                                        let doesnt_exceed_cltv_delta_limit = hop_total_cltv_delta <= max_total_cltv_expiry_delta;
  
                                        let value_contribution_msat = cmp::min(available_value_contribution_msat, $next_hops_value_contribution);
                                                                }
                                                        }
  
 -                                                      let path_penalty_msat = $next_hops_path_penalty_msat.checked_add(
 -                                                              scorer.channel_penalty_msat(short_channel_id, amount_to_transfer_over_msat, *available_liquidity_msat,
 -                                                                      &$src_node_id, &$dest_node_id)).unwrap_or_else(|| u64::max_value());
 +                                                      let path_penalty_msat = $next_hops_path_penalty_msat.saturating_add(
 +                                                              scorer.channel_penalty_msat(short_channel_id, amount_to_transfer_over_msat,
 +                                                                      *available_liquidity_msat, &$src_node_id, &$dest_node_id));
                                                        let new_graph_node = RouteGraphNode {
                                                                node_id: $src_node_id,
                                                                lowest_fee_to_peer_through_node: total_fee_msat,
                                                        // the fees included in $next_hops_path_htlc_minimum_msat, but also
                                                        // can't use something that may decrease on future hops.
                                                        let old_cost = cmp::max(old_entry.total_fee_msat, old_entry.path_htlc_minimum_msat)
 -                                                              .checked_add(old_entry.path_penalty_msat)
 -                                                              .unwrap_or_else(|| u64::max_value());
 +                                                              .saturating_add(old_entry.path_penalty_msat);
                                                        let new_cost = cmp::max(total_fee_msat, path_htlc_minimum_msat)
 -                                                              .checked_add(path_penalty_msat)
 -                                                              .unwrap_or_else(|| u64::max_value());
 +                                                              .saturating_add(path_penalty_msat);
  
                                                        if !old_entry.was_processed && new_cost < old_cost {
                                                                targets.push(new_graph_node);
                                                .unwrap_or_else(|| CandidateRouteHop::PrivateHop { hint: hop });
                                        let capacity_msat = candidate.effective_capacity().as_msat();
                                        aggregate_next_hops_path_penalty_msat = aggregate_next_hops_path_penalty_msat
 -                                              .checked_add(scorer.channel_penalty_msat(hop.short_channel_id, final_value_msat, capacity_msat, &source, &target))
 -                                              .unwrap_or_else(|| u64::max_value());
 +                                              .saturating_add(scorer.channel_penalty_msat(hop.short_channel_id, final_value_msat, capacity_msat, &source, &target));
  
                                        aggregate_next_hops_cltv_delta = aggregate_next_hops_cltv_delta
 -                                              .checked_add(hop.cltv_expiry_delta as u32)
 -                                              .unwrap_or_else(|| u32::max_value());
 +                                              .saturating_add(hop.cltv_expiry_delta as u32);
  
                                        if !add_entry!(candidate, source, target, aggregate_next_hops_fee_msat, path_value_msat, aggregate_next_hops_path_htlc_minimum_msat, aggregate_next_hops_path_penalty_msat, aggregate_next_hops_cltv_delta) {
                                                // If this hop was not used then there is no use checking the preceding hops
        }
  
        // Sort by total fees and take the best paths.
 -      payment_paths.sort_by_key(|path| path.get_total_fee_paid_msat());
 +      payment_paths.sort_unstable_by_key(|path| path.get_total_fee_paid_msat());
        if payment_paths.len() > 50 {
                payment_paths.truncate(50);
        }
  
        // Draw multiple sufficient routes by randomly combining the selected paths.
        let mut drawn_routes = Vec::new();
 -      for i in 0..payment_paths.len() {
 +      let mut prng = ChaCha20::new(random_seed_bytes, &[0u8; 12]);
 +      let mut random_index_bytes = [0u8; ::core::mem::size_of::<usize>()];
 +
 +      let num_permutations = payment_paths.len();
 +      for _ in 0..num_permutations {
                let mut cur_route = Vec::<PaymentPath>::new();
                let mut aggregate_route_value_msat = 0;
  
                // Step (6).
 -              // TODO: real random shuffle
 -              // Currently just starts with i_th and goes up to i-1_th in a looped way.
 -              let cur_payment_paths = [&payment_paths[i..], &payment_paths[..i]].concat();
 +              // Do a Fisher-Yates shuffle to create a random permutation of the payment paths
 +              for cur_index in (1..payment_paths.len()).rev() {
 +                      prng.process_in_place(&mut random_index_bytes);
 +                      let random_index = usize::from_be_bytes(random_index_bytes).wrapping_rem(cur_index+1);
 +                      payment_paths.swap(cur_index, random_index);
 +              }
  
                // Step (7).
 -              for payment_path in cur_payment_paths {
 +              for payment_path in &payment_paths {
                        cur_route.push(payment_path.clone());
                        aggregate_route_value_msat += payment_path.get_value_msat();
                        if aggregate_route_value_msat > final_value_msat {
                                // also makes routing more reliable.
                                let mut overpaid_value_msat = aggregate_route_value_msat - final_value_msat;
  
 -                              // First, drop some expensive low-value paths entirely if possible.
 -                              // Sort by value so that we drop many really-low values first, since
 -                              // fewer paths is better: the payment is less likely to fail.
 -                              // TODO: this could also be optimized by also sorting by feerate_per_sat_routed,
 -                              // so that the sender pays less fees overall. And also htlc_minimum_msat.
 -                              cur_route.sort_by_key(|path| path.get_value_msat());
 +                              // First, we drop some expensive low-value paths entirely if possible, since fewer
 +                              // paths is better: the payment is less likely to fail. In order to do so, we sort
 +                              // by value and fall back to total fees paid, i.e., in case of equal values we
 +                              // prefer lower cost paths.
 +                              cur_route.sort_unstable_by(|a, b| {
 +                                      a.get_value_msat().cmp(&b.get_value_msat())
 +                                              // Reverse ordering for fees, so we drop higher-fee paths first
 +                                              .then_with(|| b.get_total_fee_paid_msat().saturating_add(b.get_path_penalty_msat())
 +                                                      .cmp(&a.get_total_fee_paid_msat().saturating_add(a.get_path_penalty_msat())))
 +                              });
 +
                                // We should make sure that at least 1 path left.
                                let mut paths_left = cur_route.len();
                                cur_route.retain(|path| {
                                assert!(cur_route.len() > 0);
  
                                // Step (8).
 -                              // Now, substract the overpaid value from the most-expensive path.
 +                              // Now, subtract the overpaid value from the most-expensive path.
                                // TODO: this could also be optimized by also sorting by feerate_per_sat_routed,
                                // so that the sender pays less fees overall. And also htlc_minimum_msat.
 -                              cur_route.sort_by_key(|path| { path.hops.iter().map(|hop| hop.0.candidate.fees().proportional_millionths as u64).sum::<u64>() });
 +                              cur_route.sort_unstable_by_key(|path| { path.hops.iter().map(|hop| hop.0.candidate.fees().proportional_millionths as u64).sum::<u64>() });
                                let expensive_payment_path = cur_route.first_mut().unwrap();
 -                              // We already dropped all the small channels above, meaning all the
 -                              // remaining channels are larger than remaining overpaid_value_msat.
 +
 +                              // We already dropped all the small value paths above, meaning all the
 +                              // remaining paths are larger than remaining overpaid_value_msat.
                                // Thus, this can't be negative.
                                let expensive_path_new_value_msat = expensive_payment_path.get_value_msat() - overpaid_value_msat;
                                expensive_payment_path.update_value_and_recompute_fees(expensive_path_new_value_msat);
  
        // Step (9).
        // Select the best route by lowest total fee.
 -      drawn_routes.sort_by_key(|paths| paths.iter().map(|path| path.get_total_fee_paid_msat()).sum::<u64>());
 +      drawn_routes.sort_unstable_by_key(|paths| paths.iter().map(|path| path.get_total_fee_paid_msat()).sum::<u64>());
        let mut selected_paths = Vec::<Vec<Result<RouteHop, LightningError>>>::new();
        for payment_path in drawn_routes.first().unwrap() {
                let mut path = payment_path.hops.iter().map(|(payment_hop, node_features)| {
@@@ -1597,58 -1561,45 +1597,58 @@@ fn add_random_cltv_offset(route: &mut R
        for path in route.paths.iter_mut() {
                let mut shadow_ctlv_expiry_delta_offset: u32 = 0;
  
 -              // Choose the last publicly known node as the starting point for the random walk
 -              if let Some(starting_hop) = path.iter().rev().find(|h| network_nodes.contains_key(&NodeId::from_pubkey(&h.pubkey))) {
 -                      let mut cur_node_id = NodeId::from_pubkey(&starting_hop.pubkey);
 +              // Remember the last three nodes of the random walk and avoid looping back on them.
 +              // Init with the last three nodes from the actual path, if possible.
 +              let mut nodes_to_avoid: [NodeId; 3] = [NodeId::from_pubkey(&path.last().unwrap().pubkey),
 +                      NodeId::from_pubkey(&path.get(path.len().saturating_sub(2)).unwrap().pubkey),
 +                      NodeId::from_pubkey(&path.get(path.len().saturating_sub(3)).unwrap().pubkey)];
 +
 +              // Choose the last publicly known node as the starting point for the random walk.
 +              let mut cur_hop: Option<NodeId> = None;
 +              let mut path_nonce = [0u8; 12];
 +              if let Some(starting_hop) = path.iter().rev()
 +                      .find(|h| network_nodes.contains_key(&NodeId::from_pubkey(&h.pubkey))) {
 +                              cur_hop = Some(NodeId::from_pubkey(&starting_hop.pubkey));
 +                              path_nonce.copy_from_slice(&cur_hop.unwrap().as_slice()[..12]);
 +              }
 +
 +              // Init PRNG with the path-dependent nonce, which is static for private paths.
 +              let mut prng = ChaCha20::new(random_seed_bytes, &path_nonce);
 +              let mut random_path_bytes = [0u8; ::core::mem::size_of::<usize>()];
  
 -                      // Init PRNG with path nonce
 -                      let mut path_nonce = [0u8; 12];
 -                      path_nonce.copy_from_slice(&cur_node_id.as_slice()[..12]);
 -                      let mut prng = ChaCha20::new(random_seed_bytes, &path_nonce);
 -                      let mut random_path_bytes = [0u8; ::core::mem::size_of::<usize>()];
 +              // Pick a random path length in [1 .. 3]
 +              prng.process_in_place(&mut random_path_bytes);
 +              let random_walk_length = usize::from_be_bytes(random_path_bytes).wrapping_rem(3).wrapping_add(1);
  
 -                      // Pick a random path length in [1 .. 3]
 -                      prng.process_in_place(&mut random_path_bytes);
 -                      let random_walk_length = usize::from_be_bytes(random_path_bytes).wrapping_rem(3).wrapping_add(1);
 +              for random_hop in 0..random_walk_length {
 +                      // If we don't find a suitable offset in the public network graph, we default to
 +                      // MEDIAN_HOP_CLTV_EXPIRY_DELTA.
 +                      let mut random_hop_offset = MEDIAN_HOP_CLTV_EXPIRY_DELTA;
  
 -                      for _random_hop in 0..random_walk_length {
 +                      if let Some(cur_node_id) = cur_hop {
                                if let Some(cur_node) = network_nodes.get(&cur_node_id) {
 -                                      // Randomly choose the next hop
 +                                      // Randomly choose the next unvisited hop.
                                        prng.process_in_place(&mut random_path_bytes);
 -                                      if let Some(random_channel) = usize::from_be_bytes(random_path_bytes).checked_rem(cur_node.channels.len())
 +                                      if let Some(random_channel) = usize::from_be_bytes(random_path_bytes)
 +                                              .checked_rem(cur_node.channels.len())
                                                .and_then(|index| cur_node.channels.get(index))
                                                .and_then(|id| network_channels.get(id)) {
                                                        random_channel.as_directed_from(&cur_node_id).map(|(dir_info, next_id)| {
 -                                                              dir_info.direction().map(|channel_update_info|
 -                                                                      shadow_ctlv_expiry_delta_offset = shadow_ctlv_expiry_delta_offset
 -                                                                              .checked_add(channel_update_info.cltv_expiry_delta.into())
 -                                                                              .unwrap_or(shadow_ctlv_expiry_delta_offset));
 -                                                              cur_node_id = *next_id;
 +                                                              if !nodes_to_avoid.iter().any(|x| x == next_id) {
 +                                                                      nodes_to_avoid[random_hop] = *next_id;
 +                                                                      dir_info.direction().map(|channel_update_info| {
 +                                                                              random_hop_offset = channel_update_info.cltv_expiry_delta.into();
 +                                                                              cur_hop = Some(*next_id);
 +                                                                      });
 +                                                              }
                                                        });
                                                }
                                }
                        }
 -              } else {
 -                      // If the entire path is private, choose a random offset from multiples of
 -                      // MEDIAN_HOP_CLTV_EXPIRY_DELTA
 -                      let mut prng = ChaCha20::new(random_seed_bytes, &[0u8; 8]);
 -                      let mut random_bytes = [0u8; 4];
 -                      prng.process_in_place(&mut random_bytes);
 -                      let random_walk_length = u32::from_be_bytes(random_bytes).wrapping_rem(3).wrapping_add(1);
 -                      shadow_ctlv_expiry_delta_offset = random_walk_length * MEDIAN_HOP_CLTV_EXPIRY_DELTA;
 +
 +                      shadow_ctlv_expiry_delta_offset = shadow_ctlv_expiry_delta_offset
 +                              .checked_add(random_hop_offset)
 +                              .unwrap_or(shadow_ctlv_expiry_delta_offset);
                }
  
                // Limit the total offset to reduce the worst-case locked liquidity timevalue
@@@ -1716,6 -1667,7 +1716,7 @@@ mod tests 
                                forwarding_info: None,
                        },
                        funding_txo: Some(OutPoint { txid: bitcoin::Txid::from_slice(&[0; 32]).unwrap(), index: 0 }),
+                       channel_type: None,
                        short_channel_id,
                        inbound_scid_alias: None,
                        channel_value_satoshis: 0,
                        assert_eq!(route.paths[1][0].short_channel_id, 2);
                        assert_eq!(route.paths[1][0].fee_msat, 50_000);
                }
 +
 +              {
 +                      // If we have a bunch of outbound channels to the same node, where most are not
 +                      // sufficient to pay the full payment, but one is, we should default to just using the
 +                      // one single channel that has sufficient balance, avoiding MPP.
 +                      //
 +                      // If we have several options above the 3xpayment value threshold, we should pick the
 +                      // smallest of them, avoiding further fragmenting our available outbound balance to
 +                      // this node.
 +                      let route = get_route(&our_id, &payment_params, &network_graph.read_only(), Some(&[
 +                              &get_channel_details(Some(2), nodes[0], InitFeatures::known(), 50_000),
 +                              &get_channel_details(Some(3), nodes[0], InitFeatures::known(), 50_000),
 +                              &get_channel_details(Some(5), nodes[0], InitFeatures::known(), 50_000),
 +                              &get_channel_details(Some(6), nodes[0], InitFeatures::known(), 300_000),
 +                              &get_channel_details(Some(7), nodes[0], InitFeatures::known(), 50_000),
 +                              &get_channel_details(Some(8), nodes[0], InitFeatures::known(), 50_000),
 +                              &get_channel_details(Some(9), nodes[0], InitFeatures::known(), 50_000),
 +                              &get_channel_details(Some(4), nodes[0], InitFeatures::known(), 1_000_000),
 +                      ]), 100_000, 42, Arc::clone(&logger), &scorer, &random_seed_bytes).unwrap();
 +                      assert_eq!(route.paths.len(), 1);
 +                      assert_eq!(route.paths[0].len(), 1);
 +
 +                      assert_eq!(route.paths[0][0].pubkey, nodes[0]);
 +                      assert_eq!(route.paths[0][0].short_channel_id, 6);
 +                      assert_eq!(route.paths[0][0].fee_msat, 100_000);
 +              }
        }
  
        #[test]
                        .with_max_total_cltv_expiry_delta(feasible_max_total_cltv_delta);
                let keys_manager = test_utils::TestKeysInterface::new(&[0u8; 32], Network::Testnet);
                let random_seed_bytes = keys_manager.get_secure_random_bytes();
 -              let route = get_route(&our_id, &feasible_payment_params, &network_graph, None, 100, 42, Arc::clone(&logger), &scorer, &random_seed_bytes).unwrap();
 +              let route = get_route(&our_id, &feasible_payment_params, &network_graph, None, 100, 0, Arc::clone(&logger), &scorer, &random_seed_bytes).unwrap();
                let path = route.paths[0].iter().map(|hop| hop.short_channel_id).collect::<Vec<_>>();
                assert_ne!(path.len(), 0);
  
                let fail_max_total_cltv_delta = 23;
                let fail_payment_params = PaymentParameters::from_node_id(nodes[6]).with_route_hints(last_hops(&nodes))
                        .with_max_total_cltv_expiry_delta(fail_max_total_cltv_delta);
 -              match get_route(&our_id, &fail_payment_params, &network_graph, None, 100, 42, Arc::clone(&logger), &scorer, &random_seed_bytes)
 +              match get_route(&our_id, &fail_payment_params, &network_graph, None, 100, 0, Arc::clone(&logger), &scorer, &random_seed_bytes)
                {
                        Err(LightningError { err, .. } ) => {
                                assert_eq!(err, "Failed to find a path to the given destination");
@@@ -5438,6 -5364,7 +5439,7 @@@ mod benches 
                        funding_txo: Some(OutPoint {
                                txid: bitcoin::Txid::from_slice(&[0; 32]).unwrap(), index: 0
                        }),
+                       channel_type: None,
                        short_channel_id: Some(1),
                        inbound_scid_alias: None,
                        channel_value_satoshis: 10_000_000,
                let mut routes = Vec::new();
                let mut route_endpoints = Vec::new();
                let mut seed: usize = 0xdeadbeef;
 -              'load_endpoints: for _ in 0..100 {
 +              'load_endpoints: for _ in 0..150 {
                        loop {
                                seed *= 0xdeadbeef;
                                let src = PublicKey::from_slice(nodes.keys().skip(seed % nodes.len()).next().unwrap().as_slice()).unwrap();
                        }
                }
  
 +              // Because we've changed channel scores, it's possible we'll take different routes to the
 +              // selected destinations, possibly causing us to fail because, eg, the newly-selected path
 +              // requires a too-high CLTV delta.
 +              route_endpoints.retain(|(first_hop, params, amt)| {
 +                      get_route(&payer, params, &graph.read_only(), Some(&[first_hop]), *amt, 42, &DummyLogger{}, &scorer, &random_seed_bytes).is_ok()
 +              });
 +              route_endpoints.truncate(100);
 +              assert_eq!(route_endpoints.len(), 100);
 +
                // ...then benchmark finding paths between the nodes we learned.
                let mut idx = 0;
                bench.iter(|| {
index c86df5dff693f540baca838ca77fafc05e379b99,8b5a04370cfdcef0429c6a89f972bd9d7e0011a8..ea50398b5e54ad2d2d1c691dadd31b0f3fe341fb
@@@ -17,6 -17,7 +17,7 @@@
  use chain::keysinterface::SpendableOutputDescriptor;
  use ln::channelmanager::PaymentId;
  use ln::channel::FUNDING_CONF_DEADLINE_BLOCKS;
+ use ln::features::ChannelTypeFeatures;
  use ln::msgs;
  use ln::msgs::DecodeError;
  use ln::{PaymentPreimage, PaymentHash, PaymentSecret};
@@@ -365,15 -366,11 +366,15 @@@ pub enum Event 
                /// The channel_id of the channel which has been closed. Note that on-chain transactions
                /// resolving the channel are likely still awaiting confirmation.
                channel_id: [u8; 32],
 -              /// The `user_channel_id` value passed in to [`ChannelManager::create_channel`], or 0 for
 -              /// an inbound channel. This will always be zero for objects serialized with LDK versions
 -              /// prior to 0.0.102.
 +              /// The `user_channel_id` value passed in to [`ChannelManager::create_channel`] for outbound
 +              /// channels, or to [`ChannelManager::accept_inbound_channel`] for inbound channels if
 +              /// [`UserConfig::manually_accept_inbound_channels`] config flag is set to true. Otherwise
 +              /// `user_channel_id` will be 0 for an inbound channel.
 +              /// This will always be zero for objects serialized with LDK versions prior to 0.0.102.
                ///
                /// [`ChannelManager::create_channel`]: crate::ln::channelmanager::ChannelManager::create_channel
 +              /// [`ChannelManager::accept_inbound_channel`]: crate::ln::channelmanager::ChannelManager::accept_inbound_channel
 +              /// [`UserConfig::manually_accept_inbound_channels`]: crate::util::config::UserConfig::manually_accept_inbound_channels
                user_channel_id: u64,
                /// The reason the channel was closed.
                reason: ClosureReason
                funding_satoshis: u64,
                /// Our starting balance in the channel if the request is accepted, in milli-satoshi.
                push_msat: u64,
+               /// The features that this channel will operate with. If you reject the channel, a
+               /// well-behaved counterparty may automatically re-attempt the channel with a new set of
+               /// feature flags.
+               ///
+               /// Note that if [`ChannelTypeFeatures::supports_scid_privacy`] returns true on this type,
+               /// the resulting [`ChannelManager`] will not be readable by versions of LDK prior to
+               /// 0.0.106.
+               ///
+               /// [`ChannelManager`]: crate::ln::channelmanager::ChannelManager
+               channel_type: ChannelTypeFeatures,
        },
  }
  
@@@ -913,15 -920,7 +924,15 @@@ pub enum MessageSendEvent 
                node_id: PublicKey,
                /// The reply_channel_range which should be sent.
                msg: msgs::ReplyChannelRange,
 -      }
 +      },
 +      /// Sends a timestamp filter for inbound gossip. This should be sent on each new connection to
 +      /// enable receiving gossip messages from the peer.
 +      SendGossipTimestampFilter {
 +              /// The node_id of this message recipient
 +              node_id: PublicKey,
 +              /// The gossip_timestamp_filter which should be sent.
 +              msg: msgs::GossipTimestampFilter,
 +      },
  }
  
  /// A trait indicating an object may generate message send events